| author | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2020-01-23 17:21:03 +0100 |
|---|---|---|
| committer | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2020-01-23 16:25:15 +0000 |
| commit | c551f43206405019121bd2b2c93714319a0a3300 (patch) | |
| tree | 1f48c30631c421fd4bbb3c36da20183c8a2ed7d7 /chromium/v8/src | |
| parent | 7961cea6d1041e3e454dae6a1da660b453efd238 (diff) | |
| download | qtwebengine-chromium-c551f43206405019121bd2b2c93714319a0a3300.tar.gz | |
BASELINE: Update Chromium to 79.0.3945.139
Change-Id: I336b7182fab9bca80b709682489c07db112eaca5
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src')
808 files changed, 31462 insertions, 22062 deletions
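
The api/api.cc hunks below introduce the embedder-facing v8::BackingStore surface (ArrayBuffer::GetBackingStore(), BackingStore::Data()/ByteLength(), and ArrayBuffer::New overloads taking a std::shared_ptr<BackingStore>) alongside the older Externalize()/GetContents() path. As a rough orientation sketch — not part of the diff — this is how an embedder might use the new API; the isolate setup is assumed to exist elsewhere:

```cpp
#include <cstring>
#include <memory>
#include "v8.h"  // V8 public API header as shipped with this Chromium drop

// Sketch only: `isolate` is assumed to be a live, entered v8::Isolate.
void UseBackingStore(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);

  // Allocate a 1 KiB ArrayBuffer whose memory is owned by V8.
  v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);

  // New in this update: take shared ownership of the backing memory.
  // The buffer stays valid as long as this shared_ptr is held, independent
  // of the lifetime of the JS ArrayBuffer object.
  std::shared_ptr<v8::BackingStore> store = ab->GetBackingStore();
  std::memset(store->Data(), 0, store->ByteLength());

  // Also added in this diff: wrap the same backing store in another
  // ArrayBuffer without copying or externalizing.
  v8::Local<v8::ArrayBuffer> alias = v8::ArrayBuffer::New(isolate, store);
  (void)alias;
}
```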
diff --git a/chromium/v8/src/api/OWNERS b/chromium/v8/src/api/OWNERS index ef5a56dbfce..f51e2203096 100644 --- a/chromium/v8/src/api/OWNERS +++ b/chromium/v8/src/api/OWNERS @@ -1,5 +1,5 @@ file:../../include/OWNERS -clemensh@chromium.org +clemensb@chromium.org ishell@chromium.org jkummerow@chromium.org leszeks@chromium.org diff --git a/chromium/v8/src/api/api.cc b/chromium/v8/src/api/api.cc index 30eceb62233..0d80f986f1f 100644 --- a/chromium/v8/src/api/api.cc +++ b/chromium/v8/src/api/api.cc @@ -127,6 +127,11 @@ #endif // V8_OS_WIN64 #endif // V8_OS_WIN +#define TRACE_BS(...) \ + do { \ + if (i::FLAG_trace_backing_store) PrintF(__VA_ARGS__); \ + } while (false) + namespace v8 { /* @@ -902,11 +907,6 @@ void V8::SetFlagsFromString(const char* str, size_t length) { i::FlagList::EnforceFlagImplications(); } -void V8::SetFlagsFromString(const char* str, int length) { - CHECK_LE(0, length); - SetFlagsFromString(str, static_cast<size_t>(length)); -} - void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) { i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags); } @@ -2608,7 +2608,7 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript( i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate); i::ScriptStreamingData* data = source->impl(); std::unique_ptr<i::BackgroundCompileTask> task = - base::make_unique<i::BackgroundCompileTask>(data, isolate); + std::make_unique<i::BackgroundCompileTask>(data, isolate); data->task = std::move(task); return new ScriptCompiler::ScriptStreamingTask(data); } @@ -3720,6 +3720,42 @@ void v8::WasmModuleObject::CheckCast(Value* that) { "Could not convert to wasm module object"); } +v8::BackingStore::~BackingStore() { + auto i_this = reinterpret_cast<const i::BackingStore*>(this); + i_this->~BackingStore(); // manually call internal destructor +} + +void* v8::BackingStore::Data() const { + return reinterpret_cast<const i::BackingStore*>(this)->buffer_start(); +} + +size_t v8::BackingStore::ByteLength() const { + return reinterpret_cast<const i::BackingStore*>(this)->byte_length(); +} + +std::shared_ptr<v8::BackingStore> v8::ArrayBuffer::GetBackingStore() { + i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); + std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore(); + if (!backing_store) { + backing_store = + i::BackingStore::EmptyBackingStore(i::SharedFlag::kNotShared); + } + i::GlobalBackingStoreRegistry::Register(backing_store); + std::shared_ptr<i::BackingStoreBase> bs_base = backing_store; + return std::static_pointer_cast<v8::BackingStore>(bs_base); +} + +std::shared_ptr<v8::BackingStore> v8::SharedArrayBuffer::GetBackingStore() { + i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); + std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore(); + if (!backing_store) { + backing_store = i::BackingStore::EmptyBackingStore(i::SharedFlag::kShared); + } + i::GlobalBackingStoreRegistry::Register(backing_store); + std::shared_ptr<i::BackingStoreBase> bs_base = backing_store; + return std::static_pointer_cast<v8::BackingStore>(bs_base); +} + void v8::ArrayBuffer::CheckCast(Value* that) { i::Handle<i::Object> obj = Utils::OpenHandle(that); Utils::ApiCheck( @@ -5284,7 +5320,7 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string, int end = start + length; if ((length == -1) || (length > str->length() - start)) end = str->length(); if (end < 0) return 0; - i::String::WriteToFlat(*str, buffer, start, end); + if (start < end) 
i::String::WriteToFlat(*str, buffer, start, end); if (!(options & String::NO_NULL_TERMINATION) && (length == -1 || end - start < length)) { buffer[end - start] = '\0'; @@ -5681,6 +5717,11 @@ void v8::V8::InitializeExternalStartupData(const char* natives_blob, i::InitializeExternalStartupData(natives_blob, snapshot_blob); } +// static +void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) { + i::InitializeExternalStartupDataFromFile(snapshot_blob); +} + const char* v8::V8::GetVersion() { return i::Version::GetVersion(); } template <typename ObjectType> @@ -7047,21 +7088,7 @@ MemorySpan<const uint8_t> CompiledWasmModule::GetWireBytesRef() { WasmModuleObject::TransferrableModule WasmModuleObject::GetTransferrableModule() { - if (i::FLAG_wasm_shared_code) { - i::Handle<i::WasmModuleObject> obj = - i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this)); - return TransferrableModule(obj->shared_native_module()); - } else { - CompiledWasmModule compiled_module = GetCompiledModule(); - OwnedBuffer serialized_module = compiled_module.Serialize(); - MemorySpan<const uint8_t> wire_bytes_ref = - compiled_module.GetWireBytesRef(); - size_t wire_size = wire_bytes_ref.size(); - std::unique_ptr<uint8_t[]> wire_bytes_copy(new uint8_t[wire_size]); - memcpy(wire_bytes_copy.get(), wire_bytes_ref.data(), wire_size); - return TransferrableModule(std::move(serialized_module), - {std::move(wire_bytes_copy), wire_size}); - } + return GetCompiledModule(); } CompiledWasmModule WasmModuleObject::GetCompiledModule() { @@ -7073,17 +7100,17 @@ CompiledWasmModule WasmModuleObject::GetCompiledModule() { MaybeLocal<WasmModuleObject> WasmModuleObject::FromTransferrableModule( Isolate* isolate, const WasmModuleObject::TransferrableModule& transferrable_module) { - if (i::FLAG_wasm_shared_code) { - i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); - i::Handle<i::WasmModuleObject> module_object = - i_isolate->wasm_engine()->ImportNativeModule( - i_isolate, transferrable_module.shared_module_); - return Local<WasmModuleObject>::Cast( - Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object))); - } else { - return Deserialize(isolate, AsReference(transferrable_module.serialized_), - AsReference(transferrable_module.wire_bytes_)); - } + return FromCompiledModule(isolate, transferrable_module); +} + +MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule( + Isolate* isolate, const CompiledWasmModule& compiled_module) { + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + i::Handle<i::WasmModuleObject> module_object = + i_isolate->wasm_engine()->ImportNativeModule( + i_isolate, Utils::Open(compiled_module)); + return Local<WasmModuleObject>::Cast( + Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object))); } MaybeLocal<WasmModuleObject> WasmModuleObject::Deserialize( @@ -7196,20 +7223,78 @@ bool v8::ArrayBuffer::IsDetachable() const { return Utils::OpenHandle(this)->is_detachable(); } -v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() { - i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); - i::Isolate* isolate = self->GetIsolate(); - Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", - "ArrayBuffer already externalized"); - self->set_is_external(true); +namespace { +// The backing store deleter just deletes the indirection, which downrefs +// the shared pointer. It will get collected normally. 
+void BackingStoreDeleter(void* buffer, size_t length, void* info) { + std::shared_ptr<i::BackingStore>* bs_indirection = + reinterpret_cast<std::shared_ptr<i::BackingStore>*>(info); + if (bs_indirection) { + i::BackingStore* backing_store = bs_indirection->get(); + TRACE_BS("API:delete bs=%p mem=%p (length=%zu)\n", backing_store, + backing_store->buffer_start(), backing_store->byte_length()); + USE(backing_store); + } + delete bs_indirection; +} - const v8::ArrayBuffer::Contents contents = GetContents(); - isolate->heap()->UnregisterArrayBuffer(*self); +void* MakeDeleterData(std::shared_ptr<i::BackingStore> backing_store) { + if (!backing_store) return nullptr; + TRACE_BS("API:extern bs=%p mem=%p (length=%zu)\n", backing_store.get(), + backing_store->buffer_start(), backing_store->byte_length()); + return new std::shared_ptr<i::BackingStore>(backing_store); +} - // A regular copy is good enough. No move semantics needed. - return contents; +std::shared_ptr<i::BackingStore> LookupOrCreateBackingStore( + i::Isolate* i_isolate, void* data, size_t byte_length, i::SharedFlag shared, + ArrayBufferCreationMode mode) { + // "internalized" means that the storage was allocated by the + // ArrayBufferAllocator and thus should be freed upon destruction. + bool free_on_destruct = mode == ArrayBufferCreationMode::kInternalized; + + // Try to lookup a previously-registered backing store in the global + // registry. If found, use that instead of wrapping an embedder allocation. + std::shared_ptr<i::BackingStore> backing_store = + i::GlobalBackingStoreRegistry::Lookup(data, byte_length); + + if (backing_store) { + // Check invariants for a previously-found backing store. + + // 1. We cannot allow an embedder to first allocate a backing store that + // should not be freed upon destruct, and then allocate an alias that should + // destruct it. The other order is fine. + bool changing_destruct_mode = + free_on_destruct && !backing_store->free_on_destruct(); + Utils::ApiCheck( + !changing_destruct_mode, "v8_[Shared]ArrayBuffer_New", + "previous backing store found that should not be freed on destruct"); + + // 2. We cannot allow embedders to use the same backing store for both + // SharedArrayBuffers and regular ArrayBuffers. + bool changing_shared_flag = + (shared == i::SharedFlag::kShared) != backing_store->is_shared(); + Utils::ApiCheck( + !changing_shared_flag, "v8_[Shared]ArrayBuffer_New", + "previous backing store found that does not match shared flag"); + } else { + // No previous backing store found. + backing_store = i::BackingStore::WrapAllocation( + i_isolate, data, byte_length, shared, free_on_destruct); + + // The embedder already has a direct pointer to the buffer start, so + // globally register the backing store in case they come back with the + // same buffer start and the backing store is marked as free_on_destruct. 
+ i::GlobalBackingStoreRegistry::Register(backing_store); + } + return backing_store; } +std::shared_ptr<i::BackingStore> ToInternal( + std::shared_ptr<i::BackingStoreBase> backing_store) { + return std::static_pointer_cast<i::BackingStore>(backing_store); +} +} // namespace + v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length, void* allocation_base, size_t allocation_length, @@ -7226,29 +7311,70 @@ v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length, DCHECK_LE(byte_length_, allocation_length_); } -void WasmMemoryDeleter(void* buffer, size_t lenght, void* info) { - internal::wasm::WasmEngine* engine = - reinterpret_cast<internal::wasm::WasmEngine*>(info); - CHECK(engine->memory_tracker()->FreeWasmMemory(nullptr, buffer)); +v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() { + return GetContents(true); } -void ArrayBufferDeleter(void* buffer, size_t length, void* info) { - v8::ArrayBuffer::Allocator* allocator = - reinterpret_cast<v8::ArrayBuffer::Allocator*>(info); - allocator->Free(buffer, length); +void v8::ArrayBuffer::Externalize( + const std::shared_ptr<BackingStore>& backing_store) { + i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); + Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", + "ArrayBuffer already externalized"); + self->set_is_external(true); + DCHECK_EQ(self->backing_store(), backing_store->Data()); } v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() { + return GetContents(false); +} + +v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents(bool externalize) { + // TODO(titzer): reduce duplication between shared/unshared GetContents() + using BufferType = v8::ArrayBuffer; + i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); - Contents contents( - self->backing_store(), self->byte_length(), self->allocation_base(), - self->allocation_length(), - self->is_wasm_memory() ? Allocator::AllocationMode::kReservation - : Allocator::AllocationMode::kNormal, - self->is_wasm_memory() ? WasmMemoryDeleter : ArrayBufferDeleter, - self->is_wasm_memory() - ? static_cast<void*>(self->GetIsolate()->wasm_engine()) - : static_cast<void*>(self->GetIsolate()->array_buffer_allocator())); + + std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore(); + + void* deleter_data = nullptr; + if (externalize) { + Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", + "ArrayBuffer already externalized"); + self->set_is_external(true); + // When externalizing, upref the shared pointer to the backing store + // and store that as the deleter data. When the embedder calls the deleter + // callback, we will delete the additional (on-heap) shared_ptr. + deleter_data = MakeDeleterData(backing_store); + } + + if (!backing_store) { + // If the array buffer has zero length or was detached, return empty + // contents. + DCHECK_EQ(0, self->byte_length()); + BufferType::Contents contents( + nullptr, 0, nullptr, 0, + v8::ArrayBuffer::Allocator::AllocationMode::kNormal, + BackingStoreDeleter, deleter_data); + return contents; + } + + // Backing stores that given to the embedder might be passed back through + // the API using only the start of the buffer. We need to find such + // backing stores using global registration until the API is changed. + i::GlobalBackingStoreRegistry::Register(backing_store); + + auto allocation_mode = + backing_store->is_wasm_memory() + ? 
v8::ArrayBuffer::Allocator::AllocationMode::kReservation + : v8::ArrayBuffer::Allocator::AllocationMode::kNormal; + + BufferType::Contents contents(backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + allocation_mode, // -- + BackingStoreDeleter, // -- + deleter_data); return contents; } @@ -7273,30 +7399,56 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) { i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); LOG_API(i_isolate, ArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle<i::JSArrayBuffer> obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared); - // TODO(jbroman): It may be useful in the future to provide a MaybeLocal - // version that throws an exception or otherwise does not crash. - if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) { + i::MaybeHandle<i::JSArrayBuffer> result = + i_isolate->factory()->NewJSArrayBufferAndBackingStore( + byte_length, i::InitializedFlag::kZeroInitialized); + + i::Handle<i::JSArrayBuffer> array_buffer; + if (!result.ToHandle(&array_buffer)) { + // TODO(jbroman): It may be useful in the future to provide a MaybeLocal + // version that throws an exception or otherwise does not crash. i::FatalProcessOutOfMemory(i_isolate, "v8::ArrayBuffer::New"); } - return Utils::ToLocal(obj); + + return Utils::ToLocal(array_buffer); } Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data, size_t byte_length, ArrayBufferCreationMode mode) { // Embedders must guarantee that the external backing store is valid. - CHECK(byte_length == 0 || data != nullptr); + CHECK_IMPLIES(byte_length != 0, data != nullptr); CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength); i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); LOG_API(i_isolate, ArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + + std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore( + i_isolate, data, byte_length, i::SharedFlag::kNotShared, mode); + + i::Handle<i::JSArrayBuffer> obj = + i_isolate->factory()->NewJSArrayBuffer(std::move(backing_store)); + if (mode == ArrayBufferCreationMode::kExternalized) { + obj->set_is_external(true); + } + return Utils::ToLocal(obj); +} + +Local<ArrayBuffer> v8::ArrayBuffer::New( + Isolate* isolate, std::shared_ptr<BackingStore> backing_store) { + CHECK_IMPLIES(backing_store->ByteLength() != 0, + backing_store->Data() != nullptr); + CHECK_LE(backing_store->ByteLength(), i::JSArrayBuffer::kMaxByteLength); + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + LOG_API(i_isolate, ArrayBuffer, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + std::shared_ptr<i::BackingStore> i_backing_store( + ToInternal(std::move(backing_store))); + Utils::ApiCheck( + !i_backing_store->is_shared(), "v8_ArrayBuffer_New", + "Cannot construct ArrayBuffer with a BackingStore of SharedArrayBuffer"); i::Handle<i::JSArrayBuffer> obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared); - i::JSArrayBuffer::Setup(obj, i_isolate, - mode == ArrayBufferCreationMode::kExternalized, data, - byte_length); + i_isolate->factory()->NewJSArrayBuffer(std::move(i_backing_store)); return Utils::ToLocal(obj); } @@ -7339,9 +7491,9 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) { bool v8::ArrayBufferView::HasBuffer() const { i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this); - 
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()), - self->GetIsolate()); - return buffer->backing_store() != nullptr; + if (!self->IsJSTypedArray()) return true; + auto typed_array = i::Handle<i::JSTypedArray>::cast(self); + return !typed_array->is_on_heap(); } size_t v8::ArrayBufferView::ByteOffset() { @@ -7437,13 +7589,16 @@ i::Handle<i::JSArrayBuffer> SetupSharedArrayBuffer( i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); LOG_API(i_isolate, SharedArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + + std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore( + i_isolate, data, byte_length, i::SharedFlag::kShared, mode); + i::Handle<i::JSArrayBuffer> obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared); - bool is_wasm_memory = - i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data); - i::JSArrayBuffer::Setup(obj, i_isolate, - mode == ArrayBufferCreationMode::kExternalized, data, - byte_length, i::SharedFlag::kShared, is_wasm_memory); + i_isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store)); + + if (mode == ArrayBufferCreationMode::kExternalized) { + obj->set_is_external(true); + } return obj; } @@ -7453,20 +7608,6 @@ bool v8::SharedArrayBuffer::IsExternal() const { return Utils::OpenHandle(this)->is_external(); } -v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() { - i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); - i::Isolate* isolate = self->GetIsolate(); - Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", - "SharedArrayBuffer already externalized"); - self->set_is_external(true); - - const v8::SharedArrayBuffer::Contents contents = GetContents(); - isolate->heap()->UnregisterArrayBuffer(*self); - - // A regular copy is good enough. No move semantics needed. - return contents; -} - v8::SharedArrayBuffer::Contents::Contents( void* data, size_t byte_length, void* allocation_base, size_t allocation_length, Allocator::AllocationMode allocation_mode, @@ -7482,20 +7623,72 @@ v8::SharedArrayBuffer::Contents::Contents( DCHECK_LE(byte_length_, allocation_length_); } +v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() { + return GetContents(true); +} + +void v8::SharedArrayBuffer::Externalize( + const std::shared_ptr<BackingStore>& backing_store) { + i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); + Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", + "SharedArrayBuffer already externalized"); + self->set_is_external(true); + + DCHECK_EQ(self->backing_store(), backing_store->Data()); +} + v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() { + return GetContents(false); +} + +v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents( + bool externalize) { + // TODO(titzer): reduce duplication between shared/unshared GetContents() + using BufferType = v8::SharedArrayBuffer; + i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this); - Contents contents( - self->backing_store(), self->byte_length(), self->allocation_base(), - self->allocation_length(), - self->is_wasm_memory() - ? ArrayBuffer::Allocator::AllocationMode::kReservation - : ArrayBuffer::Allocator::AllocationMode::kNormal, - self->is_wasm_memory() - ? reinterpret_cast<Contents::DeleterCallback>(WasmMemoryDeleter) - : reinterpret_cast<Contents::DeleterCallback>(ArrayBufferDeleter), - self->is_wasm_memory() - ? 
static_cast<void*>(self->GetIsolate()->wasm_engine()) - : static_cast<void*>(self->GetIsolate()->array_buffer_allocator())); + + std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore(); + + void* deleter_data = nullptr; + if (externalize) { + Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", + "SharedArrayBuffer already externalized"); + self->set_is_external(true); + // When externalizing, upref the shared pointer to the backing store + // and store that as the deleter data. When the embedder calls the deleter + // callback, we will delete the additional (on-heap) shared_ptr. + deleter_data = MakeDeleterData(backing_store); + } + + if (!backing_store) { + // If the array buffer has zero length or was detached, return empty + // contents. + DCHECK_EQ(0, self->byte_length()); + BufferType::Contents contents( + nullptr, 0, nullptr, 0, + v8::ArrayBuffer::Allocator::AllocationMode::kNormal, + BackingStoreDeleter, deleter_data); + return contents; + } + + // Backing stores that given to the embedder might be passed back through + // the API using only the start of the buffer. We need to find such + // backing stores using global registration until the API is changed. + i::GlobalBackingStoreRegistry::Register(backing_store); + + auto allocation_mode = + backing_store->is_wasm_memory() + ? v8::ArrayBuffer::Allocator::AllocationMode::kReservation + : v8::ArrayBuffer::Allocator::AllocationMode::kNormal; + + BufferType::Contents contents(backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + allocation_mode, // -- + BackingStoreDeleter, // -- + deleter_data); return contents; } @@ -7510,14 +7703,19 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate, i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); LOG_API(i_isolate, SharedArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle<i::JSArrayBuffer> obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared); - // TODO(jbroman): It may be useful in the future to provide a MaybeLocal - // version that throws an exception or otherwise does not crash. - if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true, - i::SharedFlag::kShared)) { + + std::unique_ptr<i::BackingStore> backing_store = + i::BackingStore::Allocate(i_isolate, byte_length, i::SharedFlag::kShared, + i::InitializedFlag::kZeroInitialized); + + if (!backing_store) { + // TODO(jbroman): It may be useful in the future to provide a MaybeLocal + // version that throws an exception or otherwise does not crash. 
i::FatalProcessOutOfMemory(i_isolate, "v8::SharedArrayBuffer::New"); } + + i::Handle<i::JSArrayBuffer> obj = + i_isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store)); return Utils::ToLocalShared(obj); } @@ -7530,6 +7728,24 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New( } Local<SharedArrayBuffer> v8::SharedArrayBuffer::New( + Isolate* isolate, std::shared_ptr<BackingStore> backing_store) { + CHECK(i::FLAG_harmony_sharedarraybuffer); + CHECK_IMPLIES(backing_store->ByteLength() != 0, + backing_store->Data() != nullptr); + CHECK_LE(backing_store->ByteLength(), i::JSArrayBuffer::kMaxByteLength); + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + LOG_API(i_isolate, SharedArrayBuffer, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + std::shared_ptr<i::BackingStore> i_backing_store(ToInternal(backing_store)); + Utils::ApiCheck( + i_backing_store->is_shared(), "v8_SharedArrayBuffer_New", + "Cannot construct SharedArrayBuffer with BackingStore of ArrayBuffer"); + i::Handle<i::JSArrayBuffer> obj = + i_isolate->factory()->NewJSSharedArrayBuffer(std::move(i_backing_store)); + return Utils::ToLocalShared(obj); +} + +Local<SharedArrayBuffer> v8::SharedArrayBuffer::New( Isolate* isolate, const SharedArrayBuffer::Contents& contents, ArrayBufferCreationMode mode) { i::Handle<i::JSArrayBuffer> buffer = SetupSharedArrayBuffer( @@ -8212,6 +8428,15 @@ bool Isolate::GetHeapCodeAndMetadataStatistics( return true; } +v8::MaybeLocal<v8::Promise> Isolate::MeasureMemory( + v8::Local<v8::Context> context, MeasureMemoryMode mode) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + i::Handle<i::NativeContext> native_context = + handle(Utils::OpenHandle(*context)->native_context(), isolate); + return v8::Utils::PromiseToLocal( + isolate->heap()->MeasureMemory(native_context, mode)); +} + void Isolate::GetStackSample(const RegisterState& state, void** frames, size_t frames_limit, SampleInfo* sample_info) { RegisterState regs = state; @@ -9039,9 +9264,9 @@ bool debug::Script::GetPossibleBreakpoints( i::Handle<i::Script> script = Utils::OpenHandle(this); if (script->type() == i::Script::TYPE_WASM && this->SourceMappingURL().IsEmpty()) { - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - return module_object.GetPossibleBreakpoints(start, end, locations); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + return i::WasmModuleObject::GetPossibleBreakpoints(native_module, start, + end, locations); } i::Script::InitLineEnds(script); @@ -9090,8 +9315,9 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const { i::Handle<i::Script> script = Utils::OpenHandle(this); if (script->type() == i::Script::TYPE_WASM) { if (this->SourceMappingURL().IsEmpty()) { - return i::WasmModuleObject::cast(script->wasm_module_object()) - .GetFunctionOffset(location.GetLineNumber()) + + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); + return i::wasm::GetWasmFunctionOffset(module, location.GetLineNumber()) + location.GetColumnNumber(); } DCHECK_EQ(0, location.GetLineNumber()); @@ -9179,9 +9405,8 @@ int debug::WasmScript::NumFunctions() const { i::DisallowHeapAllocation no_gc; i::Handle<i::Script> script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = 
module_object.module(); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_GE(i::kMaxInt, module->functions.size()); return static_cast<int>(module->functions.size()); } @@ -9190,21 +9415,26 @@ int debug::WasmScript::NumImportedFunctions() const { i::DisallowHeapAllocation no_gc; i::Handle<i::Script> script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = module_object.module(); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_GE(i::kMaxInt, module->num_imported_functions); return static_cast<int>(module->num_imported_functions); } +MemorySpan<const uint8_t> debug::WasmScript::Bytecode() const { + i::Handle<i::Script> script = Utils::OpenHandle(this); + i::Vector<const uint8_t> wire_bytes = + script->wasm_native_module()->wire_bytes(); + return {wire_bytes.begin(), wire_bytes.size()}; +} + std::pair<int, int> debug::WasmScript::GetFunctionRange( int function_index) const { i::DisallowHeapAllocation no_gc; i::Handle<i::Script> script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = module_object.module(); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_LE(0, function_index); DCHECK_GT(module->functions.size(), function_index); const i::wasm::WasmFunction& func = module->functions[function_index]; @@ -9218,14 +9448,12 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) { i::DisallowHeapAllocation no_gc; i::Handle<i::Script> script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = module_object.module(); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_LE(0, function_index); DCHECK_GT(module->functions.size(), function_index); const i::wasm::WasmFunction& func = module->functions[function_index]; - i::wasm::ModuleWireBytes wire_bytes( - module_object.native_module()->wire_bytes()); + i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes()); i::Vector<const i::byte> function_bytes = wire_bytes.GetFunctionBytes(&func); // TODO(herhut): Maybe also take module, name and signature into account. 
return i::StringHasher::HashSequentialString(function_bytes.begin(), @@ -9237,9 +9465,10 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction( i::DisallowHeapAllocation no_gc; i::Handle<i::Script> script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - return module_object.DisassembleFunction(function_index); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); + i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes()); + return DisassembleWasmFunction(module, wire_bytes, function_index); } debug::Location::Location(int line_number, int column_number) @@ -9415,7 +9644,7 @@ debug::ConsoleCallArguments::ConsoleCallArguments( } debug::ConsoleCallArguments::ConsoleCallArguments( - internal::BuiltinArguments& args) + const internal::BuiltinArguments& args) : v8::FunctionCallbackInfo<v8::Value>( nullptr, // Drop the first argument (receiver, i.e. the "console" object). @@ -9478,14 +9707,14 @@ v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast( MaybeLocal<v8::Value> debug::EvaluateGlobal(v8::Isolate* isolate, v8::Local<v8::String> source, - bool throw_on_side_effect) { + EvaluateGlobalMode mode) { i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value); Local<Value> result; - has_pending_exception = !ToLocal<Value>( - i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source), - throw_on_side_effect), - &result); + has_pending_exception = + !ToLocal<Value>(i::DebugEvaluate::Global( + internal_isolate, Utils::OpenHandle(*source), mode), + &result); RETURN_ON_FAILED_EXECUTION(Value); RETURN_ESCAPED(result); } @@ -9910,10 +10139,6 @@ void CpuProfiler::SetUsePreciseSampling(bool use_precise_sampling) { use_precise_sampling); } -void CpuProfiler::CollectSample() { - reinterpret_cast<i::CpuProfiler*>(this)->CollectSample(); -} - void CpuProfiler::StartProfiling(Local<String> title, CpuProfilingOptions options) { reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling( @@ -9941,12 +10166,6 @@ CpuProfile* CpuProfiler::StopProfiling(Local<String> title) { *Utils::OpenHandle(*title))); } -void CpuProfiler::SetIdle(bool is_idle) { - i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(this); - i::Isolate* isolate = profiler->isolate(); - isolate->SetIdle(is_idle); -} - void CpuProfiler::UseDetailedSourcePositionsForProfiling(Isolate* isolate) { reinterpret_cast<i::Isolate*>(isolate) ->set_detailed_source_positions_for_profiling(true); @@ -9986,6 +10205,10 @@ const char* CodeEvent::GetComment() { return reinterpret_cast<i::CodeEvent*>(this)->comment; } +uintptr_t CodeEvent::GetPreviousCodeStartAddress() { + return reinterpret_cast<i::CodeEvent*>(this)->previous_code_start_address; +} + const char* CodeEvent::GetCodeEventTypeName(CodeEventType code_event_type) { switch (code_event_type) { case kUnknownType: @@ -10280,17 +10503,6 @@ void Testing::DeoptimizeAll(Isolate* isolate) { i::Deoptimizer::DeoptimizeAll(i_isolate); } -void EmbedderHeapTracer::TracePrologue(TraceFlags flags) { -#if __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wdeprecated" -#endif - TracePrologue(); -#if __clang__ -#pragma clang diagnostic pop -#endif -} - void EmbedderHeapTracer::TraceEpilogue(TraceSummary* trace_summary) { #if __clang__ #pragma clang diagnostic 
push @@ -10346,11 +10558,21 @@ void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) { } void EmbedderHeapTracer::RegisterEmbedderReference( - const TracedGlobal<v8::Value>& ref) { + const TracedReferenceBase<v8::Data>& ref) { + if (ref.IsEmpty()) return; + + i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap(); + heap->RegisterExternallyReferencedObject( + reinterpret_cast<i::Address*>(ref.val_)); +} + +void EmbedderHeapTracer::RegisterEmbedderReference( + const TracedReferenceBase<v8::Value>& ref) { if (ref.IsEmpty()) return; i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap(); - heap->RegisterExternallyReferencedObject(reinterpret_cast<i::Address*>(*ref)); + heap->RegisterExternallyReferencedObject( + reinterpret_cast<i::Address*>(ref.val_)); } void EmbedderHeapTracer::IterateTracedGlobalHandles( @@ -10360,6 +10582,26 @@ void EmbedderHeapTracer::IterateTracedGlobalHandles( isolate->global_handles()->IterateTracedNodes(visitor); } +bool EmbedderHeapTracer::IsRootForNonTracingGC( + const v8::TracedReference<v8::Value>& handle) { + return true; +} + +bool EmbedderHeapTracer::IsRootForNonTracingGC( + const v8::TracedGlobal<v8::Value>& handle) { + return true; +} + +void EmbedderHeapTracer::ResetHandleInNonTracingGC( + const v8::TracedReference<v8::Value>& handle) { + UNREACHABLE(); +} + +void EmbedderHeapTracer::ResetHandleInNonTracingGC( + const v8::TracedGlobal<v8::Value>& handle) { + UNREACHABLE(); +} + namespace internal { const size_t HandleScopeImplementer::kEnteredContextsOffset = @@ -10450,9 +10692,10 @@ char* HandleScopeImplementer::Iterate(RootVisitor* v, char* storage) { return storage + ArchiveSpacePerThread(); } -DeferredHandles* HandleScopeImplementer::Detach(Address* prev_limit) { - DeferredHandles* deferred = - new DeferredHandles(isolate()->handle_scope_data()->next, isolate()); +std::unique_ptr<DeferredHandles> HandleScopeImplementer::Detach( + Address* prev_limit) { + std::unique_ptr<DeferredHandles> deferred( + new DeferredHandles(isolate()->handle_scope_data()->next, isolate())); while (!blocks_.empty()) { Address* block_start = blocks_.back(); @@ -10561,3 +10804,5 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info, } // namespace internal } // namespace v8 + +#undef TRACE_BS diff --git a/chromium/v8/src/api/api.h b/chromium/v8/src/api/api.h index 21bbb3a1015..a518670c798 100644 --- a/chromium/v8/src/api/api.h +++ b/chromium/v8/src/api/api.h @@ -5,6 +5,8 @@ #ifndef V8_API_API_H_ #define V8_API_API_H_ +#include <memory> + #include "include/v8-testing.h" #include "src/execution/isolate.h" #include "src/heap/factory.h" @@ -274,6 +276,11 @@ class Utils { return CompiledWasmModule{std::move(native_module)}; } + static inline const std::shared_ptr<i::wasm::NativeModule>& Open( + const CompiledWasmModule& compiled_module) { + return compiled_module.native_module_; + } + private: static void ReportApiFailure(const char* location, const char* message); }; @@ -431,7 +438,7 @@ class HandleScopeImplementer { } void BeginDeferredScope(); - DeferredHandles* Detach(Address* prev_limit); + std::unique_ptr<DeferredHandles> Detach(Address* prev_limit); Isolate* isolate_; DetachableVector<Address*> blocks_; diff --git a/chromium/v8/src/asmjs/OWNERS b/chromium/v8/src/asmjs/OWNERS index 08f39f8d6a2..072ba582113 100644 --- a/chromium/v8/src/asmjs/OWNERS +++ b/chromium/v8/src/asmjs/OWNERS @@ -1,5 +1,5 @@ ahaas@chromium.org -clemensh@chromium.org +clemensb@chromium.org mstarzinger@chromium.org titzer@chromium.org diff --git 
a/chromium/v8/src/asmjs/asm-js.cc b/chromium/v8/src/asmjs/asm-js.cc index 7433b6a12cb..22714ac16bd 100644 --- a/chromium/v8/src/asmjs/asm-js.cc +++ b/chromium/v8/src/asmjs/asm-js.cc @@ -322,7 +322,7 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) { std::unique_ptr<UnoptimizedCompilationJob> AsmJs::NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator) { - return base::make_unique<AsmJsCompilationJob>(parse_info, literal, allocator); + return std::make_unique<AsmJsCompilationJob>(parse_info, literal, allocator); } namespace { @@ -387,7 +387,18 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate, ReportInstantiationFailure(script, position, "Requires heap buffer"); return MaybeHandle<Object>(); } - wasm_engine->memory_tracker()->MarkWasmMemoryNotGrowable(memory); + // AsmJs memory must be an ArrayBuffer. + if (memory->is_shared()) { + ReportInstantiationFailure(script, position, + "Invalid heap type: SharedArrayBuffer"); + return MaybeHandle<Object>(); + } + // Mark the buffer as being used as an asm.js memory. This implies two + // things: 1) if the buffer is from a Wasm memory, that memory can no longer + // be grown, since that would detach this buffer, and 2) the buffer cannot + // be postMessage()'d, as that also detaches the buffer. + memory->set_is_asmjs_memory(true); + memory->set_is_detachable(false); size_t size = memory->byte_length(); // Check the asm.js heap size against the valid limits. if (!IsValidAsmjsMemorySize(size)) { diff --git a/chromium/v8/src/asmjs/asm-js.h b/chromium/v8/src/asmjs/asm-js.h index 3e714cba7a6..80a75d03729 100644 --- a/chromium/v8/src/asmjs/asm-js.h +++ b/chromium/v8/src/asmjs/asm-js.h @@ -7,6 +7,8 @@ // Clients of this interface shouldn't depend on lots of asmjs internals. // Do not include anything from src/asmjs here! 
+#include <memory> + #include "src/common/globals.h" namespace v8 { diff --git a/chromium/v8/src/asmjs/asm-parser.cc b/chromium/v8/src/asmjs/asm-parser.cc index 6ac39dc89cc..33872399262 100644 --- a/chromium/v8/src/asmjs/asm-parser.cc +++ b/chromium/v8/src/asmjs/asm-parser.cc @@ -387,8 +387,8 @@ void AsmJsParser::ValidateModule() { uint32_t import_index = module_builder_->AddGlobalImport( global_import.import_name, global_import.value_type, false /* mutability */); - start->EmitWithI32V(kExprGetGlobal, import_index); - start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info)); + start->EmitWithI32V(kExprGlobalGet, import_index); + start->EmitWithI32V(kExprGlobalSet, VarIndex(global_import.var_info)); } start->Emit(kExprEnd); FunctionSig::Builder b(zone(), 0, 0); @@ -952,8 +952,8 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count, } else { FAIL("Bad local variable definition"); } - current_function_builder_->EmitWithI32V(kExprGetGlobal, - VarIndex(sinfo)); + current_function_builder_->EmitWithI32V(kExprGlobalGet, + VarIndex(sinfo)); current_function_builder_->EmitSetLocal(info->index); } else if (sinfo->type->IsA(stdlib_fround_)) { EXPECT_TOKEN('('); @@ -1447,7 +1447,7 @@ AsmType* AsmJsParser::Identifier() { if (info->kind != VarKind::kGlobal) { FAILn("Undefined global variable"); } - current_function_builder_->EmitWithI32V(kExprGetGlobal, VarIndex(info)); + current_function_builder_->EmitWithI32V(kExprGlobalGet, VarIndex(info)); return info->type; } UNREACHABLE(); @@ -1558,8 +1558,8 @@ AsmType* AsmJsParser::AssignmentExpression() { if (info->kind == VarKind::kLocal) { current_function_builder_->EmitTeeLocal(info->index); } else if (info->kind == VarKind::kGlobal) { - current_function_builder_->EmitWithU32V(kExprSetGlobal, VarIndex(info)); - current_function_builder_->EmitWithU32V(kExprGetGlobal, VarIndex(info)); + current_function_builder_->EmitWithU32V(kExprGlobalSet, VarIndex(info)); + current_function_builder_->EmitWithU32V(kExprGlobalGet, VarIndex(info)); } else { UNREACHABLE(); } @@ -2489,7 +2489,7 @@ void AsmJsParser::ValidateFloatCoercion() { // because imported functions are not allowed to have float return type. call_coercion_position_ = scanner_.Position(); AsmType* ret; - RECURSE(ret = ValidateExpression()); + RECURSE(ret = AssignmentExpression()); if (ret->IsA(AsmType::Floatish())) { // Do nothing, as already a float. } else if (ret->IsA(AsmType::DoubleQ())) { diff --git a/chromium/v8/src/ast/ast-function-literal-id-reindexer.cc b/chromium/v8/src/ast/ast-function-literal-id-reindexer.cc index 95bd94d8d4d..b583b5e4214 100644 --- a/chromium/v8/src/ast/ast-function-literal-id-reindexer.cc +++ b/chromium/v8/src/ast/ast-function-literal-id-reindexer.cc @@ -46,15 +46,28 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) { if (expr->instance_members_initializer_function() != nullptr) { Visit(expr->instance_members_initializer_function()); } - ZonePtrList<ClassLiteral::Property>* props = expr->properties(); + ZonePtrList<ClassLiteral::Property>* private_members = + expr->private_members(); + for (int i = 0; i < private_members->length(); ++i) { + ClassLiteralProperty* prop = private_members->at(i); + + // Private fields have their key and value present in + // instance_members_initializer_function, so they will + // already have been visited. 
+ if (prop->value()->IsFunctionLiteral()) { + Visit(prop->value()); + } else { + CheckVisited(prop->value()); + } + } + ZonePtrList<ClassLiteral::Property>* props = expr->public_members(); for (int i = 0; i < props->length(); ++i) { ClassLiteralProperty* prop = props->at(i); - // Private fields and public fields with computed names have both their key + // Public fields with computed names have their key // and value present in instance_members_initializer_function, so they will // already have been visited. - if ((prop->is_computed_name() || prop->is_private()) && - !prop->value()->IsFunctionLiteral()) { + if (prop->is_computed_name() && !prop->value()->IsFunctionLiteral()) { if (!prop->key()->IsLiteral()) { CheckVisited(prop->key()); } diff --git a/chromium/v8/src/ast/ast-traversal-visitor.h b/chromium/v8/src/ast/ast-traversal-visitor.h index 2796e59a8db..a52c5f946e0 100644 --- a/chromium/v8/src/ast/ast-traversal-visitor.h +++ b/chromium/v8/src/ast/ast-traversal-visitor.h @@ -490,7 +490,13 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) { if (expr->instance_members_initializer_function() != nullptr) { RECURSE_EXPRESSION(Visit(expr->instance_members_initializer_function())); } - ZonePtrList<ClassLiteral::Property>* props = expr->properties(); + ZonePtrList<ClassLiteral::Property>* private_members = + expr->private_members(); + for (int i = 0; i < private_members->length(); ++i) { + ClassLiteralProperty* prop = private_members->at(i); + RECURSE_EXPRESSION(Visit(prop->value())); + } + ZonePtrList<ClassLiteral::Property>* props = expr->public_members(); for (int i = 0; i < props->length(); ++i) { ClassLiteralProperty* prop = props->at(i); if (!prop->key()->IsLiteral()) { diff --git a/chromium/v8/src/ast/ast.cc b/chromium/v8/src/ast/ast.cc index 4b6c4805ded..130d34dffa5 100644 --- a/chromium/v8/src/ast/ast.cc +++ b/chromium/v8/src/ast/ast.cc @@ -293,6 +293,10 @@ bool FunctionLiteral::requires_brand_initialization() const { return outer->AsClassScope()->brand() != nullptr; } +bool FunctionLiteral::private_name_lookup_skips_outer_class() const { + return scope()->private_name_lookup_skips_outer_class(); +} + ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value, Kind kind, bool is_computed_name) : LiteralProperty(key, value, is_computed_name), @@ -886,7 +890,7 @@ Handle<Object> Literal::BuildValue(Isolate* isolate) const { case kSmi: return handle(Smi::FromInt(smi_), isolate); case kHeapNumber: - return isolate->factory()->NewNumber(number_, AllocationType::kOld); + return isolate->factory()->NewNumber<AllocationType::kOld>(number_); case kString: return string_->string(); case kSymbol: diff --git a/chromium/v8/src/ast/ast.h b/chromium/v8/src/ast/ast.h index ced9f775dd5..d706dbc37fb 100644 --- a/chromium/v8/src/ast/ast.h +++ b/chromium/v8/src/ast/ast.h @@ -724,11 +724,14 @@ class BreakStatement final : public JumpStatement { class ReturnStatement final : public JumpStatement { public: - enum Type { kNormal, kAsyncReturn }; + enum Type { kNormal, kAsyncReturn, kSyntheticAsyncReturn }; Expression* expression() const { return expression_; } Type type() const { return TypeField::decode(bit_field_); } - bool is_async_return() const { return type() == kAsyncReturn; } + bool is_async_return() const { return type() != kNormal; } + bool is_synthetic_async_return() const { + return type() == kSyntheticAsyncReturn; + } int end_position() const { return end_position_; } @@ -745,7 +748,7 @@ class ReturnStatement final : public JumpStatement { 
Expression* expression_; int end_position_; - using TypeField = JumpStatement::NextBitField<Type, 1>; + using TypeField = JumpStatement::NextBitField<Type, 2>; }; @@ -917,6 +920,10 @@ class TryCatchStatement final : public TryStatement { outer_catch_prediction != HandlerTable::UNCAUGHT; } + bool is_try_catch_for_async() { + return catch_prediction_ == HandlerTable::ASYNC_AWAIT; + } + private: friend class AstNodeFactory; @@ -2343,6 +2350,8 @@ class FunctionLiteral final : public Expression { bool requires_brand_initialization() const; + bool private_name_lookup_skips_outer_class() const; + ProducedPreparseData* produced_preparse_data() const { return produced_preparse_data_; } @@ -2481,10 +2490,10 @@ class ClassLiteral final : public Expression { using Property = ClassLiteralProperty; ClassScope* scope() const { return scope_; } - Variable* class_variable() const { return class_variable_; } Expression* extends() const { return extends_; } FunctionLiteral* constructor() const { return constructor_; } - ZonePtrList<Property>* properties() const { return properties_; } + ZonePtrList<Property>* public_members() const { return public_members_; } + ZonePtrList<Property>* private_members() const { return private_members_; } int start_position() const { return position(); } int end_position() const { return end_position_; } bool has_name_static_property() const { @@ -2497,6 +2506,9 @@ class ClassLiteral final : public Expression { bool is_anonymous_expression() const { return IsAnonymousExpression::decode(bit_field_); } + bool has_private_methods() const { + return HasPrivateMethods::decode(bit_field_); + } bool IsAnonymousFunctionDefinition() const { return is_anonymous_expression(); } @@ -2512,39 +2524,43 @@ class ClassLiteral final : public Expression { private: friend class AstNodeFactory; - ClassLiteral(ClassScope* scope, Variable* class_variable, Expression* extends, - FunctionLiteral* constructor, ZonePtrList<Property>* properties, + ClassLiteral(ClassScope* scope, Expression* extends, + FunctionLiteral* constructor, + ZonePtrList<Property>* public_members, + ZonePtrList<Property>* private_members, FunctionLiteral* static_fields_initializer, FunctionLiteral* instance_members_initializer_function, int start_position, int end_position, bool has_name_static_property, bool has_static_computed_names, - bool is_anonymous) + bool is_anonymous, bool has_private_methods) : Expression(start_position, kClassLiteral), end_position_(end_position), scope_(scope), - class_variable_(class_variable), extends_(extends), constructor_(constructor), - properties_(properties), + public_members_(public_members), + private_members_(private_members), static_fields_initializer_(static_fields_initializer), instance_members_initializer_function_( instance_members_initializer_function) { bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) | HasStaticComputedNames::encode(has_static_computed_names) | - IsAnonymousExpression::encode(is_anonymous); + IsAnonymousExpression::encode(is_anonymous) | + HasPrivateMethods::encode(has_private_methods); } int end_position_; ClassScope* scope_; - Variable* class_variable_; Expression* extends_; FunctionLiteral* constructor_; - ZonePtrList<Property>* properties_; + ZonePtrList<Property>* public_members_; + ZonePtrList<Property>* private_members_; FunctionLiteral* static_fields_initializer_; FunctionLiteral* instance_members_initializer_function_; using HasNameStaticProperty = Expression::NextBitField<bool, 1>; using HasStaticComputedNames = 
HasNameStaticProperty::Next<bool, 1>; using IsAnonymousExpression = HasStaticComputedNames::Next<bool, 1>; + using HasPrivateMethods = IsAnonymousExpression::Next<bool, 1>; }; @@ -2885,6 +2901,12 @@ class AstNodeFactory final { expression, ReturnStatement::kAsyncReturn, pos, end_position); } + ReturnStatement* NewSyntheticAsyncReturnStatement( + Expression* expression, int pos, int end_position = kNoSourcePosition) { + return new (zone_) ReturnStatement( + expression, ReturnStatement::kSyntheticAsyncReturn, pos, end_position); + } + WithStatement* NewWithStatement(Scope* scope, Expression* expression, Statement* statement, @@ -3244,18 +3266,19 @@ class AstNodeFactory final { } ClassLiteral* NewClassLiteral( - ClassScope* scope, Variable* variable, Expression* extends, - FunctionLiteral* constructor, - ZonePtrList<ClassLiteral::Property>* properties, + ClassScope* scope, Expression* extends, FunctionLiteral* constructor, + ZonePtrList<ClassLiteral::Property>* public_members, + ZonePtrList<ClassLiteral::Property>* private_members, FunctionLiteral* static_fields_initializer, FunctionLiteral* instance_members_initializer_function, int start_position, int end_position, bool has_name_static_property, - bool has_static_computed_names, bool is_anonymous) { + bool has_static_computed_names, bool is_anonymous, + bool has_private_methods) { return new (zone_) ClassLiteral( - scope, variable, extends, constructor, properties, + scope, extends, constructor, public_members, private_members, static_fields_initializer, instance_members_initializer_function, start_position, end_position, has_name_static_property, - has_static_computed_names, is_anonymous); + has_static_computed_names, is_anonymous, has_private_methods); } NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name, diff --git a/chromium/v8/src/ast/modules.cc b/chromium/v8/src/ast/modules.cc index dbd20f50a80..9c122fca869 100644 --- a/chromium/v8/src/ast/modules.cc +++ b/chromium/v8/src/ast/modules.cc @@ -84,11 +84,11 @@ void SourceTextModuleDescriptor::AddStarExport( } namespace { -Handle<HeapObject> ToStringOrUndefined(Isolate* isolate, - const AstRawString* s) { - return (s == nullptr) - ? Handle<HeapObject>::cast(isolate->factory()->undefined_value()) - : Handle<HeapObject>::cast(s->string()); +Handle<PrimitiveHeapObject> ToStringOrUndefined(Isolate* isolate, + const AstRawString* s) { + return (s == nullptr) ? 
Handle<PrimitiveHeapObject>::cast( + isolate->factory()->undefined_value()) + : Handle<PrimitiveHeapObject>::cast(s->string()); } } // namespace diff --git a/chromium/v8/src/ast/prettyprinter.cc b/chromium/v8/src/ast/prettyprinter.cc index 581517ee4ec..5bf9362fb8c 100644 --- a/chromium/v8/src/ast/prettyprinter.cc +++ b/chromium/v8/src/ast/prettyprinter.cc @@ -217,8 +217,11 @@ void CallPrinter::VisitFunctionLiteral(FunctionLiteral* node) { void CallPrinter::VisitClassLiteral(ClassLiteral* node) { if (node->extends()) Find(node->extends()); - for (int i = 0; i < node->properties()->length(); i++) { - Find(node->properties()->at(i)->value()); + for (int i = 0; i < node->public_members()->length(); i++) { + Find(node->public_members()->at(i)->value()); + } + for (int i = 0; i < node->private_members()->length(); i++) { + Find(node->private_members()->at(i)->value()); } } @@ -1106,7 +1109,8 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) { PrintIndentedVisit("INSTANCE MEMBERS INITIALIZER", node->instance_members_initializer_function()); } - PrintClassProperties(node->properties()); + PrintClassProperties(node->private_members()); + PrintClassProperties(node->public_members()); } void AstPrinter::VisitInitializeClassMembersStatement( diff --git a/chromium/v8/src/ast/prettyprinter.h b/chromium/v8/src/ast/prettyprinter.h index 322fd9fb143..795436d4222 100644 --- a/chromium/v8/src/ast/prettyprinter.h +++ b/chromium/v8/src/ast/prettyprinter.h @@ -5,10 +5,12 @@ #ifndef V8_AST_PRETTYPRINTER_H_ #define V8_AST_PRETTYPRINTER_H_ +#include <memory> + #include "src/ast/ast.h" #include "src/base/compiler-specific.h" -#include "src/utils/allocation.h" #include "src/objects/function-kind.h" +#include "src/utils/allocation.h" namespace v8 { namespace internal { diff --git a/chromium/v8/src/ast/scopes.cc b/chromium/v8/src/ast/scopes.cc index c8002dd088c..3e1f8f53ae2 100644 --- a/chromium/v8/src/ast/scopes.cc +++ b/chromium/v8/src/ast/scopes.cc @@ -40,7 +40,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, - bool* was_added) { + IsStaticFlag is_static_flag, bool* was_added) { // AstRawStrings are unambiguous, i.e., the same string is always represented // by the same AstRawString*. // FIXME(marja): fix the type of Lookup. @@ -51,8 +51,9 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, if (*was_added) { // The variable has not been declared yet -> insert it. 
DCHECK_EQ(name, p->key); - Variable* variable = new (zone) Variable( - scope, name, mode, kind, initialization_flag, maybe_assigned_flag); + Variable* variable = + new (zone) Variable(scope, name, mode, kind, initialization_flag, + maybe_assigned_flag, is_static_flag); p->value = variable; } return reinterpret_cast<Variable*>(p->value); @@ -102,6 +103,9 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type) DCHECK_NE(SCRIPT_SCOPE, scope_type); SetDefaults(); set_language_mode(outer_scope->language_mode()); + private_name_lookup_skips_outer_class_ = + outer_scope->is_class_scope() && + outer_scope->AsClassScope()->IsParsingHeritage(); outer_scope_->AddInnerScope(this); } @@ -140,14 +144,18 @@ ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info, set_language_mode(LanguageMode::kStrict); } -ClassScope::ClassScope(Zone* zone, Scope* outer_scope) - : Scope(zone, outer_scope, CLASS_SCOPE) { +ClassScope::ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous) + : Scope(zone, outer_scope, CLASS_SCOPE), + rare_data_and_is_parsing_heritage_(nullptr), + is_anonymous_class_(is_anonymous) { set_language_mode(LanguageMode::kStrict); } -ClassScope::ClassScope(Zone* zone, AstValueFactory* ast_value_factory, +ClassScope::ClassScope(Isolate* isolate, Zone* zone, + AstValueFactory* ast_value_factory, Handle<ScopeInfo> scope_info) - : Scope(zone, CLASS_SCOPE, scope_info) { + : Scope(zone, CLASS_SCOPE, scope_info), + rare_data_and_is_parsing_heritage_(nullptr) { set_language_mode(LanguageMode::kStrict); if (scope_info->HasClassBrand()) { Variable* brand = @@ -155,6 +163,25 @@ ClassScope::ClassScope(Zone* zone, AstValueFactory* ast_value_factory, DCHECK_NOT_NULL(brand); EnsureRareData()->brand = brand; } + + // If the class variable is context-allocated and its index is + // saved for deserialization, deserialize it. + if (scope_info->HasSavedClassVariableIndex()) { + int index = scope_info->SavedClassVariableContextLocalIndex(); + DCHECK_GE(index, 0); + DCHECK_LT(index, scope_info->ContextLocalCount()); + String name = scope_info->ContextLocalName(index); + DCHECK_EQ(scope_info->ContextLocalMode(index), VariableMode::kConst); + DCHECK_EQ(scope_info->ContextLocalInitFlag(index), + InitializationFlag::kNeedsInitialization); + DCHECK_EQ(scope_info->ContextLocalMaybeAssignedFlag(index), + MaybeAssignedFlag::kMaybeAssigned); + Variable* var = DeclareClassVariable( + ast_value_factory, ast_value_factory->GetString(handle(name, isolate)), + kNoSourcePosition); + var->AllocateTo(VariableLocation::CONTEXT, + Context::MIN_CONTEXT_SLOTS + index); + } } Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info) @@ -171,6 +198,8 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info) set_language_mode(scope_info->language_mode()); num_heap_slots_ = scope_info->ContextLength(); DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_); + private_name_lookup_skips_outer_class_ = + scope_info->PrivateNameLookupSkipsOuterClass(); // We don't really need to use the preparsed scope data; this is just to // shorten the recursion in SetMustUsePreparseData. 
must_use_preparsed_scope_data_ = true; @@ -222,6 +251,7 @@ void DeclarationScope::SetDefaults() { has_this_reference_ = false; has_this_declaration_ = (is_function_scope() && !is_arrow_scope()) || is_module_scope(); + needs_private_name_context_chain_recalc_ = false; has_rest_ = false; receiver_ = nullptr; new_target_ = nullptr; @@ -270,6 +300,8 @@ void Scope::SetDefaults() { is_declaration_scope_ = false; + private_name_lookup_skips_outer_class_ = false; + must_use_preparsed_scope_data_ = false; } @@ -343,8 +375,8 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, outer_scope = new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate)); } else if (scope_info.scope_type() == CLASS_SCOPE) { - outer_scope = new (zone) - ClassScope(zone, ast_value_factory, handle(scope_info, isolate)); + outer_scope = new (zone) ClassScope(isolate, zone, ast_value_factory, + handle(scope_info, isolate)); } else if (scope_info.scope_type() == BLOCK_SCOPE) { if (scope_info.is_declaration_scope()) { outer_scope = new (zone) @@ -546,7 +578,8 @@ bool DeclarationScope::Analyze(ParseInfo* info) { if (scope->must_use_preparsed_scope_data_) { DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE); allow_deref.emplace(); - info->consumed_preparse_data()->RestoreScopeAllocationData(scope); + info->consumed_preparse_data()->RestoreScopeAllocationData( + scope, info->ast_value_factory()); } if (!scope->AllocateVariables(info)) return false; @@ -787,11 +820,13 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; + IsStaticFlag is_static_flag; { location = VariableLocation::CONTEXT; index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + &init_flag, &maybe_assigned_flag, + &is_static_flag); found = index >= 0; } @@ -816,9 +851,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { } bool was_added; - Variable* var = - cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - init_flag, maybe_assigned_flag, &was_added); + Variable* var = cache->variables_.Declare( + zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag, + IsStaticFlag::kNotStatic, &was_added); DCHECK(was_added); var->AllocateTo(location, index); return var; @@ -1047,7 +1082,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, bool was_added; return cache->variables_.Declare( zone(), this, name, VariableMode::kDynamicGlobal, kind, - kCreatedInitialized, kNotAssigned, &was_added); + kCreatedInitialized, kNotAssigned, IsStaticFlag::kNotStatic, &was_added); // TODO(neis): Mark variable as maybe-assigned? } @@ -1165,9 +1200,9 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) { // to ensure that UpdateNeedsHoleCheck() can detect import variables. if (is_module_scope()) AsModuleScope()->AllocateModuleVariables(); - ClassScope* closest_class_scope = GetClassScope(); - if (closest_class_scope != nullptr && - !closest_class_scope->ResolvePrivateNames(info)) { + PrivateNameScopeIterator private_name_scope_iter(this); + if (!private_name_scope_iter.Done() && + !private_name_scope_iter.GetScope()->ResolvePrivateNames(info)) { DCHECK(info->pending_error_handler()->has_pending_error()); return false; } @@ -1177,7 +1212,7 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) { return false; } - // // Don't allocate variables of preparsed scopes. 
+ // Don't allocate variables of preparsed scopes. if (!was_lazily_parsed()) AllocateVariablesRecursively(); return true; @@ -1254,17 +1289,6 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const { return result; } -ClassScope* Scope::GetClassScope() { - Scope* scope = this; - while (scope != nullptr && !scope->is_class_scope()) { - scope = scope->outer_scope(); - } - if (scope != nullptr && scope->is_class_scope()) { - return scope->AsClassScope(); - } - return nullptr; -} - DeclarationScope* Scope::GetDeclarationScope() { Scope* scope = this; while (!scope->is_declaration_scope()) { @@ -1688,11 +1712,17 @@ void Scope::Print(int n) { if (is_declaration_scope() && AsDeclarationScope()->NeedsHomeObject()) { Indent(n1, "// scope needs home object\n"); } + if (private_name_lookup_skips_outer_class()) { + Indent(n1, "// scope skips outer class for #-names\n"); + } if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n"); if (is_declaration_scope()) { DeclarationScope* scope = AsDeclarationScope(); if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n"); if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n"); + if (scope->needs_private_name_context_chain_recalc()) { + Indent(n1, "// needs #-name context chain recalc\n"); + } } if (num_stack_slots_ > 0) { Indent(n1, "// "); @@ -1729,15 +1759,24 @@ void Scope::Print(int n) { if (is_class_scope()) { ClassScope* class_scope = AsClassScope(); - if (class_scope->rare_data_ != nullptr) { + if (class_scope->GetRareData() != nullptr) { PrintMap(n1, "// private name vars:\n", - &(class_scope->rare_data_->private_name_map), true, function); + &(class_scope->GetRareData()->private_name_map), true, function); Variable* brand = class_scope->brand(); if (brand != nullptr) { Indent(n1, "// brand var:\n"); PrintVar(n1, brand); } } + if (class_scope->class_variable() != nullptr) { + Indent(n1, "// class var"); + PrintF("%s%s:\n", + class_scope->class_variable()->is_used() ? ", used" : ", unused", + class_scope->should_save_class_variable_index() + ? ", index saved" + : ", index not saved"); + PrintVar(n1, class_scope->class_variable()); + } } // Print inner scopes (disable by providing negative n). @@ -1780,9 +1819,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) { // Declare a new non-local. DCHECK(IsDynamicVariableMode(mode)); bool was_added; - Variable* var = - variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - kCreatedInitialized, kNotAssigned, &was_added); + Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, + kCreatedInitialized, kNotAssigned, + IsStaticFlag::kNotStatic, &was_added); // Allocate it by giving it a dynamic lookup. var->AllocateTo(VariableLocation::LOOKUP, -1); return var; @@ -2103,8 +2142,7 @@ bool Scope::MustAllocateInContext(Variable* var) { if (mode == VariableMode::kTemporary) return false; if (is_catch_scope()) return true; if (is_script_scope() || is_eval_scope()) { - if (IsLexicalVariableMode(mode) || - IsPrivateMethodOrAccessorVariableMode(mode)) { + if (IsLexicalVariableMode(mode)) { return true; } } @@ -2308,6 +2346,47 @@ void Scope::AllocateScopeInfosRecursively(Isolate* isolate, } } +void DeclarationScope::RecalcPrivateNameContextChain() { + // The outermost scope in a class heritage expression is marked to skip the + // class scope during private name resolution. 
It is possible, however, that + // either the class scope won't require a Context and ScopeInfo, or the + // outermost scope in the heritage position won't. Simply copying the bit from + // full parse into the ScopeInfo will break lazy compilation. In the former + // case the scope that is marked to skip its outer scope will incorrectly skip + // a different class scope than the one we intended to skip. In the latter + // case variables resolved through an inner scope will incorrectly check the + // class scope since we lost the skip bit from the outermost heritage scope. + // + // This method fixes both cases by, in outermost to innermost order, copying + // the value of the skip bit from outer scopes that don't require a Context. + DCHECK(needs_private_name_context_chain_recalc_); + this->ForEach([](Scope* scope) { + Scope* outer = scope->outer_scope(); + if (!outer) return Iteration::kDescend; + if (!outer->NeedsContext()) { + scope->private_name_lookup_skips_outer_class_ = + outer->private_name_lookup_skips_outer_class(); + } + if (!scope->is_function_scope() || + scope->AsDeclarationScope()->ShouldEagerCompile()) { + return Iteration::kDescend; + } + return Iteration::kContinue; + }); +} + +void DeclarationScope::RecordNeedsPrivateNameContextChainRecalc() { + DCHECK_EQ(GetClosureScope(), this); + DeclarationScope* scope; + for (scope = this; scope != nullptr; + scope = scope->outer_scope() != nullptr + ? scope->outer_scope()->GetClosureScope() + : nullptr) { + if (scope->needs_private_name_context_chain_recalc_) return; + scope->needs_private_name_context_chain_recalc_ = true; + } +} + // static void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) { DeclarationScope* scope = info->literal()->scope(); @@ -2318,6 +2397,9 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) { outer_scope = scope->outer_scope_->scope_info_; } + if (scope->needs_private_name_context_chain_recalc()) { + scope->RecalcPrivateNameContextChain(); + } scope->AllocateScopeInfosRecursively(isolate, outer_scope); // The debugger expects all shared function infos to contain a scope info. 
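A minimal sketch of the kind of source pattern that sets the skip bit discussed in the comment above (illustrative only, not part of the upstream patch; all names — Outer, #tag, make, #inner — are invented), written in the comment style the file itself uses for examples:

// Example (illustrative): a heritage expression that must skip the class
// scope currently being defined when resolving private names.
//
//   class Outer {
//     static #tag() { return Object; }
//     static make() {
//       // The heritage expression below is parsed in a scope whose
//       // private_name_lookup_skips_outer_class_ bit is set, so `Outer.#tag`
//       // skips the anonymous inner class scope (which only knows #inner)
//       // and resolves against Outer instead.
//       return class extends (Outer.#tag()) {
//         #inner;
//       };
//     }
//   }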
@@ -2359,14 +2441,20 @@ bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) { } Variable* ClassScope::DeclarePrivateName(const AstRawString* name, - VariableMode mode, bool* was_added) { + VariableMode mode, + IsStaticFlag is_static_flag, + bool* was_added) { Variable* result = EnsureRareData()->private_name_map.Declare( zone(), this, name, mode, NORMAL_VARIABLE, InitializationFlag::kNeedsInitialization, - MaybeAssignedFlag::kMaybeAssigned, was_added); + MaybeAssignedFlag::kMaybeAssigned, is_static_flag, was_added); if (*was_added) { locals_.Add(result); - } else if (IsComplementaryAccessorPair(result->mode(), mode)) { + has_static_private_methods_ |= + (result->is_static() && + IsPrivateMethodOrAccessorVariableMode(result->mode())); + } else if (IsComplementaryAccessorPair(result->mode(), mode) && + result->is_static_flag() == is_static_flag) { *was_added = true; result->set_mode(VariableMode::kPrivateGetterAndSetter); } @@ -2375,38 +2463,42 @@ Variable* ClassScope::DeclarePrivateName(const AstRawString* name, } Variable* ClassScope::LookupLocalPrivateName(const AstRawString* name) { - if (rare_data_ == nullptr) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr) { return nullptr; } - return rare_data_->private_name_map.Lookup(name); + return rare_data->private_name_map.Lookup(name); } UnresolvedList::Iterator ClassScope::GetUnresolvedPrivateNameTail() { - if (rare_data_ == nullptr) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr) { return UnresolvedList::Iterator(); } - return rare_data_->unresolved_private_names.end(); + return rare_data->unresolved_private_names.end(); } void ClassScope::ResetUnresolvedPrivateNameTail(UnresolvedList::Iterator tail) { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.end() == tail) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || + rare_data->unresolved_private_names.end() == tail) { return; } bool tail_is_empty = tail == UnresolvedList::Iterator(); if (tail_is_empty) { // If the saved tail is empty, the list used to be empty, so clear it. - rare_data_->unresolved_private_names.Clear(); + rare_data->unresolved_private_names.Clear(); } else { - rare_data_->unresolved_private_names.Rewind(tail); + rare_data->unresolved_private_names.Rewind(tail); } } void ClassScope::MigrateUnresolvedPrivateNameTail( AstNodeFactory* ast_node_factory, UnresolvedList::Iterator tail) { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.end() == tail) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || + rare_data->unresolved_private_names.end() == tail) { return; } UnresolvedList migrated_names; @@ -2415,9 +2507,9 @@ void ClassScope::MigrateUnresolvedPrivateNameTail( // migrate everything after the head. bool tail_is_empty = tail == UnresolvedList::Iterator(); UnresolvedList::Iterator it = - tail_is_empty ? rare_data_->unresolved_private_names.begin() : tail; + tail_is_empty ? rare_data->unresolved_private_names.begin() : tail; - for (; it != rare_data_->unresolved_private_names.end(); ++it) { + for (; it != rare_data->unresolved_private_names.end(); ++it) { VariableProxy* proxy = *it; VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy); migrated_names.Add(copy); @@ -2425,20 +2517,11 @@ void ClassScope::MigrateUnresolvedPrivateNameTail( // Replace with the migrated copies. 
if (tail_is_empty) { - rare_data_->unresolved_private_names.Clear(); + rare_data->unresolved_private_names.Clear(); } else { - rare_data_->unresolved_private_names.Rewind(tail); + rare_data->unresolved_private_names.Rewind(tail); } - rare_data_->unresolved_private_names.Append(std::move(migrated_names)); -} - -void ClassScope::AddUnresolvedPrivateName(VariableProxy* proxy) { - // During a reparse, already_resolved_ may be true here, because - // the class scope is deserialized while the function scope inside may - // be new. - DCHECK(!proxy->is_resolved()); - DCHECK(proxy->IsPrivateName()); - EnsureRareData()->unresolved_private_names.Add(proxy); + rare_data->unresolved_private_names.Append(std::move(migrated_names)); } Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { @@ -2450,8 +2533,10 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; - int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + IsStaticFlag is_static_flag; + int index = + ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag, + &maybe_assigned_flag, &is_static_flag); if (index < 0) { return nullptr; } @@ -2463,7 +2548,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { // Add the found private name to the map to speed up subsequent // lookups for the same name. bool was_added; - Variable* var = DeclarePrivateName(name, mode, &was_added); + Variable* var = DeclarePrivateName(name, mode, is_static_flag, &was_added); DCHECK(was_added); var->AllocateTo(VariableLocation::CONTEXT, index); return var; @@ -2472,15 +2557,14 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) { DCHECK(!proxy->is_resolved()); - for (Scope* scope = this; !scope->is_script_scope(); - scope = scope->outer_scope_) { - if (!scope->is_class_scope()) continue; // Only search in class scopes - ClassScope* class_scope = scope->AsClassScope(); + for (PrivateNameScopeIterator scope_iter(this); !scope_iter.Done(); + scope_iter.Next()) { + ClassScope* scope = scope_iter.GetScope(); // Try finding it in the private name map first, if it can't be found, // try the deseralized scope info. 
- Variable* var = class_scope->LookupLocalPrivateName(proxy->raw_name()); - if (var == nullptr && !class_scope->scope_info_.is_null()) { - var = class_scope->LookupPrivateNameInScopeInfo(proxy->raw_name()); + Variable* var = scope->LookupLocalPrivateName(proxy->raw_name()); + if (var == nullptr && !scope->scope_info_.is_null()) { + var = scope->LookupPrivateNameInScopeInfo(proxy->raw_name()); } if (var != nullptr) { return var; @@ -2490,22 +2574,24 @@ Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) { } bool ClassScope::ResolvePrivateNames(ParseInfo* info) { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.is_empty()) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || rare_data->unresolved_private_names.is_empty()) { return true; } - UnresolvedList& list = rare_data_->unresolved_private_names; + UnresolvedList& list = rare_data->unresolved_private_names; for (VariableProxy* proxy : list) { Variable* var = LookupPrivateName(proxy); if (var == nullptr) { + // It's only possible to fail to resolve private names here if + // this is at the top level or the private name is accessed through eval. + DCHECK(info->is_eval() || outer_scope_->is_script_scope()); Scanner::Location loc = proxy->location(); info->pending_error_handler()->ReportMessageAt( loc.beg_pos, loc.end_pos, MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name()); return false; } else { - var->set_is_used(); proxy->BindTo(var); } } @@ -2517,20 +2603,20 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) { } VariableProxy* ClassScope::ResolvePrivateNamesPartially() { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.is_empty()) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || rare_data->unresolved_private_names.is_empty()) { return nullptr; } - ClassScope* outer_class_scope = - outer_scope_ == nullptr ? nullptr : outer_scope_->GetClassScope(); - UnresolvedList& unresolved = rare_data_->unresolved_private_names; - bool has_private_names = rare_data_->private_name_map.capacity() > 0; + PrivateNameScopeIterator private_name_scope_iter(this); + private_name_scope_iter.Next(); + UnresolvedList& unresolved = rare_data->unresolved_private_names; + bool has_private_names = rare_data->private_name_map.capacity() > 0; // If the class itself does not have private names, nor does it have - // an outer class scope, then we are certain any private name access + // an outer private name scope, then we are certain any private name access // inside cannot be resolved. - if (!has_private_names && outer_class_scope == nullptr && + if (!has_private_names && private_name_scope_iter.Done() && !unresolved.is_empty()) { return unresolved.first(); } @@ -2548,21 +2634,27 @@ VariableProxy* ClassScope::ResolvePrivateNamesPartially() { if (var != nullptr) { var->set_is_used(); proxy->BindTo(var); + // If the variable being accessed is a static private method, we need to + // save the class variable in the context to check that the receiver is + // the class during runtime. + has_explicit_static_private_methods_access_ |= + (var->is_static() && + IsPrivateMethodOrAccessorVariableMode(var->mode())); } } // If the current scope does not have declared private names, // try looking from the outer class scope later. if (var == nullptr) { - // There's no outer class scope so we are certain that the variable + // There's no outer private name scope so we are certain that the variable // cannot be resolved later. 
- if (outer_class_scope == nullptr) { + if (private_name_scope_iter.Done()) { return proxy; } - // The private name may be found later in the outer class scope, - // so push it to the outer sopce. - outer_class_scope->AddUnresolvedPrivateName(proxy); + // The private name may be found later in the outer private name scope, so + // push it to the outer sopce. + private_name_scope_iter.AddUnresolvedPrivateName(proxy); } proxy = next; @@ -2573,14 +2665,16 @@ VariableProxy* ClassScope::ResolvePrivateNamesPartially() { } Variable* ClassScope::DeclareBrandVariable(AstValueFactory* ast_value_factory, + IsStaticFlag is_static_flag, int class_token_pos) { - DCHECK_IMPLIES(rare_data_ != nullptr, rare_data_->brand == nullptr); + DCHECK_IMPLIES(GetRareData() != nullptr, GetRareData()->brand == nullptr); bool was_added; Variable* brand = Declare(zone(), ast_value_factory->dot_brand_string(), VariableMode::kConst, NORMAL_VARIABLE, InitializationFlag::kNeedsInitialization, MaybeAssignedFlag::kMaybeAssigned, &was_added); DCHECK(was_added); + brand->set_is_static_flag(is_static_flag); brand->ForceContextAllocation(); brand->set_is_used(); EnsureRareData()->brand = brand; @@ -2588,5 +2682,61 @@ Variable* ClassScope::DeclareBrandVariable(AstValueFactory* ast_value_factory, return brand; } +Variable* ClassScope::DeclareClassVariable(AstValueFactory* ast_value_factory, + const AstRawString* name, + int class_token_pos) { + DCHECK_NULL(class_variable_); + bool was_added; + class_variable_ = + Declare(zone(), name == nullptr ? ast_value_factory->dot_string() : name, + VariableMode::kConst, NORMAL_VARIABLE, + InitializationFlag::kNeedsInitialization, + MaybeAssignedFlag::kMaybeAssigned, &was_added); + DCHECK(was_added); + class_variable_->set_initializer_position(class_token_pos); + return class_variable_; +} + +PrivateNameScopeIterator::PrivateNameScopeIterator(Scope* start) + : start_scope_(start), current_scope_(start) { + if (!start->is_class_scope() || start->AsClassScope()->IsParsingHeritage()) { + Next(); + } +} + +void PrivateNameScopeIterator::Next() { + DCHECK(!Done()); + Scope* inner = current_scope_; + Scope* scope = inner->outer_scope(); + while (scope != nullptr) { + if (scope->is_class_scope()) { + if (!inner->private_name_lookup_skips_outer_class()) { + current_scope_ = scope; + return; + } + skipped_any_scopes_ = true; + } + inner = scope; + scope = scope->outer_scope(); + } + current_scope_ = nullptr; +} + +void PrivateNameScopeIterator::AddUnresolvedPrivateName(VariableProxy* proxy) { + // During a reparse, current_scope_->already_resolved_ may be true here, + // because the class scope is deserialized while the function scope inside may + // be new. + DCHECK(!proxy->is_resolved()); + DCHECK(proxy->IsPrivateName()); + GetScope()->EnsureRareData()->unresolved_private_names.Add(proxy); + // Any closure scope that contain uses of private names that skips over a + // class scope due to heritage expressions need private name context chain + // recalculation, since not all scopes require a Context or ScopeInfo. See + // comment in DeclarationScope::RecalcPrivateNameContextChain. 
+ if (V8_UNLIKELY(skipped_any_scopes_)) { + start_scope_->GetClosureScope()->RecordNeedsPrivateNameContextChainRecalc(); + } +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/ast/scopes.h b/chromium/v8/src/ast/scopes.h index 73e6e8fd897..30838db28b5 100644 --- a/chromium/v8/src/ast/scopes.h +++ b/chromium/v8/src/ast/scopes.h @@ -44,7 +44,7 @@ class VariableMap : public ZoneHashMap { VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, - bool* was_added); + IsStaticFlag is_static_flag, bool* was_added); V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name); void Remove(Variable* var); @@ -360,6 +360,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { bool is_class_scope() const { return scope_type_ == CLASS_SCOPE; } bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; } + bool private_name_lookup_skips_outer_class() const { + return private_name_lookup_skips_outer_class_; + } bool IsAsmModule() const; // Returns true if this scope or any inner scopes that might be eagerly // compiled are asm modules. @@ -464,10 +467,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // sloppy eval call. One if this->sloppy_eval_can_extend_vars(). int ContextChainLengthUntilOutermostSloppyEval() const; - // Find the closest class scope in the current scope and outer scopes. If no - // class scope is found, nullptr will be returned. - ClassScope* GetClassScope(); - // Find the first function, script, eval or (declaration) block scope. This is // the scope where var declarations will be hoisted to in the implementation. DeclarationScope* GetDeclarationScope(); @@ -557,9 +556,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { Variable* Declare(Zone* zone, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, bool* was_added) { - Variable* result = - variables_.Declare(zone, this, name, mode, kind, initialization_flag, - maybe_assigned_flag, was_added); + // Static variables can only be declared using ClassScope methods. + Variable* result = variables_.Declare( + zone, this, name, mode, kind, initialization_flag, maybe_assigned_flag, + IsStaticFlag::kNotStatic, was_added); if (*was_added) locals_.Add(result); return result; } @@ -713,7 +713,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // This scope's declarations might not be executed in order (e.g., switch). bool scope_nonlinear_ : 1; bool is_hidden_ : 1; - // Temporary workaround that allows masking of 'this' in debug-evalute scopes. + // Temporary workaround that allows masking of 'this' in debug-evaluate + // scopes. bool is_debug_evaluate_scope_ : 1; // True if one of the inner scopes or the scope itself calls eval. @@ -723,6 +724,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // True if it holds 'var' declarations. bool is_declaration_scope_ : 1; + // True if the outer scope is a class scope and should be skipped when + // resolving private names, i.e. if the scope is in a class heritage + // expression. 
+ bool private_name_lookup_skips_outer_class_ : 1; + bool must_use_preparsed_scope_data_ : 1; }; @@ -859,6 +865,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { return IsClassMembersInitializerFunction(function_kind()); } + void set_is_async_module() { + DCHECK(IsModule(function_kind_)); + function_kind_ = kAsyncModule; + } + void DeclareThis(AstValueFactory* ast_value_factory); void DeclareArguments(AstValueFactory* ast_value_factory); void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory); @@ -1082,6 +1093,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { GetReceiverScope()->receiver()->ForceContextAllocation(); } + bool needs_private_name_context_chain_recalc() const { + return needs_private_name_context_chain_recalc_; + } + void RecordNeedsPrivateNameContextChainRecalc(); + private: V8_INLINE void AllocateParameter(Variable* var, int index); @@ -1099,6 +1115,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { void SetDefaults(); + // Recalculate the private name context chain from the existing skip bit in + // preparation for AllocateScopeInfos. Because the private name scope is + // implemented with a skip bit for scopes in heritage position, that bit may + // need to be recomputed due scopes that do not need contexts. + void RecalcPrivateNameContextChain(); + bool has_simple_parameters_ : 1; // This scope contains an "use asm" annotation. bool is_asm_module_ : 1; @@ -1120,9 +1142,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { bool has_checked_syntax_ : 1; bool has_this_reference_ : 1; bool has_this_declaration_ : 1; + bool needs_private_name_context_chain_recalc_ : 1; // If the scope is a function scope, this is the function kind. - const FunctionKind function_kind_; + FunctionKind function_kind_; int num_parameters_ = 0; @@ -1220,17 +1243,26 @@ class ModuleScope final : public DeclarationScope { class V8_EXPORT_PRIVATE ClassScope : public Scope { public: - ClassScope(Zone* zone, Scope* outer_scope); + ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous); // Deserialization. - ClassScope(Zone* zone, AstValueFactory* ast_value_factory, + ClassScope(Isolate* isolate, Zone* zone, AstValueFactory* ast_value_factory, Handle<ScopeInfo> scope_info); + struct HeritageParsingScope { + explicit HeritageParsingScope(ClassScope* class_scope) + : class_scope_(class_scope) { + class_scope_->SetIsParsingHeritage(true); + } + ~HeritageParsingScope() { class_scope_->SetIsParsingHeritage(false); } + + private: + ClassScope* class_scope_; + }; + // Declare a private name in the private name map and add it to the // local variables of this scope. Variable* DeclarePrivateName(const AstRawString* name, VariableMode mode, - bool* was_added); - - void AddUnresolvedPrivateName(VariableProxy* proxy); + IsStaticFlag is_static_flag, bool* was_added); // Try resolving all unresolved private names found in the current scope. // Called from DeclarationScope::AllocateVariables() when reparsing a @@ -1261,13 +1293,53 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { void MigrateUnresolvedPrivateNameTail(AstNodeFactory* ast_node_factory, UnresolvedList::Iterator tail); Variable* DeclareBrandVariable(AstValueFactory* ast_value_factory, + IsStaticFlag is_static_flag, int class_token_pos); + + Variable* DeclareClassVariable(AstValueFactory* ast_value_factory, + const AstRawString* name, int class_token_pos); + Variable* brand() { - return rare_data_ == nullptr ? nullptr : rare_data_->brand; + return GetRareData() == nullptr ? 
nullptr : GetRareData()->brand; + } + + Variable* class_variable() { return class_variable_; } + + V8_INLINE bool IsParsingHeritage() { + return rare_data_and_is_parsing_heritage_.GetPayload(); + } + + // Only maintained when the scope is parsed, not when the scope is + // deserialized. + bool has_static_private_methods() const { + return has_static_private_methods_; + } + + // Returns whether the index of class variable of this class scope should be + // recorded in the ScopeInfo. + // If any inner scope accesses static private names directly, the class + // variable will be forced to be context-allocated. + // The inner scope may also calls eval which may results in access to + // static private names. + // Only maintained when the scope is parsed. + bool should_save_class_variable_index() const { + return should_save_class_variable_index_ || + has_explicit_static_private_methods_access_ || + (has_static_private_methods_ && inner_scope_calls_eval_); + } + + // Only maintained when the scope is parsed. + bool is_anonymous_class() const { return is_anonymous_class_; } + + // Overriden during reparsing + void set_should_save_class_variable_index() { + should_save_class_variable_index_ = true; } private: friend class Scope; + friend class PrivateNameScopeIterator; + // Find the private name declared in the private name map first, // if it cannot be found there, try scope info if there is any. // Returns nullptr if it cannot be found. @@ -1285,14 +1357,53 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { Variable* brand = nullptr; }; + V8_INLINE RareData* GetRareData() { + return rare_data_and_is_parsing_heritage_.GetPointer(); + } V8_INLINE RareData* EnsureRareData() { - if (rare_data_ == nullptr) { - rare_data_ = new (zone_) RareData(zone_); + if (GetRareData() == nullptr) { + rare_data_and_is_parsing_heritage_.SetPointer(new (zone_) + RareData(zone_)); } - return rare_data_; + return GetRareData(); + } + V8_INLINE void SetIsParsingHeritage(bool v) { + rare_data_and_is_parsing_heritage_.SetPayload(v); } - RareData* rare_data_ = nullptr; + PointerWithPayload<RareData, bool, 1> rare_data_and_is_parsing_heritage_; + Variable* class_variable_ = nullptr; + // These are only maintained when the scope is parsed, not when the + // scope is deserialized. + bool has_static_private_methods_ = false; + bool has_explicit_static_private_methods_access_ = false; + bool is_anonymous_class_ = false; + // This is only maintained during reparsing, restored from the + // preparsed data. + bool should_save_class_variable_index_ = false; +}; + +// Iterate over the private name scope chain. The iteration proceeds from the +// innermost private name scope outwards. +class PrivateNameScopeIterator { + public: + explicit PrivateNameScopeIterator(Scope* start); + + bool Done() const { return current_scope_ == nullptr; } + void Next(); + + // Add an unresolved private name to the current scope. 
+ void AddUnresolvedPrivateName(VariableProxy* proxy); + + ClassScope* GetScope() const { + DCHECK(!Done()); + return current_scope_->AsClassScope(); + } + + private: + bool skipped_any_scopes_ = false; + Scope* start_scope_; + Scope* current_scope_; }; } // namespace internal diff --git a/chromium/v8/src/ast/source-range-ast-visitor.cc b/chromium/v8/src/ast/source-range-ast-visitor.cc index d171e305875..74709916159 100644 --- a/chromium/v8/src/ast/source-range-ast-visitor.cc +++ b/chromium/v8/src/ast/source-range-ast-visitor.cc @@ -25,12 +25,25 @@ void SourceRangeAstVisitor::VisitBlock(Block* stmt) { } } +void SourceRangeAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) { + AstTraversalVisitor::VisitSwitchStatement(stmt); + ZonePtrList<CaseClause>* clauses = stmt->cases(); + for (CaseClause* clause : *clauses) { + MaybeRemoveLastContinuationRange(clause->statements()); + } +} + void SourceRangeAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) { AstTraversalVisitor::VisitFunctionLiteral(expr); ZonePtrList<Statement>* stmts = expr->body(); MaybeRemoveLastContinuationRange(stmts); } +void SourceRangeAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) { + AstTraversalVisitor::VisitTryCatchStatement(stmt); + MaybeRemoveContinuationRangeOfAsyncReturn(stmt); +} + bool SourceRangeAstVisitor::VisitNode(AstNode* node) { AstNodeSourceRanges* range = source_range_map_->Find(node); @@ -51,11 +64,8 @@ bool SourceRangeAstVisitor::VisitNode(AstNode* node) { return true; } -void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange( - ZonePtrList<Statement>* statements) { - if (statements->is_empty()) return; - - Statement* last_statement = statements->last(); +void SourceRangeAstVisitor::MaybeRemoveContinuationRange( + Statement* last_statement) { AstNodeSourceRanges* last_range = nullptr; if (last_statement->IsExpressionStatement() && @@ -75,5 +85,38 @@ void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange( } } +void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange( + ZonePtrList<Statement>* statements) { + if (statements->is_empty()) return; + MaybeRemoveContinuationRange(statements->last()); +} + +namespace { +Statement* FindLastNonSyntheticReturn(ZonePtrList<Statement>* statements) { + for (int i = statements->length() - 1; i >= 0; --i) { + Statement* stmt = statements->at(i); + if (!stmt->IsReturnStatement()) break; + if (stmt->AsReturnStatement()->is_synthetic_async_return()) continue; + return stmt; + } + return nullptr; +} +} // namespace + +void SourceRangeAstVisitor::MaybeRemoveContinuationRangeOfAsyncReturn( + TryCatchStatement* try_catch_stmt) { + // Detect try-catch inserted by NewTryCatchStatementForAsyncAwait in the + // parser (issued for async functions, including async generators), and + // remove the continuation ranges of return statements corresponding to + // returns at function end in the untransformed source. 
+ if (try_catch_stmt->is_try_catch_for_async()) { + Statement* last_non_synthetic = + FindLastNonSyntheticReturn(try_catch_stmt->try_block()->statements()); + if (last_non_synthetic) { + MaybeRemoveContinuationRange(last_non_synthetic); + } + } +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/ast/source-range-ast-visitor.h b/chromium/v8/src/ast/source-range-ast-visitor.h index 4ea36a947f5..8b6b99c4346 100644 --- a/chromium/v8/src/ast/source-range-ast-visitor.h +++ b/chromium/v8/src/ast/source-range-ast-visitor.h @@ -34,10 +34,14 @@ class SourceRangeAstVisitor final friend class AstTraversalVisitor<SourceRangeAstVisitor>; void VisitBlock(Block* stmt); + void VisitSwitchStatement(SwitchStatement* stmt); void VisitFunctionLiteral(FunctionLiteral* expr); bool VisitNode(AstNode* node); + void VisitTryCatchStatement(TryCatchStatement* stmt); + void MaybeRemoveContinuationRange(Statement* last_statement); void MaybeRemoveLastContinuationRange(ZonePtrList<Statement>* stmts); + void MaybeRemoveContinuationRangeOfAsyncReturn(TryCatchStatement* stmt); SourceRangeMap* source_range_map_ = nullptr; std::unordered_set<int> continuation_positions_; diff --git a/chromium/v8/src/ast/variables.h b/chromium/v8/src/ast/variables.h index 1ff6f9f4228..7be99adc7c1 100644 --- a/chromium/v8/src/ast/variables.h +++ b/chromium/v8/src/ast/variables.h @@ -21,7 +21,8 @@ class Variable final : public ZoneObject { public: Variable(Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, - MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, + IsStaticFlag is_static_flag = IsStaticFlag::kNotStatic) : scope_(scope), name_(name), local_if_not_shadowed_(nullptr), @@ -35,10 +36,13 @@ class Variable final : public ZoneObject { ForceContextAllocationField::encode(false) | ForceHoleInitializationField::encode(false) | LocationField::encode(VariableLocation::UNALLOCATED) | - VariableKindField::encode(kind)) { + VariableKindField::encode(kind) | + IsStaticFlagField::encode(is_static_flag)) { // Var declared variables never need initialization. 
DCHECK(!(mode == VariableMode::kVar && initialization_flag == kNeedsInitialization)); + DCHECK_IMPLIES(is_static_flag == IsStaticFlag::kStatic, + IsConstVariableMode(mode)); } explicit Variable(Variable* other); @@ -59,6 +63,14 @@ class Variable final : public ZoneObject { void set_mode(VariableMode mode) { bit_field_ = VariableModeField::update(bit_field_, mode); } + void set_is_static_flag(IsStaticFlag is_static_flag) { + bit_field_ = IsStaticFlagField::update(bit_field_, is_static_flag); + } + IsStaticFlag is_static_flag() const { + return IsStaticFlagField::decode(bit_field_); + } + bool is_static() const { return is_static_flag() == IsStaticFlag::kStatic; } + bool has_forced_context_allocation() const { return ForceContextAllocationField::decode(bit_field_); } @@ -72,6 +84,9 @@ class Variable final : public ZoneObject { MaybeAssignedFlag maybe_assigned() const { return MaybeAssignedFlagField::decode(bit_field_); } + void clear_maybe_assigned() { + bit_field_ = MaybeAssignedFlagField::update(bit_field_, kNotAssigned); + } void SetMaybeAssigned() { if (mode() == VariableMode::kConst) return; @@ -249,6 +264,7 @@ class Variable final : public ZoneObject { using ForceHoleInitializationField = InitializationFlagField::Next<bool, 1>; using MaybeAssignedFlagField = ForceHoleInitializationField::Next<MaybeAssignedFlag, 1>; + using IsStaticFlagField = MaybeAssignedFlagField::Next<IsStaticFlag, 1>; Variable** next() { return &next_; } friend List; diff --git a/chromium/v8/src/base/OWNERS b/chromium/v8/src/base/OWNERS index 9c6fd3c859a..3654b400ada 100644 --- a/chromium/v8/src/base/OWNERS +++ b/chromium/v8/src/base/OWNERS @@ -1,4 +1,4 @@ -clemensh@chromium.org +clemensb@chromium.org mlippautz@chromium.org # COMPONENT: Blink>JavaScript diff --git a/chromium/v8/src/base/adapters.h b/chromium/v8/src/base/adapters.h deleted file mode 100644 index f684b52ccb6..00000000000 --- a/chromium/v8/src/base/adapters.h +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2014 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Slightly adapted for inclusion in V8. -// Copyright 2014 the V8 project authors. All rights reserved. - -#ifndef V8_BASE_ADAPTERS_H_ -#define V8_BASE_ADAPTERS_H_ - -#include <iterator> - -#include "src/base/macros.h" - -namespace v8 { -namespace base { - -// Internal adapter class for implementing base::Reversed. -template <typename T> -class ReversedAdapter { - public: - using Iterator = - std::reverse_iterator<decltype(std::begin(std::declval<T>()))>; - - explicit ReversedAdapter(T& t) : t_(t) {} - ReversedAdapter(const ReversedAdapter& ra) V8_NOEXCEPT = default; - - // TODO(clemensh): Use std::rbegin/std::rend once we have C++14 support. - Iterator begin() const { return Iterator(std::end(t_)); } - Iterator end() const { return Iterator(std::begin(t_)); } - - private: - T& t_; - - DISALLOW_ASSIGN(ReversedAdapter); -}; - -// Reversed returns a container adapter usable in a range-based "for" statement -// for iterating a reversible container in reverse order. 
-// -// Example: -// -// std::vector<int> v = ...; -// for (int i : base::Reversed(v)) { -// // iterates through v from back to front -// } -template <typename T> -ReversedAdapter<T> Reversed(T&& t) { - return ReversedAdapter<T>(t); -} - -} // namespace base -} // namespace v8 - -#endif // V8_BASE_ADAPTERS_H_ diff --git a/chromium/v8/src/base/cpu.cc b/chromium/v8/src/base/cpu.cc index 6ab0ffee29e..4f4ac2b3282 100644 --- a/chromium/v8/src/base/cpu.cc +++ b/chromium/v8/src/base/cpu.cc @@ -9,6 +9,7 @@ #endif #if V8_OS_LINUX #include <linux/auxvec.h> // AT_HWCAP +extern "C" char** environ; #endif #if V8_GLIBC_PREREQ(2, 16) #include <sys/auxv.h> // getauxval() @@ -16,7 +17,7 @@ #if V8_OS_QNX #include <sys/syspage.h> // cpuinfo #endif -#if V8_OS_LINUX && V8_HOST_ARCH_PPC +#if (V8_OS_LINUX && V8_HOST_ARCH_PPC) || V8_OS_ANDROID #include <elf.h> #endif #if V8_OS_AIX @@ -109,28 +110,25 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { #define HWCAP_LPAE (1 << 20) static uint32_t ReadELFHWCaps() { - uint32_t result = 0; #if V8_GLIBC_PREREQ(2, 16) - result = static_cast<uint32_t>(getauxval(AT_HWCAP)); + return static_cast<uint32_t>(getauxval(AT_HWCAP)); #else - // Read the ELF HWCAP flags by parsing /proc/self/auxv. - FILE* fp = fopen("/proc/self/auxv", "r"); - if (fp != nullptr) { - struct { uint32_t tag; uint32_t value; } entry; - for (;;) { - size_t n = fread(&entry, sizeof(entry), 1, fp); - if (n == 0 || (entry.tag == 0 && entry.value == 0)) { - break; - } - if (entry.tag == AT_HWCAP) { - result = entry.value; - break; - } + char** head = environ; + while (*head++ != nullptr) { + } +#ifdef __LP64__ + using elf_auxv_t = Elf64_auxv_t; +#else + using elf_auxv_t = Elf32_auxv_t; +#endif + for (elf_auxv_t* entry = reinterpret_cast<elf_auxv_t*>(head); + entry->a_type != AT_NULL; ++entry) { + if (entry->a_type == AT_HWCAP) { + return entry->a_un.a_val; } - fclose(fp); } + return 0u; #endif - return result; } #endif // V8_HOST_ARCH_ARM @@ -608,33 +606,28 @@ CPU::CPU() #ifndef USE_SIMULATOR #if V8_OS_LINUX - // Read processor info from /proc/self/auxv. 
char* auxv_cpu_type = nullptr; - FILE* fp = fopen("/proc/self/auxv", "r"); - if (fp != nullptr) { + char** head = environ; + while (*head++ != nullptr) { + } #if V8_TARGET_ARCH_PPC64 - Elf64_auxv_t entry; + using elf_auxv_t = Elf64_auxv_t; #else - Elf32_auxv_t entry; + using elf_auxv_t = Elf32_auxv_t; #endif - for (;;) { - size_t n = fread(&entry, sizeof(entry), 1, fp); - if (n == 0 || entry.a_type == AT_NULL) { + for (elf_auxv_t* entry = reinterpret_cast<elf_auxv_t*>(head); + entry->a_type != AT_NULL; ++entry) { + switch (entry->a_type) { + case AT_PLATFORM: + auxv_cpu_type = reinterpret_cast<char*>(entry->a_un.a_val); + break; + case AT_ICACHEBSIZE: + icache_line_size_ = entry->a_un.a_val; + break; + case AT_DCACHEBSIZE: + dcache_line_size_ = entry->a_un.a_val; break; - } - switch (entry.a_type) { - case AT_PLATFORM: - auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val); - break; - case AT_ICACHEBSIZE: - icache_line_size_ = entry.a_un.a_val; - break; - case AT_DCACHEBSIZE: - dcache_line_size_ = entry.a_un.a_val; - break; - } } - fclose(fp); } part_ = -1; diff --git a/chromium/v8/src/base/file-utils.cc b/chromium/v8/src/base/file-utils.cc index 31b1b411908..6e1c4921440 100644 --- a/chromium/v8/src/base/file-utils.cc +++ b/chromium/v8/src/base/file-utils.cc @@ -12,24 +12,18 @@ namespace v8 { namespace base { -char* RelativePath(char** buffer, const char* exec_path, const char* name) { +std::unique_ptr<char[]> RelativePath(const char* exec_path, const char* name) { DCHECK(exec_path); - int path_separator = static_cast<int>(strlen(exec_path)) - 1; - while (path_separator >= 0 && - !OS::isDirectorySeparator(exec_path[path_separator])) { - path_separator--; + size_t basename_start = strlen(exec_path); + while (basename_start > 0 && + !OS::isDirectorySeparator(exec_path[basename_start - 1])) { + --basename_start; } - if (path_separator >= 0) { - int name_length = static_cast<int>(strlen(name)); - *buffer = - reinterpret_cast<char*>(calloc(path_separator + name_length + 2, 1)); - *buffer[0] = '\0'; - strncat(*buffer, exec_path, path_separator + 1); - strncat(*buffer, name, name_length); - } else { - *buffer = strdup(name); - } - return *buffer; + size_t name_length = strlen(name); + auto buffer = std::make_unique<char[]>(basename_start + name_length + 1); + if (basename_start > 0) memcpy(buffer.get(), exec_path, basename_start); + memcpy(buffer.get() + basename_start, name, name_length); + return buffer; } } // namespace base diff --git a/chromium/v8/src/base/file-utils.h b/chromium/v8/src/base/file-utils.h index afd9a1fc253..84b57fb40b3 100644 --- a/chromium/v8/src/base/file-utils.h +++ b/chromium/v8/src/base/file-utils.h @@ -5,6 +5,8 @@ #ifndef V8_BASE_FILE_UTILS_H_ #define V8_BASE_FILE_UTILS_H_ +#include <memory> + #include "src/base/base-export.h" namespace v8 { @@ -12,8 +14,8 @@ namespace base { // Helper functions to manipulate file paths. 
-V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path, - const char* name); +V8_BASE_EXPORT +std::unique_ptr<char[]> RelativePath(const char* exec_path, const char* name); } // namespace base } // namespace v8 diff --git a/chromium/v8/src/base/free_deleter.h b/chromium/v8/src/base/free_deleter.h index 77e4f0ed14a..a5569266859 100644 --- a/chromium/v8/src/base/free_deleter.h +++ b/chromium/v8/src/base/free_deleter.h @@ -9,6 +9,7 @@ #define V8_BASE_FREE_DELETER_H_ #include <stdlib.h> +#include <memory> namespace v8 { namespace base { diff --git a/chromium/v8/src/base/iterator.h b/chromium/v8/src/base/iterator.h index b081af62aea..baaf324e218 100644 --- a/chromium/v8/src/base/iterator.h +++ b/chromium/v8/src/base/iterator.h @@ -59,6 +59,26 @@ class iterator_range { const_iterator const end_; }; +template <typename ForwardIterator> +auto make_iterator_range(ForwardIterator&& begin, ForwardIterator&& end) { + return iterator_range<ForwardIterator>{std::forward<ForwardIterator>(begin), + std::forward<ForwardIterator>(end)}; +} + +// {Reversed} returns a container adapter usable in a range-based "for" +// statement for iterating a reversible container in reverse order. +// +// Example: +// +// std::vector<int> v = ...; +// for (int i : base::Reversed(v)) { +// // iterates through v from back to front +// } +template <typename T> +auto Reversed(T& t) { // NOLINT(runtime/references): match {rbegin} and {rend} + return make_iterator_range(std::rbegin(t), std::rend(t)); +} + } // namespace base } // namespace v8 diff --git a/chromium/v8/src/base/macros.h b/chromium/v8/src/base/macros.h index ad70e9820dd..72ef64cfbe0 100644 --- a/chromium/v8/src/base/macros.h +++ b/chromium/v8/src/base/macros.h @@ -232,35 +232,16 @@ struct is_trivially_copyable { // the standard does not, so let's skip this check.) // Trivial non-deleted destructor. std::is_trivially_destructible<T>::value; - -#elif defined(__GNUC__) && __GNUC__ < 5 - // WARNING: - // On older libstdc++ versions, there is no way to correctly implement - // is_trivially_copyable. The workaround below is an approximation (neither - // over- nor underapproximation). E.g. it wrongly returns true if the move - // constructor is non-trivial, and it wrongly returns false if the copy - // constructor is deleted, but copy assignment is trivial. - // TODO(rongjie) Remove this workaround once we require gcc >= 5.0 - static constexpr bool value = - __has_trivial_copy(T) && __has_trivial_destructor(T); - #else static constexpr bool value = std::is_trivially_copyable<T>::value; #endif }; -#if defined(__GNUC__) && __GNUC__ < 5 -// On older libstdc++ versions, base::is_trivially_copyable<T>::value is only an -// approximation (see above), so make ASSERT_{NOT_,}TRIVIALLY_COPYABLE a noop. -#define ASSERT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled") -#define ASSERT_NOT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled") -#else #define ASSERT_TRIVIALLY_COPYABLE(T) \ static_assert(::v8::base::is_trivially_copyable<T>::value, \ #T " should be trivially copyable") #define ASSERT_NOT_TRIVIALLY_COPYABLE(T) \ static_assert(!::v8::base::is_trivially_copyable<T>::value, \ #T " should not be trivially copyable") -#endif // The USE(x, ...) template is used to silence C++ compiler warnings // issued for (yet) unused variables (typically parameters). 
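A small usage sketch of the two asserts above now that the gcc < 5 fallback is removed (illustrative only, not from the patch; the Pod and NonTrivial types are made up):

struct Pod {
  int x;
  double y;
};
// Passes: trivial copy/move operations and a trivial destructor.
ASSERT_TRIVIALLY_COPYABLE(Pod);

struct NonTrivial {
  ~NonTrivial();  // user-provided destructor makes the type non-trivial
};
// Passes: a user-provided destructor rules out trivial copyability.
ASSERT_NOT_TRIVIALLY_COPYABLE(NonTrivial);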
@@ -407,6 +388,9 @@ bool is_inbounds(float_t v) { constexpr bool kUpperBoundIsMax = static_cast<biggest_int_t>(kUpperBound) == static_cast<biggest_int_t>(std::numeric_limits<int_t>::max()); + // Using USE(var) is only a workaround for a GCC 8.1 bug. + USE(kLowerBoundIsMin); + USE(kUpperBoundIsMax); return (kLowerBoundIsMin ? (kLowerBound <= v) : (kLowerBound < v)) && (kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound)); } diff --git a/chromium/v8/src/base/optional.h b/chromium/v8/src/base/optional.h index b8df88d8442..6610c7ffc33 100644 --- a/chromium/v8/src/base/optional.h +++ b/chromium/v8/src/base/optional.h @@ -131,21 +131,8 @@ struct OptionalStorageBase<T, true /* trivially destructible */> { // the condition of constexpr-ness is satisfied because the base class also has // compiler generated constexpr {copy,move} constructors). Note that // placement-new is prohibited in constexpr. -#if defined(__GNUC__) && __GNUC__ < 5 -// gcc <5 does not implement std::is_trivially_copy_constructible. -// Conservatively assume false for this configuration. -// TODO(clemensh): Remove this once we drop support for gcc <5. -#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) false -#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) false -#else -#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) \ - std::is_trivially_copy_constructible<T>::value -#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) \ - std::is_trivially_move_constructible<T>::value -#endif -template <typename T, bool = TRIVIALLY_COPY_CONSTRUCTIBLE(T), - bool = TRIVIALLY_MOVE_CONSTRUCTIBLE(T)> -#undef TRIVIALLY_COPY_CONSTRUCTIBLE +template <typename T, bool = std::is_trivially_copy_constructible<T>::value, + bool = std::is_trivially_move_constructible<T>::value> struct OptionalStorage : OptionalStorageBase<T> { // This is no trivially {copy,move} constructible case. Other cases are // defined below as specializations. diff --git a/chromium/v8/src/base/platform/mutex.h b/chromium/v8/src/base/platform/mutex.h index c48cf8d3393..5b3b31ec1e5 100644 --- a/chromium/v8/src/base/platform/mutex.h +++ b/chromium/v8/src/base/platform/mutex.h @@ -290,6 +290,7 @@ class LockGuard final { }; using MutexGuard = LockGuard<Mutex>; +using RecursiveMutexGuard = LockGuard<RecursiveMutex>; enum MutexSharedType : bool { kShared = true, kExclusive = false }; diff --git a/chromium/v8/src/base/platform/platform-openbsd.cc b/chromium/v8/src/base/platform/platform-openbsd.cc index c133ffb68d7..e4a3cb6f35f 100644 --- a/chromium/v8/src/base/platform/platform-openbsd.cc +++ b/chromium/v8/src/base/platform/platform-openbsd.cc @@ -107,7 +107,7 @@ void OS::SignalCodeMovingGC() { // it. This injects a GC marker into the stream of events generated // by the kernel and allows us to synchronize V8 code log and the // kernel log. 
- int size = sysconf(_SC_PAGESIZE); + long size = sysconf(_SC_PAGESIZE); // NOLINT: type more fit than uint64_t FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+"); if (f == nullptr) { OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile()); @@ -116,7 +116,7 @@ void OS::SignalCodeMovingGC() { void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0); DCHECK(addr != MAP_FAILED); - OS::Free(addr, size); + CHECK(OS::Free(addr, size)); fclose(f); } diff --git a/chromium/v8/src/base/platform/platform-posix.cc b/chromium/v8/src/base/platform/platform-posix.cc index c50cdd7a98e..99abcd55686 100644 --- a/chromium/v8/src/base/platform/platform-posix.cc +++ b/chromium/v8/src/base/platform/platform-posix.cc @@ -48,6 +48,7 @@ #if V8_OS_MACOSX #include <dlfcn.h> +#include <mach/mach.h> #endif #if V8_OS_LINUX diff --git a/chromium/v8/src/base/platform/semaphore.cc b/chromium/v8/src/base/platform/semaphore.cc index a7e50f58804..66464d8258d 100644 --- a/chromium/v8/src/base/platform/semaphore.cc +++ b/chromium/v8/src/base/platform/semaphore.cc @@ -5,8 +5,7 @@ #include "src/base/platform/semaphore.h" #if V8_OS_MACOSX -#include <mach/mach_init.h> -#include <mach/task.h> +#include <dispatch/dispatch.h> #endif #include <errno.h> @@ -21,53 +20,23 @@ namespace base { #if V8_OS_MACOSX Semaphore::Semaphore(int count) { - kern_return_t result = semaphore_create( - mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count); - DCHECK_EQ(KERN_SUCCESS, result); - USE(result); + native_handle_ = dispatch_semaphore_create(count); + DCHECK(native_handle_); } +Semaphore::~Semaphore() { dispatch_release(native_handle_); } -Semaphore::~Semaphore() { - kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_); - DCHECK_EQ(KERN_SUCCESS, result); - USE(result); -} - -void Semaphore::Signal() { - kern_return_t result = semaphore_signal(native_handle_); - DCHECK_EQ(KERN_SUCCESS, result); - USE(result); -} - +void Semaphore::Signal() { dispatch_semaphore_signal(native_handle_); } void Semaphore::Wait() { - while (true) { - kern_return_t result = semaphore_wait(native_handle_); - if (result == KERN_SUCCESS) return; // Semaphore was signalled. - DCHECK_EQ(KERN_ABORTED, result); - } + dispatch_semaphore_wait(native_handle_, DISPATCH_TIME_FOREVER); } bool Semaphore::WaitFor(const TimeDelta& rel_time) { - TimeTicks now = TimeTicks::Now(); - TimeTicks end = now + rel_time; - while (true) { - mach_timespec_t ts; - if (now >= end) { - // Return immediately if semaphore was not signalled. - ts.tv_sec = 0; - ts.tv_nsec = 0; - } else { - ts = (end - now).ToMachTimespec(); - } - kern_return_t result = semaphore_timedwait(native_handle_, ts); - if (result == KERN_SUCCESS) return true; // Semaphore was signalled. - if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout. 
- DCHECK_EQ(KERN_ABORTED, result); - now = TimeTicks::Now(); - } + dispatch_time_t timeout = + dispatch_time(DISPATCH_TIME_NOW, rel_time.InNanoseconds()); + return dispatch_semaphore_wait(native_handle_, timeout) == 0; } #elif V8_OS_POSIX diff --git a/chromium/v8/src/base/platform/semaphore.h b/chromium/v8/src/base/platform/semaphore.h index 11ff0b9199f..c4937acadd1 100644 --- a/chromium/v8/src/base/platform/semaphore.h +++ b/chromium/v8/src/base/platform/semaphore.h @@ -12,7 +12,7 @@ #endif #if V8_OS_MACOSX -#include <mach/semaphore.h> // NOLINT +#include <dispatch/dispatch.h> // NOLINT #elif V8_OS_POSIX #include <semaphore.h> // NOLINT #endif @@ -50,7 +50,7 @@ class V8_BASE_EXPORT Semaphore final { bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT; #if V8_OS_MACOSX - using NativeHandle = semaphore_t; + using NativeHandle = dispatch_semaphore_t; #elif V8_OS_POSIX using NativeHandle = sem_t; #elif V8_OS_WIN diff --git a/chromium/v8/src/base/template-utils.h b/chromium/v8/src/base/template-utils.h index 530114a8e2f..146f8d6e6ae 100644 --- a/chromium/v8/src/base/template-utils.h +++ b/chromium/v8/src/base/template-utils.h @@ -6,32 +6,20 @@ #define V8_BASE_TEMPLATE_UTILS_H_ #include <array> -#include <memory> +#include <type_traits> +#include <utility> namespace v8 { namespace base { namespace detail { -// make_array_helper statically iteratively creates the index list 0 .. Size-1. -// A specialization for the base case (first index is 0) finally constructs the -// array. -// TODO(clemensh): Use std::index_sequence once we have C++14 support. -template <class Function, std::size_t... Indexes> -struct make_array_helper; - -template <class Function, std::size_t... Indexes> -struct make_array_helper<Function, 0, Indexes...> { - constexpr static std::array<typename std::result_of<Function(size_t)>::type, - sizeof...(Indexes) + 1> - make_array(Function f) { - return {{f(0), f(Indexes)...}}; - } -}; - -template <class Function, std::size_t FirstIndex, std::size_t... Indexes> -struct make_array_helper<Function, FirstIndex, Indexes...> - : make_array_helper<Function, FirstIndex - 1, FirstIndex, Indexes...> {}; +template <typename Function, std::size_t... Indexes> +constexpr inline auto make_array_helper(Function f, + std::index_sequence<Indexes...>) + -> std::array<decltype(f(0)), sizeof...(Indexes)> { + return {{f(Indexes)...}}; +} } // namespace detail @@ -42,18 +30,8 @@ struct make_array_helper<Function, FirstIndex, Indexes...> // [](std::size_t i) { return static_cast<int>(2 * i); }); // The resulting array will be constexpr if the passed function is constexpr. template <std::size_t Size, class Function> -constexpr std::array<typename std::result_of<Function(size_t)>::type, Size> -make_array(Function f) { - static_assert(Size > 0, "Can only create non-empty arrays"); - return detail::make_array_helper<Function, Size - 1>::make_array(f); -} - -// base::make_unique<T>: Construct an object of type T and wrap it in a -// std::unique_ptr. -// Replacement for C++14's std::make_unique. -template <typename T, typename... Args> -std::unique_ptr<T> make_unique(Args&&... 
args) { - return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); +constexpr auto make_array(Function f) { + return detail::make_array_helper(f, std::make_index_sequence<Size>{}); } // Helper to determine how to pass values: Pass scalars and arrays by value, @@ -80,38 +58,17 @@ struct has_output_operator<T, decltype(void(std::declval<std::ostream&>() << std::declval<T>()))> : std::true_type {}; -namespace detail { - -template <typename Func, typename T, typename... Ts> -struct fold_helper { - static_assert(sizeof...(Ts) == 0, "this is the base case"); - using result_t = typename std::remove_reference<T>::type; - static constexpr T&& fold(Func func, T&& first) { - return std::forward<T>(first); - } -}; +// Fold all arguments from left to right with a given function. +template <typename Func, typename T> +constexpr auto fold(Func func, T&& t) { + return std::forward<T>(t); +} template <typename Func, typename T1, typename T2, typename... Ts> -struct fold_helper<Func, T1, T2, Ts...> { - using folded_t = typename std::result_of<Func(T1, T2)>::type; - using next_fold_helper = fold_helper<Func, folded_t&&, Ts...>; - using result_t = typename next_fold_helper::result_t; - static constexpr result_t fold(Func func, T1&& first, T2&& second, - Ts&&... more) { - return next_fold_helper::fold( - func, func(std::forward<T1>(first), std::forward<T2>(second)), - std::forward<Ts>(more)...); - } -}; - -} // namespace detail - -// Fold all arguments from left to right with a given function. -template <typename Func, typename... Ts> -constexpr auto fold(Func func, Ts&&... more) -> - typename detail::fold_helper<Func, Ts...>::result_t { - return detail::fold_helper<Func, Ts...>::fold(func, - std::forward<Ts>(more)...); +constexpr auto fold(Func func, T1&& first, T2&& second, Ts&&... more) { + auto&& folded = func(std::forward<T1>(first), std::forward<T2>(second)); + return fold(std::move(func), std::forward<decltype(folded)>(folded), + std::forward<Ts>(more)...); } // {is_same<Ts...>::value} is true if all Ts are the same, false otherwise. diff --git a/chromium/v8/src/base/ubsan.cc b/chromium/v8/src/base/ubsan.cc new file mode 100644 index 00000000000..fc77156eb1c --- /dev/null +++ b/chromium/v8/src/base/ubsan.cc @@ -0,0 +1,50 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <stdint.h> +#include <limits> + +#include "src/base/build_config.h" + +#if !defined(UNDEFINED_SANITIZER) || !defined(V8_TARGET_ARCH_32_BIT) +#error "This file is only needed for 32-bit UBSan builds." +#endif + +// Compiling with -fsanitize=undefined on 32-bit platforms requires __mulodi4 +// to be available. Usually it comes from libcompiler_rt, which our build +// doesn't provide, so here is a custom implementation (inspired by digit_mul +// in src/objects/bigint.cc). +extern "C" int64_t __mulodi4(int64_t a, int64_t b, int* overflow) { + // Multiply in 32-bit chunks. + // For inputs [AH AL]*[BH BL], the result is: + // + // [AL*BL] // r_low + // + [AL*BH] // r_mid1 + // + [AH*BL] // r_mid2 + // + [AH*BH] // r_high + // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1] + // + // Where of course we must be careful with carries between the columns. 
+ uint64_t a_low = a & 0xFFFFFFFFu; + uint64_t a_high = static_cast<uint64_t>(a) >> 32; + uint64_t b_low = b & 0xFFFFFFFFu; + uint64_t b_high = static_cast<uint64_t>(b) >> 32; + + uint64_t r_low = a_low * b_low; + uint64_t r_mid1 = a_low * b_high; + uint64_t r_mid2 = a_high * b_low; + uint64_t r_high = a_high * b_high; + + uint64_t result1 = r_low + (r_mid1 << 32); + if (result1 < r_low) r_high++; + uint64_t result2 = result1 + (r_mid2 << 32); + if (result2 < result1) r_high++; + r_high += (r_mid1 >> 32) + (r_mid2 >> 32); + int64_t result = static_cast<int64_t>(result2); + uint64_t result_sign = (result >> 63); + uint64_t expected_result_sign = (a >> 63) ^ (b >> 63); + + *overflow = (r_high > 0 || result_sign != expected_result_sign) ? 1 : 0; + return result; +} diff --git a/chromium/v8/src/builtins/accessors.cc b/chromium/v8/src/builtins/accessors.cc index ea6308622da..fa39142cb4c 100644 --- a/chromium/v8/src/builtins/accessors.cc +++ b/chromium/v8/src/builtins/accessors.cc @@ -16,6 +16,7 @@ #include "src/objects/contexts.h" #include "src/objects/field-index-inl.h" #include "src/objects/js-array-inl.h" +#include "src/objects/js-regexp-inl.h" #include "src/objects/module-inl.h" #include "src/objects/property-details.h" #include "src/objects/prototype.h" @@ -840,5 +841,25 @@ Handle<AccessorInfo> Accessors::MakeErrorStackInfo(Isolate* isolate) { &ErrorStackGetter, &ErrorStackSetter); } +// +// Accessors::RegExpResultIndices +// + +void Accessors::RegExpResultIndicesGetter( + v8::Local<v8::Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); + HandleScope scope(isolate); + Handle<JSRegExpResult> regexp_result( + Handle<JSRegExpResult>::cast(Utils::OpenHandle(*info.Holder()))); + Handle<Object> indices( + JSRegExpResult::GetAndCacheIndices(isolate, regexp_result)); + info.GetReturnValue().Set(Utils::ToLocal(indices)); +} + +Handle<AccessorInfo> Accessors::MakeRegExpResultIndicesInfo(Isolate* isolate) { + return MakeAccessor(isolate, isolate->factory()->indices_string(), + &RegExpResultIndicesGetter, nullptr); +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/builtins/accessors.h b/chromium/v8/src/builtins/accessors.h index 43a65342966..b6a8e65446f 100644 --- a/chromium/v8/src/builtins/accessors.h +++ b/chromium/v8/src/builtins/accessors.h @@ -43,6 +43,8 @@ class JavaScriptFrame; kHasSideEffectToReceiver) \ V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \ kHasSideEffectToReceiver) \ + V(_, regexp_result_indices, RegExpResultIndices, kHasSideEffectToReceiver, \ + kHasSideEffectToReceiver) \ V(_, string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver) #define ACCESSOR_SETTER_LIST(V) \ diff --git a/chromium/v8/src/builtins/arm/builtins-arm.cc b/chromium/v8/src/builtins/arm/builtins-arm.cc index e9b562620fc..164c09db259 100644 --- a/chromium/v8/src/builtins/arm/builtins-arm.cc +++ b/chromium/v8/src/builtins/arm/builtins-arm.cc @@ -885,102 +885,70 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, - Register scratch2) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch) { // ----------- S t a t e ------------- // -- r3 : new target (preserved for callee if needed, and caller) // -- r1 : target function (preserved for callee if needed, and caller) - 
// -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch)); Register closure = r1; - Register optimized_code_entry = scratch1; - - __ ldr( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. - __ cmp(optimized_code_entry, - Operand(Smi::FromEnum(OptimizationMarker::kNone))); - __ b(eq, &fallthrough); - - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ ldr(scratch, + FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ ldr(scratch, + FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit)); + __ b(ne, &found_deoptimized_code); - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ cmp( - optimized_code_entry, - Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ jmp(&fallthrough); - } - } + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); + static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch"); + __ LoadCodeObjectEntry(r2, optimized_code_entry); + __ Jump(r2); - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); - - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - Label found_deoptimized_code; - __ ldr(scratch2, FieldMemOperand(optimized_code_entry, - Code::kCodeDataContainerOffset)); - __ ldr( - scratch2, - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit)); - __ b(ne, &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. 
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); - static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch"); - __ LoadCodeObjectEntry(r2, optimized_code_entry); - __ Jump(r2); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- r3 : new target (preserved for callee if needed, and caller) + // -- r1 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ cmp(optimization_marker, + Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -999,7 +967,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ExternalReference::bytecode_size_table_address()); // Check if the bytecode is a Wide or ExtraWide prefix bytecode. - Label process_bytecode, extra_wide; + Label process_bytecode; STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide)); STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide)); STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide)); @@ -1008,31 +976,34 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, __ cmp(bytecode, Operand(0x3)); __ b(hi, &process_bytecode); __ tst(bytecode, Operand(0x1)); - __ b(ne, &extra_wide); - - // Load the next bytecode and update table to the wide scaled table. + // Load the next bytecode. __ add(bytecode_offset, bytecode_offset, Operand(1)); __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); + + // Update table to the wide scaled table. __ add(bytecode_size_table, bytecode_size_table, Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount)); - __ jmp(&process_bytecode); - - __ bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. 
- __ add(bytecode_offset, bytecode_offset, Operand(1));
- __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
+ // Conditionally update table to the extra wide scaled table. We are taking
+ // advantage of the fact that the extra wide follows the wide one.
__ add(bytecode_size_table, bytecode_size_table,
- Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
+ Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC,
+ ne);
__ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode.
-#define JUMP_IF_EQUAL(NAME) \
- __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
- __ b(if_return, eq);
+
+ // Create cmp, cmpne, ..., cmpne to check for a return bytecode.
+ Condition flag = al;
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME)), \
+ flag); \
+ flag = ne;
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ __ b(if_return, eq);
+
// Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
@@ -1084,9 +1055,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6);
+ Register optimized_code_entry = r4;
+
+ // Read off the optimized code slot in the feedback vector.
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+ // Check if the optimized code slot is not empty.
+ Label optimized_code_slot_not_empty;
+ __ cmp(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ b(ne, &optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1121,28 +1104,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, r0);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size from the BytecodeArray object.
__ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ sub(r9, sp, Operand(r4));
LoadRealStackLimit(masm, r2);
__ cmp(r9, Operand(r2));
- __ b(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
+ __ b(lo, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
- __ LoadRoot(r9, RootIndex::kUndefinedValue);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ b(&loop_check, al);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
- __ push(r9);
+ __ push(kInterpreterAccumulatorRegister);
// Continue loop if not done.
__ bind(&loop_check);
__ sub(r4, r4, Operand(kPointerSize), SetCC);
@@ -1157,8 +1138,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r9, Operand::Zero());
__ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -1199,8 +1179,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is actually a weak reference to the
+ // optimized code.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+ // Load code entry from the weak reference, if it was cleared, resume
+ // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0); // Should not return.
}
@@ -1565,14 +1563,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
@@ -2182,7 +2174,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r1 : the target to call (can be any Object).
// -----------------------------------
- Label non_callable, non_function, non_smi;
+ Label non_callable, non_smi;
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
@@ -2199,12 +2191,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Check if target is a proxy and call CallProxy external builtin
__ cmp(r5, Operand(JS_PROXY_TYPE));
- __ b(ne, &non_function);
- __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver the (original) target.
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
@@ -3167,51 +3157,6 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
- Register dest = r0;
- Register src = r1;
- Register chars = r2;
-
- {
- UseScratchRegisterScope temps(masm);
-
- Register temp1 = r3;
- Register temp2 = temps.Acquire();
- Register temp3 = lr;
- Register temp4 = r4;
- Label loop;
- Label not_two;
-
- __ Push(lr, r4);
- __ bic(temp2, chars, Operand(0x3));
- __ add(temp2, dest, Operand(temp2, LSL, 1));
-
- __ bind(&loop);
- __ ldr(temp1, MemOperand(src, 4, PostIndex));
- __ uxtb16(temp3, temp1);
- __ uxtb16(temp4, temp1, 8);
- __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
- __ str(temp1, MemOperand(dest));
- __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
- __ str(temp1, MemOperand(dest, 4));
- __ add(dest, dest, Operand(8));
- __ cmp(dest, temp2);
- __ b(&loop, ne);
-
- __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
- __ b(&not_two, cc);
- __ ldrh(temp1, MemOperand(src, 2, PostIndex));
- __ uxtb(temp3, temp1, 8);
- __ mov(temp3, Operand(temp3, LSL, 16));
- __ uxtab(temp3, temp3, temp1);
- __ str(temp3, MemOperand(dest, 4, PostIndex));
- __ bind(&not_two);
- __ ldrb(temp1, MemOperand(src), ne);
- __ strh(temp1, MemOperand(dest), ne);
- __ Pop(pc, r4);
- }
-}
-
#undef __
} // namespace internal
diff --git a/chromium/v8/src/builtins/arm64/builtins-arm64.cc b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
index 4e159a69b7e..9edd074e3d0 100644
--- a/chromium/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
@@ -1001,108 +1001,78 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
OptimizationMarker marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
+ __ CompareTaggedAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne,
+ &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
-static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
- Register feedback_vector,
- Register scratch1,
- Register scratch2) {
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry,
+ Register scratch) {
// ----------- S t a t e -------------
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
- // -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2));
-
- Label optimized_code_slot_is_weak_ref, fallthrough;
+ DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
Register closure = x1;
- Register optimized_code_entry = scratch1;
-
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is at a weak reference to a code
- // object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
-
- {
- // Optimized code slot is a Smi optimization marker.
-
- // Fall through if no optimization trigger.
- __ CompareAndBranch(optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
- &fallthrough);
-
- // TODO(v8:8394): The logging of first execution will break if
- // feedback vectors are not allocated.
We need to find a different way of - // logging these events if required. - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ Cmp( - optimized_code_entry, - Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ B(&fallthrough); - } - } - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadTaggedPointerField( + scratch, + FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ Ldr(scratch.W(), + FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit, + &found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure into + // the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); + static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); + __ LoadCodeObjectEntry(x2, optimized_code_entry); + __ Jump(x2); - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - Label found_deoptimized_code; - __ LoadTaggedPointerField( - scratch2, - FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); - __ Ldr( - scratch2.W(), - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ Tbnz(scratch2.W(), Code::kMarkedForDeoptimizationBit, - &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); - static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); - __ LoadCodeObjectEntry(x2, optimized_code_entry); - __ Jump(x2); + // Optimized code slot contains deoptimized code, evict it and re-enter the + // closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. 
- __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- x3 : new target (preserved for callee if needed, and caller) + // -- x1 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ CmpTagged( + optimization_marker, + Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1129,19 +1099,19 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, __ Cmp(bytecode, Operand(0x3)); __ B(hi, &process_bytecode); __ Tst(bytecode, Operand(0x1)); - __ B(ne, &extra_wide); - - // Load the next bytecode and update table to the wide scaled table. + // The code to load the next bytecode is common to both wide and extra wide. + // We can hoist them up here since they do not modify the flags after Tst. __ Add(bytecode_offset, bytecode_offset, Operand(1)); __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); + __ B(ne, &extra_wide); + + // Update table to the wide scaled table. __ Add(bytecode_size_table, bytecode_size_table, Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount)); __ B(&process_bytecode); __ Bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. - __ Add(bytecode_offset, bytecode_offset, Operand(1)); - __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); + // Update table to the extra wide scaled table. __ Add(bytecode_size_table, bytecode_size_table, Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); @@ -1211,7 +1181,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4); + Register optimized_code_entry = x7; + __ LoadAnyTaggedField( + optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + + // Check if the optimized code slot is not empty. 
+ Label optimized_code_slot_not_empty;
+ __ CompareTaggedAndBranch(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)),
+ ne, &optimized_code_slot_not_empty);
+
+ Label not_optimized;
+ __ bind(&not_optimized);
// Increment invocation count for the function.
// MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so safe to reuse
@@ -1248,13 +1231,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kInterpreterBytecodeArrayRegister, x0);
// Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
{
// Load frame size from the BytecodeArray object.
__ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
- Label ok;
__ Sub(x10, sp, Operand(x11));
{
UseScratchRegisterScope temps(masm);
@@ -1262,21 +1245,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LoadRealStackLimit(masm, scratch);
__ Cmp(x10, scratch);
}
- __ B(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ Bind(&ok);
+ __ B(lo, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
- __ LoadRoot(x10, RootIndex::kUndefinedValue);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Lsr(x11, x11, kSystemPointerSizeLog2);
// Round up the number of registers to a multiple of 2, to align the stack
// to 16 bytes.
__ Add(x11, x11, 1);
__ Bic(x11, x11, 1);
- __ PushMultipleTimes(x10, x11);
+ __ PushMultipleTimes(kInterpreterAccumulatorRegister, x11);
__ Bind(&loop_header);
}
@@ -1291,8 +1272,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
__ Bind(&no_incoming_new_target_or_generator_register);
- // Load accumulator with undefined.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ // The accumulator is already loaded with undefined.
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
@@ -1315,9 +1295,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ Ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
@@ -1333,9 +1312,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
+ __ bind(&optimized_code_slot_not_empty);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is actually a weak reference to the
+ // optimized code as opposed to an optimization marker.
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
+ __ bind(&maybe_has_optimized_code);
+ // Load code entry from the weak reference, if it was cleared, resume
+ // execution of unoptimized code.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ Unreachable(); // Should not return.
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable(); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1543,9 +1541,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ Ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -1560,9 +1557,8 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Load the current bytecode.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -1633,7 +1629,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Set flags for determining the value of smi-tagged argc.
// lt => 1, eq => 2, gt => 3.
- __ Cmp(argc, Smi::FromInt(2));
+ __ CmpTagged(argc, Smi::FromInt(2));
__ B(gt, &three_args);
// One or two arguments.
@@ -1769,20 +1765,14 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ PushArgument(x0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the caller.
Label skip;
- __ CompareAndBranch(x0, Smi::zero(), ne, &skip);
+ __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &skip);
__ Ret();
__ Bind(&skip);
@@ -1878,8 +1868,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ Cmp(arg_array, null_value);
- __ Ccmp(arg_array, undefined_value, ZFlag, ne);
+ __ CmpTagged(arg_array, null_value);
+ __ CcmpTagged(arg_array, undefined_value, ZFlag, ne);
__ B(eq, &no_arguments);
// 4a. Apply the receiver to the given argArray.
@@ -2261,7 +2251,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ Bind(&loop); __ Sub(len, len, 1); __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex)); - __ Cmp(scratch, the_hole_value); + __ CmpTagged(scratch, the_hole_value); __ Csel(scratch, scratch, undefined_value, ne); __ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2)); __ Cbnz(len, &loop); @@ -2319,7 +2309,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); __ Ldr(x4, MemOperand(args_fp, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); + __ CmpTagged(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); __ B(eq, &arguments_adaptor); { __ Ldr(scratch, @@ -2626,7 +2616,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- x1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(x1, &non_callable); __ Bind(&non_smi); __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE); @@ -2642,12 +2632,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target is a proxy and call CallProxy external builtin __ Cmp(x5, JS_PROXY_TYPE); - __ B(ne, &non_function); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ Bind(&non_function); // Overwrite the original receiver with the (original) target. __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2)); // Let the "call_as_function_delegate" take care of the rest. @@ -2712,7 +2700,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // Patch new.target to [[BoundTargetFunction]] if new.target equals target. { Label done; - __ Cmp(x1, x3); + __ CmpTagged(x1, x3); __ B(ne, &done); __ LoadTaggedPointerField( x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset)); diff --git a/chromium/v8/src/builtins/base.tq b/chromium/v8/src/builtins/base.tq index aa5d4cc50a7..065cd08e4c3 100644 --- a/chromium/v8/src/builtins/base.tq +++ b/chromium/v8/src/builtins/base.tq @@ -90,11 +90,28 @@ type bool generates 'TNode<BoolT>' constexpr 'bool'; type bint generates 'TNode<BInt>' constexpr 'BInt'; type string constexpr 'const char*'; -type NameDictionary extends FixedArray; +// The HashTable inheritance hierarchy doesn't actually look like this in C++ +// because it uses some class templates that we can't yet (and may never) +// express in Torque, but this is the expected organization of instance types. 
+@abstract @dirtyInstantiatedAbstractClass +extern class HashTable extends FixedArray generates 'TNode<FixedArray>'; +extern class OrderedHashMap extends HashTable; +extern class OrderedHashSet extends HashTable; +extern class OrderedNameDictionary extends HashTable; +extern class NameDictionary extends HashTable; +extern class GlobalDictionary extends HashTable; +extern class SimpleNumberDictionary extends HashTable; +extern class StringTable extends HashTable; +extern class EphemeronHashTable extends HashTable; +type ObjectHashTable extends HashTable + generates 'TNode<ObjectHashTable>'; +extern class NumberDictionary extends HashTable; type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*'; -type Code extends HeapObject generates 'TNode<Code>'; +extern class Code extends HeapObject; type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>'; + +@abstract extern class Context extends HeapObject { length: Smi; scope_info: ScopeInfo; @@ -102,10 +119,27 @@ extern class Context extends HeapObject { extension: Object; native_context: Object; } -type NativeContext extends Context generates 'TNode<NativeContext>'; +extern class AwaitContext extends Context generates 'TNode<Context>'; +extern class BlockContext extends Context generates 'TNode<Context>'; +extern class CatchContext extends Context generates 'TNode<Context>'; +extern class DebugEvaluateContext extends Context + generates 'TNode<Context>'; +extern class EvalContext extends Context generates 'TNode<Context>'; +extern class FunctionContext extends Context generates 'TNode<Context>'; +extern class ModuleContext extends Context generates 'TNode<Context>'; +extern class NativeContext extends Context; +extern class ScriptContext extends Context generates 'TNode<Context>'; +extern class WithContext extends Context generates 'TNode<Context>'; + +@generateCppClass +@abstract +extern class PrimitiveHeapObject extends HeapObject { +} @generateCppClass -extern class Oddball extends HeapObject { +@apiExposedInstanceTypeValue(0x43) +@highestInstanceTypeWithinParentClassRange +extern class Oddball extends PrimitiveHeapObject { to_number_raw: float64; to_string: String; to_number: Number; @@ -113,13 +147,13 @@ extern class Oddball extends HeapObject { kind: Smi; } -extern class HeapNumber extends HeapObject { value: float64; } +extern class HeapNumber extends PrimitiveHeapObject { value: float64; } type Number = Smi | HeapNumber; type Numeric = Number | BigInt; @abstract @generateCppClass -extern class Name extends HeapObject { +extern class Name extends PrimitiveHeapObject { hash_field: uint32; } // This is the same as Name, but with the information that there are no other @@ -137,6 +171,7 @@ type PrivateSymbol extends Symbol; @abstract @generateCppClass +@reserveBitsInInstanceType(6) extern class String extends Name { length: int32; } @@ -222,20 +257,35 @@ extern class FixedArrayBase extends HeapObject { length: Smi; } -extern class FixedArray extends FixedArrayBase { objects[length]: Object; } +@abstract +@dirtyInstantiatedAbstractClass +extern class FixedArray extends FixedArrayBase { + objects[length]: Object; +} extern class FixedDoubleArray extends FixedArrayBase { floats[length]: float64; } -extern class WeakFixedArray extends HeapObject { length: Smi; } +@abstract +@dirtyInstantiatedAbstractClass +extern class WeakFixedArray extends HeapObject { + length: Smi; +} extern class ByteArray extends FixedArrayBase {} +@hasSameInstanceTypeAsParent +extern class ArrayList extends FixedArray { +} + +extern class ObjectBoilerplateDescription 
extends FixedArray; +extern class ClosureFeedbackCellArray extends FixedArray; +extern class ScriptContextTable extends FixedArray; + type LayoutDescriptor extends ByteArray generates 'TNode<LayoutDescriptor>'; -type TransitionArray extends WeakFixedArray - generates 'TNode<TransitionArray>'; +extern class TransitionArray extends WeakFixedArray; type InstanceType extends uint16 constexpr 'v8::internal::InstanceType'; @@ -282,6 +332,7 @@ extern class SourcePositionTableWithFrameCache extends Struct { // We make this class abstract because it is missing the variable-sized part, // which is still impossible to express in Torque. @abstract +@dirtyInstantiatedAbstractClass extern class DescriptorArray extends HeapObject { number_of_all_descriptors: uint16; number_of_descriptors: uint16; @@ -327,7 +378,9 @@ intrinsic } } +// JSReceiver corresponds to objects in the JS sense. @abstract +@highestInstanceTypeWithinParentClassRange extern class JSReceiver extends HeapObject { properties_or_hash: FixedArrayBase | PropertyArray | Smi; } @@ -337,6 +390,8 @@ type Constructor extends JSReceiver; @abstract @dirtyInstantiatedAbstractClass @generateCppClass +@apiExposedInstanceTypeValue(0x421) +@highestInstanceTypeWithinParentClassRange extern class JSObject extends JSReceiver { // [elements]: The elements (properties with names that are integers). // @@ -368,6 +423,18 @@ macro NewJSObject(implicit context: Context)(): JSObject { }; } +@abstract +@generateCppClass +@lowestInstanceTypeWithinParentClassRange +extern class JSCustomElementsObject extends JSObject { +} + +@abstract +@generateCppClass +@lowestInstanceTypeWithinParentClassRange +extern class JSSpecialObject extends JSCustomElementsObject { +} + extern macro HasPrototypeSlot(JSFunction): bool; macro GetDerivedMap(implicit context: Context)( @@ -401,7 +468,8 @@ macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map): map, properties, kEmptyFixedArray, kNone, kWithSlackTracking); } -extern class JSFunction extends JSObject { +@highestInstanceTypeWithinParentClassRange +extern class JSFunction extends JSFunctionOrBoundFunction { shared_function_info: SharedFunctionInfo; context: Context; feedback_cell: FeedbackCell; @@ -419,6 +487,7 @@ extern class JSProxy extends JSReceiver { // Just a starting shape for JSObject; properties can move after initialization. @noVerifier +@hasSameInstanceTypeAsParent extern class JSProxyRevocableResult extends JSObject { proxy: JSAny; revoke: JSAny; @@ -436,14 +505,14 @@ macro NewJSProxyRevocableResult(implicit context: Context)( } @generateCppClass -extern class JSGlobalProxy extends JSObject { +extern class JSGlobalProxy extends JSSpecialObject { // [native_context]: the owner native context of this global proxy object. // It is null value if this object is not used by any context. native_context: Object; } @generateCppClass -extern class JSPrimitiveWrapper extends JSObject { +extern class JSPrimitiveWrapper extends JSCustomElementsObject { value: JSAny; } @@ -531,8 +600,6 @@ extern class CallHandlerInfo extends Struct { data: Object; } -type ObjectHashTable extends FixedArray; - @abstract extern class Module extends HeapObject { exports: ObjectHashTable; @@ -569,9 +636,12 @@ extern class SourceTextModule extends Module { // Lazily initialized on first access. It's the hole before first access and // a JSObject afterwards. 
import_meta: TheHole | JSObject; - + async_parent_modules: ArrayList; + top_level_capability: JSPromise | Undefined; dfs_index: Smi; dfs_ancestor_index: Smi; + pending_async_dependencies: Smi; + flags: Smi; } @generateCppClass @@ -583,7 +653,8 @@ extern class SyntheticModule extends Module { @abstract @generateCppClass -extern class JSModuleNamespace extends JSObject { +@dirtyInstantiatedAbstractClass +extern class JSModuleNamespace extends JSSpecialObject { module: Module; } @@ -606,6 +677,7 @@ extern class JSWeakMap extends JSWeakCollection { } @generateCppClass +@abstract extern class JSCollectionIterator extends JSObject { // The backing hash table mapping keys to values. table: Object; @@ -613,6 +685,20 @@ extern class JSCollectionIterator extends JSObject { index: Object; } +@abstract extern class JSMapIterator extends JSCollectionIterator; +extern class JSMapKeyIterator extends JSMapIterator + generates 'TNode<JSMapIterator>'; +extern class JSMapKeyValueIterator extends JSMapIterator + generates 'TNode<JSMapIterator>'; +extern class JSMapValueIterator extends JSMapIterator + generates 'TNode<JSMapIterator>'; + +@abstract extern class JSSetIterator extends JSCollectionIterator; +extern class JSSetKeyValueIterator extends JSSetIterator + generates 'TNode<JSSetIterator>'; +extern class JSSetValueIterator extends JSSetIterator + generates 'TNode<JSSetIterator>'; + extern class JSMessageObject extends JSObject { // Tagged fields. message_type: Smi; @@ -656,7 +742,7 @@ extern class Script extends Struct { line_ends: Object; id: Smi; eval_from_shared_or_wrapped_arguments: Object; - eval_from_position: Smi; + eval_from_position: Smi | Foreign; // Smi or Managed<wasm::NativeModule> shared_function_infos: Object; flags: Smi; source_url: Object; @@ -669,12 +755,13 @@ extern class EmbedderDataArray extends HeapObject { length: Smi; } -type ScopeInfo extends HeapObject generates 'TNode<ScopeInfo>'; +extern class ScopeInfo extends FixedArray; +@generateCppClass extern class PreparseData extends HeapObject { // TODO(v8:8983): Add declaration for variable-sized region. data_length: int32; - inner_length: int32; + children_length: int32; } extern class InterpreterData extends Struct { @@ -697,13 +784,36 @@ extern class SharedFunctionInfo extends HeapObject { @if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32; } +@abstract +@generateCppClass +extern class UncompiledData extends HeapObject { + inferred_name: String; + start_position: int32; + end_position: int32; +} + +@generateCppClass +extern class UncompiledDataWithoutPreparseData extends UncompiledData { +} + +@generateCppClass +extern class UncompiledDataWithPreparseData extends UncompiledData { + preparse_data: PreparseData; +} + +@abstract +@generateCppClass +@highestInstanceTypeWithinParentClassRange +extern class JSFunctionOrBoundFunction extends JSObject { +} + @generateCppClass -extern class JSBoundFunction extends JSObject { +extern class JSBoundFunction extends JSFunctionOrBoundFunction { // The wrapped function object. bound_target_function: Callable; // The value that is always passed as the this value when calling the wrapped // function. - bound_this: JSAny; + bound_this: JSAny | SourceTextModule; // A list of values whose elements are used as the first arguments to any call // to the wrapped function. 
bound_arguments: FixedArray; @@ -728,8 +838,6 @@ extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength( FixedArrayBase): intptr; type SloppyArgumentsElements extends FixedArray; -type NumberDictionary extends HeapObject - generates 'TNode<NumberDictionary>'; extern class FreeSpace extends HeapObject { size: Smi; @@ -763,6 +871,8 @@ const PROXY_REVOCABLE_RESULT_MAP_INDEX: constexpr NativeContextSlot generates 'Context::PROXY_REVOCABLE_RESULT_MAP_INDEX'; const REFLECT_APPLY_INDEX: constexpr NativeContextSlot generates 'Context::REFLECT_APPLY_INDEX'; +const REGEXP_FUNCTION_INDEX: constexpr NativeContextSlot + generates 'Context::REGEXP_FUNCTION_INDEX'; const REGEXP_LAST_MATCH_INFO_INDEX: constexpr NativeContextSlot generates 'Context::REGEXP_LAST_MATCH_INFO_INDEX'; const INITIAL_STRING_ITERATOR_MAP_INDEX: constexpr NativeContextSlot @@ -834,7 +944,7 @@ extern class JSDate extends JSObject { cache_stamp: Undefined | Smi | NaN; } -extern class JSGlobalObject extends JSObject { +extern class JSGlobalObject extends JSSpecialObject { native_context: NativeContext; global_proxy: JSGlobalProxy; } @@ -847,9 +957,12 @@ extern class JSAsyncFromSyncIterator extends JSObject { next: Object; } +@generateCppClass extern class JSStringIterator extends JSObject { + // The [[IteratedString]] inobject property. string: String; - next_index: Smi; + // The [[StringIteratorNextIndex]] inobject property. + index: Smi; } @abstract @@ -885,7 +998,7 @@ extern class FunctionTemplateRareData extends Struct { @generateCppClass extern class FunctionTemplateInfo extends TemplateInfo { // Handler invoked when calling an instance of this FunctionTemplateInfo. - // Either CallInfoHandler or Undefined. + // Either CallHandlerInfo or Undefined. call_code: Object; class_name: Object; // If the signature is a FunctionTemplateInfo it is used to check whether the @@ -946,7 +1059,10 @@ const UTF16: const UTF32: constexpr UnicodeEncoding generates 'UnicodeEncoding::UTF32'; -extern class Foreign extends HeapObject { foreign_address: RawPtr; } +@apiExposedInstanceTypeValue(0x46) +extern class Foreign extends HeapObject { + foreign_address: RawPtr; +} @generateCppClass extern class InterceptorInfo extends Struct { @@ -985,6 +1101,7 @@ extern class Cell extends HeapObject { value: Object; } +@abstract extern class DataHandler extends Struct { smi_handler: Smi | Code; validity_cell: Smi | Cell; @@ -996,6 +1113,9 @@ extern class DataHandler extends Struct { @noVerifier weak data_3: Object; } +extern class LoadHandler extends DataHandler; +extern class StoreHandler extends DataHandler; + @abstract @dirtyInstantiatedAbstractClass @generateCppClass @@ -1087,7 +1207,7 @@ extern class ClassPositions extends Struct { end: Smi; } -type WasmInstanceObject extends JSObject; +extern class WasmInstanceObject extends JSObject; extern class WasmExportedFunctionData extends Struct { wrapper_code: Code; @@ -1129,6 +1249,7 @@ extern class WasmIndirectFunctionTable extends Struct { extern class WasmDebugInfo extends Struct { instance: WasmInstanceObject; interpreter_handle: Foreign | Undefined; + interpreter_reference_stack: Cell; locals_names: FixedArray | Undefined; c_wasm_entries: FixedArray | Undefined; c_wasm_entry_map: Foreign | Undefined; // Managed<wasm::SignatureMap> @@ -1305,9 +1426,6 @@ const kStrictReadOnlyProperty: constexpr MessageTemplate const kString: constexpr PrimitiveType generates 'PrimitiveType::kString'; -const kExternalPointerForOnHeapArray: constexpr RawPtr - generates 
'JSTypedArray::ExternalPointerForOnHeapArray()'; - const kNameDictionaryInitialCapacity: constexpr int32 generates 'NameDictionary::kInitialCapacity'; @@ -1332,6 +1450,7 @@ extern macro EmptyStringConstant(): EmptyString; extern macro LengthStringConstant(): String; extern macro NanConstant(): NaN; extern macro IteratorSymbolConstant(): Symbol; +extern macro MatchSymbolConstant(): Symbol; const TheHole: TheHole = TheHoleConstant(); const Null: Null = NullConstant(); @@ -1443,15 +1562,30 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void; @hasSameInstanceTypeAsParent extern class JSRegExpResult extends JSArray { + // In-object properties: + // The below fields are externally exposed. index: JSAny; input: JSAny; groups: JSAny; + + // The below fields are for internal use only. + cached_indices_or_match_info: JSRegExpResultIndices | RegExpMatchInfo; + names: FixedArray | Undefined; } +@hasSameInstanceTypeAsParent +extern class JSRegExpResultIndices extends JSArray { + // In-object properties: + // The groups field is externally exposed. + groups: JSAny; +} + +transient type FastJSRegExpResult extends JSRegExpResult; + @generateCppClass extern class JSRegExpStringIterator extends JSObject { // The [[IteratingRegExp]] internal property. - iterating_reg_exp: JSAny; + iterating_reg_exp: JSReceiver; // The [[IteratedString]] internal property. iterated_string: String; flags: Smi; @@ -1493,21 +1627,33 @@ extern class AccessorInfo extends Struct { data: Object; } +@generateCppClass extern class AccessorPair extends Struct { getter: Object; setter: Object; } -extern class BreakPoint extends Tuple2 {} -extern class BreakPointInfo extends Tuple2 {} +@hasSameInstanceTypeAsParent +extern class BreakPoint extends Tuple2 { +} +@hasSameInstanceTypeAsParent +extern class BreakPointInfo extends Tuple2 { +} type CoverageInfo extends FixedArray; +@generateCppClass extern class DebugInfo extends Struct { - shared_function_info: SharedFunctionInfo; + shared: SharedFunctionInfo; debugger_hints: Smi; + // Script field from shared function info. script: Undefined | Script; + // The original uninstrumented bytecode array for functions with break + // points - the instrumented bytecode is held in the shared function info. original_bytecode_array: Undefined | BytecodeArray; + // The debug instrumented bytecode array for functions with break points + // - also pointed to by the shared function info. debug_bytecode_array: Undefined | BytecodeArray; + // Fixed array holding status information for each active break point. 
break_points: FixedArray; flags: Smi; coverage_info: CoverageInfo | Undefined; @@ -1527,12 +1673,15 @@ extern class FeedbackVector extends HeapObject { padding: uint32; } +@generateCppClass extern class FeedbackCell extends Struct { value: Undefined | FeedbackVector | FixedArray; interrupt_budget: int32; } -type AllocationSite extends Struct; +extern class FeedbackMetadata extends HeapObject; + +extern class AllocationSite extends Struct; extern class AllocationMemento extends Struct { allocation_site: AllocationSite; } @@ -1541,9 +1690,7 @@ extern class WasmModuleObject extends JSObject { native_module: Foreign; export_wrappers: FixedArray; script: Script; - weak_instance_list: WeakArrayList; asm_js_offset_table: ByteArray | Undefined; - break_point_infos: FixedArray | Undefined; } extern class WasmTableObject extends JSObject { @@ -1590,22 +1737,35 @@ extern class JSFinalizationGroup extends JSObject { flags: Smi; } +@generateCppClass extern class JSFinalizationGroupCleanupIterator extends JSObject { finalization_group: JSFinalizationGroup; } +@generateCppClass extern class WeakCell extends HeapObject { finalization_group: Undefined | JSFinalizationGroup; target: Undefined | JSReceiver; holdings: Object; + + // For storing doubly linked lists of WeakCells in JSFinalizationGroup's + // "active_cells" and "cleared_cells" lists. prev: Undefined | WeakCell; next: Undefined | WeakCell; + + // For storing doubly linked lists of WeakCells per key in + // JSFinalizationGroup's key-based hashmap. WeakCell also needs to know its + // key, so that we can remove the key from the key_map when we remove the last + // WeakCell associated with it. key: Object; key_list_prev: Undefined | WeakCell; key_list_next: Undefined | WeakCell; } -extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; } +@generateCppClass +extern class JSWeakRef extends JSObject { + target: Undefined | JSReceiver; +} extern class BytecodeArray extends FixedArrayBase { // TODO(v8:8983): bytecode array object sizes vary based on their contents. @@ -1620,6 +1780,29 @@ extern class BytecodeArray extends FixedArrayBase { bytecode_age: int8; } +extern class Filler extends HeapObject generates 'TNode<HeapObject>'; +extern class CodeDataContainer extends HeapObject; +@abstract +extern class SmallOrderedHashTable extends HeapObject + generates 'TNode<HeapObject>'; +extern class SmallOrderedHashMap extends SmallOrderedHashTable; +extern class SmallOrderedHashSet extends SmallOrderedHashTable; +extern class SmallOrderedNameDictionary extends SmallOrderedHashTable; + +// Various logical subclasses of JSObject, which have their own instance types +// but not their own class definitions: + +// Like JSObject, but created from API function. +@apiExposedInstanceTypeValue(0x420) +extern class JSApiObject extends JSObject generates 'TNode<JSObject>'; +// Like JSApiObject, but requires access checks and/or has interceptors. 
+@apiExposedInstanceTypeValue(0x410) +extern class JSSpecialApiObject extends JSSpecialObject + generates 'TNode<JSSpecialObject>'; +extern class JSContextExtensionObject extends JSObject + generates 'TNode<JSObject>'; +extern class JSError extends JSObject generates 'TNode<JSObject>'; + extern macro Is64(): constexpr bool; extern macro SelectBooleanConstant(bool): Boolean; @@ -1657,6 +1840,8 @@ extern transitioning builtin HasProperty(implicit context: Context)( extern transitioning macro HasProperty_Inline(implicit context: Context)( JSReceiver, JSAny): Boolean; extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny; +extern macro CollectCallFeedback( + JSAny, Context, Undefined | FeedbackVector, uintptr); extern macro ThrowRangeError(implicit context: Context)( constexpr MessageTemplate): never; @@ -1674,6 +1859,10 @@ extern macro ThrowTypeError(implicit context: Context)( constexpr MessageTemplate, Object, Object, Object): never; extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)( Smi, Object, Object): void; +extern transitioning runtime ThrowCalledNonCallable(implicit context: Context)( + JSAny): never; +extern transitioning runtime ThrowSymbolIteratorInvalid( + implicit context: Context)(): never; extern transitioning macro ThrowIfNotJSReceiver(implicit context: Context)( JSAny, constexpr MessageTemplate, constexpr string): void; @@ -2232,6 +2421,14 @@ Cast<JSStringIterator>(o: HeapObject): JSStringIterator return HeapObjectToJSStringIterator(o) otherwise CastError; } +Cast<JSRegExpStringIterator>(o: HeapObject): JSRegExpStringIterator + labels CastError { + if (IsJSRegExpStringIterator(o)) { + return %RawDownCast<JSRegExpStringIterator>(o); + } + goto CastError; +} + Cast<JSTypedArray>(o: HeapObject): JSTypedArray labels CastError { if (IsJSTypedArray(o)) return %RawDownCast<JSTypedArray>(o); @@ -2354,12 +2551,25 @@ Cast<JSRegExp>(o: HeapObject): JSRegExp goto CastError; } +Cast<FastJSRegExpResult>(implicit context: Context)(o: HeapObject): + FastJSRegExpResult + labels CastError { + if (regexp::IsFastRegExpResult(o)) return %RawDownCast<FastJSRegExpResult>(o); + goto CastError; +} + Cast<Map>(implicit context: Context)(o: HeapObject): Map labels CastError { if (IsMap(o)) return %RawDownCast<Map>(o); goto CastError; } +Cast<FeedbackVector>(implicit context: Context)(o: HeapObject): FeedbackVector + labels CastError { + if (IsFeedbackVector(o)) return %RawDownCast<FeedbackVector>(o); + goto CastError; +} + Cast<JSPrimitiveWrapper>(o: HeapObject): JSPrimitiveWrapper labels CastError { if (IsJSPrimitiveWrapper(o)) return %RawDownCast<JSPrimitiveWrapper>(o); @@ -2513,6 +2723,7 @@ extern macro Signed(RawPtr): intptr; extern macro TruncateIntPtrToInt32(intptr): int32; extern macro SmiTag(intptr): Smi; extern macro SmiFromInt32(int32): Smi; +extern macro SmiFromUint32(uint32): Smi; extern macro SmiUntag(Smi): intptr; extern macro SmiToInt32(Smi): int32; extern macro RoundIntPtrToFloat64(intptr): float64; @@ -2556,6 +2767,7 @@ extern macro BitcastWordToTaggedSigned(uintptr): Smi; extern macro BitcastWordToTagged(intptr): Object; extern macro BitcastWordToTagged(uintptr): Object; extern macro BitcastTaggedToWord(Tagged): intptr; +extern macro BitcastTaggedToWordForTagAndSmiBits(Tagged): intptr; intrinsic %FromConstexpr<To: type, From: type>(b: From): To; macro FromConstexpr<To: type, From: type>(o: From): To; @@ -2674,7 +2886,7 @@ Convert<Number, uint32>(ui: uint32): Number { return ChangeUint32ToTagged(ui); } Convert<Smi, uint32>(ui: uint32): 
Smi { - return SmiFromInt32(Signed(ui)); + return SmiFromUint32(ui); } Convert<uintptr, uint32>(ui: uint32): uintptr { return ChangeUint32ToWord(ui); @@ -2811,8 +3023,7 @@ extern macro IsMockArrayBufferAllocatorFlag(): bool; extern macro IsPrototypeTypedArrayPrototype(implicit context: Context)(Map): bool; -extern operator '.data_ptr' macro LoadJSTypedArrayBackingStore(JSTypedArray): - RawPtr; +extern operator '.data_ptr' macro LoadJSTypedArrayDataPtr(JSTypedArray): RawPtr; extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind; extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray): @@ -2879,8 +3090,6 @@ extern macro LoadConstructorOrBackPointer(Map): Object; extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): JSAny labels NotData, IfHole; -extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, JSAny) - labels NotData, IfHole, ReadOnly; extern macro IsFastElementsKind(ElementsKind): bool; extern macro IsDoubleElementsKind(ElementsKind): bool; @@ -3255,9 +3464,11 @@ extern macro PerformStackCheck(implicit context: Context)(): void; extern macro IsCallable(HeapObject): bool; extern macro IsConstructor(HeapObject): bool; +extern macro IsFeedbackVector(HeapObject): bool; extern macro IsJSArray(HeapObject): bool; extern macro IsJSProxy(HeapObject): bool; extern macro IsJSRegExp(HeapObject): bool; +extern macro IsJSRegExpStringIterator(HeapObject): bool; extern macro IsMap(HeapObject): bool; extern macro IsJSFunction(HeapObject): bool; extern macro IsJSObject(HeapObject): bool; diff --git a/chromium/v8/src/builtins/bigint.tq b/chromium/v8/src/builtins/bigint.tq index a1b1cb67809..f0409ad23df 100644 --- a/chromium/v8/src/builtins/bigint.tq +++ b/chromium/v8/src/builtins/bigint.tq @@ -7,7 +7,8 @@ // TODO(nicohartmann): Discuss whether types used by multiple builtins should be // in global namespace @noVerifier -extern class BigIntBase extends HeapObject generates 'TNode<BigInt>' { +extern class BigIntBase extends PrimitiveHeapObject + generates 'TNode<BigInt>' { } type BigInt extends BigIntBase; @@ -44,9 +45,6 @@ namespace bigint { MutableBigInt, intptr, uintptr): void; extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr; - @export // Silence unused warning. - // TODO(szuend): Remove @export once macros that are only used in - // asserts are no longer detected as unused. macro IsCanonicalized(bigint: BigIntBase): bool { const length = ReadBigIntLength(bigint); diff --git a/chromium/v8/src/builtins/builtins-arguments-gen.cc b/chromium/v8/src/builtins/builtins-arguments-gen.cc index c4399175e98..fb6169adf86 100644 --- a/chromium/v8/src/builtins/builtins-arguments-gen.cc +++ b/chromium/v8/src/builtins/builtins-arguments-gen.cc @@ -17,38 +17,34 @@ namespace v8 { namespace internal { -using Node = compiler::Node; - -std::tuple<Node*, Node*, Node*> -ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, - Node* arguments_count, - Node* parameter_map_count, - ParameterMode mode, - int base_size) { +ArgumentsBuiltinsAssembler::ArgumentsAllocationResult +ArgumentsBuiltinsAssembler::AllocateArgumentsObject( + TNode<Map> map, TNode<BInt> arguments_count, + TNode<BInt> parameter_map_count, int base_size) { // Allocate the parameter object (either a Rest parameter object, a strict // argument object or a sloppy arguments object) and the elements/mapped // arguments together. 
int elements_offset = base_size; - Node* element_count = arguments_count; + TNode<BInt> element_count = arguments_count; if (parameter_map_count != nullptr) { base_size += FixedArray::kHeaderSize; - element_count = IntPtrOrSmiAdd(element_count, parameter_map_count, mode); + element_count = IntPtrOrSmiAdd(element_count, parameter_map_count); } - bool empty = IsIntPtrOrSmiConstantZero(arguments_count, mode); + bool empty = IsIntPtrOrSmiConstantZero(arguments_count); DCHECK_IMPLIES(empty, parameter_map_count == nullptr); TNode<IntPtrT> size = empty ? IntPtrConstant(base_size) - : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode, + : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, base_size + FixedArray::kHeaderSize); TNode<HeapObject> result = Allocate(size); Comment("Initialize arguments object"); StoreMapNoWriteBarrier(result, map); TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant(); StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array); - TNode<Smi> smi_arguments_count = ParameterToTagged(arguments_count, mode); + TNode<Smi> smi_arguments_count = BIntToSmi(arguments_count); StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, smi_arguments_count); - Node* arguments = nullptr; + TNode<HeapObject> arguments; if (!empty) { arguments = InnerAllocate(result, elements_offset); StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset, @@ -56,18 +52,17 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, TNode<Map> fixed_array_map = FixedArrayMapConstant(); StoreMapNoWriteBarrier(arguments, fixed_array_map); } - Node* parameter_map = nullptr; - if (parameter_map_count != nullptr) { + TNode<HeapObject> parameter_map; + if (!parameter_map_count.is_null()) { TNode<IntPtrT> parameter_map_offset = ElementOffsetFromIndex( - arguments_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize); - parameter_map = InnerAllocate(CAST(arguments), parameter_map_offset); + arguments_count, PACKED_ELEMENTS, FixedArray::kHeaderSize); + parameter_map = InnerAllocate(arguments, parameter_map_offset); StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, parameter_map); TNode<Map> sloppy_elements_map = SloppyArgumentsElementsMapConstant(); StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map); - parameter_map_count = ParameterToTagged(parameter_map_count, mode); StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset, - parameter_map_count); + BIntToSmi(parameter_map_count)); } else { if (empty) { StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, @@ -77,80 +72,73 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, arguments); } } - return std::tuple<Node*, Node*, Node*>(result, arguments, parameter_map); + return {CAST(result), UncheckedCast<FixedArray>(arguments), + UncheckedCast<FixedArray>(parameter_map)}; } -Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs( - Node* map, Node* frame_ptr, Node* arg_count, Node* first_arg, - Node* rest_count, ParameterMode param_mode, int base_size) { +TNode<JSObject> ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs( + TNode<Map> map, TNode<RawPtrT> frame_ptr, TNode<BInt> arg_count, + TNode<BInt> first_arg, TNode<BInt> rest_count, int base_size) { // Allocate the parameter object (either a Rest parameter object, a strict // argument object or a sloppy arguments object) and the elements together and // fill in the contents with the arguments above |formal_parameter_count|. 
- Node* result; - Node* elements; - Node* unused; - std::tie(result, elements, unused) = - AllocateArgumentsObject(map, rest_count, nullptr, param_mode, base_size); - DCHECK_NULL(unused); - CodeStubArguments arguments(this, arg_count, frame_ptr, param_mode); - VARIABLE(offset, MachineType::PointerRepresentation()); - offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag)); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(map, rest_count, {}, base_size); + DCHECK(alloc_result.parameter_map.is_null()); + CodeStubArguments arguments(this, arg_count, frame_ptr); + TVARIABLE(IntPtrT, offset, + IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag)); VariableList list({&offset}, zone()); arguments.ForEach( list, - [this, elements, &offset](Node* arg) { - StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, - offset.value(), arg); + [&](TNode<Object> arg) { + StoreNoWriteBarrier(MachineRepresentation::kTagged, + alloc_result.elements, offset.value(), arg); Increment(&offset, kTaggedSize); }, - first_arg, nullptr, param_mode); - return result; + first_arg); + return alloc_result.arguments_object; } -Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context, - Node* function) { +TNode<JSObject> ArgumentsBuiltinsAssembler::EmitFastNewRestParameter( + TNode<Context> context, TNode<JSFunction> function) { ParameterMode mode = OptimalParameterMode(); - Node* zero = IntPtrOrSmiConstant(0, mode); + TNode<BInt> zero = BIntConstant(0); - TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount( - CAST(context), UncheckedCast<JSFunction>(function)); + TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(JSObject, result); Label no_rest_parameters(this), runtime(this, Label::kDeferred), done(this, &result); - Node* rest_count = - IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count, mode); + TNode<BInt> rest_count = + IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count); TNode<NativeContext> const native_context = LoadNativeContext(context); TNode<Map> const array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode), - &no_rest_parameters); + GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero), &no_rest_parameters); GotoIfFixedArraySizeDoesntFitInNewSpace( rest_count, &runtime, JSArray::kSize + FixedArray::kHeaderSize, mode); // Allocate the Rest JSArray and the elements together and fill in the // contents with the arguments above |formal_parameter_count|. 
- result.Bind(ConstructParametersObjectFromArgs( + result = ConstructParametersObjectFromArgs( array_map, info.frame, info.argument_count, info.formal_parameter_count, - rest_count, mode, JSArray::kSize)); + rest_count, JSArray::kSize); Goto(&done); BIND(&no_rest_parameters); { - Node* arguments; - Node* elements; - Node* unused; - std::tie(arguments, elements, unused) = - AllocateArgumentsObject(array_map, zero, nullptr, mode, JSArray::kSize); - result.Bind(arguments); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(array_map, zero, {}, JSArray::kSize); + result = alloc_result.arguments_object; Goto(&done); } BIND(&runtime); { - result.Bind(CallRuntime(Runtime::kNewRestParameter, context, function)); + result = CAST(CallRuntime(Runtime::kNewRestParameter, context, function)); Goto(&done); } @@ -158,45 +146,41 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context, return result.value(); } -Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context, - Node* function) { - VARIABLE(result, MachineRepresentation::kTagged); +TNode<JSObject> ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments( + TNode<Context> context, TNode<JSFunction> function) { + TVARIABLE(JSObject, result); Label done(this, &result), empty(this), runtime(this, Label::kDeferred); ParameterMode mode = OptimalParameterMode(); TNode<BInt> zero = BIntConstant(0); - TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount( - CAST(context), UncheckedCast<JSFunction>(function)); + TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function); GotoIfFixedArraySizeDoesntFitInNewSpace( info.argument_count, &runtime, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode); TNode<NativeContext> const native_context = LoadNativeContext(context); - TNode<Object> const map = - LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX); + TNode<Map> map = CAST( + LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX)); GotoIf(BIntEqual(info.argument_count, zero), &empty); - result.Bind(ConstructParametersObjectFromArgs( - map, info.frame, info.argument_count, zero, info.argument_count, mode, - JSStrictArgumentsObject::kSize)); + result = ConstructParametersObjectFromArgs( + map, info.frame, info.argument_count, zero, info.argument_count, + JSStrictArgumentsObject::kSize); Goto(&done); BIND(&empty); { - Node* arguments; - Node* elements; - Node* unused; - std::tie(arguments, elements, unused) = AllocateArgumentsObject( - map, zero, nullptr, mode, JSStrictArgumentsObject::kSize); - result.Bind(arguments); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(map, zero, {}, JSStrictArgumentsObject::kSize); + result = alloc_result.arguments_object; Goto(&done); } BIND(&runtime); { - result.Bind(CallRuntime(Runtime::kNewStrictArguments, context, function)); + result = CAST(CallRuntime(Runtime::kNewStrictArguments, context, function)); Goto(&done); } @@ -204,9 +188,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context, return result.value(); } -Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, - Node* function) { - VARIABLE(result, MachineRepresentation::kTagged); +TNode<JSObject> ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments( + TNode<Context> context, TNode<JSFunction> function) { + TVARIABLE(JSObject, result); ParameterMode mode = OptimalParameterMode(); TNode<BInt> zero = BIntConstant(0); @@ -214,8 +198,7 @@ Node* 
ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
 Label done(this, &result), empty(this), no_parameters(this),
 runtime(this, Label::kDeferred);
- TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
- CAST(context), UncheckedCast<JSFunction>(function));
+ TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function);
 GotoIf(BIntEqual(info.argument_count, zero), &empty);
@@ -224,54 +207,55 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
 {
 Comment("Mapped parameter JSSloppyArgumentsObject");
- Node* mapped_count =
- IntPtrOrSmiMin(info.argument_count, info.formal_parameter_count, mode);
+ TNode<BInt> mapped_count =
+ IntPtrOrSmiMin(info.argument_count, info.formal_parameter_count);
- Node* parameter_map_size =
- IntPtrOrSmiAdd(mapped_count, IntPtrOrSmiConstant(2, mode), mode);
+ TNode<BInt> parameter_map_size =
+ IntPtrOrSmiAdd(mapped_count, BIntConstant(2));
 // Verify that the overall allocation will fit in new space.
- Node* elements_allocated =
- IntPtrOrSmiAdd(info.argument_count, parameter_map_size, mode);
+ TNode<BInt> elements_allocated =
+ IntPtrOrSmiAdd(info.argument_count, parameter_map_size);
 GotoIfFixedArraySizeDoesntFitInNewSpace(
 elements_allocated, &runtime,
 JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
 TNode<NativeContext> const native_context = LoadNativeContext(context);
- TNode<Object> const map = LoadContextElement(
- native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- Node* argument_object;
- Node* elements;
- Node* map_array;
- std::tie(argument_object, elements, map_array) =
+ TNode<Map> const map = CAST(LoadContextElement(
+ native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
+ ArgumentsAllocationResult alloc_result =
 AllocateArgumentsObject(map, info.argument_count, parameter_map_size,
- mode, JSSloppyArgumentsObject::kSize);
- StoreObjectFieldNoWriteBarrier(
- argument_object, JSSloppyArgumentsObject::kCalleeOffset, function);
- StoreFixedArrayElement(CAST(map_array), 0, context, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(CAST(map_array), 1, elements, SKIP_WRITE_BARRIER);
+ JSSloppyArgumentsObject::kSize);
+ StoreObjectFieldNoWriteBarrier(alloc_result.arguments_object,
+ JSSloppyArgumentsObject::kCalleeOffset,
+ function);
+ StoreFixedArrayElement(alloc_result.parameter_map, 0, context,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(alloc_result.parameter_map, 1, alloc_result.elements,
+ SKIP_WRITE_BARRIER);
 Comment("Fill in non-mapped parameters");
 TNode<IntPtrT> argument_offset =
- ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, mode,
+ ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS,
 FixedArray::kHeaderSize - kHeapObjectTag);
 TNode<IntPtrT> mapped_offset =
- ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode,
+ ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS,
 FixedArray::kHeaderSize - kHeapObjectTag);
- CodeStubArguments arguments(this, info.argument_count, info.frame, mode);
- VARIABLE(current_argument, MachineType::PointerRepresentation());
- current_argument.Bind(arguments.AtIndexPtr(info.argument_count, mode));
+ CodeStubArguments arguments(this, info.argument_count, info.frame);
+ TVARIABLE(RawPtrT, current_argument,
+ arguments.AtIndexPtr(info.argument_count));
 VariableList var_list1({&current_argument}, zone());
- mapped_offset = UncheckedCast<IntPtrT>(BuildFastLoop(
+ mapped_offset = BuildFastLoop<IntPtrT>(
 var_list1, argument_offset, mapped_offset,
- [this, elements, &current_argument](Node* offset) {
+ [&](TNode<IntPtrT> offset) {
 Increment(&current_argument, kSystemPointerSize);
 TNode<Object> arg = LoadBufferObject(
- UncheckedCast<RawPtrT>(current_argument.value()), 0);
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
- arg);
+ ReinterpretCast<RawPtrT>(current_argument.value()), 0);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ alloc_result.elements, offset, arg);
+ return;
 },
- -kTaggedSize, INTPTR_PARAMETERS));
+ -kTaggedSize);
 // Copy the parameter slots and the holes in the arguments.
 // We need to fill in mapped_count slots. They index the context,
 // MIN_CONTEXT_SLOTS+argument_count-mapped_count
 // We loop from right to left.
 Comment("Fill in mapped parameters");
- VARIABLE(context_index, OptimalParameterRepresentation());
- context_index.Bind(IntPtrOrSmiSub(
- IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
- info.formal_parameter_count, mode),
- mapped_count, mode));
+ TVARIABLE(
+ BInt, context_index,
+ IntPtrOrSmiSub(IntPtrOrSmiAdd(BIntConstant(Context::MIN_CONTEXT_SLOTS),
+ info.formal_parameter_count),
+ mapped_count));
 TNode<Oddball> the_hole = TheHoleConstant();
 VariableList var_list2({&context_index}, zone());
 const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2);
 TNode<IntPtrT> adjusted_map_array = IntPtrAdd(
- BitcastTaggedToWord(map_array),
+ BitcastTaggedToWord(alloc_result.parameter_map),
 IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
 TNode<IntPtrT> zero_offset = ElementOffsetFromIndex(
 zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
- BuildFastLoop(
+ BuildFastLoop<IntPtrT>(
 var_list2, mapped_offset, zero_offset,
- [=, &context_index](Node* offset) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
- the_hole);
+ [&](TNode<IntPtrT> offset) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ alloc_result.elements, offset, the_hole);
 StoreNoWriteBarrier(MachineRepresentation::kTagged, adjusted_map_array,
 offset,
- ParameterToTagged(context_index.value(), mode));
- Increment(&context_index, 1, mode);
+ BIntToSmi(context_index.value()));
+ Increment(&context_index);
 },
- -kTaggedSize, INTPTR_PARAMETERS);
+ -kTaggedSize);
- result.Bind(argument_object);
+ result = alloc_result.arguments_object;
 Goto(&done);
 }
@@ -318,11 +302,11 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
 info.argument_count, &runtime,
 JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
 TNode<NativeContext> const native_context = LoadNativeContext(context);
- TNode<Object> const map =
- LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- result.Bind(ConstructParametersObjectFromArgs(
- map, info.frame, info.argument_count, zero, info.argument_count, mode,
- JSSloppyArgumentsObject::kSize));
+ TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ result = ConstructParametersObjectFromArgs(
+ map, info.frame, info.argument_count, zero, info.argument_count,
+ JSSloppyArgumentsObject::kSize);
 StoreObjectFieldNoWriteBarrier(
 result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
 Goto(&done);
@@ -332,14 +316,11 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
 {
 Comment("Empty JSSloppyArgumentsObject");
 TNode<NativeContext> const native_context = LoadNativeContext(context);
- TNode<Object> const map =
-
LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX); - Node* arguments; - Node* elements; - Node* unused; - std::tie(arguments, elements, unused) = AllocateArgumentsObject( - map, zero, nullptr, mode, JSSloppyArgumentsObject::kSize); - result.Bind(arguments); + TNode<Map> const map = CAST(LoadContextElement( + native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX)); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(map, zero, {}, JSSloppyArgumentsObject::kSize); + result = alloc_result.arguments_object; StoreObjectFieldNoWriteBarrier( result.value(), JSSloppyArgumentsObject::kCalleeOffset, function); Goto(&done); @@ -347,7 +328,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, BIND(&runtime); { - result.Bind(CallRuntime(Runtime::kNewSloppyArguments, context, function)); + result = CAST(CallRuntime(Runtime::kNewSloppyArguments, context, function)); Goto(&done); } diff --git a/chromium/v8/src/builtins/builtins-arguments-gen.h b/chromium/v8/src/builtins/builtins-arguments-gen.h index 4eeae4bf866..2565c3e81ff 100644 --- a/chromium/v8/src/builtins/builtins-arguments-gen.h +++ b/chromium/v8/src/builtins/builtins-arguments-gen.h @@ -10,7 +10,7 @@ namespace v8 { namespace internal { -using Node = compiler::Node; +// TODO(v8:9396): these declarations pollute the v8::internal scope. using CodeAssemblerState = compiler::CodeAssemblerState; using CodeAssemblerLabel = compiler::CodeAssemblerLabel; @@ -19,19 +19,25 @@ class ArgumentsBuiltinsAssembler : public CodeStubAssembler { explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state) : CodeStubAssembler(state) {} - Node* EmitFastNewStrictArguments(Node* context, Node* function); - Node* EmitFastNewSloppyArguments(Node* context, Node* function); - Node* EmitFastNewRestParameter(Node* context, Node* function); + TNode<JSObject> EmitFastNewStrictArguments(TNode<Context> context, + TNode<JSFunction> function); + TNode<JSObject> EmitFastNewSloppyArguments(TNode<Context> context, + TNode<JSFunction> function); + TNode<JSObject> EmitFastNewRestParameter(TNode<Context> context, + TNode<JSFunction> function); private: + struct ArgumentsAllocationResult { + TNode<JSObject> arguments_object; + TNode<FixedArray> elements; + TNode<FixedArray> parameter_map; + }; // Allocates an an arguments (either rest, strict or sloppy) together with the // FixedArray elements for the arguments and a parameter map (for sloppy - // arguments only). A tuple is returned with pointers to the arguments object, - // the elements and parameter map in the form: - // <argument object, arguments FixedArray, parameter map or nullptr> - std::tuple<Node*, Node*, Node*> AllocateArgumentsObject( - Node* map, Node* arguments, Node* mapped_arguments, - ParameterMode param_mode, int base_size); + // arguments only, or empty TNode<> otherwise). + ArgumentsAllocationResult AllocateArgumentsObject( + TNode<Map> map, TNode<BInt> arguments, TNode<BInt> mapped_arguments, + int base_size); // For Rest parameters and Strict arguments, the copying of parameters from // the stack into the arguments object is straight-forward and shares much of @@ -40,11 +46,9 @@ class ArgumentsBuiltinsAssembler : public CodeStubAssembler { // and then copies |rest_count| arguments from the stack frame pointed to by // |frame_ptr| starting from |first_arg|. |arg_count| == |first_arg| + // |rest_count|. 
- Node* ConstructParametersObjectFromArgs(Node* map, Node* frame_ptr, - Node* arg_count, Node* first_arg, - Node* rest_count, - ParameterMode param_mode, - int base_size); + TNode<JSObject> ConstructParametersObjectFromArgs( + TNode<Map> map, TNode<RawPtrT> frame_ptr, TNode<BInt> arg_count, + TNode<BInt> first_arg, TNode<BInt> rest_count, int base_size); }; } // namespace internal diff --git a/chromium/v8/src/builtins/builtins-array-gen.cc b/chromium/v8/src/builtins/builtins-array-gen.cc index c7d8eb00912..f176924ae58 100644 --- a/chromium/v8/src/builtins/builtins-array-gen.cc +++ b/chromium/v8/src/builtins/builtins-array-gen.cc @@ -25,10 +25,9 @@ using IteratorRecord = TorqueStructIteratorRecord; ArrayBuiltinsAssembler::ArrayBuiltinsAssembler( compiler::CodeAssemblerState* state) : CodeStubAssembler(state), - k_(this, MachineRepresentation::kTagged), - a_(this, MachineRepresentation::kTagged), - to_(this, MachineRepresentation::kTagged, SmiConstant(0)), - fully_spec_compliant_(this, {&k_, &a_, &to_}) {} + k_(this), + a_(this), + fully_spec_compliant_(this, {&k_, &a_}) {} void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() { // 6. Let A be ? TypedArraySpeciesCreate(O, len). @@ -44,14 +43,16 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() { LoadJSTypedArrayLength(a))); fast_typed_array_target_ = Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a)); - a_.Bind(a); + a_ = a; } // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map. -Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { +TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor( + TNode<Object> k_value, TNode<Object> k) { // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »). - Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(), - callbackfn(), this_arg(), k_value, k, o()); + TNode<Object> mapped_value = + CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(), + k_value, k, o()); Label fast(this), slow(this), done(this), detached(this, Label::kDeferred); // 8. d. Perform ? Set(A, Pk, mapped_value, true). @@ -65,7 +66,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let // numValue be ? ToBigInt(v). // 6. Otherwise, let numValue be ? ToNumber(value). 
- Node* num_value; + TNode<Object> num_value; if (source_elements_kind_ == BIGINT64_ELEMENTS || source_elements_kind_ == BIGUINT64_ELEMENTS) { num_value = ToBigInt(context(), mapped_value); @@ -78,7 +79,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { Goto(&done); BIND(&slow); - SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value)); + SetPropertyStrict(context(), a(), k, mapped_value); Goto(&done); BIND(&detached); @@ -90,32 +91,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { return a(); } -void ArrayBuiltinsAssembler::NullPostLoopAction() {} - -void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array, - TNode<Smi> smi_length) { - CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array))); - - TNode<IntPtrT> length = SmiToIntPtr(smi_length); - TNode<IntPtrT> byte_length = TimesTaggedSize(length); - CSA_ASSERT(this, UintPtrLessThan(length, byte_length)); - - static const int32_t fa_base_data_offset = - FixedArray::kHeaderSize - kHeapObjectTag; - TNode<IntPtrT> backing_store = IntPtrAdd(BitcastTaggedToWord(array), - IntPtrConstant(fa_base_data_offset)); - - // Call out to memset to perform initialization. - TNode<ExternalReference> memset = - ExternalConstant(ExternalReference::libc_memset_function()); - STATIC_ASSERT(kSizetSize == kIntptrSize); - CallCFunction(memset, MachineType::Pointer(), - std::make_pair(MachineType::Pointer(), backing_store), - std::make_pair(MachineType::IntPtr(), IntPtrConstant(0)), - std::make_pair(MachineType::UintPtr(), byte_length)); -} - -void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) { +void ArrayBuiltinsAssembler::ReturnFromBuiltin(TNode<Object> value) { if (argc_ == nullptr) { Return(value); } else { @@ -126,8 +102,8 @@ void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) { } void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody( - TNode<Context> context, TNode<Object> receiver, Node* callbackfn, - Node* this_arg, TNode<IntPtrT> argc) { + TNode<Context> context, TNode<Object> receiver, TNode<Object> callbackfn, + TNode<Object> this_arg, TNode<IntPtrT> argc) { context_ = context; receiver_ = receiver; callbackfn_ = callbackfn; @@ -137,8 +113,7 @@ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody( void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( const char* name, const BuiltinResultGenerator& generator, - const CallResultProcessor& processor, const PostLoopAction& action, - ForEachDirection direction) { + const CallResultProcessor& processor, ForEachDirection direction) { name_ = name; // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray @@ -160,7 +135,7 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( Label throw_not_callable(this, Label::kDeferred); Label distinguish_types(this); GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable); - Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types, + Branch(IsCallableMap(LoadMap(CAST(callbackfn_))), &distinguish_types, &throw_not_callable); BIND(&throw_not_typed_array); @@ -192,9 +167,9 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( generator(this); if (direction == ForEachDirection::kForward) { - k_.Bind(SmiConstant(0)); + k_ = SmiConstant(0); } else { - k_.Bind(NumberDec(len())); + k_ = NumberDec(len()); } CSA_ASSERT(this, IsSafeInteger(k())); TNode<Int32T> elements_kind = LoadMapElementsKind(typed_array_map); @@ -214,26 +189,18 @@ void 
ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( Goto(&done); // No exception, return success BIND(&done); - action(this); ReturnFromBuiltin(a_.value()); } } void ArrayBuiltinsAssembler::VisitAllTypedArrayElements( - Node* array_buffer, const CallResultProcessor& processor, Label* detached, - ForEachDirection direction, TNode<JSTypedArray> typed_array) { - VariableList list({&a_, &k_, &to_}, zone()); - - FastLoopBody body = [&](Node* index) { - GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached); - TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array); - auto value = LoadFixedTypedArrayElementAsTagged( - data_ptr, index, source_elements_kind_, SMI_PARAMETERS); - k_.Bind(index); - a_.Bind(processor(this, value, index)); - }; - Node* start = SmiConstant(0); - Node* end = len_; + TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor, + Label* detached, ForEachDirection direction, + TNode<JSTypedArray> typed_array) { + VariableList list({&a_, &k_}, zone()); + + TNode<Smi> start = SmiConstant(0); + TNode<Smi> end = CAST(len_); IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost; int incr = 1; if (direction == ForEachDirection::kReverse) { @@ -241,54 +208,17 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements( advance_mode = IndexAdvanceMode::kPre; incr = -1; } - BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS, - advance_mode); -} - -// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate). -void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) { - Label runtime(this, Label::kDeferred), done(this); - - TNode<Map> const original_map = LoadMap(o()); - GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE), - &runtime); - - GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map), - &runtime); - - TNode<PropertyCell> species_protector = ArraySpeciesProtectorConstant(); - TNode<Object> value = - LoadObjectField(species_protector, PropertyCell::kValueOffset); - TNode<Smi> const protector_invalid = SmiConstant(Isolate::kProtectorInvalid); - GotoIf(TaggedEqual(value, protector_invalid), &runtime); - - GotoIfNot(TaggedIsPositiveSmi(len), &runtime); - GotoIfNot(IsValidFastJSArrayCapacity(len, CodeStubAssembler::SMI_PARAMETERS), - &runtime); - - // We need to be conservative and start with holey because the builtins - // that create output arrays aren't guaranteed to be called for every - // element in the input array (maybe the callback deletes an element). - const ElementsKind elements_kind = - GetHoleyElementsKind(GetInitialFastElementsKind()); - TNode<NativeContext> native_context = LoadNativeContext(context()); - TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context); - a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, CAST(len), - nullptr, CodeStubAssembler::SMI_PARAMETERS, - kAllowLargeObjectAllocation)); - - Goto(&done); - - BIND(&runtime); - { - // 5. Let A be ? ArraySpeciesCreate(O, len). 
- TNode<JSReceiver> constructor = - CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context(), o())); - a_.Bind(Construct(context(), constructor, len)); - Goto(&fully_spec_compliant_); - } - - BIND(&done); + BuildFastLoop<Smi>( + list, start, end, + [&](TNode<Smi> index) { + GotoIf(IsDetachedBuffer(array_buffer), detached); + TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array); + TNode<Numeric> value = LoadFixedTypedArrayElementAsTagged( + data_ptr, index, source_elements_kind_, SMI_PARAMETERS); + k_ = index; + a_ = processor(this, value, index); + }, + incr, advance_mode); } TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { @@ -297,7 +227,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { TNode<Context> context = CAST(Parameter(Descriptor::kContext)); CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget))); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode<Object> receiver = args.GetReceiver(); Label runtime(this, Label::kDeferred); @@ -315,9 +245,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { BIND(&fast); { TNode<JSArray> array_receiver = CAST(receiver); - CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver))); - TNode<IntPtrT> length = - LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset); + TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(array_receiver)); Label return_undefined(this), fast_elements(this); GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined); @@ -394,14 +322,12 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { Label double_transition(this); Label runtime(this, Label::kDeferred); - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. 
TNode<Int32T> argc = UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget))); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode<Object> receiver = args.GetReceiver(); TNode<JSArray> array_receiver; TNode<Int32T> kind; @@ -493,9 +419,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { BIND(&default_label); { args.ForEach( - [this, array_receiver, context](Node* arg) { + [=](TNode<Object> arg) { TNode<Number> length = LoadJSArrayLength(array_receiver); - SetPropertyStrict(context, array_receiver, length, CAST(arg)); + SetPropertyStrict(context, array_receiver, length, arg); }, arg_index.value()); args.PopAndReturn(LoadJSArrayLength(array_receiver)); @@ -515,11 +441,10 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) { ParameterMode mode = OptimalParameterMode(); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - Node* array = Parameter(Descriptor::kSource); + TNode<JSArray> array = CAST(Parameter(Descriptor::kSource)); Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode); Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode); - CSA_ASSERT(this, IsJSArray(array)); CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid())); Return(ExtractFastJSArray(context, array, begin, count, mode)); @@ -555,7 +480,7 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) { Word32BinaryNot(IsNoElementsProtectorCellInvalid()))); ParameterMode mode = OptimalParameterMode(); - Return(CloneFastJSArray(context, array, mode, nullptr, + Return(CloneFastJSArray(context, array, mode, {}, HoleConversionMode::kConvertToUndefined)); } @@ -584,9 +509,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler { TNode<Map> array_map = CAST(LoadContextElement( context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX)); - array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, SmiConstant(0), - SmiConstant(0), nullptr, - ParameterMode::SMI_PARAMETERS); + array = + AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, SmiConstant(0), + SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS); Goto(&done); } @@ -626,7 +551,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) { TNode<Int32T> argc = UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode<Object> items = args.GetOptionalArgumentValue(0); TNode<Object> receiver = args.GetReceiver(); @@ -810,8 +735,8 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) { GotoIf(IsUndefined(map_function), &next); CSA_ASSERT(this, IsCallable(CAST(map_function))); - value = CAST(CallJS(CodeFactory::Call(isolate()), context, map_function, - this_arg, value.value(), index.value())); + value = CallJS(CodeFactory::Call(isolate()), context, map_function, + this_arg, value.value(), index.value()); Goto(&next); BIND(&next); } @@ -846,8 +771,7 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) { GenerateIteratingTypedArrayBuiltinBody( "%TypedArray%.prototype.map", &ArrayBuiltinsAssembler::TypedArrayMapResultGenerator, - &ArrayBuiltinsAssembler::TypedArrayMapProcessor, - &ArrayBuiltinsAssembler::NullPostLoopAction); + &ArrayBuiltinsAssembler::TypedArrayMapProcessor); } TF_BUILTIN(ArrayIsArray, CodeStubAssembler) { @@ -884,15 +808,25 @@ class 
ArrayIncludesIndexofAssembler : public CodeStubAssembler { void Generate(SearchVariant variant, TNode<IntPtrT> argc, TNode<Context> context); - void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements, - TNode<Object> search_element, Node* array_length, - Node* from_index); - void GeneratePackedDoubles(SearchVariant variant, Node* elements, - Node* search_element, Node* array_length, - Node* from_index); - void GenerateHoleyDoubles(SearchVariant variant, Node* elements, - Node* search_element, Node* array_length, - Node* from_index); + void GenerateSmiOrObject(SearchVariant variant, TNode<Context> context, + TNode<FixedArray> elements, + TNode<Object> search_element, + TNode<Smi> array_length, TNode<Smi> from_index); + void GeneratePackedDoubles(SearchVariant variant, + TNode<FixedDoubleArray> elements, + TNode<Object> search_element, + TNode<Smi> array_length, TNode<Smi> from_index); + void GenerateHoleyDoubles(SearchVariant variant, + TNode<FixedDoubleArray> elements, + TNode<Object> search_element, + TNode<Smi> array_length, TNode<Smi> from_index); + + void ReturnIfEmpty(TNode<Smi> length, TNode<Object> value) { + Label done(this); + GotoIf(SmiGreaterThan(length, SmiConstant(0)), &done); + Return(value); + BIND(&done); + } }; void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, @@ -916,7 +850,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, BranchIfFastJSArrayForRead(receiver, context, &init_index, &call_runtime); BIND(&init_index); - VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero); + TVARIABLE(IntPtrT, index_var, intptr_zero); TNode<JSArray> array = CAST(receiver); // JSArray length is always a positive Smi for fast arrays. @@ -946,14 +880,14 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, BIND(&is_smi); { TNode<IntPtrT> intptr_start_from = SmiUntag(CAST(start_from)); - index_var.Bind(intptr_start_from); + index_var = intptr_start_from; GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done); // The fromIndex is negative: add it to the array's length. - index_var.Bind(IntPtrAdd(array_length_untagged, index_var.value())); + index_var = IntPtrAdd(array_length_untagged, index_var.value()); // Clamp negative results at zero. GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done); - index_var.Bind(intptr_zero); + index_var = intptr_zero; Goto(&done); } BIND(&done); @@ -1031,8 +965,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, BIND(&call_runtime); { - TNode<Object> start_from = - args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant()); + TNode<Object> start_from = args.GetOptionalArgumentValue(kFromIndexArg); Runtime::FunctionId function = variant == kIncludes ? 
Runtime::kArrayIncludes_Slow : Runtime::kArrayIndexOf;
@@ -1042,8 +975,9 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
 }
 void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
- SearchVariant variant, Node* context, Node* elements,
- TNode<Object> search_element, Node* array_length, Node* from_index) {
+ SearchVariant variant, TNode<Context> context, TNode<FixedArray> elements,
+ TNode<Object> search_element, TNode<Smi> array_length,
+ TNode<Smi> from_index) {
 TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
 TVARIABLE(Float64T, search_num);
 TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -1077,7 +1011,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
 &return_not_found);
 TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
 GotoIf(TaggedEqual(element_k, search_element), &return_found);
 Increment(&index_var);
@@ -1090,7 +1024,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
 &return_not_found);
 TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
 GotoIf(IsUndefined(element_k), &return_found);
 GotoIf(IsTheHole(element_k), &return_found);
@@ -1110,7 +1044,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
 &return_not_found);
 TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
 GotoIfNot(TaggedIsSmi(element_k), &not_smi);
 Branch(Float64Equal(search_num.value(), SmiToFloat64(CAST(element_k))),
 &return_found, &continue_loop);
@@ -1133,7 +1067,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
 &return_not_found);
 TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
 GotoIf(TaggedIsSmi(element_k), &continue_loop);
 GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
 BranchIfFloat64IsNaN(LoadHeapNumberValue(CAST(element_k)), &return_found,
@@ -1157,7 +1091,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
 &return_not_found);
 TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
 GotoIf(TaggedIsSmi(element_k), &continue_loop);
 GotoIf(TaggedEqual(search_element_string, element_k), &return_found);
 TNode<Uint16T> element_k_type = LoadInstanceType(CAST(element_k));
@@ -1186,7 +1120,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 &return_not_found);
 TNode<Object> element_k =
- UnsafeLoadFixedArrayElement(CAST(elements), index_var.value());
+ UnsafeLoadFixedArrayElement(elements, index_var.value());
 Label continue_loop(this);
 GotoIf(TaggedIsSmi(element_k), &continue_loop);
 GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop);
@@ -1213,11 +1147,10 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
 }
 }
-void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
- Node* elements,
- Node* search_element,
- Node* array_length,
- Node* from_index) {
+void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(
+ SearchVariant variant, TNode<FixedDoubleArray> elements,
+ TNode<Object> search_element, TNode<Smi> array_length,
+ TNode<Smi> from_index) {
 TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
 TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -1228,13 +1161,13 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
 search_num = Float64Constant(0);
 GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num = SmiToFloat64(search_element);
+ search_num = SmiToFloat64(CAST(search_element));
 Goto(&not_nan_loop);
 BIND(&search_notnan);
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+ GotoIfNot(IsHeapNumber(CAST(search_element)), &return_not_found);
- search_num = LoadHeapNumberValue(search_element);
+ search_num = LoadHeapNumberValue(CAST(search_element));
 Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
 BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
@@ -1282,11 +1215,10 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
 }
 }
-void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
- Node* elements,
- Node* search_element,
- Node* array_length,
- Node* from_index) {
+void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
+ SearchVariant variant, TNode<FixedDoubleArray> elements,
+ TNode<Object> search_element, TNode<Smi> array_length,
+ TNode<Smi> from_index) {
 TVARIABLE(IntPtrT, index_var, SmiUntag(from_index));
 TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -1297,16 +1229,16 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
 search_num = Float64Constant(0);
 GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num = SmiToFloat64(search_element);
+ search_num = SmiToFloat64(CAST(search_element));
 Goto(&not_nan_loop);
 BIND(&search_notnan);
 if (variant == kIncludes) {
 GotoIf(IsUndefined(search_element), &hole_loop);
 }
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+ GotoIfNot(IsHeapNumber(CAST(search_element)), &return_not_found);
- search_num = LoadHeapNumberValue(search_element);
+ search_num = LoadHeapNumberValue(CAST(search_element));
 Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
 BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
@@ -1387,32 +1319,34 @@ TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
 TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
 TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* elements = Parameter(Descriptor::kElements);
+ TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
 TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
 GenerateSmiOrObject(kIncludes, context, elements, search_element,
 array_length, from_index);
 }
 TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
- Node* elements = Parameter(Descriptor::kElements);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
+ TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
- GeneratePackedDoubles(kIncludes, elements, search_element, array_length,
+ ReturnIfEmpty(array_length, FalseConstant());
+ GeneratePackedDoubles(kIncludes, CAST(elements), search_element, array_length,
 from_index);
 }
 TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
- Node* elements = Parameter(Descriptor::kElements);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
+ TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
- GenerateHoleyDoubles(kIncludes, elements, search_element, array_length,
+ ReturnIfEmpty(array_length, FalseConstant());
+ GenerateHoleyDoubles(kIncludes, CAST(elements), search_element, array_length,
 from_index);
 }
@@ -1426,32 +1360,34 @@ TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
 TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
 TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* elements = Parameter(Descriptor::kElements);
+ TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
 TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
 GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length,
 from_index);
 }
 TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
- Node* elements = Parameter(Descriptor::kElements);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* array_length = Parameter(Descriptor::kLength);
- Node* from_index = Parameter(Descriptor::kFromIndex);
+ TNode<FixedArrayBase> elements =
CAST(Parameter(Descriptor::kElements)); + TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement)); + TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength)); + TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex)); - GeneratePackedDoubles(kIndexOf, elements, search_element, array_length, + ReturnIfEmpty(array_length, NumberConstant(-1)); + GeneratePackedDoubles(kIndexOf, CAST(elements), search_element, array_length, from_index); } TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) { - Node* elements = Parameter(Descriptor::kElements); - Node* search_element = Parameter(Descriptor::kSearchElement); - Node* array_length = Parameter(Descriptor::kLength); - Node* from_index = Parameter(Descriptor::kFromIndex); + TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements)); + TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement)); + TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength)); + TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex)); - GenerateHoleyDoubles(kIndexOf, elements, search_element, array_length, + ReturnIfEmpty(array_length, NumberConstant(-1)); + GenerateHoleyDoubles(kIndexOf, CAST(elements), search_element, array_length, from_index); } @@ -1484,10 +1420,10 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { const char* method_name = "Array Iterator.prototype.next"; TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - Node* iterator = Parameter(Descriptor::kReceiver); + TNode<Object> maybe_iterator = CAST(Parameter(Descriptor::kReceiver)); - VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant()); - VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant()); + TVARIABLE(Oddball, var_done, TrueConstant()); + TVARIABLE(Object, var_value, UndefinedConstant()); Label allocate_entry_if_needed(this); Label allocate_iterator_result(this); @@ -1497,9 +1433,11 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { // If O does not have all of the internal slots of an Array Iterator Instance // (22.1.5.3), throw a TypeError exception - ThrowIfNotInstanceType(context, iterator, JS_ARRAY_ITERATOR_TYPE, + ThrowIfNotInstanceType(context, maybe_iterator, JS_ARRAY_ITERATOR_TYPE, method_name); + TNode<JSArrayIterator> iterator = CAST(maybe_iterator); + // Let a be O.[[IteratedObject]]. 
TNode<JSReceiver> array = CAST(LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset)); @@ -1531,8 +1469,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { iterator, JSArrayIterator::kNextIndexOffset, ChangeUint32ToTagged(Unsigned(Int32Add(index32, Int32Constant(1))))); - var_done.Bind(FalseConstant()); - var_value.Bind(index); + var_done = FalseConstant(); + var_value = index; GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField( iterator, JSArrayIterator::kKindOffset), @@ -1543,9 +1481,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { TNode<Int32T> elements_kind = LoadMapElementsKind(array_map); TNode<FixedArrayBase> elements = LoadElements(CAST(array)); GotoIfForceSlowPath(&if_generic); - var_value.Bind(LoadFixedArrayBaseElementAsTagged( + var_value = LoadFixedArrayBaseElementAsTagged( elements, Signed(ChangeUint32ToWord(index32)), elements_kind, - &if_generic, &if_hole)); + &if_generic, &if_hole); Goto(&allocate_entry_if_needed); BIND(&if_hole); @@ -1553,7 +1491,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic); GotoIfNot(IsPrototypeInitialArrayPrototype(context, array_map), &if_generic); - var_value.Bind(UndefinedConstant()); + var_value = UndefinedConstant(); Goto(&allocate_entry_if_needed); } } @@ -1572,8 +1510,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset, NumberInc(index)); - var_done.Bind(FalseConstant()); - var_value.Bind(index); + var_done = FalseConstant(); + var_value = index; Branch(Word32Equal(LoadAndUntagToWord32ObjectField( iterator, JSArrayIterator::kKindOffset), @@ -1609,7 +1547,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { BIND(&if_generic); { - var_value.Bind(GetProperty(context, array, index)); + var_value = GetProperty(context, array, index); Goto(&allocate_entry_if_needed); } @@ -1632,8 +1570,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset, SmiInc(CAST(index))); - var_done.Bind(FalseConstant()); - var_value.Bind(index); + var_done = FalseConstant(); + var_value = index; GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField( iterator, JSArrayIterator::kKindOffset), @@ -1641,9 +1579,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { &allocate_iterator_result); TNode<Int32T> elements_kind = LoadMapElementsKind(array_map); - TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(CAST(array)); - var_value.Bind(LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index), - elements_kind)); + TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(array)); + var_value = LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index), + elements_kind); Goto(&allocate_entry_if_needed); } @@ -1654,7 +1592,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { Int32Constant(static_cast<int>(IterationKind::kValues))), &allocate_iterator_result); - Node* result = + TNode<JSObject> result = AllocateJSIteratorResultForEntry(context, index, var_value.value()); Return(result); } @@ -1673,29 +1611,28 @@ class ArrayFlattenAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray - Node* FlattenIntoArray(Node* context, Node* target, Node* source, - Node* source_length, Node* start, Node* depth, - Node* mapper_function = nullptr, - Node* this_arg = nullptr) { - CSA_ASSERT(this, IsJSReceiver(target)); - 
CSA_ASSERT(this, IsJSReceiver(source)); + TNode<Number> FlattenIntoArray( + TNode<Context> context, TNode<JSReceiver> target, + TNode<JSReceiver> source, TNode<Number> source_length, + TNode<Number> start, TNode<Number> depth, + base::Optional<TNode<HeapObject>> mapper_function = base::nullopt, + base::Optional<TNode<Object>> this_arg = base::nullopt) { CSA_ASSERT(this, IsNumberPositive(source_length)); CSA_ASSERT(this, IsNumberPositive(start)); - CSA_ASSERT(this, IsNumber(depth)); // 1. Let targetIndex be start. - VARIABLE(var_target_index, MachineRepresentation::kTagged, start); + TVARIABLE(Number, var_target_index, start); // 2. Let sourceIndex be 0. - VARIABLE(var_source_index, MachineRepresentation::kTagged, SmiConstant(0)); + TVARIABLE(Number, var_source_index, SmiConstant(0)); // 3. Repeat... Label loop(this, {&var_target_index, &var_source_index}), done_loop(this); Goto(&loop); BIND(&loop); { - Node* const source_index = var_source_index.value(); - Node* const target_index = var_target_index.value(); + TNode<Number> source_index = var_source_index.value(); + TNode<Number> target_index = var_target_index.value(); // ...while sourceIndex < sourceLen GotoIfNumberGreaterThanOrEqual(source_index, source_length, &done_loop); @@ -1716,16 +1653,16 @@ class ArrayFlattenAssembler : public CodeStubAssembler { GetProperty(context, source, source_index); // ii. If mapperFunction is present, then - if (mapper_function != nullptr) { - CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function), - IsCallable(mapper_function))); - DCHECK_NOT_NULL(this_arg); + if (mapper_function) { + CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function.value()), + IsCallable(mapper_function.value()))); + DCHECK(this_arg.has_value()); // 1. Set element to ? Call(mapperFunction, thisArg , « element, // sourceIndex, source »). - element_maybe_smi = CAST( - CallJS(CodeFactory::Call(isolate()), context, mapper_function, - this_arg, element_maybe_smi, source_index, source)); + element_maybe_smi = CallJS(CodeFactory::Call(isolate()), context, + mapper_function.value(), this_arg.value(), + element_maybe_smi, source_index, source); } // iii. Let shouldFlatten be false. @@ -1752,7 +1689,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { // 2. Set targetIndex to ? FlattenIntoArray(target, element, // elementLen, targetIndex, // depth - 1). - var_target_index.Bind( + var_target_index = CAST( CallBuiltin(Builtins::kFlattenIntoArray, context, target, element, element_length, target_index, NumberDec(depth))); Goto(&next); @@ -1769,7 +1706,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { // 2. Set targetIndex to ? FlattenIntoArray(target, element, // elementLen, targetIndex, // depth - 1). - var_target_index.Bind( + var_target_index = CAST( CallBuiltin(Builtins::kFlattenIntoArray, context, target, element, element_length, target_index, NumberDec(depth))); Goto(&next); @@ -1789,7 +1726,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { target_index, element); // 3. Increase targetIndex by 1. - var_target_index.Bind(NumberInc(target_index)); + var_target_index = NumberInc(target_index); Goto(&next); BIND(&throw_error); @@ -1800,7 +1737,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { BIND(&next); // d. Increase sourceIndex by 1. 
- var_source_index.Bind(NumberInc(source_index)); + var_source_index = NumberInc(source_index); Goto(&loop); } @@ -1811,16 +1748,16 @@ class ArrayFlattenAssembler : public CodeStubAssembler { // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const target = Parameter(Descriptor::kTarget); - Node* const source = Parameter(Descriptor::kSource); - Node* const source_length = Parameter(Descriptor::kSourceLength); - Node* const start = Parameter(Descriptor::kStart); - Node* const depth = Parameter(Descriptor::kDepth); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget)); + TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource)); + TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength)); + TNode<Number> start = CAST(Parameter(Descriptor::kStart)); + TNode<Number> depth = CAST(Parameter(Descriptor::kDepth)); // FlattenIntoArray might get called recursively, check stack for overflow // manually as it has stub linkage. - PerformStackCheck(CAST(context)); + PerformStackCheck(context); Return( FlattenIntoArray(context, target, source, source_length, start, depth)); @@ -1828,14 +1765,15 @@ TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) { // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const target = Parameter(Descriptor::kTarget); - Node* const source = Parameter(Descriptor::kSource); - Node* const source_length = Parameter(Descriptor::kSourceLength); - Node* const start = Parameter(Descriptor::kStart); - Node* const depth = Parameter(Descriptor::kDepth); - Node* const mapper_function = Parameter(Descriptor::kMapperFunction); - Node* const this_arg = Parameter(Descriptor::kThisArg); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget)); + TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource)); + TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength)); + TNode<Number> start = CAST(Parameter(Descriptor::kStart)); + TNode<Number> depth = CAST(Parameter(Descriptor::kDepth)); + TNode<HeapObject> mapper_function = + CAST(Parameter(Descriptor::kMapperFunction)); + TNode<Object> this_arg = CAST(Parameter(Descriptor::kThisArg)); Return(FlattenIntoArray(context, target, source, source_length, start, depth, mapper_function, this_arg)); @@ -2127,8 +2065,9 @@ TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) { } void ArrayBuiltinsAssembler::GenerateConstructor( - Node* context, Node* array_function, Node* array_map, Node* array_size, - Node* allocation_site, ElementsKind elements_kind, + TNode<Context> context, TNode<HeapObject> array_function, + TNode<Map> array_map, TNode<Object> array_size, + TNode<HeapObject> allocation_site, ElementsKind elements_kind, AllocationSiteMode mode) { Label ok(this); Label smi_size(this); @@ -2138,33 +2077,37 @@ void ArrayBuiltinsAssembler::GenerateConstructor( Branch(TaggedIsSmi(array_size), &smi_size, &call_runtime); BIND(&smi_size); - - if (IsFastPackedElementsKind(elements_kind)) { - Label abort(this, Label::kDeferred); - Branch(SmiEqual(CAST(array_size), SmiConstant(0)), &small_smi_size, &abort); - - BIND(&abort); - TNode<Smi> reason = - SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray); - 
TailCallRuntime(Runtime::kAbort, context, reason); - } else { - int element_size = - IsDoubleElementsKind(elements_kind) ? kDoubleSize : kTaggedSize; - int max_fast_elements = - (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize - - AllocationMemento::kSize) / - element_size; - Branch(SmiAboveOrEqual(CAST(array_size), SmiConstant(max_fast_elements)), - &call_runtime, &small_smi_size); - } - - BIND(&small_smi_size); { - TNode<JSArray> array = AllocateJSArray( - elements_kind, CAST(array_map), array_size, CAST(array_size), - mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site, - CodeStubAssembler::SMI_PARAMETERS); - Return(array); + TNode<Smi> array_size_smi = CAST(array_size); + + if (IsFastPackedElementsKind(elements_kind)) { + Label abort(this, Label::kDeferred); + Branch(SmiEqual(array_size_smi, SmiConstant(0)), &small_smi_size, &abort); + + BIND(&abort); + TNode<Smi> reason = + SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray); + TailCallRuntime(Runtime::kAbort, context, reason); + } else { + int element_size = + IsDoubleElementsKind(elements_kind) ? kDoubleSize : kTaggedSize; + int max_fast_elements = + (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - + JSArray::kSize - AllocationMemento::kSize) / + element_size; + Branch(SmiAboveOrEqual(array_size_smi, SmiConstant(max_fast_elements)), + &call_runtime, &small_smi_size); + } + + BIND(&small_smi_size); + { + TNode<JSArray> array = AllocateJSArray( + elements_kind, array_map, array_size_smi, array_size_smi, + mode == DONT_TRACK_ALLOCATION_SITE ? TNode<AllocationSite>() + : CAST(allocation_site), + CodeStubAssembler::SMI_PARAMETERS); + Return(array); + } } BIND(&call_runtime); @@ -2181,8 +2124,9 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor( Parameter(Descriptor::kFunction), JSFunction::kContextOffset)); bool track_allocation_site = AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES; - Node* allocation_site = - track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr; + TNode<AllocationSite> allocation_site = + track_allocation_site ? 
CAST(Parameter(Descriptor::kAllocationSite)) + : TNode<AllocationSite>(); TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context); TNode<JSArray> array = AllocateJSArray( kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements), @@ -2194,7 +2138,7 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor( ElementsKind kind, AllocationSiteOverrideMode mode) { using Descriptor = ArraySingleArgumentConstructorDescriptor; TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - Node* function = Parameter(Descriptor::kFunction); + TNode<HeapObject> function = CAST(Parameter(Descriptor::kFunction)); TNode<NativeContext> native_context = CAST(LoadObjectField(function, JSFunction::kContextOffset)); TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context); @@ -2206,8 +2150,11 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor( : DONT_TRACK_ALLOCATION_SITE; } - Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter); - Node* allocation_site = Parameter(Descriptor::kAllocationSite); + TNode<Object> array_size = + CAST(Parameter(Descriptor::kArraySizeSmiParameter)); + // allocation_site can be Undefined or an AllocationSite + TNode<HeapObject> allocation_site = + CAST(Parameter(Descriptor::kAllocationSite)); GenerateConstructor(context, function, array_map, array_size, allocation_site, kind, allocation_site_mode); @@ -2219,7 +2166,7 @@ void ArrayBuiltinsAssembler::GenerateArrayNArgumentsConstructor( // Replace incoming JS receiver argument with the target. // TODO(ishell): Avoid replacing the target on the stack and just add it // as another additional parameter for Runtime::kNewArray. - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); args.SetReceiver(target); // Adjust arguments count for the runtime call: +1 for implicit receiver diff --git a/chromium/v8/src/builtins/builtins-array-gen.h b/chromium/v8/src/builtins/builtins-array-gen.h index 6b8c704038f..a19ba1a5da1 100644 --- a/chromium/v8/src/builtins/builtins-array-gen.h +++ b/chromium/v8/src/builtins/builtins-array-gen.h @@ -17,51 +17,13 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { using BuiltinResultGenerator = std::function<void(ArrayBuiltinsAssembler* masm)>; - using CallResultProcessor = std::function<Node*(ArrayBuiltinsAssembler* masm, - Node* k_value, Node* k)>; - - using PostLoopAction = std::function<void(ArrayBuiltinsAssembler* masm)>; - - void FindResultGenerator(); - - Node* FindProcessor(Node* k_value, Node* k); - - void FindIndexResultGenerator(); - - Node* FindIndexProcessor(Node* k_value, Node* k); - - void ForEachResultGenerator(); - - Node* ForEachProcessor(Node* k_value, Node* k); - - void SomeResultGenerator(); - - Node* SomeProcessor(Node* k_value, Node* k); - - void EveryResultGenerator(); - - Node* EveryProcessor(Node* k_value, Node* k); - - void ReduceResultGenerator(); - - Node* ReduceProcessor(Node* k_value, Node* k); - - void ReducePostLoopAction(); + using CallResultProcessor = std::function<TNode<Object>( + ArrayBuiltinsAssembler* masm, TNode<Object> k_value, TNode<Object> k)>; void TypedArrayMapResultGenerator(); - Node* SpecCompliantMapProcessor(Node* k_value, Node* k); - - Node* FastMapProcessor(Node* k_value, Node* k); - // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map. - Node* TypedArrayMapProcessor(Node* k_value, Node* k); - - void NullPostLoopAction(); - - // Uses memset to effectively initialize the given FixedArray with Smi zeroes. 
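The size check in the GenerateConstructor hunk above boils down to one formula: a requested array is only stub-allocated when its elements (kDoubleSize each for double kinds, kTaggedSize otherwise) fit into a regular heap object together with the FixedArray header, the JSArray, and an AllocationMemento. A hedged restatement in plain C++, with the constants passed in as parameters rather than V8's real values:

// Requests of array_size_smi at or above this bound branch to call_runtime
// (Runtime::kNewArray); smaller requests take the small_smi_size fast path.
constexpr int MaxFastElements(int max_regular_heap_object_size,
                              int fixed_array_header_size,
                              int js_array_size,
                              int allocation_memento_size,
                              int element_size) {
  return (max_regular_heap_object_size - fixed_array_header_size -
          js_array_size - allocation_memento_size) /
         element_size;
}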
- void FillFixedArrayWithSmiZero(TNode<FixedArray> array, - TNode<Smi> smi_length); + TNode<Object> TypedArrayMapProcessor(TNode<Object> k_value, TNode<Object> k); TNode<String> CallJSArrayArrayJoinConcatToSequentialString( TNode<FixedArray> fixed_array, TNode<IntPtrT> length, TNode<String> sep, @@ -86,20 +48,22 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { TNode<IntPtrT> argc() { return argc_; } TNode<JSReceiver> o() { return o_; } TNode<Number> len() { return len_; } - Node* callbackfn() { return callbackfn_; } - Node* this_arg() { return this_arg_; } - TNode<Number> k() { return CAST(k_.value()); } - Node* a() { return a_.value(); } + TNode<Object> callbackfn() { return callbackfn_; } + TNode<Object> this_arg() { return this_arg_; } + TNode<Number> k() { return k_.value(); } + TNode<Object> a() { return a_.value(); } - void ReturnFromBuiltin(Node* value); + void ReturnFromBuiltin(TNode<Object> value); void InitIteratingArrayBuiltinBody(TNode<Context> context, - TNode<Object> receiver, Node* callbackfn, - Node* this_arg, TNode<IntPtrT> argc); + TNode<Object> receiver, + TNode<Object> callbackfn, + TNode<Object> this_arg, + TNode<IntPtrT> argc); void GenerateIteratingTypedArrayBuiltinBody( const char* name, const BuiltinResultGenerator& generator, - const CallResultProcessor& processor, const PostLoopAction& action, + const CallResultProcessor& processor, ForEachDirection direction = ForEachDirection::kForward); void TailCallArrayConstructorStub( @@ -107,23 +71,25 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { TNode<JSFunction> target, TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc); - void GenerateDispatchToArrayStub( - TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc, - AllocationSiteOverrideMode mode, - TNode<AllocationSite> allocation_site = TNode<AllocationSite>()); + void GenerateDispatchToArrayStub(TNode<Context> context, + TNode<JSFunction> target, TNode<Int32T> argc, + AllocationSiteOverrideMode mode, + TNode<AllocationSite> allocation_site = {}); void CreateArrayDispatchNoArgument( TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc, AllocationSiteOverrideMode mode, - TNode<AllocationSite> allocation_site = TNode<AllocationSite>()); + TNode<AllocationSite> allocation_site = {}); void CreateArrayDispatchSingleArgument( TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc, AllocationSiteOverrideMode mode, - TNode<AllocationSite> allocation_site = TNode<AllocationSite>()); + TNode<AllocationSite> allocation_site = {}); - void GenerateConstructor(Node* context, Node* array_function, Node* array_map, - Node* array_size, Node* allocation_site, + void GenerateConstructor(TNode<Context> context, + TNode<HeapObject> array_function, + TNode<Map> array_map, TNode<Object> array_size, + TNode<HeapObject> allocation_site, ElementsKind elements_kind, AllocationSiteMode mode); void GenerateArrayNoArgumentConstructor(ElementsKind kind, AllocationSiteOverrideMode mode); @@ -135,33 +101,22 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { TNode<HeapObject> maybe_allocation_site); private: - static ElementsKind ElementsKindForInstanceType(InstanceType type); - - void VisitAllTypedArrayElements(Node* array_buffer, + void VisitAllTypedArrayElements(TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor, Label* detached, ForEachDirection direction, TNode<JSTypedArray> typed_array); - // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate). 
- // This version is specialized to create a zero length array - // of the elements kind of the input array. - void GenerateArraySpeciesCreate(); - - // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate). - void GenerateArraySpeciesCreate(TNode<Number> len); - - Node* callbackfn_ = nullptr; + TNode<Object> callbackfn_; TNode<JSReceiver> o_; - Node* this_arg_ = nullptr; + TNode<Object> this_arg_; TNode<Number> len_; TNode<Context> context_; TNode<Object> receiver_; TNode<IntPtrT> argc_; - Node* fast_typed_array_target_ = nullptr; + TNode<BoolT> fast_typed_array_target_; const char* name_ = nullptr; - Variable k_; - Variable a_; - Variable to_; + TVariable<Number> k_; + TVariable<Object> a_; Label fully_spec_compliant_; ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS; }; diff --git a/chromium/v8/src/builtins/builtins-array.cc b/chromium/v8/src/builtins/builtins-array.cc index 6c3e7246492..8002c069962 100644 --- a/chromium/v8/src/builtins/builtins-array.cc +++ b/chromium/v8/src/builtins/builtins-array.cc @@ -1189,7 +1189,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver, static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) { HandleScope handle_scope(isolate); if (!obj->IsJSReceiver()) return Just(false); - if (!isolate->IsIsConcatSpreadableLookupChainIntact(JSReceiver::cast(*obj))) { + if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate) || + JSReceiver::cast(*obj).HasProxyInPrototype(isolate)) { // Slow path if @@isConcatSpreadable has been used. Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol()); Handle<Object> value; @@ -1258,7 +1259,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species, // dictionary. bool fast_case = is_array_species && (estimate_nof * 2) >= estimate_result_length && - isolate->IsIsConcatSpreadableLookupChainIntact(); + Protectors::IsIsConcatSpreadableLookupChainIntact(isolate); if (fast_case && kind == PACKED_DOUBLE_ELEMENTS) { Handle<FixedArrayBase> storage = @@ -1406,7 +1407,7 @@ bool IsSimpleArray(Isolate* isolate, Handle<JSArray> obj) { MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, BuiltinArguments* args) { - if (!isolate->IsIsConcatSpreadableLookupChainIntact()) { + if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate)) { return MaybeHandle<JSArray>(); } // We shouldn't overflow when adding another len. diff --git a/chromium/v8/src/builtins/builtins-arraybuffer.cc b/chromium/v8/src/builtins/builtins-arraybuffer.cc index 9ecb1815bcc..b062b9ca3ce 100644 --- a/chromium/v8/src/builtins/builtins-arraybuffer.cc +++ b/chromium/v8/src/builtins/builtins-arraybuffer.cc @@ -30,29 +30,38 @@ namespace { Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target, Handle<JSReceiver> new_target, Handle<Object> length, - bool initialize) { + InitializedFlag initialized) { + SharedFlag shared = (*target != target->native_context().array_buffer_fun()) + ? SharedFlag::kShared + : SharedFlag::kNotShared; Handle<JSObject> result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, JSObject::New(target, new_target, Handle<AllocationSite>::null())); + auto array_buffer = Handle<JSArrayBuffer>::cast(result); + // Ensure that all fields are initialized because BackingStore::Allocate is + // allowed to GC. Note that we cannot move the allocation of the ArrayBuffer + // after BackingStore::Allocate because of the spec. 
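As an aside (not part of the patch), the ordering that the comment above describes and the Setup/Attach calls just below implement can be sketched with plain C++ stand-ins: the buffer is put into a valid empty state before the allocation that may trigger a GC, and the real memory is attached only after allocation succeeds.

#include <cstddef>
#include <memory>
#include <vector>

struct BackingStoreSketch {
  explicit BackingStoreSketch(size_t n) : bytes(n) {}
  std::vector<char> bytes;
};

struct ArrayBufferSketch {
  // 1. Make the object safe for a GC to observe: no uninitialized fields.
  void SetupEmpty() { store.reset(); }
  // 3. Publish the memory only once allocation has succeeded.
  void Attach(std::unique_ptr<BackingStoreSketch> s) { store = std::move(s); }
  std::unique_ptr<BackingStoreSketch> store;
};

// 2. Stand-in for BackingStore::Allocate, which may run a GC and may fail.
std::unique_ptr<BackingStoreSketch> AllocateStoreSketch(size_t byte_length) {
  if (byte_length > (size_t{1} << 30)) return nullptr;  // allocation failure
  return std::make_unique<BackingStoreSketch>(byte_length);
}

bool ConstructBufferSketch(ArrayBufferSketch& buffer, size_t byte_length) {
  buffer.SetupEmpty();                    // must precede the allocating step
  auto store = AllocateStoreSketch(byte_length);
  if (!store) return false;               // kArrayBufferAllocationFailed path
  buffer.Attach(std::move(store));
  return true;
}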
+ array_buffer->Setup(shared, nullptr); + size_t byte_length; if (!TryNumberToSize(*length, &byte_length) || byte_length > JSArrayBuffer::kMaxByteLength) { - JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer>::cast(result), isolate); + // ToNumber failed. THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); } - SharedFlag shared_flag = - (*target == target->native_context().array_buffer_fun()) - ? SharedFlag::kNotShared - : SharedFlag::kShared; - if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result), - isolate, byte_length, initialize, - shared_flag)) { + + auto backing_store = + BackingStore::Allocate(isolate, byte_length, shared, initialized); + if (!backing_store) { + // Allocation of backing store failed. THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed)); } - return *result; + + array_buffer->Attach(std::move(backing_store)); + return *array_buffer; } } // namespace @@ -80,7 +89,8 @@ BUILTIN(ArrayBufferConstructor) { isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); } - return ConstructBuffer(isolate, target, new_target, number_length, true); + return ConstructBuffer(isolate, target, new_target, number_length, + InitializedFlag::kZeroInitialized); } // This is a helper to construct an ArrayBuffer with uinitialized memory. @@ -91,7 +101,8 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) { Handle<JSFunction> target(isolate->native_context()->array_buffer_fun(), isolate); Handle<Object> length = args.atOrUndefined(isolate, 1); - return ConstructBuffer(isolate, target, target, length, false); + return ConstructBuffer(isolate, target, target, length, + InitializedFlag::kUninitialized); } // ES6 section 24.1.4.1 get ArrayBuffer.prototype.byteLength diff --git a/chromium/v8/src/builtins/builtins-async-function-gen.cc b/chromium/v8/src/builtins/builtins-async-function-gen.cc index 6ac37da3f6f..cfd355724e1 100644 --- a/chromium/v8/src/builtins/builtins-async-function-gen.cc +++ b/chromium/v8/src/builtins/builtins-async-function-gen.cc @@ -263,7 +263,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait( TNode<Object> value = CAST(Parameter(Descriptor::kValue)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - TNode<Object> outer_promise = LoadObjectField( + TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>( async_function_object, JSAsyncFunctionObject::kPromiseOffset); Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred); diff --git a/chromium/v8/src/builtins/builtins-async-gen.cc b/chromium/v8/src/builtins/builtins-async-gen.cc index 70d4eac9c8b..edcb0272265 100644 --- a/chromium/v8/src/builtins/builtins-async-gen.cc +++ b/chromium/v8/src/builtins/builtins-async-gen.cc @@ -6,6 +6,7 @@ #include "src/builtins/builtins-utils-gen.h" #include "src/heap/factory-inl.h" +#include "src/objects/js-generator.h" #include "src/objects/js-promise.h" #include "src/objects/shared-function-info.h" @@ -23,11 +24,12 @@ class ValueUnwrapContext { } // namespace -Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator, - Node* value, Node* outer_promise, - Node* on_resolve_context_index, - Node* on_reject_context_index, - Node* is_predicted_as_caught) { +TNode<Object> AsyncBuiltinsAssembler::AwaitOld( + TNode<Context> context, TNode<JSGeneratorObject> generator, + TNode<Object> value, TNode<JSPromise> outer_promise, + TNode<IntPtrT> on_resolve_context_index, + TNode<IntPtrT> on_reject_context_index, + 
TNode<Oddball> is_predicted_as_caught) { TNode<NativeContext> const native_context = LoadNativeContext(context); static const int kWrappedPromiseOffset = @@ -91,8 +93,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator, InitializeNativeClosure(closure_context, native_context, on_reject, on_reject_context_index); - VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer, - UndefinedConstant()); + TVARIABLE(HeapObject, var_throwaway, UndefinedConstant()); // Deal with PromiseHooks and debug support in the runtime. This // also allocates the throwaway promise, which is only needed in @@ -101,9 +102,9 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator, Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &if_debugging, &do_resolve_promise); BIND(&if_debugging); - var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInitOld, context, value, - wrapped_value, outer_promise, on_reject, - is_predicted_as_caught)); + var_throwaway = CAST(CallRuntime(Runtime::kAwaitPromisesInitOld, context, + value, wrapped_value, outer_promise, + on_reject, is_predicted_as_caught)); Goto(&do_resolve_promise); BIND(&do_resolve_promise); @@ -114,13 +115,13 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator, on_resolve, on_reject, var_throwaway.value()); } -Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, - Node* promise, Node* outer_promise, - Node* on_resolve_context_index, - Node* on_reject_context_index, - Node* is_predicted_as_caught) { +TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized( + TNode<Context> context, TNode<JSGeneratorObject> generator, + TNode<JSPromise> promise, TNode<JSPromise> outer_promise, + TNode<IntPtrT> on_resolve_context_index, + TNode<IntPtrT> on_reject_context_index, + TNode<Oddball> is_predicted_as_caught) { TNode<NativeContext> const native_context = LoadNativeContext(context); - CSA_ASSERT(this, IsJSPromise(promise)); static const int kResolveClosureOffset = FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS); @@ -130,8 +131,8 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, kRejectClosureOffset + JSFunction::kSizeWithoutPrototype; // 2. Let promise be ? PromiseResolve(« promise »). - // Node* const promise = - // CallBuiltin(Builtins::kPromiseResolve, context, promise_fun, value); + // We skip this step, because promise is already guaranteed to be a + // JSPRomise at this point. TNode<HeapObject> base = AllocateInNewSpace(kTotalSize); TNode<Context> closure_context = UncheckedCast<Context>(base); @@ -162,8 +163,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, InitializeNativeClosure(closure_context, native_context, on_reject, on_reject_context_index); - VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer, - UndefinedConstant()); + TVARIABLE(HeapObject, var_throwaway, UndefinedConstant()); // Deal with PromiseHooks and debug support in the runtime. 
This // also allocates the throwaway promise, which is only needed in @@ -172,9 +172,9 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &if_debugging, &do_perform_promise_then); BIND(&if_debugging); - var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, - promise, outer_promise, on_reject, - is_predicted_as_caught)); + var_throwaway = + CAST(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, promise, + outer_promise, on_reject, is_predicted_as_caught)); Goto(&do_perform_promise_then); BIND(&do_perform_promise_then); @@ -182,12 +182,13 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, on_resolve, on_reject, var_throwaway.value()); } -Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value, - Node* outer_promise, - Node* on_resolve_context_index, - Node* on_reject_context_index, - Node* is_predicted_as_caught) { - VARIABLE(result, MachineRepresentation::kTagged); +TNode<Object> AsyncBuiltinsAssembler::Await( + TNode<Context> context, TNode<JSGeneratorObject> generator, + TNode<Object> value, TNode<JSPromise> outer_promise, + TNode<IntPtrT> on_resolve_context_index, + TNode<IntPtrT> on_reject_context_index, + TNode<Oddball> is_predicted_as_caught) { + TVARIABLE(Object, result); Label if_old(this), if_new(this), done(this), if_slow_constructor(this, Label::kDeferred); @@ -197,7 +198,8 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value, // to allocate the wrapper promise and can just use the `AwaitOptimized` // logic. GotoIf(TaggedIsSmi(value), &if_old); - TNode<Map> const value_map = LoadMap(value); + TNode<HeapObject> value_object = CAST(value); + TNode<Map> const value_map = LoadMap(value_object); GotoIfNot(IsJSPromiseMap(value_map), &if_old); // We can skip the "constructor" lookup on {value} if it's [[Prototype]] // is the (initial) Promise.prototype and the @@species protector is @@ -223,25 +225,24 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value, } BIND(&if_old); - result.Bind(AwaitOld(context, generator, value, outer_promise, - on_resolve_context_index, on_reject_context_index, - is_predicted_as_caught)); + result = AwaitOld(context, generator, value, outer_promise, + on_resolve_context_index, on_reject_context_index, + is_predicted_as_caught); Goto(&done); BIND(&if_new); - result.Bind(AwaitOptimized(context, generator, value, outer_promise, - on_resolve_context_index, on_reject_context_index, - is_predicted_as_caught)); + result = AwaitOptimized(context, generator, CAST(value), outer_promise, + on_resolve_context_index, on_reject_context_index, + is_predicted_as_caught); Goto(&done); BIND(&done); return result.value(); } -void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context, - Node* native_context, - Node* function, - Node* context_index) { +void AsyncBuiltinsAssembler::InitializeNativeClosure( + TNode<Context> context, TNode<NativeContext> native_context, + TNode<HeapObject> function, TNode<IntPtrT> context_index) { TNode<Map> function_map = CAST(LoadContextElement( native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); // Ensure that we don't have to initialize prototype_or_initial_map field of @@ -276,24 +277,23 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context, StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code); } -Node* 
AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context, - Node* done) { - TNode<Object> const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<SharedFunctionInfo> const on_fulfilled_shared = CAST(LoadContextElement( +TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure( + TNode<NativeContext> native_context, TNode<Oddball> done) { + const TNode<Map> map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode<SharedFunctionInfo> on_fulfilled_shared = CAST(LoadContextElement( native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN)); - Node* const closure_context = + const TNode<Context> closure_context = AllocateAsyncIteratorValueUnwrapContext(native_context, done); return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared, closure_context); } -Node* AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext( - Node* native_context, Node* done) { - CSA_ASSERT(this, IsNativeContext(native_context)); +TNode<Context> AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext( + TNode<NativeContext> native_context, TNode<Oddball> done) { CSA_ASSERT(this, IsBoolean(done)); - Node* const context = + TNode<Context> context = CreatePromiseContext(native_context, ValueUnwrapContext::kLength); StoreContextElementNoWriteBarrier(context, ValueUnwrapContext::kDoneSlot, done); @@ -301,8 +301,8 @@ Node* AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext( } TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) { - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<Object> const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot); diff --git a/chromium/v8/src/builtins/builtins-async-gen.h b/chromium/v8/src/builtins/builtins-async-gen.h index 9dafddef210..7b9c944f4ac 100644 --- a/chromium/v8/src/builtins/builtins-async-gen.h +++ b/chromium/v8/src/builtins/builtins-async-gen.h @@ -21,20 +21,27 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler { // point to a SharedFunctioninfo instance used to create the closure. The // value following the reject index should be a similar value for the resolve // closure. Returns the Promise-wrapped `value`. 
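A usage note on the declarations below: callers pass two native-context slot constants plus a caught/uncaught flag, as the AsyncGeneratorYield hunk later in this patch does. The shape of such a call, quoted here as an illustrative comment rather than new code:

// const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN;
// const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
// Await(context, generator, value, outer_promise, on_resolve, on_reject,
//       is_caught);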
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise, - Node* on_resolve_context_index, Node* on_reject_context_index, - Node* is_predicted_as_caught); - Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise, - int on_resolve_context_index, int on_reject_context_index, - Node* is_predicted_as_caught) { + TNode<Object> Await(TNode<Context> context, + TNode<JSGeneratorObject> generator, TNode<Object> value, + TNode<JSPromise> outer_promise, + TNode<IntPtrT> on_resolve_context_index, + TNode<IntPtrT> on_reject_context_index, + TNode<Oddball> is_predicted_as_caught); + TNode<Object> Await(TNode<Context> context, + TNode<JSGeneratorObject> generator, TNode<Object> value, + TNode<JSPromise> outer_promise, + int on_resolve_context_index, int on_reject_context_index, + TNode<Oddball> is_predicted_as_caught) { return Await(context, generator, value, outer_promise, IntPtrConstant(on_resolve_context_index), IntPtrConstant(on_reject_context_index), is_predicted_as_caught); } - Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise, - int on_resolve_context_index, int on_reject_context_index, - bool is_predicted_as_caught) { + TNode<Object> Await(TNode<Context> context, + TNode<JSGeneratorObject> generator, TNode<Object> value, + TNode<JSPromise> outer_promise, + int on_resolve_context_index, int on_reject_context_index, + bool is_predicted_as_caught) { return Await(context, generator, value, outer_promise, on_resolve_context_index, on_reject_context_index, BooleanConstant(is_predicted_as_caught)); @@ -42,21 +49,30 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler { // Return a new built-in function object as defined in // Async Iterator Value Unwrap Functions - Node* CreateUnwrapClosure(Node* const native_context, Node* const done); + TNode<JSFunction> CreateUnwrapClosure(TNode<NativeContext> native_context, + TNode<Oddball> done); private: - void InitializeNativeClosure(Node* context, Node* native_context, - Node* function, Node* context_index); - Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context, - Node* done); + void InitializeNativeClosure(TNode<Context> context, + TNode<NativeContext> native_context, + TNode<HeapObject> function, + TNode<IntPtrT> context_index); + TNode<Context> AllocateAsyncIteratorValueUnwrapContext( + TNode<NativeContext> native_context, TNode<Oddball> done); - Node* AwaitOld(Node* context, Node* generator, Node* value, - Node* outer_promise, Node* on_resolve_context_index, - Node* on_reject_context_index, Node* is_predicted_as_caught); - Node* AwaitOptimized(Node* context, Node* generator, Node* value, - Node* outer_promise, Node* on_resolve_context_index, - Node* on_reject_context_index, - Node* is_predicted_as_caught); + TNode<Object> AwaitOld(TNode<Context> context, + TNode<JSGeneratorObject> generator, + TNode<Object> value, TNode<JSPromise> outer_promise, + TNode<IntPtrT> on_resolve_context_index, + TNode<IntPtrT> on_reject_context_index, + TNode<Oddball> is_predicted_as_caught); + TNode<Object> AwaitOptimized(TNode<Context> context, + TNode<JSGeneratorObject> generator, + TNode<JSPromise> promise, + TNode<JSPromise> outer_promise, + TNode<IntPtrT> on_resolve_context_index, + TNode<IntPtrT> on_reject_context_index, + TNode<Oddball> is_predicted_as_caught); }; } // namespace internal diff --git a/chromium/v8/src/builtins/builtins-async-generator-gen.cc b/chromium/v8/src/builtins/builtins-async-generator-gen.cc index 8053cf0dc8b..2ed7e8c83e0 100644 --- 
a/chromium/v8/src/builtins/builtins-async-generator-gen.cc +++ b/chromium/v8/src/builtins/builtins-async-generator-gen.cc @@ -23,146 +23,142 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler { explicit AsyncGeneratorBuiltinsAssembler(CodeAssemblerState* state) : AsyncBuiltinsAssembler(state) {} - inline Node* TaggedIsAsyncGenerator(Node* tagged_object) { - TNode<BoolT> if_notsmi = TaggedIsNotSmi(tagged_object); - return Select<BoolT>( - if_notsmi, - [=] { - return HasInstanceType(tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE); - }, - [=] { return if_notsmi; }); - } - inline Node* LoadGeneratorState(Node* const generator) { - return LoadObjectField(generator, JSGeneratorObject::kContinuationOffset); + inline TNode<Smi> LoadGeneratorState( + const TNode<JSGeneratorObject> generator) { + return LoadObjectField<Smi>(generator, + JSGeneratorObject::kContinuationOffset); } - inline TNode<BoolT> IsGeneratorStateClosed(SloppyTNode<Smi> const state) { + inline TNode<BoolT> IsGeneratorStateClosed(const TNode<Smi> state) { return SmiEqual(state, SmiConstant(JSGeneratorObject::kGeneratorClosed)); } - inline TNode<BoolT> IsGeneratorClosed(Node* const generator) { + inline TNode<BoolT> IsGeneratorClosed( + const TNode<JSGeneratorObject> generator) { return IsGeneratorStateClosed(LoadGeneratorState(generator)); } - inline TNode<BoolT> IsGeneratorStateSuspended(SloppyTNode<Smi> const state) { + inline TNode<BoolT> IsGeneratorStateSuspended(const TNode<Smi> state) { return SmiGreaterThanOrEqual(state, SmiConstant(0)); } - inline TNode<BoolT> IsGeneratorSuspended(Node* const generator) { + inline TNode<BoolT> IsGeneratorSuspended( + const TNode<JSGeneratorObject> generator) { return IsGeneratorStateSuspended(LoadGeneratorState(generator)); } - inline TNode<BoolT> IsGeneratorStateSuspendedAtStart( - SloppyTNode<Smi> const state) { + inline TNode<BoolT> IsGeneratorStateSuspendedAtStart(const TNode<Smi> state) { return SmiEqual(state, SmiConstant(0)); } - inline TNode<BoolT> IsGeneratorStateNotExecuting( - SloppyTNode<Smi> const state) { + inline TNode<BoolT> IsGeneratorStateNotExecuting(const TNode<Smi> state) { return SmiNotEqual(state, SmiConstant(JSGeneratorObject::kGeneratorExecuting)); } - inline TNode<BoolT> IsGeneratorNotExecuting(Node* const generator) { + inline TNode<BoolT> IsGeneratorNotExecuting( + const TNode<JSGeneratorObject> generator) { return IsGeneratorStateNotExecuting(LoadGeneratorState(generator)); } - inline TNode<BoolT> IsGeneratorAwaiting(Node* const generator) { + inline TNode<BoolT> IsGeneratorAwaiting( + const TNode<JSGeneratorObject> generator) { TNode<Object> is_generator_awaiting = LoadObjectField(generator, JSAsyncGeneratorObject::kIsAwaitingOffset); return TaggedEqual(is_generator_awaiting, SmiConstant(1)); } - inline void SetGeneratorAwaiting(Node* const generator) { + inline void SetGeneratorAwaiting(const TNode<JSGeneratorObject> generator) { CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator))); StoreObjectFieldNoWriteBarrier( generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1)); CSA_ASSERT(this, IsGeneratorAwaiting(generator)); } - inline void SetGeneratorNotAwaiting(Node* const generator) { + inline void SetGeneratorNotAwaiting( + const TNode<JSGeneratorObject> generator) { CSA_ASSERT(this, IsGeneratorAwaiting(generator)); StoreObjectFieldNoWriteBarrier( generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0)); CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator))); } - inline void 
CloseGenerator(Node* const generator) { + inline void CloseGenerator(const TNode<JSGeneratorObject> generator) { StoreObjectFieldNoWriteBarrier( generator, JSGeneratorObject::kContinuationOffset, SmiConstant(JSGeneratorObject::kGeneratorClosed)); } - inline Node* IsFastJSIterResult(Node* const value, Node* const context) { - CSA_ASSERT(this, TaggedIsNotSmi(value)); - TNode<NativeContext> const native_context = LoadNativeContext(context); - return TaggedEqual( - LoadMap(value), - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); - } - - inline Node* LoadFirstAsyncGeneratorRequestFromQueue(Node* const generator) { - return LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset); + inline TNode<HeapObject> LoadFirstAsyncGeneratorRequestFromQueue( + const TNode<JSGeneratorObject> generator) { + return LoadObjectField<HeapObject>(generator, + JSAsyncGeneratorObject::kQueueOffset); } - inline Node* LoadResumeTypeFromAsyncGeneratorRequest(Node* const request) { - return LoadObjectField(request, AsyncGeneratorRequest::kResumeModeOffset); + inline TNode<Smi> LoadResumeTypeFromAsyncGeneratorRequest( + const TNode<AsyncGeneratorRequest> request) { + return LoadObjectField<Smi>(request, + AsyncGeneratorRequest::kResumeModeOffset); } - inline Node* LoadPromiseFromAsyncGeneratorRequest(Node* const request) { - return LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset); + inline TNode<JSPromise> LoadPromiseFromAsyncGeneratorRequest( + const TNode<AsyncGeneratorRequest> request) { + return LoadObjectField<JSPromise>(request, + AsyncGeneratorRequest::kPromiseOffset); } - inline Node* LoadValueFromAsyncGeneratorRequest(Node* const request) { + inline TNode<Object> LoadValueFromAsyncGeneratorRequest( + const TNode<AsyncGeneratorRequest> request) { return LoadObjectField(request, AsyncGeneratorRequest::kValueOffset); } - inline TNode<BoolT> IsAbruptResumeType(SloppyTNode<Smi> const resume_type) { + inline TNode<BoolT> IsAbruptResumeType(const TNode<Smi> resume_type) { return SmiNotEqual(resume_type, SmiConstant(JSGeneratorObject::kNext)); } - void AsyncGeneratorEnqueue(CodeStubArguments* args, Node* context, - Node* generator, Node* value, + void AsyncGeneratorEnqueue(CodeStubArguments* args, TNode<Context> context, + TNode<Object> receiver, TNode<Object> value, JSAsyncGeneratorObject::ResumeMode resume_mode, const char* method_name); - Node* TakeFirstAsyncGeneratorRequestFromQueue(Node* generator); - Node* TakeFirstAsyncGeneratorRequestFromQueueIfPresent(Node* generator, - Label* if_not_present); - void AddAsyncGeneratorRequestToQueue(Node* generator, Node* request); + TNode<AsyncGeneratorRequest> TakeFirstAsyncGeneratorRequestFromQueue( + TNode<JSAsyncGeneratorObject> generator); + void AddAsyncGeneratorRequestToQueue(TNode<JSAsyncGeneratorObject> generator, + TNode<AsyncGeneratorRequest> request); - Node* AllocateAsyncGeneratorRequest( - JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value, - Node* promise); + TNode<AsyncGeneratorRequest> AllocateAsyncGeneratorRequest( + JSAsyncGeneratorObject::ResumeMode resume_mode, + TNode<Object> resume_value, TNode<JSPromise> promise); // Shared implementation of the catchable and uncatchable variations of Await // for AsyncGenerators. 
template <typename Descriptor> void AsyncGeneratorAwait(bool is_catchable); void AsyncGeneratorAwaitResumeClosure( - Node* context, Node* value, + TNode<Context> context, TNode<Object> value, JSAsyncGeneratorObject::ResumeMode resume_mode); }; // Shared implementation for the 3 Async Iterator protocol methods of Async // Generators. void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue( - CodeStubArguments* args, Node* context, Node* generator, Node* value, - JSAsyncGeneratorObject::ResumeMode resume_mode, const char* method_name) { + CodeStubArguments* args, TNode<Context> context, TNode<Object> receiver, + TNode<Object> value, JSAsyncGeneratorObject::ResumeMode resume_mode, + const char* method_name) { // AsyncGeneratorEnqueue produces a new Promise, and appends it to the list // of async generator requests to be executed. If the generator is not // presently executing, then this method will loop through, processing each // request from front to back. // This loop resides in AsyncGeneratorResumeNext. - Node* promise = AllocateAndInitJSPromise(context); - - Label enqueue(this), if_receiverisincompatible(this, Label::kDeferred); + TNode<JSPromise> promise = AllocateAndInitJSPromise(context); - Branch(TaggedIsAsyncGenerator(generator), &enqueue, - &if_receiverisincompatible); + Label if_receiverisincompatible(this, Label::kDeferred); + GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible); + GotoIfNot(HasInstanceType(CAST(receiver), JS_ASYNC_GENERATOR_OBJECT_TYPE), + &if_receiverisincompatible); - BIND(&enqueue); { Label done(this); - Node* const req = + const TNode<JSAsyncGeneratorObject> generator = CAST(receiver); + const TNode<AsyncGeneratorRequest> req = AllocateAsyncGeneratorRequest(resume_mode, value, promise); AddAsyncGeneratorRequestToQueue(generator, req); @@ -171,7 +167,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue( // If state is not "executing", then // Perform AsyncGeneratorResumeNext(Generator) // Check if the {receiver} is running or already closed. 
- TNode<Smi> continuation = CAST(LoadGeneratorState(generator)); + TNode<Smi> continuation = LoadGeneratorState(generator); GotoIf(SmiEqual(continuation, SmiConstant(JSAsyncGeneratorObject::kGeneratorExecuting)), @@ -186,20 +182,18 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue( BIND(&if_receiverisincompatible); { - Node* const error = - MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context, - StringConstant(method_name), generator); - - CallBuiltin(Builtins::kRejectPromise, context, promise, error, + CallBuiltin(Builtins::kRejectPromise, context, promise, + MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, + context, StringConstant(method_name), receiver), TrueConstant()); args->PopAndReturn(promise); } } -Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( - JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value, - Node* promise) { - CSA_SLOW_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE)); +TNode<AsyncGeneratorRequest> +AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( + JSAsyncGeneratorObject::ResumeMode resume_mode, TNode<Object> resume_value, + TNode<JSPromise> promise) { TNode<HeapObject> request = Allocate(AsyncGeneratorRequest::kSize); StoreMapNoWriteBarrier(request, RootIndex::kAsyncGeneratorRequestMap); StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kNextOffset, @@ -213,15 +207,14 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( promise); StoreObjectFieldRoot(request, AsyncGeneratorRequest::kNextOffset, RootIndex::kUndefinedValue); - return request; + return CAST(request); } void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure( - Node* context, Node* value, + TNode<Context> context, TNode<Object> value, JSAsyncGeneratorObject::ResumeMode resume_mode) { - TNode<Object> const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); - CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator)); + const TNode<JSAsyncGeneratorObject> generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); @@ -259,12 +252,13 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) { } void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue( - Node* generator, Node* request) { - VARIABLE(var_current, MachineRepresentation::kTagged); + TNode<JSAsyncGeneratorObject> generator, + TNode<AsyncGeneratorRequest> request) { + TVARIABLE(HeapObject, var_current); Label empty(this), loop(this, &var_current), done(this); - var_current.Bind( - LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset)); + var_current = LoadObjectField<HeapObject>( + generator, JSAsyncGeneratorObject::kQueueOffset); Branch(IsUndefined(var_current.value()), &empty, &loop); BIND(&empty); @@ -276,9 +270,9 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue( BIND(&loop); { Label loop_next(this), next_empty(this); - Node* current = var_current.value(); - TNode<Object> next = - LoadObjectField(current, AsyncGeneratorRequest::kNextOffset); + TNode<AsyncGeneratorRequest> current = CAST(var_current.value()); + TNode<HeapObject> next = LoadObjectField<HeapObject>( + current, AsyncGeneratorRequest::kNextOffset); Branch(IsUndefined(next), &next_empty, &loop_next); BIND(&next_empty); @@ -289,20 +283,20 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue( BIND(&loop_next); { - var_current.Bind(next); + var_current = next; Goto(&loop); } } BIND(&done); } 
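The two helpers being retyped here, AddAsyncGeneratorRequestToQueue above and TakeFirstAsyncGeneratorRequestFromQueue below, implement a singly linked FIFO threaded through AsyncGeneratorRequest::kNextOffset. A minimal sketch of that discipline in plain C++, with illustrative stand-in types instead of V8 heap objects:

struct RequestSketch {
  RequestSketch* next = nullptr;  // kNextOffset; Undefined modelled as nullptr
  int resume_mode = 0;            // kNext / kReturn / kThrow
};

struct GeneratorSketch {
  RequestSketch* queue = nullptr;  // JSAsyncGeneratorObject::kQueueOffset
};

// Append at the tail: walk the chain until the next slot is empty.
void AddRequestToQueue(GeneratorSketch* gen, RequestSketch* request) {
  if (gen->queue == nullptr) {
    gen->queue = request;
    return;
  }
  RequestSketch* current = gen->queue;
  while (current->next != nullptr) current = current->next;
  current->next = request;
}

// Remove from the head; the builtin asserts the queue is non-empty.
RequestSketch* TakeFirstRequestFromQueue(GeneratorSketch* gen) {
  RequestSketch* head = gen->queue;
  gen->queue = head->next;
  head->next = nullptr;
  return head;
}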
-Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue( - Node* generator) { +TNode<AsyncGeneratorRequest> +AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue( + TNode<JSAsyncGeneratorObject> generator) { // Removes and returns the first AsyncGeneratorRequest from a // JSAsyncGeneratorObject's queue. Asserts that the queue is not empty. - CSA_ASSERT(this, TaggedIsAsyncGenerator(generator)); - TNode<AsyncGeneratorRequest> request = - CAST(LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset)); + TNode<AsyncGeneratorRequest> request = LoadObjectField<AsyncGeneratorRequest>( + generator, JSAsyncGeneratorObject::kQueueOffset); TNode<Object> next = LoadObjectField(request, AsyncGeneratorRequest::kNextOffset); @@ -323,7 +317,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) { TNode<Object> generator = args.GetReceiver(); TNode<Object> value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorEnqueue(&args, context, generator, value, JSAsyncGeneratorObject::kNext, @@ -341,7 +335,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) { TNode<Object> generator = args.GetReceiver(); TNode<Object> value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorEnqueue(&args, context, generator, value, JSAsyncGeneratorObject::kReturn, @@ -359,7 +353,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) { TNode<Object> generator = args.GetReceiver(); TNode<Object> value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorEnqueue(&args, context, generator, value, JSAsyncGeneratorObject::kThrow, @@ -367,15 +361,15 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) { } TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* value = Parameter(Descriptor::kValue); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorAwaitResumeClosure(context, value, JSAsyncGeneratorObject::kNext); } TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) { - Node* value = Parameter(Descriptor::kValue); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorAwaitResumeClosure(context, value, JSAsyncGeneratorObject::kThrow); } @@ -392,8 +386,9 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) { TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { using Descriptor = AsyncGeneratorResumeNextDescriptor; - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const context = Parameter(Descriptor::kContext); + const TNode<JSAsyncGeneratorObject> generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); // The penultimate step of proposal-async-iteration/#sec-asyncgeneratorresolve // and proposal-async-iteration/#sec-asyncgeneratorreject both recursively @@ -403,12 
+398,10 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { // performs a loop in AsyncGeneratorResumeNext, which continues as long as // there is an AsyncGeneratorRequest in the queue, and as long as the // generator is not suspended due to an AwaitExpression. - VARIABLE(var_state, MachineRepresentation::kTaggedSigned, - LoadGeneratorState(generator)); - VARIABLE(var_next, MachineRepresentation::kTagged, - LoadFirstAsyncGeneratorRequestFromQueue(generator)); - Variable* loop_variables[] = {&var_state, &var_next}; - Label start(this, 2, loop_variables); + TVARIABLE(Smi, var_state, LoadGeneratorState(generator)); + TVARIABLE(HeapObject, var_next, + LoadFirstAsyncGeneratorRequestFromQueue(generator)); + Label start(this, {&var_state, &var_next}); Goto(&start); BIND(&start); @@ -420,9 +413,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { // Stop resuming if request queue is empty. ReturnIf(IsUndefined(var_next.value()), UndefinedConstant()); - Node* const next = var_next.value(); - TNode<Smi> const resume_type = - CAST(LoadResumeTypeFromAsyncGeneratorRequest(next)); + const TNode<AsyncGeneratorRequest> next = CAST(var_next.value()); + const TNode<Smi> resume_type = LoadResumeTypeFromAsyncGeneratorRequest(next); Label if_abrupt(this), if_normal(this), resume_generator(this); Branch(IsAbruptResumeType(resume_type), &if_abrupt, &if_normal); @@ -432,11 +424,11 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { GotoIfNot(IsGeneratorStateSuspendedAtStart(var_state.value()), &settle_promise); CloseGenerator(generator); - var_state.Bind(SmiConstant(JSGeneratorObject::kGeneratorClosed)); + var_state = SmiConstant(JSGeneratorObject::kGeneratorClosed); Goto(&settle_promise); BIND(&settle_promise); - Node* next_value = LoadValueFromAsyncGeneratorRequest(next); + TNode<Object> next_value = LoadValueFromAsyncGeneratorRequest(next); Branch(SmiEqual(resume_type, SmiConstant(JSGeneratorObject::kReturn)), &if_return, &if_throw); @@ -457,7 +449,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { GotoIfNot(IsGeneratorStateClosed(var_state.value()), &resume_generator); CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, next_value); - var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator); Goto(&start); } @@ -466,8 +458,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { GotoIfNot(IsGeneratorStateClosed(var_state.value()), &resume_generator); CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, UndefinedConstant(), TrueConstant()); - var_state.Bind(LoadGeneratorState(generator)); - var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + var_state = LoadGeneratorState(generator); + var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator); Goto(&start); } @@ -478,19 +470,19 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { generator, JSGeneratorObject::kResumeModeOffset, resume_type); CallStub(CodeFactory::ResumeGenerator(isolate()), context, LoadValueFromAsyncGeneratorRequest(next), generator); - var_state.Bind(LoadGeneratorState(generator)); - var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + var_state = LoadGeneratorState(generator); + var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator); Goto(&start); } } TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { - Node* const generator = 
Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const done = Parameter(Descriptor::kDone); - Node* const context = Parameter(Descriptor::kContext); + const TNode<JSAsyncGeneratorObject> generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<Object> done = CAST(Parameter(Descriptor::kDone)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator)); CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator))); // This operation should be called only when the `value` parameter has been @@ -499,11 +491,12 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { // non-callable value. This can't be checked with assertions due to being // observable, but keep it in mind. - Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator); - Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next); + const TNode<AsyncGeneratorRequest> next = + TakeFirstAsyncGeneratorRequestFromQueue(generator); + const TNode<JSPromise> promise = LoadPromiseFromAsyncGeneratorRequest(next); // Let iteratorResult be CreateIterResultObject(value, done). - TNode<HeapObject> const iter_result = Allocate(JSIteratorResult::kSize); + const TNode<HeapObject> iter_result = Allocate(JSIteratorResult::kSize); { TNode<Object> map = LoadContextElement(LoadNativeContext(context), Context::ITERATOR_RESULT_MAP_INDEX); @@ -555,25 +548,30 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) { using Descriptor = AsyncGeneratorRejectDescriptor; - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode<JSAsyncGeneratorObject> generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator); - Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next); + TNode<AsyncGeneratorRequest> next = + TakeFirstAsyncGeneratorRequestFromQueue(generator); + TNode<JSPromise> promise = LoadPromiseFromAsyncGeneratorRequest(next); Return(CallBuiltin(Builtins::kRejectPromise, context, promise, value, TrueConstant())); } TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) { - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const is_caught = Parameter(Descriptor::kIsCaught); - Node* const context = Parameter(Descriptor::kContext); + const TNode<JSGeneratorObject> generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator); - Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request); + const TNode<AsyncGeneratorRequest> request = + CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + const TNode<JSPromise> outer_promise = + LoadPromiseFromAsyncGeneratorRequest(request); const int on_resolve = 
Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN; const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN; @@ -585,10 +583,10 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) { } TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); - TNode<Object> const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<JSAsyncGeneratorObject> generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); @@ -617,33 +615,35 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) { // (per proposal-async-iteration/#sec-asyncgeneratorresumenext step 10.b.i) // // In all cases, the final step is to jump back to AsyncGeneratorResumeNext. - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const is_caught = Parameter(Descriptor::kIsCaught); - Node* const req = LoadFirstAsyncGeneratorRequestFromQueue(generator); - CSA_ASSERT(this, IsNotUndefined(req)); + const TNode<JSGeneratorObject> generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught)); + const TNode<AsyncGeneratorRequest> req = + CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator)); Label perform_await(this); - VARIABLE(var_on_resolve, MachineType::PointerRepresentation(), - IntPtrConstant( - Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN)); - VARIABLE( - var_on_reject, MachineType::PointerRepresentation(), + TVARIABLE(IntPtrT, var_on_resolve, + IntPtrConstant( + Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN)); + TVARIABLE( + IntPtrT, var_on_reject, IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN)); - Node* const state = LoadGeneratorState(generator); + const TNode<Smi> state = LoadGeneratorState(generator); GotoIf(IsGeneratorStateClosed(state), &perform_await); - var_on_resolve.Bind( - IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN)); - var_on_reject.Bind( - IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN)); + var_on_resolve = + IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN); + var_on_reject = + IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN); Goto(&perform_await); BIND(&perform_await); SetGeneratorAwaiting(generator); - Node* const context = Parameter(Descriptor::kContext); - Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + const TNode<JSPromise> outer_promise = + LoadPromiseFromAsyncGeneratorRequest(req); Await(context, generator, value, outer_promise, var_on_resolve.value(), var_on_reject.value(), is_caught); @@ -656,8 +656,8 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) { // proposal-async-iteration/#sec-asyncgeneratoryield step 8.e TF_BUILTIN(AsyncGeneratorReturnResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + const TNode<Object> 
value = CAST(Parameter(Descriptor::kValue)); AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn); } @@ -666,10 +666,10 @@ TF_BUILTIN(AsyncGeneratorReturnResolveClosure, // AsyncGeneratorResumeNext. TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); - TNode<Object> const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<JSAsyncGeneratorObject> generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); @@ -684,10 +684,10 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure, TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); - TNode<Object> const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<JSAsyncGeneratorObject> generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); diff --git a/chromium/v8/src/builtins/builtins-async-iterator-gen.cc b/chromium/v8/src/builtins/builtins-async-iterator-gen.cc index 0b5c5ef8b96..39ff8c92172 100644 --- a/chromium/v8/src/builtins/builtins-async-iterator-gen.cc +++ b/chromium/v8/src/builtins/builtins-async-iterator-gen.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
+#include "src/base/optional.h" #include "src/builtins/builtins-async-gen.h" #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" @@ -20,29 +21,34 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { explicit AsyncFromSyncBuiltinsAssembler(compiler::CodeAssemblerState* state) : AsyncBuiltinsAssembler(state) {} - void ThrowIfNotAsyncFromSyncIterator(Node* const context, Node* const object, + void ThrowIfNotAsyncFromSyncIterator(const TNode<Context> context, + const TNode<Object> object, Label* if_exception, - Variable* var_exception, + TVariable<Object>* var_exception, const char* method_name); - using UndefinedMethodHandler = std::function<void( - Node* const context, Node* const promise, Label* if_exception)>; - using SyncIteratorNodeGenerator = std::function<Node*(Node*)>; + using UndefinedMethodHandler = + std::function<void(const TNode<NativeContext> native_context, + const TNode<JSPromise> promise, Label* if_exception)>; + using SyncIteratorNodeGenerator = + std::function<TNode<Object>(TNode<JSReceiver>)>; void Generate_AsyncFromSyncIteratorMethod( - Node* const context, Node* const iterator, Node* const sent_value, + const TNode<Context> context, const TNode<Object> iterator, + const TNode<Object> sent_value, const SyncIteratorNodeGenerator& get_method, const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type = Label::kDeferred, - Node* const initial_exception_value = nullptr); + base::Optional<TNode<Object>> initial_exception_value = base::nullopt); void Generate_AsyncFromSyncIteratorMethod( - Node* const context, Node* const iterator, Node* const sent_value, - Handle<String> name, const UndefinedMethodHandler& if_method_undefined, + const TNode<Context> context, const TNode<Object> iterator, + const TNode<Object> sent_value, Handle<String> name, + const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type = Label::kDeferred, - Node* const initial_exception_value = nullptr) { - auto get_method = [=](Node* const sync_iterator) { + base::Optional<TNode<Object>> initial_exception_value = base::nullopt) { + auto get_method = [=](const TNode<JSReceiver> sync_iterator) { return GetProperty(context, sync_iterator, name); }; return Generate_AsyncFromSyncIteratorMethod( @@ -51,26 +57,26 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { } // Load "value" and "done" from an iterator result object. If an exception - // is thrown at any point, jumps to te `if_exception` label with exception + // is thrown at any point, jumps to the `if_exception` label with exception // stored in `var_exception`. // // Returns a Pair of Nodes, whose first element is the value of the "value" // property, and whose second element is the value of the "done" property, // converted to a Boolean if needed. 
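For reference, a minimal standalone sketch (separate from the patch) of what LoadIteratorResult does at the spec level: read the "value" and "done" properties of an iterator result and coerce "done" to a boolean. The JSValue/JSObject model and the ToBoolean helper below are illustrative stand-ins, not V8 types; the CSA fast path in the hunk skips the property loads when the result's map is the native-context %IteratorResultMap%.

#include <map>
#include <string>
#include <utility>

struct JSValue {
  bool is_bool = false;
  bool bool_value = false;
  std::string payload;  // stand-in for any non-boolean JS value
};

using JSObject = std::map<std::string, JSValue>;

// Assumed coercion; the real ToBoolean also covers numbers, undefined, etc.
bool ToBoolean(const JSValue& v) {
  return v.is_bool ? v.bool_value : !v.payload.empty();
}

// Mirrors the generic path: two property loads, then ToBoolean on "done".
std::pair<JSValue, bool> LoadIteratorResult(const JSObject& iter_result) {
  JSValue value = iter_result.count("value") ? iter_result.at("value") : JSValue{};
  JSValue done = iter_result.count("done") ? iter_result.at("done") : JSValue{};
  return {value, ToBoolean(done)};
}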
- std::pair<Node*, Node*> LoadIteratorResult(Node* const context, - Node* const native_context, - Node* const iter_result, - Label* if_exception, - Variable* var_exception); + std::pair<TNode<Object>, TNode<Oddball>> LoadIteratorResult( + const TNode<Context> context, const TNode<NativeContext> native_context, + const TNode<Object> iter_result, Label* if_exception, + TVariable<Object>* var_exception); }; void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator( - Node* const context, Node* const object, Label* if_exception, - Variable* var_exception, const char* method_name) { + const TNode<Context> context, const TNode<Object> object, + Label* if_exception, TVariable<Object>* var_exception, + const char* method_name) { Label if_receiverisincompatible(this, Label::kDeferred), done(this); GotoIf(TaggedIsSmi(object), &if_receiverisincompatible); - Branch(HasInstanceType(object, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done, + Branch(HasInstanceType(CAST(object), JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done, &if_receiverisincompatible); BIND(&if_receiverisincompatible); @@ -79,13 +85,13 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator( // internal slot, then // Let badIteratorError be a new TypeError exception. - Node* const error = - MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context, - StringConstant(method_name), object); + TNode<HeapObject> error = + CAST(MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, + context, StringConstant(method_name), object)); // Perform ! Call(promiseCapability.[[Reject]], undefined, // « badIteratorError »). - var_exception->Bind(error); + *var_exception = error; Goto(if_exception); } @@ -93,26 +99,27 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator( } void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( - Node* const context, Node* const iterator, Node* const sent_value, - const SyncIteratorNodeGenerator& get_method, + const TNode<Context> context, const TNode<Object> iterator, + const TNode<Object> sent_value, const SyncIteratorNodeGenerator& get_method, const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type, - Node* const initial_exception_value) { - TNode<NativeContext> const native_context = LoadNativeContext(context); - Node* const promise = AllocateAndInitJSPromise(context); + base::Optional<TNode<Object>> initial_exception_value) { + const TNode<NativeContext> native_context = LoadNativeContext(context); + const TNode<JSPromise> promise = AllocateAndInitJSPromise(context); - VARIABLE(var_exception, MachineRepresentation::kTagged, - initial_exception_value == nullptr ? UndefinedConstant() - : initial_exception_value); + TVARIABLE( + Object, var_exception, + initial_exception_value ? 
*initial_exception_value : UndefinedConstant()); Label reject_promise(this, reject_label_type); ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise, &var_exception, operation_name); - TNode<Object> const sync_iterator = - LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset); + TNode<JSAsyncFromSyncIterator> async_iterator = CAST(iterator); + const TNode<JSReceiver> sync_iterator = LoadObjectField<JSReceiver>( + async_iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset); - Node* const method = get_method(sync_iterator); + TNode<Object> method = get_method(sync_iterator); if (if_method_undefined) { Label if_isnotundefined(this); @@ -123,21 +130,21 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( BIND(&if_isnotundefined); } - Node* const iter_result = CallJS(CodeFactory::Call(isolate()), context, - method, sync_iterator, sent_value); + const TNode<Object> iter_result = CallJS( + CodeFactory::Call(isolate()), context, method, sync_iterator, sent_value); GotoIfException(iter_result, &reject_promise, &var_exception); - Node* value; - Node* done; + TNode<Object> value; + TNode<Oddball> done; std::tie(value, done) = LoadIteratorResult( context, native_context, iter_result, &reject_promise, &var_exception); - TNode<JSFunction> const promise_fun = + const TNode<JSFunction> promise_fun = CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX)); CSA_ASSERT(this, IsConstructor(promise_fun)); // Let valueWrapper be PromiseResolve(%Promise%, « value »). - TNode<Object> const value_wrapper = CallBuiltin( + const TNode<Object> value_wrapper = CallBuiltin( Builtins::kPromiseResolve, native_context, promise_fun, value); // IfAbruptRejectPromise(valueWrapper, promiseCapability). GotoIfException(value_wrapper, &reject_promise, &var_exception); @@ -145,7 +152,8 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( // Let onFulfilled be a new built-in function object as defined in // Async Iterator Value Unwrap Functions. // Set onFulfilled.[[Done]] to throwDone. - Node* const on_fulfilled = CreateUnwrapClosure(native_context, done); + const TNode<JSFunction> on_fulfilled = + CreateUnwrapClosure(native_context, done); // Perform ! PerformPromiseThen(valueWrapper, // onFulfilled, undefined, promiseCapability). 
@@ -154,35 +162,39 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( BIND(&reject_promise); { - Node* const exception = var_exception.value(); + const TNode<Object> exception = var_exception.value(); CallBuiltin(Builtins::kRejectPromise, context, promise, exception, TrueConstant()); Return(promise); } } -std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( - Node* const context, Node* const native_context, Node* const iter_result, - Label* if_exception, Variable* var_exception) { + +std::pair<TNode<Object>, TNode<Oddball>> +AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( + const TNode<Context> context, const TNode<NativeContext> native_context, + const TNode<Object> iter_result, Label* if_exception, + TVariable<Object>* var_exception) { Label if_fastpath(this), if_slowpath(this), merge(this), to_boolean(this), done(this), if_notanobject(this, Label::kDeferred); GotoIf(TaggedIsSmi(iter_result), &if_notanobject); - TNode<Map> const iter_result_map = LoadMap(iter_result); + const TNode<Map> iter_result_map = LoadMap(CAST(iter_result)); GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject); - TNode<Object> const fast_iter_result_map = + const TNode<Object> fast_iter_result_map = LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); - VARIABLE(var_value, MachineRepresentation::kTagged); - VARIABLE(var_done, MachineRepresentation::kTagged); + TVARIABLE(Object, var_value); + TVARIABLE(Object, var_done); Branch(TaggedEqual(iter_result_map, fast_iter_result_map), &if_fastpath, &if_slowpath); BIND(&if_fastpath); { - var_done.Bind(LoadObjectField(iter_result, JSIteratorResult::kDoneOffset)); - var_value.Bind( - LoadObjectField(iter_result, JSIteratorResult::kValueOffset)); + TNode<JSObject> fast_iter_result = CAST(iter_result); + var_done = LoadObjectField(fast_iter_result, JSIteratorResult::kDoneOffset); + var_value = + LoadObjectField(fast_iter_result, JSIteratorResult::kValueOffset); Goto(&merge); } @@ -190,18 +202,18 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( { // Let nextDone be IteratorComplete(nextResult). // IfAbruptRejectPromise(nextDone, promiseCapability). - TNode<Object> const done = + const TNode<Object> done = GetProperty(context, iter_result, factory()->done_string()); GotoIfException(done, if_exception, var_exception); // Let nextValue be IteratorValue(nextResult). // IfAbruptRejectPromise(nextValue, promiseCapability). - TNode<Object> const value = + const TNode<Object> value = GetProperty(context, iter_result, factory()->value_string()); GotoIfException(value, if_exception, var_exception); - var_value.Bind(value); - var_done.Bind(done); + var_value = value; + var_done = done; Goto(&merge); } @@ -209,27 +221,27 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( { // Sync iterator result is not an object --- Produce a TypeError and jump // to the `if_exception` path. - Node* const error = MakeTypeError( - MessageTemplate::kIteratorResultNotAnObject, context, iter_result); - var_exception->Bind(error); + const TNode<Object> error = CAST(MakeTypeError( + MessageTemplate::kIteratorResultNotAnObject, context, iter_result)); + *var_exception = error; Goto(if_exception); } BIND(&merge); // Ensure `iterResult.done` is a Boolean. 
GotoIf(TaggedIsSmi(var_done.value()), &to_boolean); - Branch(IsBoolean(var_done.value()), &done, &to_boolean); + Branch(IsBoolean(CAST(var_done.value())), &done, &to_boolean); BIND(&to_boolean); { - TNode<Object> const result = + const TNode<Object> result = CallBuiltin(Builtins::kToBoolean, context, var_done.value()); - var_done.Bind(result); + var_done = result; Goto(&done); } BIND(&done); - return std::make_pair(var_value.value(), var_done.value()); + return std::make_pair(var_value.value(), CAST(var_done.value())); } } // namespace @@ -237,12 +249,13 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( // https://tc39.github.io/proposal-async-iteration/ // Section #sec-%asyncfromsynciteratorprototype%.next TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) { - Node* const iterator = Parameter(Descriptor::kReceiver); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - auto get_method = [=](Node* const unused) { - return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset); + auto get_method = [=](const TNode<JSReceiver> unused) { + return LoadObjectField(CAST(iterator), + JSAsyncFromSyncIterator::kNextOffset); }; Generate_AsyncFromSyncIteratorMethod( context, iterator, value, get_method, UndefinedMethodHandler(), @@ -253,15 +266,16 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) { // Section #sec-%asyncfromsynciteratorprototype%.return TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn, AsyncFromSyncBuiltinsAssembler) { - Node* const iterator = Parameter(Descriptor::kReceiver); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - auto if_return_undefined = [=](Node* const native_context, - Node* const promise, Label* if_exception) { + auto if_return_undefined = [=](const TNode<NativeContext> native_context, + const TNode<JSPromise> promise, + Label* if_exception) { // If return is undefined, then // Let iterResult be ! CreateIterResultObject(value, true) - TNode<Object> const iter_result = CallBuiltin( + const TNode<Object> iter_result = CallBuiltin( Builtins::kCreateIterResultObject, context, value, TrueConstant()); // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »). 
@@ -280,11 +294,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn, // Section #sec-%asyncfromsynciteratorprototype%.throw TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow, AsyncFromSyncBuiltinsAssembler) { - Node* const iterator = Parameter(Descriptor::kReceiver); - Node* const reason = Parameter(Descriptor::kReason); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> reason = CAST(Parameter(Descriptor::kReason)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - auto if_throw_undefined = [=](Node* const native_context, Node* const promise, + auto if_throw_undefined = [=](const TNode<NativeContext> native_context, + const TNode<JSPromise> promise, Label* if_exception) { Goto(if_exception); }; Generate_AsyncFromSyncIteratorMethod( diff --git a/chromium/v8/src/builtins/builtins-async-module.cc b/chromium/v8/src/builtins/builtins-async-module.cc new file mode 100644 index 00000000000..fecdb31cf3c --- /dev/null +++ b/chromium/v8/src/builtins/builtins-async-module.cc @@ -0,0 +1,33 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/builtins/builtins-utils-inl.h" +#include "src/objects/module-inl.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +BUILTIN(CallAsyncModuleFulfilled) { + HandleScope handle_scope(isolate); + Handle<SourceTextModule> module( + isolate->global_handles()->Create(*args.at<SourceTextModule>(0))); + SourceTextModule::AsyncModuleExecutionFulfilled(isolate, module); + return ReadOnlyRoots(isolate).undefined_value(); +} + +BUILTIN(CallAsyncModuleRejected) { + HandleScope handle_scope(isolate); + + // Arguments should be a SourceTextModule and an exception object. + DCHECK_EQ(args.length(), 2); + Handle<SourceTextModule> module( + isolate->global_handles()->Create(*args.at<SourceTextModule>(0))); + Handle<Object> exception(args.at(1)); + SourceTextModule::AsyncModuleExecutionRejected(isolate, module, exception); + return ReadOnlyRoots(isolate).undefined_value(); +} + +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/builtins/builtins-bigint.cc b/chromium/v8/src/builtins/builtins-bigint.cc index 1201ce97300..30da5207f90 100644 --- a/chromium/v8/src/builtins/builtins-bigint.cc +++ b/chromium/v8/src/builtins/builtins-bigint.cc @@ -125,26 +125,21 @@ Object BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix, BUILTIN(BigIntPrototypeToLocaleString) { HandleScope scope(isolate); + const char* method = "BigInt.prototype.toLocaleString"; #ifdef V8_INTL_SUPPORT - if (FLAG_harmony_intl_bigint) { - // 1. Let x be ? thisBigIntValue(this value). - Handle<BigInt> x; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, x, - ThisBigIntValue(isolate, args.receiver(), - "BigInt.prototype.toLocaleString")); - - RETURN_RESULT_OR_FAILURE( - isolate, - Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1), - args.atOrUndefined(isolate, 2))); - } - // Fallbacks to old toString implemention if flag is off or no - // V8_INTL_SUPPORT + // 1. Let x be ? thisBigIntValue(this value). 
+ Handle<BigInt> x; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, x, ThisBigIntValue(isolate, args.receiver(), method)); + + RETURN_RESULT_OR_FAILURE( + isolate, + Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1), + args.atOrUndefined(isolate, 2), method)); + // Fallbacks to old toString implemention if no V8_INTL_SUPPORT #endif // V8_INTL_SUPPORT Handle<Object> radix = isolate->factory()->undefined_value(); - return BigIntToStringImpl(args.receiver(), radix, isolate, - "BigInt.prototype.toLocaleString"); + return BigIntToStringImpl(args.receiver(), radix, isolate, method); } BUILTIN(BigIntPrototypeToString) { diff --git a/chromium/v8/src/builtins/builtins-call-gen.cc b/chromium/v8/src/builtins/builtins-call-gen.cc index 91370b08967..fd1ad5bb67c 100644 --- a/chromium/v8/src/builtins/builtins-call-gen.cc +++ b/chromium/v8/src/builtins/builtins-call-gen.cc @@ -9,6 +9,7 @@ #include "src/codegen/macro-assembler.h" #include "src/common/globals.h" #include "src/execution/isolate.h" +#include "src/execution/protectors.h" #include "src/objects/api-callbacks.h" #include "src/objects/arguments.h" #include "src/objects/property-cell.h" @@ -17,9 +18,6 @@ namespace v8 { namespace internal { -template <typename T> -using TNode = compiler::TNode<T>; - void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined( MacroAssembler* masm) { Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined); @@ -297,7 +295,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread( TNode<PropertyCell> protector_cell = ArrayIteratorProtectorConstant(); GotoIf( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorInvalid)), + SmiConstant(Protectors::kProtectorInvalid)), &if_generic); { // The fast-path accesses the {spread} elements directly. diff --git a/chromium/v8/src/builtins/builtins-collections-gen.cc b/chromium/v8/src/builtins/builtins-collections-gen.cc index dec4142c65f..c0ca74a577b 100644 --- a/chromium/v8/src/builtins/builtins-collections-gen.cc +++ b/chromium/v8/src/builtins/builtins-collections-gen.cc @@ -8,6 +8,7 @@ #include "src/builtins/builtins-iterator-gen.h" #include "src/builtins/builtins-utils-gen.h" #include "src/codegen/code-stub-assembler.h" +#include "src/execution/protectors.h" #include "src/heap/factory-inl.h" #include "src/heap/heap-inl.h" #include "src/objects/hash-table-inl.h" @@ -19,8 +20,6 @@ namespace internal { using compiler::Node; template <class T> -using TNode = compiler::TNode<T>; -template <class T> using TVariable = compiler::TypedCodeAssemblerVariable<T>; class BaseCollectionsAssembler : public CodeStubAssembler { @@ -81,8 +80,8 @@ class BaseCollectionsAssembler : public CodeStubAssembler { TNode<JSReceiver> new_target); // Allocates the backing store for a collection. - virtual TNode<Object> AllocateTable(Variant variant, TNode<Context> context, - TNode<IntPtrT> at_least_space_for) = 0; + virtual TNode<HeapObject> AllocateTable( + Variant variant, TNode<IntPtrT> at_least_space_for) = 0; // Main entry point for a collection constructor builtin. void GenerateConstructor(Variant variant, @@ -124,7 +123,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler { TNode<IntPtrT> EstimatedInitialSize(TNode<Object> initial_entries, TNode<BoolT> is_fast_jsarray); - void GotoIfNotJSReceiver(Node* const obj, Label* if_not_receiver); + void GotoIfNotJSReceiver(TNode<Object> const obj, Label* if_not_receiver); // Determines whether the collection's prototype has been modified. 
TNode<BoolT> HasInitialCollectionPrototype(Variant variant, @@ -160,8 +159,8 @@ void BaseCollectionsAssembler::AddConstructorEntry( ? LoadKeyValuePairNoSideEffects(context, key_value, if_may_have_side_effects) : LoadKeyValuePair(context, key_value); - Node* key_n = pair.key; - Node* value_n = pair.value; + TNode<Object> key_n = pair.key; + TNode<Object> value_n = pair.value; CallJS(CodeFactory::Call(isolate()), context, add_function, collection, key_n, value_n); } else { @@ -183,7 +182,7 @@ void BaseCollectionsAssembler::AddConstructorEntries( Goto(&allocate_table); BIND(&allocate_table); { - TNode<Object> table = AllocateTable(variant, context, at_least_space_for); + TNode<HeapObject> table = AllocateTable(variant, at_least_space_for); StoreObjectField(collection, GetTableOffset(variant), table); GotoIf(IsNullOrUndefined(initial_entries), &exit); GotoIfInitialAddFunctionModified(variant, CAST(native_context), @@ -261,7 +260,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( &if_doubles); BIND(&if_smiorobjects); { - auto set_entry = [&](Node* index) { + auto set_entry = [&](TNode<IntPtrT> index) { TNode<Object> element = LoadAndNormalizeFixedArrayElement( CAST(elements), UncheckedCast<IntPtrT>(index)); AddConstructorEntry(variant, context, collection, add_func, element, @@ -272,8 +271,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( // elements, a fast loop is used. This assumes that adding an element // to the collection does not call user code that could mutate the elements // or collection. - BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, - ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1, + IndexAdvanceMode::kPost); Goto(&exit); } BIND(&if_doubles); @@ -288,13 +287,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( element); } else { DCHECK(variant == kSet || variant == kWeakSet); - auto set_entry = [&](Node* index) { + auto set_entry = [&](TNode<IntPtrT> index) { TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement( elements, UncheckedCast<IntPtrT>(index)); AddConstructorEntry(variant, context, collection, add_func, entry); }; - BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, - ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1, + IndexAdvanceMode::kPost); Goto(&exit); } } @@ -523,10 +522,10 @@ TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize( [=] { return IntPtrConstant(0); }); } -void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj, +void BaseCollectionsAssembler::GotoIfNotJSReceiver(TNode<Object> const obj, Label* if_not_receiver) { GotoIf(TaggedIsSmi(obj), if_not_receiver); - GotoIfNot(IsJSReceiver(obj), if_not_receiver); + GotoIfNot(IsJSReceiver(CAST(obj)), if_not_receiver); } TNode<Map> BaseCollectionsAssembler::GetInitialCollectionPrototype( @@ -608,22 +607,24 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { protected: template <typename IteratorType> - Node* AllocateJSCollectionIterator(SloppyTNode<Context> context, - int map_index, Node* collection); - TNode<Object> AllocateTable(Variant variant, TNode<Context> context, - TNode<IntPtrT> at_least_space_for) override; - TNode<IntPtrT> GetHash(SloppyTNode<HeapObject> const key); - TNode<IntPtrT> CallGetHashRaw(SloppyTNode<HeapObject> const key); - TNode<Smi> CallGetOrCreateHashRaw(SloppyTNode<HeapObject> const key); + TNode<HeapObject> 
AllocateJSCollectionIterator( + const TNode<Context> context, int map_index, + const TNode<HeapObject> collection); + TNode<HeapObject> AllocateTable(Variant variant, + TNode<IntPtrT> at_least_space_for) override; + TNode<IntPtrT> GetHash(const TNode<HeapObject> key); + TNode<IntPtrT> CallGetHashRaw(const TNode<HeapObject> key); + TNode<Smi> CallGetOrCreateHashRaw(const TNode<HeapObject> key); // Transitions the iterator to the non obsolete backing store. // This is a NOP if the [table] is not obsolete. - using UpdateInTransition = - std::function<void(Node* const table, Node* const index)>; + template <typename TableType> + using UpdateInTransition = std::function<void(const TNode<TableType> table, + const TNode<IntPtrT> index)>; template <typename TableType> std::pair<TNode<TableType>, TNode<IntPtrT>> Transition( TNode<TableType> const table, TNode<IntPtrT> const index, - UpdateInTransition const& update_in_transition); + UpdateInTransition<TableType> const& update_in_transition); template <typename IteratorType, typename TableType> std::pair<TNode<TableType>, TNode<IntPtrT>> TransitionAndUpdate( TNode<IteratorType> const iterator); @@ -635,35 +636,33 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. template <typename CollectionType> - void FindOrderedHashTableEntryForSmiKey(Node* table, - SloppyTNode<Smi> key_tagged, - Variable* result, Label* entry_found, - Label* not_found); - void SameValueZeroSmi(SloppyTNode<Smi> key_smi, - SloppyTNode<Object> candidate_key, Label* if_same, - Label* if_not_same); + void FindOrderedHashTableEntryForSmiKey(TNode<CollectionType> table, + TNode<Smi> key_tagged, + TVariable<IntPtrT>* result, + Label* entry_found, Label* not_found); + void SameValueZeroSmi(TNode<Smi> key_smi, TNode<Object> candidate_key, + Label* if_same, Label* if_not_same); // Specialization for heap numbers. // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. - void SameValueZeroHeapNumber(SloppyTNode<Float64T> key_float, - SloppyTNode<Object> candidate_key, - Label* if_same, Label* if_not_same); + void SameValueZeroHeapNumber(TNode<Float64T> key_float, + TNode<Object> candidate_key, Label* if_same, + Label* if_not_same); template <typename CollectionType> void FindOrderedHashTableEntryForHeapNumberKey( - SloppyTNode<Context> context, Node* table, - SloppyTNode<HeapNumber> key_heap_number, Variable* result, - Label* entry_found, Label* not_found); + TNode<CollectionType> table, TNode<HeapNumber> key_heap_number, + TVariable<IntPtrT>* result, Label* entry_found, Label* not_found); // Specialization for bigints. // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. 
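For reference, a standalone sketch (separate from the patch) of the SameValueZero comparison that the SameValueZero* helpers above specialize per key type. For numbers it differs from strict equality only in that NaN matches NaN; +0 and -0 compare equal either way, which is why the hunk also normalizes number keys.

#include <cmath>

bool SameValueZeroNumber(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;  // NaN matches NaN
  return a == b;                                    // +0 equals -0 here too
}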
- void SameValueZeroBigInt(Node* key, Node* candidate_key, Label* if_same, - Label* if_not_same); + void SameValueZeroBigInt(TNode<BigInt> key, TNode<Object> candidate_key, + Label* if_same, Label* if_not_same); template <typename CollectionType> - void FindOrderedHashTableEntryForBigIntKey(SloppyTNode<Context> context, - Node* table, Node* key, - Variable* result, + void FindOrderedHashTableEntryForBigIntKey(TNode<CollectionType> table, + TNode<BigInt> key_big_int, + TVariable<IntPtrT>* result, Label* entry_found, Label* not_found); @@ -671,14 +670,14 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. template <typename CollectionType> - void FindOrderedHashTableEntryForStringKey( - SloppyTNode<Context> context, Node* table, SloppyTNode<String> key_tagged, - Variable* result, Label* entry_found, Label* not_found); - TNode<IntPtrT> ComputeStringHash(TNode<Context> context, - TNode<String> string_key); - void SameValueZeroString(SloppyTNode<Context> context, - SloppyTNode<String> key_string, - SloppyTNode<Object> candidate_key, Label* if_same, + void FindOrderedHashTableEntryForStringKey(TNode<CollectionType> table, + TNode<String> key_tagged, + TVariable<IntPtrT>* result, + Label* entry_found, + Label* not_found); + TNode<IntPtrT> ComputeStringHash(TNode<String> string_key); + void SameValueZeroString(TNode<String> key_string, + TNode<Object> candidate_key, Label* if_same, Label* if_not_same); // Specialization for non-strings, non-numbers. For those we only need @@ -687,26 +686,32 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { // or the hash code otherwise. If the hash-code has not been computed, it // should be Smi -1. template <typename CollectionType> - void FindOrderedHashTableEntryForOtherKey( - SloppyTNode<Context> context, Node* table, SloppyTNode<HeapObject> key, - Variable* result, Label* entry_found, Label* not_found); + void FindOrderedHashTableEntryForOtherKey(TNode<CollectionType> table, + TNode<HeapObject> key_heap_object, + TVariable<IntPtrT>* result, + Label* entry_found, + Label* not_found); template <typename CollectionType> - void TryLookupOrderedHashTableIndex(Node* const table, Node* const key, - Node* const context, Variable* result, + void TryLookupOrderedHashTableIndex(const TNode<CollectionType> table, + const TNode<Object> key, + TVariable<IntPtrT>* result, Label* if_entry_found, Label* if_not_found); - Node* NormalizeNumberKey(Node* key); + const TNode<Object> NormalizeNumberKey(const TNode<Object> key); void StoreOrderedHashMapNewEntry(TNode<OrderedHashMap> const table, - Node* const key, Node* const value, - Node* const hash, - Node* const number_of_buckets, - Node* const occupancy); + const TNode<Object> key, + const TNode<Object> value, + const TNode<IntPtrT> hash, + const TNode<IntPtrT> number_of_buckets, + const TNode<IntPtrT> occupancy); + void StoreOrderedHashSetNewEntry(TNode<OrderedHashSet> const table, - Node* const key, Node* const hash, - Node* const number_of_buckets, - Node* const occupancy); + const TNode<Object> key, + const TNode<IntPtrT> hash, + const TNode<IntPtrT> number_of_buckets, + const TNode<IntPtrT> occupancy); // Create a JSArray with PACKED_ELEMENTS kind from a Map.prototype.keys() or // Map.prototype.values() iterator. 
The iterator is assumed to satisfy @@ -727,11 +732,97 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { void BranchIfMapIteratorProtectorValid(Label* if_true, Label* if_false); void BranchIfSetIteratorProtectorValid(Label* if_true, Label* if_false); + + // Builds code that finds OrderedHashTable entry for a key with hash code + // {hash} with using the comparison code generated by {key_compare}. The code + // jumps to {entry_found} if the key is found, or to {not_found} if the key + // was not found. In the {entry_found} branch, the variable + // entry_start_position will be bound to the index of the entry (relative to + // OrderedHashTable::kHashTableStartIndex). + // + // The {CollectionType} template parameter stands for the particular instance + // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. + template <typename CollectionType> + void FindOrderedHashTableEntry( + Node* table, Node* hash, + const std::function<void(TNode<Object>, Label*, Label*)>& key_compare, + Variable* entry_start_position, Label* entry_found, Label* not_found); }; +template <typename CollectionType> +void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry( + Node* table, Node* hash, + const std::function<void(TNode<Object>, Label*, Label*)>& key_compare, + Variable* entry_start_position, Label* entry_found, Label* not_found) { + // Get the index of the bucket. + TNode<IntPtrT> const number_of_buckets = + SmiUntag(CAST(UnsafeLoadFixedArrayElement( + CAST(table), CollectionType::NumberOfBucketsIndex()))); + TNode<WordT> const bucket = + WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); + TNode<IntPtrT> const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + CAST(table), bucket, + CollectionType::HashTableStartIndex() * kTaggedSize))); + + // Walk the bucket chain. + TNode<IntPtrT> entry_start; + Label if_key_found(this); + { + TVARIABLE(IntPtrT, var_entry, first_entry); + Label loop(this, {&var_entry, entry_start_position}), + continue_next_entry(this); + Goto(&loop); + BIND(&loop); + + // If the entry index is the not-found sentinel, we are done. + GotoIf(IntPtrEqual(var_entry.value(), + IntPtrConstant(CollectionType::kNotFound)), + not_found); + + // Make sure the entry index is within range. + CSA_ASSERT( + this, + UintPtrLessThan( + var_entry.value(), + SmiUntag(SmiAdd( + CAST(UnsafeLoadFixedArrayElement( + CAST(table), CollectionType::NumberOfElementsIndex())), + CAST(UnsafeLoadFixedArrayElement( + CAST(table), + CollectionType::NumberOfDeletedElementsIndex())))))); + + // Compute the index of the entry relative to kHashTableStartIndex. + entry_start = + IntPtrAdd(IntPtrMul(var_entry.value(), + IntPtrConstant(CollectionType::kEntrySize)), + number_of_buckets); + + // Load the key from the entry. + TNode<Object> const candidate_key = UnsafeLoadFixedArrayElement( + CAST(table), entry_start, + CollectionType::HashTableStartIndex() * kTaggedSize); + + key_compare(candidate_key, &if_key_found, &continue_next_entry); + + BIND(&continue_next_entry); + // Load the index of the next entry in the bucket chain. 
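For reference, a standalone sketch (separate from the patch) of the bucket-chain walk that FindOrderedHashTableEntry emits, over a deliberately simplified flat set layout. The constants kEntrySize, kChainOffset and kNotFound mirror the ones used in the hunk; the FlatSet type itself is illustrative, not the real OrderedHashTable object.

#include <cstdint>
#include <vector>

// FlatSet models only what the walk needs: bucket heads in slots[0, buckets),
// then entries of kEntrySize slots each (key, chain link) appended after them.
struct FlatSet {
  int64_t number_of_buckets;  // power of two
  std::vector<int64_t> slots;
  static constexpr int64_t kEntrySize = 2;
  static constexpr int64_t kChainOffset = 1;
  static constexpr int64_t kNotFound = -1;
};

// Returns the entry start (relative to the first bucket slot) or kNotFound,
// following the same steps as the hunk: mask the hash to pick a bucket, then
// follow the per-entry chain links until the key matches or the chain ends.
int64_t FindEntry(const FlatSet& t, int64_t hash, int64_t key) {
  int64_t bucket = hash & (t.number_of_buckets - 1);
  int64_t entry = t.slots[bucket];
  while (entry != FlatSet::kNotFound) {
    int64_t entry_start = entry * FlatSet::kEntrySize + t.number_of_buckets;
    if (t.slots[entry_start] == key) return entry_start;    // key slot
    entry = t.slots[entry_start + FlatSet::kChainOffset];   // next in chain
  }
  return FlatSet::kNotFound;
}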
+ var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + CAST(table), entry_start, + (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) * + kTaggedSize))); + + Goto(&loop); + } + + BIND(&if_key_found); + entry_start_position->Bind(entry_start); + Goto(entry_found); +} + template <typename IteratorType> -Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator( - SloppyTNode<Context> context, int map_index, Node* collection) { +TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateJSCollectionIterator( + TNode<Context> const context, int map_index, + TNode<HeapObject> const collection) { TNode<Object> const table = LoadObjectField(collection, JSCollection::kTableOffset); TNode<NativeContext> const native_context = LoadNativeContext(context); @@ -749,9 +840,8 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator( return iterator; } -TNode<Object> CollectionsBuiltinsAssembler::AllocateTable( - Variant variant, TNode<Context> context, - TNode<IntPtrT> at_least_space_for) { +TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateTable( + Variant variant, TNode<IntPtrT> at_least_space_for) { return CAST((variant == kMap || variant == kWeakMap) ? AllocateOrderedHashTable<OrderedHashMap>() : AllocateOrderedHashTable<OrderedHashSet>()); @@ -778,7 +868,7 @@ TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) { } TNode<Smi> CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw( - SloppyTNode<HeapObject> const key) { + const TNode<HeapObject> key) { TNode<ExternalReference> const function_addr = ExternalConstant(ExternalReference::get_or_create_hash_raw()); TNode<ExternalReference> const isolate_ptr = @@ -787,15 +877,15 @@ TNode<Smi> CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw( MachineType type_ptr = MachineType::Pointer(); MachineType type_tagged = MachineType::AnyTagged(); - Node* const result = CallCFunction(function_addr, type_tagged, - std::make_pair(type_ptr, isolate_ptr), - std::make_pair(type_tagged, key)); + TNode<Smi> result = CAST(CallCFunction(function_addr, type_tagged, + std::make_pair(type_ptr, isolate_ptr), + std::make_pair(type_tagged, key))); - return CAST(result); + return result; } TNode<IntPtrT> CollectionsBuiltinsAssembler::CallGetHashRaw( - SloppyTNode<HeapObject> const key) { + const TNode<HeapObject> key) { TNode<ExternalReference> const function_addr = ExternalConstant(ExternalReference::orderedhashmap_gethash_raw()); TNode<ExternalReference> const isolate_ptr = @@ -804,15 +894,15 @@ TNode<IntPtrT> CollectionsBuiltinsAssembler::CallGetHashRaw( MachineType type_ptr = MachineType::Pointer(); MachineType type_tagged = MachineType::AnyTagged(); - Node* const result = CallCFunction(function_addr, type_tagged, - std::make_pair(type_ptr, isolate_ptr), - std::make_pair(type_tagged, key)); + TNode<Smi> result = CAST(CallCFunction(function_addr, type_tagged, + std::make_pair(type_ptr, isolate_ptr), + std::make_pair(type_tagged, key))); return SmiUntag(result); } TNode<IntPtrT> CollectionsBuiltinsAssembler::GetHash( - SloppyTNode<HeapObject> const key) { + const TNode<HeapObject> key) { TVARIABLE(IntPtrT, var_hash); Label if_receiver(this), if_other(this), done(this); Branch(IsJSReceiver(key), &if_receiver, &if_other); @@ -833,9 +923,10 @@ TNode<IntPtrT> CollectionsBuiltinsAssembler::GetHash( return var_hash.value(); } -void CollectionsBuiltinsAssembler::SameValueZeroSmi( - SloppyTNode<Smi> key_smi, SloppyTNode<Object> candidate_key, Label* if_same, - Label* if_not_same) { +void 
CollectionsBuiltinsAssembler::SameValueZeroSmi(TNode<Smi> key_smi, + TNode<Object> candidate_key, + Label* if_same, + Label* if_not_same) { // If the key is the same, we are done. GotoIf(TaggedEqual(candidate_key, key_smi), if_same); @@ -862,7 +953,7 @@ void CollectionsBuiltinsAssembler::BranchIfMapIteratorProtectorValid( DCHECK(isolate()->heap()->map_iterator_protector().IsPropertyCell()); Branch( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), if_true, if_false); } @@ -921,7 +1012,7 @@ void CollectionsBuiltinsAssembler::BranchIfSetIteratorProtectorValid( DCHECK(isolate()->heap()->set_iterator_protector().IsPropertyCell()); Branch( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), if_true, if_false); } @@ -998,15 +1089,15 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList( TNode<Map> array_map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); TNode<JSArray> array = - AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr, + AllocateJSArray(kind, array_map, size, SmiTag(size), {}, INTPTR_PARAMETERS, kAllowLargeObjectAllocation); TNode<FixedArray> elements = CAST(LoadElements(array)); const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; TNode<IntPtrT> first_to_element_offset = - ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0); - VARIABLE( - var_offset, MachineType::PointerRepresentation(), + ElementOffsetFromIndex(IntPtrConstant(0), kind, 0); + TVARIABLE( + IntPtrT, var_offset, IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset))); TVARIABLE(IntPtrT, var_index, index); VariableList vars({&var_index, &var_offset}, zone()); @@ -1053,8 +1144,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList( { // Increment the array offset and continue the loop to the next entry. 
var_index = cur_index; - var_offset.Bind( - IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize))); + var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)); Goto(&loop); } } @@ -1111,15 +1201,15 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList( TNode<Map> array_map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); TNode<JSArray> array = - AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr, + AllocateJSArray(kind, array_map, size, SmiTag(size), {}, INTPTR_PARAMETERS, kAllowLargeObjectAllocation); TNode<FixedArray> elements = CAST(LoadElements(array)); const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; TNode<IntPtrT> first_to_element_offset = - ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0); - VARIABLE( - var_offset, MachineType::PointerRepresentation(), + ElementOffsetFromIndex(IntPtrConstant(0), kind, 0); + TVARIABLE( + IntPtrT, var_offset, IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset))); TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); Label done(this), finalize(this, {&var_index}), @@ -1139,7 +1229,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList( Store(elements, var_offset.value(), entry_key); var_index = cur_index; - var_offset.Bind(IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize))); + var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)); Goto(&loop); } @@ -1164,13 +1254,13 @@ TF_BUILTIN(SetOrSetIteratorToList, CollectionsBuiltinsAssembler) { template <typename CollectionType> void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey( - Node* table, SloppyTNode<Smi> smi_key, Variable* result, Label* entry_found, - Label* not_found) { + TNode<CollectionType> table, TNode<Smi> smi_key, TVariable<IntPtrT>* result, + Label* entry_found, Label* not_found) { TNode<IntPtrT> const key_untagged = SmiUntag(smi_key); TNode<IntPtrT> const hash = ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged)); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry<CollectionType>( table, hash, [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) { @@ -1181,28 +1271,26 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey( template <typename CollectionType> void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey( - SloppyTNode<Context> context, Node* table, SloppyTNode<String> key_tagged, - Variable* result, Label* entry_found, Label* not_found) { - TNode<IntPtrT> const hash = ComputeStringHash(context, key_tagged); + TNode<CollectionType> table, TNode<String> key_tagged, + TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) { + TNode<IntPtrT> const hash = ComputeStringHash(key_tagged); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry<CollectionType>( table, hash, [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) { - SameValueZeroString(context, key_tagged, other_key, if_same, - if_not_same); + SameValueZeroString(key_tagged, other_key, if_same, if_not_same); }, result, entry_found, not_found); } template <typename CollectionType> void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey( - SloppyTNode<Context> context, Node* table, - SloppyTNode<HeapNumber> key_heap_number, Variable* result, - Label* entry_found, Label* not_found) { + 
TNode<CollectionType> table, TNode<HeapNumber> key_heap_number, + TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) { TNode<IntPtrT> const hash = CallGetHashRaw(key_heap_number); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; TNode<Float64T> const key_float = LoadHeapNumberValue(key_heap_number); FindOrderedHashTableEntry<CollectionType>( table, hash, @@ -1214,36 +1302,36 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey( template <typename CollectionType> void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey( - SloppyTNode<Context> context, Node* table, Node* key, Variable* result, - Label* entry_found, Label* not_found) { - TNode<IntPtrT> const hash = CallGetHashRaw(key); + TNode<CollectionType> table, TNode<BigInt> key_big_int, + TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) { + TNode<IntPtrT> const hash = CallGetHashRaw(key_big_int); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry<CollectionType>( table, hash, [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) { - SameValueZeroBigInt(key, other_key, if_same, if_not_same); + SameValueZeroBigInt(key_big_int, other_key, if_same, if_not_same); }, result, entry_found, not_found); } template <typename CollectionType> void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey( - SloppyTNode<Context> context, Node* table, SloppyTNode<HeapObject> key, - Variable* result, Label* entry_found, Label* not_found) { - TNode<IntPtrT> const hash = GetHash(key); + TNode<CollectionType> table, TNode<HeapObject> key_heap_object, + TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) { + TNode<IntPtrT> const hash = GetHash(key_heap_object); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry<CollectionType>( table, hash, [&](TNode<Object> other_key, Label* if_same, Label* if_not_same) { - Branch(TaggedEqual(key, other_key), if_same, if_not_same); + Branch(TaggedEqual(key_heap_object, other_key), if_same, if_not_same); }, result, entry_found, not_found); } TNode<IntPtrT> CollectionsBuiltinsAssembler::ComputeStringHash( - TNode<Context> context, TNode<String> string_key) { + TNode<String> string_key) { TVARIABLE(IntPtrT, var_result); Label hash_not_computed(this), done(this, &var_result); @@ -1261,25 +1349,23 @@ TNode<IntPtrT> CollectionsBuiltinsAssembler::ComputeStringHash( } void CollectionsBuiltinsAssembler::SameValueZeroString( - SloppyTNode<Context> context, SloppyTNode<String> key_string, - SloppyTNode<Object> candidate_key, Label* if_same, Label* if_not_same) { + TNode<String> key_string, TNode<Object> candidate_key, Label* if_same, + Label* if_not_same) { // If the candidate is not a string, the keys are not equal. 
GotoIf(TaggedIsSmi(candidate_key), if_not_same); GotoIfNot(IsString(CAST(candidate_key)), if_not_same); - Branch(TaggedEqual(CallBuiltin(Builtins::kStringEqual, context, key_string, - candidate_key), + Branch(TaggedEqual(CallBuiltin(Builtins::kStringEqual, NoContextConstant(), + key_string, candidate_key), TrueConstant()), if_same, if_not_same); } -void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key, - Node* candidate_key, - Label* if_same, - Label* if_not_same) { - CSA_ASSERT(this, IsBigInt(key)); +void CollectionsBuiltinsAssembler::SameValueZeroBigInt( + TNode<BigInt> key, TNode<Object> candidate_key, Label* if_same, + Label* if_not_same) { GotoIf(TaggedIsSmi(candidate_key), if_not_same); - GotoIfNot(IsBigInt(candidate_key), if_not_same); + GotoIfNot(IsBigInt(CAST(candidate_key)), if_not_same); Branch(TaggedEqual(CallRuntime(Runtime::kBigIntEqualToBigInt, NoContextConstant(), key, candidate_key), @@ -1288,8 +1374,8 @@ void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key, } void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber( - SloppyTNode<Float64T> key_float, SloppyTNode<Object> candidate_key, - Label* if_same, Label* if_not_same) { + TNode<Float64T> key_float, TNode<Object> candidate_key, Label* if_same, + Label* if_not_same) { Label if_smi(this), if_keyisnan(this); GotoIf(TaggedIsSmi(candidate_key), &if_smi); @@ -1339,20 +1425,20 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) { IntPtrConstant(OrderedHashMap::kClearedTableSentinel)), &return_zero); - VARIABLE(var_i, MachineType::PointerRepresentation(), IntPtrConstant(0)); - VARIABLE(var_index, MachineRepresentation::kTagged, index); + TVARIABLE(IntPtrT, var_i, IntPtrConstant(0)); + TVARIABLE(Smi, var_index, index); Label loop(this, {&var_i, &var_index}); Goto(&loop); BIND(&loop); { - Node* i = var_i.value(); + TNode<IntPtrT> i = var_i.value(); GotoIfNot(IntPtrLessThan(i, number_of_deleted_elements), &return_index); STATIC_ASSERT(OrderedHashMap::RemovedHolesIndex() == OrderedHashSet::RemovedHolesIndex()); TNode<Smi> removed_index = CAST(LoadFixedArrayElement( CAST(table), i, OrderedHashMap::RemovedHolesIndex() * kTaggedSize)); GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index); - Decrement(&var_index, 1, SMI_PARAMETERS); + Decrement(&var_index); Increment(&var_i); Goto(&loop); } @@ -1368,7 +1454,7 @@ template <typename TableType> std::pair<TNode<TableType>, TNode<IntPtrT>> CollectionsBuiltinsAssembler::Transition( TNode<TableType> const table, TNode<IntPtrT> const index, - UpdateInTransition const& update_in_transition) { + UpdateInTransition<TableType> const& update_in_transition) { TVARIABLE(IntPtrT, var_index, index); TVARIABLE(TableType, var_table, table); Label if_done(this), if_transition(this, Label::kDeferred); @@ -1413,7 +1499,8 @@ CollectionsBuiltinsAssembler::TransitionAndUpdate( return Transition<TableType>( CAST(LoadObjectField(iterator, IteratorType::kTableOffset)), LoadAndUntagObjectField(iterator, IteratorType::kIndexOffset), - [this, iterator](Node* const table, Node* const index) { + [this, iterator](const TNode<TableType> table, + const TNode<IntPtrT> index) { // Update the {iterator} with the new state. 
StoreObjectField(iterator, IteratorType::kTableOffset, table); StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset, @@ -1460,13 +1547,14 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table, } TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get"); - TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset); + TNode<Object> const table = + LoadObjectField<Object>(CAST(receiver), JSMap::kTableOffset); TNode<Smi> index = CAST( CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key)); @@ -1485,13 +1573,14 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) { } TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has"); - TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset); + TNode<Object> const table = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); TNode<Smi> index = CAST( CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key)); @@ -1506,17 +1595,18 @@ TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) { Return(FalseConstant()); } -Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) { - VARIABLE(result, MachineRepresentation::kTagged, key); +const TNode<Object> CollectionsBuiltinsAssembler::NormalizeNumberKey( + const TNode<Object> key) { + TVARIABLE(Object, result, key); Label done(this); GotoIf(TaggedIsSmi(key), &done); - GotoIfNot(IsHeapNumber(key), &done); - TNode<Float64T> const number = LoadHeapNumberValue(key); + GotoIfNot(IsHeapNumber(CAST(key)), &done); + TNode<Float64T> const number = LoadHeapNumberValue(CAST(key)); GotoIfNot(Float64Equal(number, Float64Constant(0.0)), &done); // We know the value is zero, so we take the key to be Smi 0. // Another option would be to normalize to Smi here. 
- result.Bind(SmiConstant(0)); + result = SmiConstant(0); Goto(&done); BIND(&done); @@ -1524,25 +1614,23 @@ Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) { } TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set"); key = NormalizeNumberKey(key); TNode<OrderedHashMap> const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex<OrderedHashMap>(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex<OrderedHashMap>( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(&entry_found); // If we found the entry, we just store the value there. @@ -1561,18 +1649,18 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { &add_entry); // Otherwise, go to runtime to compute the hash code. - entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key))); + entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key))); Goto(&add_entry); } BIND(&add_entry); - VARIABLE(number_of_buckets, MachineType::PointerRepresentation()); - VARIABLE(occupancy, MachineType::PointerRepresentation()); + TVARIABLE(IntPtrT, number_of_buckets); + TVARIABLE(IntPtrT, occupancy); TVARIABLE(OrderedHashMap, table_var, table); { // Check we have enough space for the entry. - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table, OrderedHashMap::NumberOfBucketsIndex())))); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table, OrderedHashMap::NumberOfBucketsIndex()))); STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2); TNode<WordT> const capacity = WordShl(number_of_buckets.value(), 1); @@ -1580,20 +1668,21 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset()))); TNode<IntPtrT> const number_of_deleted = SmiUntag(CAST(LoadObjectField( table, OrderedHashMap::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted)); + occupancy = IntPtrAdd(number_of_elements, number_of_deleted); GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry); // We do not have enough space, grow the table and reload the relevant // fields. 
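For reference, a standalone sketch (separate from the patch) of the occupancy check Map.prototype.set performs before storing a new entry, using the load factor of 2 visible in the hunk (capacity = number_of_buckets << 1, occupancy = elements + deleted); when the check fails, the builtin falls back to Runtime::kMapGrow and reloads the table fields.

#include <cstdint>

struct TableCounts {
  int64_t number_of_buckets;  // power of two
  int64_t number_of_elements;
  int64_t number_of_deleted_elements;
};

// True when the insert must first grow the table: deleted holes keep counting
// toward occupancy until the next rehash compacts them away.
bool NeedsGrowBeforeInsert(const TableCounts& t) {
  int64_t capacity = t.number_of_buckets << 1;  // kLoadFactor == 2
  int64_t occupancy = t.number_of_elements + t.number_of_deleted_elements;
  return occupancy >= capacity;
}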
CallRuntime(Runtime::kMapGrow, context, receiver); - table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset)); - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table_var.value(), OrderedHashMap::NumberOfBucketsIndex())))); + table_var = + LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table_var.value(), OrderedHashMap::NumberOfBucketsIndex()))); TNode<IntPtrT> const new_number_of_elements = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashMap::NumberOfElementsOffset()))); TNode<IntPtrT> const new_number_of_deleted = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashMap::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted)); + occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted); Goto(&store_new_entry); } BIND(&store_new_entry); @@ -1605,15 +1694,16 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { } void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry( - TNode<OrderedHashMap> const table, Node* const key, Node* const value, - Node* const hash, Node* const number_of_buckets, Node* const occupancy) { - TNode<WordT> const bucket = + TNode<OrderedHashMap> const table, const TNode<Object> key, + const TNode<Object> value, const TNode<IntPtrT> hash, + const TNode<IntPtrT> number_of_buckets, const TNode<IntPtrT> occupancy) { + TNode<IntPtrT> const bucket = WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement( table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize)); // Store the entry elements. - TNode<WordT> const entry_start = IntPtrAdd( + TNode<IntPtrT> const entry_start = IntPtrAdd( IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)), number_of_buckets); UnsafeStoreFixedArrayElement( @@ -1642,23 +1732,21 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry( } TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.delete"); TNode<OrderedHashMap> const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex<OrderedHashMap>(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex<OrderedHashMap>( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(¬_found); Return(FalseConstant()); @@ -1703,24 +1791,22 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = 
CAST(Parameter(Descriptor::kReceiver)); + TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.add"); key = NormalizeNumberKey(key); TNode<OrderedHashSet> const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex<OrderedHashSet>(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex<OrderedHashSet>( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(&entry_found); // The entry was found, there is nothing to do. @@ -1735,18 +1821,18 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { &add_entry); // Otherwise, go to runtime to compute the hash code. - entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key))); + entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key))); Goto(&add_entry); } BIND(&add_entry); - VARIABLE(number_of_buckets, MachineType::PointerRepresentation()); - VARIABLE(occupancy, MachineType::PointerRepresentation()); + TVARIABLE(IntPtrT, number_of_buckets); + TVARIABLE(IntPtrT, occupancy); TVARIABLE(OrderedHashSet, table_var, table); { // Check we have enough space for the entry. - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table, OrderedHashSet::NumberOfBucketsIndex())))); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table, OrderedHashSet::NumberOfBucketsIndex()))); STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2); TNode<WordT> const capacity = WordShl(number_of_buckets.value(), 1); @@ -1754,20 +1840,21 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset()))); TNode<IntPtrT> const number_of_deleted = SmiUntag(CAST(LoadObjectField( table, OrderedHashSet::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted)); + occupancy = IntPtrAdd(number_of_elements, number_of_deleted); GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry); // We do not have enough space, grow the table and reload the relevant // fields. 
CallRuntime(Runtime::kSetGrow, context, receiver); - table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset)); - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table_var.value(), OrderedHashSet::NumberOfBucketsIndex())))); + table_var = + LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table_var.value(), OrderedHashSet::NumberOfBucketsIndex()))); TNode<IntPtrT> const new_number_of_elements = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashSet::NumberOfElementsOffset()))); TNode<IntPtrT> const new_number_of_deleted = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashSet::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted)); + occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted); Goto(&store_new_entry); } BIND(&store_new_entry); @@ -1779,15 +1866,16 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { } void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry( - TNode<OrderedHashSet> const table, Node* const key, Node* const hash, - Node* const number_of_buckets, Node* const occupancy) { - TNode<WordT> const bucket = + TNode<OrderedHashSet> const table, const TNode<Object> key, + const TNode<IntPtrT> hash, const TNode<IntPtrT> number_of_buckets, + const TNode<IntPtrT> occupancy) { + TNode<IntPtrT> const bucket = WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement( table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize)); // Store the entry elements. - TNode<WordT> const entry_start = IntPtrAdd( + TNode<IntPtrT> const entry_start = IntPtrAdd( IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)), number_of_buckets); UnsafeStoreFixedArrayElement( @@ -1812,23 +1900,21 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry( } TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.delete"); TNode<OrderedHashSet> const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField<OrderedHashSet>(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex<OrderedHashSet>(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex<OrderedHashSet>( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(¬_found); Return(FalseConstant()); @@ -1869,29 +1955,30 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { } TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = 
CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.entries"); Return(AllocateJSCollectionIterator<JSMapIterator>( - context, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "get Map.prototype.size"); TNode<OrderedHashMap> const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField<OrderedHashMap>(CAST(receiver), JSMap::kTableOffset); Return(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset())); } TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Map.prototype.forEach"; - Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount); - Node* const context = Parameter(Descriptor::kContext); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + CodeStubArguments args(this, argc); TNode<Object> const receiver = args.GetReceiver(); TNode<Object> const callback = args.GetOptionalArgumentValue(0); TNode<Object> const this_arg = args.GetOptionalArgumentValue(1); @@ -1914,8 +2001,8 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { // the {receiver} while we're iterating. TNode<IntPtrT> index = var_index.value(); TNode<OrderedHashMap> table = var_table.value(); - std::tie(table, index) = - Transition<OrderedHashMap>(table, index, [](Node*, Node*) {}); + std::tie(table, index) = Transition<OrderedHashMap>( + table, index, [](const TNode<OrderedHashMap>, const TNode<IntPtrT>) {}); // Read the next entry from the {table}, skipping holes. 
TNode<Object> entry_key; @@ -1951,31 +2038,32 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { } TF_BUILTIN(MapPrototypeKeys, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.keys"); Return(AllocateJSCollectionIterator<JSMapIterator>( - context, Context::MAP_KEY_ITERATOR_MAP_INDEX, receiver)); + context, Context::MAP_KEY_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.values"); Return(AllocateJSCollectionIterator<JSMapIterator>( - context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Map Iterator.prototype.next"; - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); // Ensure that the {receiver} is actually a JSMapIterator. Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid); - TNode<Uint16T> const receiver_instance_type = LoadInstanceType(receiver); + TNode<Uint16T> const receiver_instance_type = + LoadInstanceType(CAST(receiver)); GotoIf( InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_VALUE_ITERATOR_TYPE), &if_receiver_valid); @@ -1989,8 +2077,8 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&if_receiver_valid); // Check if the {receiver} is exhausted. - VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant()); - VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant()); + TVARIABLE(Oddball, var_done, TrueConstant()); + TVARIABLE(Object, var_value, UndefinedConstant()); Label return_value(this, {&var_done, &var_value}), return_entry(this), return_end(this, Label::kDeferred); @@ -2007,22 +2095,22 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { NextSkipHoles<OrderedHashMap>(table, index, &return_end); StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset, SmiTag(index)); - var_value.Bind(entry_key); - var_done.Bind(FalseConstant()); + var_value = entry_key; + var_done = FalseConstant(); // Check how to return the {key} (depending on {receiver} type). 
GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE), &return_value); - var_value.Bind(LoadFixedArrayElement( + var_value = LoadFixedArrayElement( table, entry_start_position, (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) * - kTaggedSize)); + kTaggedSize); Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE), &return_value, &return_entry); BIND(&return_entry); { - Node* result = + TNode<JSObject> result = AllocateJSIteratorResultForEntry(context, entry_key, var_value.value()); Return(result); } @@ -2043,23 +2131,22 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has"); - TNode<Object> const table = LoadObjectField(receiver, JSMap::kTableOffset); + TNode<Object> const table = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position, MachineType::PointerRepresentation(), - IntPtrConstant(0)); - VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0)); Label if_key_smi(this), if_key_string(this), if_key_heap_number(this), if_key_bigint(this), entry_found(this), not_found(this), done(this); GotoIf(TaggedIsSmi(key), &if_key_smi); - TNode<Map> key_map = LoadMap(key); + TNode<Map> key_map = LoadMap(CAST(key)); TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map); GotoIf(IsStringInstanceType(key_instance_type), &if_key_string); @@ -2067,30 +2154,34 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint); FindOrderedHashTableEntryForOtherKey<OrderedHashSet>( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, ¬_found); BIND(&if_key_smi); { FindOrderedHashTableEntryForSmiKey<OrderedHashSet>( - table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&if_key_string); { FindOrderedHashTableEntryForStringKey<OrderedHashSet>( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&if_key_heap_number); { FindOrderedHashTableEntryForHeapNumberKey<OrderedHashSet>( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&if_key_bigint); { FindOrderedHashTableEntryForBigIntKey<OrderedHashSet>( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&entry_found); @@ -2101,29 +2192,30 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = 
CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.entries"); Return(AllocateJSCollectionIterator<JSSetIterator>( - context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "get Set.prototype.size"); TNode<OrderedHashSet> const table = - CAST(LoadObjectField(receiver, JSSet::kTableOffset)); + LoadObjectField<OrderedHashSet>(CAST(receiver), JSSet::kTableOffset); Return(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset())); } TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Set.prototype.forEach"; - Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount); - Node* const context = Parameter(Descriptor::kContext); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + CodeStubArguments args(this, argc); TNode<Object> const receiver = args.GetReceiver(); TNode<Object> const callback = args.GetOptionalArgumentValue(0); TNode<Object> const this_arg = args.GetOptionalArgumentValue(1); @@ -2146,12 +2238,12 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) { // the {receiver} while we're iterating. TNode<IntPtrT> index = var_index.value(); TNode<OrderedHashSet> table = var_table.value(); - std::tie(table, index) = - Transition<OrderedHashSet>(table, index, [](Node*, Node*) {}); + std::tie(table, index) = Transition<OrderedHashSet>( + table, index, [](const TNode<OrderedHashSet>, const TNode<IntPtrT>) {}); // Read the next entry from the {table}, skipping holes. 
- Node* entry_key; - Node* entry_start_position; + TNode<Object> entry_key; + TNode<IntPtrT> entry_start_position; std::tie(entry_key, entry_start_position, index) = NextSkipHoles<OrderedHashSet>(table, index, &done_loop); @@ -2176,23 +2268,24 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.values"); Return(AllocateJSCollectionIterator<JSSetIterator>( - context, Context::SET_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, Context::SET_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Set Iterator.prototype.next"; - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); // Ensure that the {receiver} is actually a JSSetIterator. Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid); - TNode<Uint16T> const receiver_instance_type = LoadInstanceType(receiver); + TNode<Uint16T> const receiver_instance_type = + LoadInstanceType(CAST(receiver)); GotoIf(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE), &if_receiver_valid); Branch( @@ -2204,8 +2297,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&if_receiver_valid); // Check if the {receiver} is exhausted. - VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant()); - VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant()); + TVARIABLE(Oddball, var_done, TrueConstant()); + TVARIABLE(Object, var_value, UndefinedConstant()); Label return_value(this, {&var_done, &var_value}), return_entry(this), return_end(this, Label::kDeferred); @@ -2216,14 +2309,14 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { TransitionAndUpdate<JSSetIterator, OrderedHashSet>(CAST(receiver)); // Read the next entry from the {table}, skipping holes. - Node* entry_key; - Node* entry_start_position; + TNode<Object> entry_key; + TNode<IntPtrT> entry_start_position; std::tie(entry_key, entry_start_position, index) = NextSkipHoles<OrderedHashSet>(table, index, &return_end); StoreObjectFieldNoWriteBarrier(receiver, JSSetIterator::kIndexOffset, SmiTag(index)); - var_value.Bind(entry_key); - var_done.Bind(FalseConstant()); + var_value = entry_key; + var_done = FalseConstant(); // Check how to return the {key} (depending on {receiver} type). 
Branch(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE), @@ -2231,8 +2324,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&return_entry); { - Node* result = AllocateJSIteratorResultForEntry(context, var_value.value(), - var_value.value()); + TNode<JSObject> result = AllocateJSIteratorResultForEntry( + context, var_value.value(), var_value.value()); Return(result); } @@ -2253,14 +2346,14 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { template <typename CollectionType> void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex( - Node* const table, Node* const key, Node* const context, Variable* result, - Label* if_entry_found, Label* if_not_found) { + const TNode<CollectionType> table, const TNode<Object> key, + TVariable<IntPtrT>* result, Label* if_entry_found, Label* if_not_found) { Label if_key_smi(this), if_key_string(this), if_key_heap_number(this), if_key_bigint(this); GotoIf(TaggedIsSmi(key), &if_key_smi); - TNode<Map> key_map = LoadMap(key); + TNode<Map> key_map = LoadMap(CAST(key)); TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map); GotoIf(IsStringInstanceType(key_instance_type), &if_key_string); @@ -2268,44 +2361,42 @@ void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex( GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint); FindOrderedHashTableEntryForOtherKey<CollectionType>( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); BIND(&if_key_smi); { FindOrderedHashTableEntryForSmiKey<CollectionType>( - table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } BIND(&if_key_string); { FindOrderedHashTableEntryForStringKey<CollectionType>( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } BIND(&if_key_heap_number); { FindOrderedHashTableEntryForHeapNumberKey<CollectionType>( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } BIND(&if_key_bigint); { FindOrderedHashTableEntryForBigIntKey<CollectionType>( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } } TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) { - Node* const table = Parameter(Descriptor::kTable); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<OrderedHashMap> table = CAST(Parameter(Descriptor::kTable)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); - VARIABLE(entry_start_position, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0)); Label entry_found(this), not_found(this); TryLookupOrderedHashTableIndex<OrderedHashMap>( - table, key, context, &entry_start_position, &entry_found, ¬_found); + table, key, &entry_start_position, &entry_found, ¬_found); BIND(&entry_found); Return(SmiTag(entry_start_position.value())); @@ -2324,8 +2415,8 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { TNode<Object> key, TNode<Object> value, TNode<IntPtrT> number_of_elements); - TNode<Object> AllocateTable(Variant variant, TNode<Context> context, - TNode<IntPtrT> at_least_space_for) override; + TNode<HeapObject> AllocateTable(Variant variant, + TNode<IntPtrT> at_least_space_for) override; // 
Generates and sets the identity for a JSRececiver. TNode<Smi> CreateIdentityHash(TNode<Object> receiver); @@ -2390,9 +2481,8 @@ void WeakCollectionsBuiltinsAssembler::AddEntry( SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER); } -TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable( - Variant variant, TNode<Context> context, - TNode<IntPtrT> at_least_space_for) { +TNode<HeapObject> WeakCollectionsBuiltinsAssembler::AllocateTable( + Variant variant, TNode<IntPtrT> at_least_space_for) { // See HashTable::New(). CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(0), at_least_space_for)); @@ -2446,8 +2536,7 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex( TVARIABLE(IntPtrT, var_entry, WordAnd(key_hash, entry_mask)); TVARIABLE(IntPtrT, var_count, IntPtrConstant(0)); - Variable* loop_vars[] = {&var_count, &var_entry}; - Label loop(this, arraysize(loop_vars), loop_vars), if_found(this); + Label loop(this, {&var_count, &var_entry}), if_found(this); Goto(&loop); BIND(&loop); TNode<IntPtrT> key_index; @@ -2631,9 +2720,9 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) { } TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label return_undefined(this); @@ -2653,9 +2742,9 @@ TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) { } TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label return_false(this); @@ -2817,9 +2906,9 @@ TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) { } TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label return_false(this); diff --git a/chromium/v8/src/builtins/builtins-collections-gen.h b/chromium/v8/src/builtins/builtins-collections-gen.h index 2bde108e9ae..a132557e3cd 100644 --- a/chromium/v8/src/builtins/builtins-collections-gen.h +++ b/chromium/v8/src/builtins/builtins-collections-gen.h @@ -11,13 +11,13 @@ namespace v8 { namespace internal { void BranchIfIterableWithOriginalKeyOrValueMapIterator( - compiler::CodeAssemblerState* state, compiler::TNode<Object> iterable, - compiler::TNode<Context> context, compiler::CodeAssemblerLabel* if_true, + compiler::CodeAssemblerState* state, TNode<Object> iterable, + TNode<Context> context, compiler::CodeAssemblerLabel* if_true, compiler::CodeAssemblerLabel* if_false); void BranchIfIterableWithOriginalValueSetIterator( - compiler::CodeAssemblerState* state, compiler::TNode<Object> iterable, - compiler::TNode<Context> 
context, compiler::CodeAssemblerLabel* if_true, + compiler::CodeAssemblerState* state, TNode<Object> iterable, + TNode<Context> context, compiler::CodeAssemblerLabel* if_true, compiler::CodeAssemblerLabel* if_false); } // namespace internal diff --git a/chromium/v8/src/builtins/builtins-console-gen.cc b/chromium/v8/src/builtins/builtins-console-gen.cc index 1d6a22f6111..8a4c8b83da3 100644 --- a/chromium/v8/src/builtins/builtins-console-gen.cc +++ b/chromium/v8/src/builtins/builtins-console-gen.cc @@ -15,15 +15,13 @@ TF_BUILTIN(FastConsoleAssert, CodeStubAssembler) { Label runtime(this); Label out(this); - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. TNode<Int32T> argc = UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); - Node* context = Parameter(Descriptor::kContext); - Node* new_target = Parameter(Descriptor::kJSNewTarget); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget)); GotoIf(Word32Equal(argc, Int32Constant(0)), &runtime); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); BranchIfToBooleanIsTrue(args.AtIndex(0), &out, &runtime); BIND(&out); args.PopAndReturn(UndefinedConstant()); diff --git a/chromium/v8/src/builtins/builtins-console.cc b/chromium/v8/src/builtins/builtins-console.cc index 28c9261ed41..bc743b6e70a 100644 --- a/chromium/v8/src/builtins/builtins-console.cc +++ b/chromium/v8/src/builtins/builtins-console.cc @@ -39,8 +39,7 @@ namespace internal { namespace { void ConsoleCall( - Isolate* isolate, - internal::BuiltinArguments& args, // NOLINT(runtime/references) + Isolate* isolate, const internal::BuiltinArguments& args, void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&, const v8::debug::ConsoleContext&)) { CHECK(!isolate->has_pending_exception()); diff --git a/chromium/v8/src/builtins/builtins-constructor-gen.cc b/chromium/v8/src/builtins/builtins-constructor-gen.cc index 856718cedfb..bc03e86f791 100644 --- a/chromium/v8/src/builtins/builtins-constructor-gen.cc +++ b/chromium/v8/src/builtins/builtins-constructor-gen.cc @@ -57,12 +57,11 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) { using Node = compiler::Node; TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { - Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo); - Node* feedback_cell = Parameter(Descriptor::kFeedbackCell); - Node* context = Parameter(Descriptor::kContext); - - CSA_ASSERT(this, IsFeedbackCell(feedback_cell)); - CSA_ASSERT(this, IsSharedFunctionInfo(shared_function_info)); + TNode<SharedFunctionInfo> shared_function_info = + CAST(Parameter(Descriptor::kSharedFunctionInfo)); + TNode<FeedbackCell> feedback_cell = + CAST(Parameter(Descriptor::kFeedbackCell)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1); @@ -90,9 +89,8 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { // The calculation of |function_map_index| must be in sync with // SharedFunctionInfo::function_map_index(). 
- Node* const flags = - LoadObjectField(shared_function_info, SharedFunctionInfo::kFlagsOffset, - MachineType::Uint32()); + TNode<Uint32T> flags = LoadObjectField<Uint32T>( + shared_function_info, SharedFunctionInfo::kFlagsOffset); TNode<IntPtrT> const function_map_index = Signed(IntPtrAdd( DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(flags), IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX))); @@ -161,7 +159,7 @@ TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) { TailCallRuntime(Runtime::kNewObject, context, target, new_target); } -compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject( +TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject( SloppyTNode<Context> context, SloppyTNode<JSFunction> target, SloppyTNode<JSReceiver> new_target) { TVARIABLE(JSObject, var_obj); @@ -178,7 +176,7 @@ compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject( return var_obj.value(); } -compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject( +TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject( SloppyTNode<Context> context, SloppyTNode<JSFunction> target, SloppyTNode<JSReceiver> new_target, Label* call_runtime) { // Verify that the new target is a JSFunction. @@ -202,17 +200,17 @@ compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject( LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset); GotoIf(TaggedNotEqual(target, new_target_constructor), call_runtime); - VARIABLE(properties, MachineRepresentation::kTagged); + TVARIABLE(HeapObject, properties); Label instantiate_map(this), allocate_properties(this); GotoIf(IsDictionaryMap(initial_map), &allocate_properties); { - properties.Bind(EmptyFixedArrayConstant()); + properties = EmptyFixedArrayConstant(); Goto(&instantiate_map); } BIND(&allocate_properties); { - properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity)); + properties = AllocateNameDictionary(NameDictionary::kInitialCapacity); Goto(&instantiate_map); } @@ -221,11 +219,12 @@ compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject( kNone, kWithSlackTracking); } -Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( - Node* scope_info, Node* slots_uint32, Node* context, ScopeType scope_type) { - TNode<IntPtrT> slots = Signed(ChangeUint32ToWord(slots_uint32)); - TNode<IntPtrT> size = ElementOffsetFromIndex( - slots, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::kTodoHeaderSize); +TNode<Context> ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( + TNode<ScopeInfo> scope_info, TNode<Uint32T> slots, TNode<Context> context, + ScopeType scope_type) { + TNode<IntPtrT> slots_intptr = Signed(ChangeUint32ToWord(slots)); + TNode<IntPtrT> size = ElementOffsetFromIndex(slots_intptr, PACKED_ELEMENTS, + Context::kTodoHeaderSize); // Create a new closure from the given function info in new space TNode<Context> function_context = @@ -246,7 +245,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( StoreMapNoWriteBarrier(function_context, context_type); TNode<IntPtrT> min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS); // TODO(ishell): for now, length also includes MIN_CONTEXT_SLOTS. 
- TNode<IntPtrT> length = IntPtrAdd(slots, min_context_slots); + TNode<IntPtrT> length = IntPtrAdd(slots_intptr, min_context_slots); StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset, SmiTag(length)); StoreObjectFieldNoWriteBarrier(function_context, Context::kScopeInfoOffset, @@ -263,60 +262,60 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( TNode<Oddball> undefined = UndefinedConstant(); TNode<IntPtrT> start_offset = IntPtrConstant(Context::kTodoHeaderSize); CodeStubAssembler::VariableList vars(0, zone()); - BuildFastLoop( + BuildFastLoop<IntPtrT>( vars, start_offset, size, - [=](Node* offset) { - StoreObjectFieldNoWriteBarrier( - function_context, UncheckedCast<IntPtrT>(offset), undefined); + [=](TNode<IntPtrT> offset) { + StoreObjectFieldNoWriteBarrier(function_context, offset, undefined); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); return function_context; } TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) { - Node* scope_info = Parameter(Descriptor::kScopeInfo); - Node* slots = Parameter(Descriptor::kSlots); - Node* context = Parameter(Descriptor::kContext); + TNode<ScopeInfo> scope_info = CAST(Parameter(Descriptor::kScopeInfo)); + TNode<Uint32T> slots = UncheckedCast<Uint32T>(Parameter(Descriptor::kSlots)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(EmitFastNewFunctionContext(scope_info, slots, context, ScopeType::EVAL_SCOPE)); } TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) { - Node* scope_info = Parameter(Descriptor::kScopeInfo); - Node* slots = Parameter(Descriptor::kSlots); - Node* context = Parameter(Descriptor::kContext); + TNode<ScopeInfo> scope_info = CAST(Parameter(Descriptor::kScopeInfo)); + TNode<Uint32T> slots = UncheckedCast<Uint32T>(Parameter(Descriptor::kSlots)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(EmitFastNewFunctionContext(scope_info, slots, context, ScopeType::FUNCTION_SCOPE)); } -Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( - Node* feedback_vector, Node* slot, Node* pattern, Node* flags, - Node* context) { +TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( + TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot, + TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context) { Label call_runtime(this, Label::kDeferred), end(this); - GotoIf(IsUndefined(feedback_vector), &call_runtime); + GotoIf(IsUndefined(maybe_feedback_vector), &call_runtime); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(JSRegExp, result); + TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector); TNode<Object> literal_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); GotoIf(NotHasBoilerplate(literal_site), &call_runtime); { - Node* boilerplate = literal_site; - CSA_ASSERT(this, IsJSRegExp(boilerplate)); + TNode<JSRegExp> boilerplate = CAST(literal_site); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize; TNode<HeapObject> copy = Allocate(size); for (int offset = 0; offset < size; offset += kTaggedSize) { TNode<Object> value = LoadObjectField(boilerplate, offset); StoreObjectFieldNoWriteBarrier(copy, offset, value); } - result.Bind(copy); + result = CAST(copy); Goto(&end); } BIND(&call_runtime); { - result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, - feedback_vector, SmiTag(slot), 
pattern, flags)); + result = CAST(CallRuntime(Runtime::kCreateRegExpLiteral, context, + maybe_feedback_vector, SmiTag(Signed(slot)), + pattern, flags)); Goto(&end); } @@ -325,25 +324,26 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( } TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) { - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* pattern = Parameter(Descriptor::kPattern); - Node* flags = Parameter(Descriptor::kFlags); - Node* context = Parameter(Descriptor::kContext); - Node* result = - EmitCreateRegExpLiteral(feedback_vector, slot, pattern, flags, context); + TNode<HeapObject> maybe_feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern)); + TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<JSRegExp> result = EmitCreateRegExpLiteral(maybe_feedback_vector, slot, + pattern, flags, context); Return(result); } -Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( - Node* feedback_vector, Node* slot, Node* context, Label* call_runtime, +TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<Context> context, Label* call_runtime, AllocationSiteMode allocation_site_mode) { Label zero_capacity(this), cow_elements(this), fast_elements(this), return_result(this); - VARIABLE(result, MachineRepresentation::kTagged); TNode<Object> maybe_allocation_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime); TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site); @@ -358,10 +358,12 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( } TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) { - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* constant_elements = Parameter(Descriptor::kConstantElements); - Node* context = Parameter(Descriptor::kContext); + TNode<FeedbackVector> feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode<ArrayBoilerplateDescription> constant_elements = + CAST(Parameter(Descriptor::kConstantElements)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label call_runtime(this, Label::kDeferred); Return(EmitCreateShallowArrayLiteral(feedback_vector, slot, context, &call_runtime, @@ -373,16 +375,18 @@ TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) { int const flags = AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow; Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector, - SmiTag(slot), constant_elements, SmiConstant(flags))); + SmiTag(Signed(slot)), constant_elements, + SmiConstant(flags))); } } -Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( - Node* feedback_vector, Node* slot, Node* context) { +TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<Context> context) { // Array literals always 
have a valid AllocationSite to properly track // elements transitions. TNode<Object> maybe_allocation_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); TVARIABLE(AllocationSite, allocation_site); Label create_empty_array(this), @@ -396,7 +400,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( BIND(&initialize_allocation_site); { allocation_site = - CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot)); + CreateAllocationSiteInFeedbackVector(feedback_vector, slot); Goto(&create_empty_array); } @@ -418,17 +422,20 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( } TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) { - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* context = Parameter(Descriptor::kContext); - Node* result = EmitCreateEmptyArrayLiteral(feedback_vector, slot, context); + TNode<FeedbackVector> feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<JSArray> result = + EmitCreateEmptyArrayLiteral(feedback_vector, slot, context); Return(result); } -Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( - Node* feedback_vector, Node* slot, Label* call_runtime) { +TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + Label* call_runtime) { TNode<Object> maybe_allocation_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime); TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site); @@ -436,7 +443,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( TNode<Map> boilerplate_map = LoadMap(boilerplate); CSA_ASSERT(this, IsJSObjectMap(boilerplate_map)); - VARIABLE(var_properties, MachineRepresentation::kTagged); + TVARIABLE(FixedArray, var_properties); { TNode<Uint32T> bit_field_3 = LoadMapBitField3(boilerplate_map); GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bit_field_3), call_runtime); @@ -447,8 +454,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( BIND(&if_dictionary); { Comment("Copy dictionary properties"); - var_properties.Bind(CopyNameDictionary( - CAST(LoadSlowProperties(boilerplate)), call_runtime)); + var_properties = CopyNameDictionary(CAST(LoadSlowProperties(boilerplate)), + call_runtime); // Slow objects have no in-object properties. Goto(&done); } @@ -458,13 +465,13 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( TNode<HeapObject> boilerplate_properties = LoadFastProperties(boilerplate); GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime); - var_properties.Bind(EmptyFixedArrayConstant()); + var_properties = EmptyFixedArrayConstant(); Goto(&done); } BIND(&done); } - VARIABLE(var_elements, MachineRepresentation::kTagged); + TVARIABLE(FixedArrayBase, var_elements); { // Copy the elements backing store, assuming that it's flat. 
Label if_empty_fixed_array(this), if_copy_elements(this), done(this); @@ -473,7 +480,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( &if_copy_elements); BIND(&if_empty_fixed_array); - var_elements.Bind(boilerplate_elements); + var_elements = boilerplate_elements; Goto(&done); BIND(&if_copy_elements); @@ -483,7 +490,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( flags |= ExtractFixedArrayFlag::kAllFixedArrays; flags |= ExtractFixedArrayFlag::kNewSpaceAllocationOnly; flags |= ExtractFixedArrayFlag::kDontCopyCOW; - var_elements.Bind(CloneFixedArray(boilerplate_elements, flags)); + var_elements = CloneFixedArray(boilerplate_elements, flags); Goto(&done); BIND(&done); } @@ -563,18 +570,18 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( BIND(&continue_with_write_barrier); { Comment("Copy in-object properties slow"); - BuildFastLoop( + BuildFastLoop<IntPtrT>( offset.value(), instance_size, - [=](Node* offset) { + [=](TNode<IntPtrT> offset) { // TODO(ishell): value decompression is not necessary here. TNode<Object> field = LoadObjectField(boilerplate, offset); StoreObjectFieldNoWriteBarrier(copy, offset, field); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); Comment("Copy mutable HeapNumber values"); - BuildFastLoop( + BuildFastLoop<IntPtrT>( offset.value(), instance_size, - [=](Node* offset) { + [=](TNode<IntPtrT> offset) { TNode<Object> field = LoadObjectField(copy, offset); Label copy_heap_number(this, Label::kDeferred), continue_loop(this); // We only have to clone complex field values. @@ -593,7 +600,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( } BIND(&continue_loop); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); Goto(&done_init); } BIND(&done_init); @@ -603,29 +610,30 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) { Label call_runtime(this); - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode<IntPtrT> slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* copy = + TNode<FeedbackVector> feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode<HeapObject> copy = EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime); Return(copy); BIND(&call_runtime); - Node* object_boilerplate_description = - Parameter(Descriptor::kObjectBoilerplateDescription); - Node* flags = Parameter(Descriptor::kFlags); - Node* context = Parameter(Descriptor::kContext); + TNode<ObjectBoilerplateDescription> object_boilerplate_description = + CAST(Parameter(Descriptor::kObjectBoilerplateDescription)); + TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, - SmiTag(slot), object_boilerplate_description, flags); + SmiTag(Signed(slot)), object_boilerplate_description, flags); } // Used by the CreateEmptyObjectLiteral bytecode and the Object constructor. 
-Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral( - Node* context) { +TNode<JSObject> ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral( + TNode<Context> context) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSFunction> object_function = CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); - TNode<Map> map = CAST(LoadObjectField( - object_function, JSFunction::kPrototypeOrInitialMapOffset)); + TNode<Map> map = LoadObjectField<Map>( + object_function, JSFunction::kPrototypeOrInitialMapOffset); // Ensure that slack tracking is disabled for the map. STATIC_ASSERT(Map::kNoSlackTracking == 0); CSA_ASSERT( @@ -642,10 +650,10 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { TNode<IntPtrT> argc = ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget)); - VARIABLE(var_result, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result); Label if_subclass(this, Label::kDeferred), if_notsubclass(this), return_result(this); GotoIf(IsUndefined(new_target), &if_notsubclass); @@ -654,9 +662,8 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { BIND(&if_subclass); { - TNode<Object> result = + var_result = CallBuiltin(Builtins::kFastNewObject, context, target, new_target); - var_result.Bind(result); Goto(&return_result); } @@ -672,15 +679,13 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { BIND(&if_newobject); { - Node* result = EmitCreateEmptyObjectLiteral(context); - var_result.Bind(result); + var_result = EmitCreateEmptyObjectLiteral(context); Goto(&return_result); } BIND(&if_toobject); { - TNode<Object> result = CallBuiltin(Builtins::kToObject, context, value); - var_result.Bind(result); + var_result = CallBuiltin(Builtins::kToObject, context, value); Goto(&return_result); } } @@ -691,13 +696,13 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { // ES #sec-number-constructor TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<IntPtrT> argc = ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); // 1. If no arguments were passed to this function invocation, let n be +0. - VARIABLE(var_n, MachineRepresentation::kTagged, SmiConstant(0)); + TVARIABLE(Number, var_n, SmiConstant(0)); Label if_nloaded(this, &var_n); GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &if_nloaded); @@ -706,14 +711,14 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { // b. If Type(prim) is BigInt, let n be the Number value for prim. // c. Otherwise, let n be prim. TNode<Object> value = args.AtIndex(0); - var_n.Bind(ToNumber(context, value, BigIntHandling::kConvertToNumber)); + var_n = ToNumber(context, value, BigIntHandling::kConvertToNumber); Goto(&if_nloaded); BIND(&if_nloaded); { // 3. If NewTarget is undefined, return n. 
- Node* n_value = var_n.value(); - Node* new_target = Parameter(Descriptor::kJSNewTarget); + TNode<Number> n_value = var_n.value(); + TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget)); Label return_n(this), constructnumber(this, Label::kDeferred); Branch(IsUndefined(new_target), &return_n, &constructnumber); @@ -740,7 +745,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { } TF_BUILTIN(GenericLazyDeoptContinuation, ConstructorBuiltinsAssembler) { - Node* result = Parameter(Descriptor::kResult); + TNode<Object> result = CAST(Parameter(Descriptor::kResult)); Return(result); } diff --git a/chromium/v8/src/builtins/builtins-constructor-gen.h b/chromium/v8/src/builtins/builtins-constructor-gen.h index 9208506c79e..761a6c7adbc 100644 --- a/chromium/v8/src/builtins/builtins-constructor-gen.h +++ b/chromium/v8/src/builtins/builtins-constructor-gen.h @@ -15,21 +15,28 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler { explicit ConstructorBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context, - ScopeType scope_type); - - Node* EmitCreateRegExpLiteral(Node* feedback_vector, Node* slot, - Node* pattern, Node* flags, Node* context); - Node* EmitCreateShallowArrayLiteral(Node* feedback_vector, Node* slot, - Node* context, Label* call_runtime, - AllocationSiteMode allocation_site_mode); - - Node* EmitCreateEmptyArrayLiteral(Node* feedback_vector, Node* slot, - Node* context); - - Node* EmitCreateShallowObjectLiteral(Node* feedback_vector, Node* slot, - Label* call_runtime); - Node* EmitCreateEmptyObjectLiteral(Node* context); + TNode<Context> EmitFastNewFunctionContext(TNode<ScopeInfo> scope_info, + TNode<Uint32T> slots, + TNode<Context> context, + ScopeType scope_type); + + TNode<JSRegExp> EmitCreateRegExpLiteral( + TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot, + TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context); + + TNode<JSArray> EmitCreateShallowArrayLiteral( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<Context> context, Label* call_runtime, + AllocationSiteMode allocation_site_mode); + + TNode<JSArray> EmitCreateEmptyArrayLiteral( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<Context> context); + + TNode<HeapObject> EmitCreateShallowObjectLiteral( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + Label* call_runtime); + TNode<JSObject> EmitCreateEmptyObjectLiteral(TNode<Context> context); TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context, SloppyTNode<JSFunction> target, diff --git a/chromium/v8/src/builtins/builtins-conversion-gen.cc b/chromium/v8/src/builtins/builtins-conversion-gen.cc index 8a0c73b2928..1666cbf6acc 100644 --- a/chromium/v8/src/builtins/builtins-conversion-gen.cc +++ b/chromium/v8/src/builtins/builtins-conversion-gen.cc @@ -18,16 +18,17 @@ class ConversionBuiltinsAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: - void Generate_NonPrimitiveToPrimitive(Node* context, Node* input, + void Generate_NonPrimitiveToPrimitive(TNode<Context> context, + TNode<Object> input, ToPrimitiveHint hint); - void Generate_OrdinaryToPrimitive(Node* context, Node* input, + void Generate_OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input, OrdinaryToPrimitiveHint hint); }; // ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] ) void 
ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( - Node* context, Node* input, ToPrimitiveHint hint) { + TNode<Context> context, TNode<Object> input, ToPrimitiveHint hint) { // Lookup the @@toPrimitive property on the {input}. TNode<Object> exotic_to_prim = GetProperty(context, input, factory()->to_primitive_symbol()); @@ -42,14 +43,14 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined); TNode<String> hint_string = HeapConstant(factory()->ToPrimitiveHintString(hint)); - Node* result = + TNode<Object> result = CallJS(callable, context, exotic_to_prim, input, hint_string); // Verify that the {result} is actually a primitive. Label if_resultisprimitive(this), if_resultisnotprimitive(this, Label::kDeferred); GotoIf(TaggedIsSmi(result), &if_resultisprimitive); - TNode<Uint16T> result_instance_type = LoadInstanceType(result); + TNode<Uint16T> result_instance_type = LoadInstanceType(CAST(result)); Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive, &if_resultisnotprimitive); @@ -78,22 +79,22 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( } TF_BUILTIN(NonPrimitiveToPrimitive_Default, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kDefault); } TF_BUILTIN(NonPrimitiveToPrimitive_Number, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kNumber); } TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kString); } @@ -105,22 +106,22 @@ TF_BUILTIN(StringToNumber, CodeStubAssembler) { } TF_BUILTIN(ToName, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); - VARIABLE(var_input, MachineRepresentation::kTagged, input); + TVARIABLE(Object, var_input, input); Label loop(this, &var_input); Goto(&loop); BIND(&loop); { // Load the current {input} value. 
- Node* input = var_input.value(); + TNode<Object> input = var_input.value(); // Dispatch based on the type of the {input.} Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this), if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred); GotoIf(TaggedIsSmi(input), &if_inputisnumber); - TNode<Uint16T> input_instance_type = LoadInstanceType(input); + TNode<Uint16T> input_instance_type = LoadInstanceType(CAST(input)); STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname); GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver); @@ -151,7 +152,7 @@ TF_BUILTIN(ToName, CodeStubAssembler) { { // Just return the {input}'s string representation. CSA_ASSERT(this, IsOddballInstanceType(input_instance_type)); - Return(LoadObjectField(input, Oddball::kToStringOffset)); + Return(LoadObjectField(CAST(input), Oddball::kToStringOffset)); } BIND(&if_inputisreceiver); @@ -159,23 +160,23 @@ TF_BUILTIN(ToName, CodeStubAssembler) { // Convert the JSReceiver {input} to a primitive first, // and then run the loop again with the new {input}, // which is then a primitive value. - var_input.Bind(CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String, - context, input)); + var_input = CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String, + context, input); Goto(&loop); } } } TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<HeapObject> input = CAST(Parameter(Descriptor::kArgument)); Return(NonNumberToNumber(context, input)); } TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<HeapObject> input = CAST(Parameter(Descriptor::kArgument)); Return(NonNumberToNumeric(context, input)); } @@ -191,16 +192,19 @@ TF_BUILTIN(ToNumeric, CodeStubAssembler) { // ES6 section 7.1.3 ToNumber ( argument ) TF_BUILTIN(ToNumber, CodeStubAssembler) { + // TODO(solanes, v8:6949): Changing this to a TNode<Context> crashes with the + // empty context. Context might not be needed, but it is propagated all over + // the place and hard to pull out. Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Return(ToNumber(context, input)); } // Like ToNumber, but also converts BigInts. 
TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Return(ToNumber(context, input, BigIntHandling::kConvertToNumber)); } @@ -214,8 +218,8 @@ TF_BUILTIN(NumberToString, CodeStubAssembler) { // 7.1.1.1 OrdinaryToPrimitive ( O, hint ) void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( - Node* context, Node* input, OrdinaryToPrimitiveHint hint) { - VARIABLE(var_result, MachineRepresentation::kTagged); + TNode<Context> context, TNode<Object> input, OrdinaryToPrimitiveHint hint) { + TVARIABLE(Object, var_result); Label return_result(this, &var_result); Handle<String> method_names[2]; @@ -246,12 +250,12 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( // Call the {method} on the {input}. Callable callable = CodeFactory::Call( isolate(), ConvertReceiverMode::kNotNullOrUndefined); - Node* result = CallJS(callable, context, method, input); - var_result.Bind(result); + TNode<Object> result = CallJS(callable, context, method, input); + var_result = result; // Return the {result} if it is a primitive. GotoIf(TaggedIsSmi(result), &return_result); - TNode<Uint16T> result_instance_type = LoadInstanceType(result); + TNode<Uint16T> result_instance_type = LoadInstanceType(CAST(result)); GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result); } @@ -267,22 +271,22 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( } TF_BUILTIN(OrdinaryToPrimitive_Number, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Generate_OrdinaryToPrimitive(context, input, OrdinaryToPrimitiveHint::kNumber); } TF_BUILTIN(OrdinaryToPrimitive_String, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Generate_OrdinaryToPrimitive(context, input, OrdinaryToPrimitiveHint::kString); } // ES6 section 7.1.2 ToBoolean ( argument ) TF_BUILTIN(ToBoolean, CodeStubAssembler) { - Node* value = Parameter(Descriptor::kArgument); + TNode<Object> value = CAST(Parameter(Descriptor::kArgument)); Label return_true(this), return_false(this); BranchIfToBooleanIsTrue(value, &return_true, &return_false); @@ -298,7 +302,7 @@ TF_BUILTIN(ToBoolean, CodeStubAssembler) { // Requires parameter on stack so that it can be used as a continuation from a // LAZY deopt. TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) { - Node* value = Parameter(Descriptor::kArgument); + TNode<Object> value = CAST(Parameter(Descriptor::kArgument)); Label return_true(this), return_false(this); BranchIfToBooleanIsTrue(value, &return_true, &return_false); @@ -311,11 +315,10 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) { } TF_BUILTIN(ToLength, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); // We might need to loop once for ToNumber conversion. 
- VARIABLE(var_len, MachineRepresentation::kTagged, - Parameter(Descriptor::kArgument)); + TVARIABLE(Object, var_len, CAST(Parameter(Descriptor::kArgument))); Label loop(this, &var_len); Goto(&loop); BIND(&loop); @@ -325,7 +328,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { return_zero(this, Label::kDeferred); // Load the current {len} value. - Node* len = var_len.value(); + TNode<Object> len = var_len.value(); // Check if {len} is a positive Smi. GotoIf(TaggedIsPositiveSmi(len), &return_len); @@ -334,14 +337,16 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { GotoIf(TaggedIsSmi(len), &return_zero); // Check if {len} is a HeapNumber. + TNode<HeapObject> len_heap_object = CAST(len); Label if_lenisheapnumber(this), if_lenisnotheapnumber(this, Label::kDeferred); - Branch(IsHeapNumber(len), &if_lenisheapnumber, &if_lenisnotheapnumber); + Branch(IsHeapNumber(len_heap_object), &if_lenisheapnumber, + &if_lenisnotheapnumber); BIND(&if_lenisheapnumber); { // Load the floating-point value of {len}. - TNode<Float64T> len_value = LoadHeapNumberValue(len); + TNode<Float64T> len_value = LoadHeapNumberValue(len_heap_object); // Check if {len} is not greater than zero. GotoIfNot(Float64GreaterThan(len_value, Float64Constant(0.0)), @@ -361,7 +366,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { BIND(&if_lenisnotheapnumber); { // Need to convert {len} to a Number first. - var_len.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, len)); + var_len = CallBuiltin(Builtins::kNonNumberToNumber, context, len); Goto(&loop); } @@ -377,15 +382,15 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { } TF_BUILTIN(ToInteger, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Return(ToInteger(context, input, kNoTruncation)); } TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kArgument)); Return(ToInteger(context, input, kTruncateMinusZero)); } @@ -396,15 +401,14 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { if_noconstructor(this, Label::kDeferred), if_wrapjs_primitive_wrapper(this); - Node* context = Parameter(Descriptor::kContext); - Node* object = Parameter(Descriptor::kArgument); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> object = CAST(Parameter(Descriptor::kArgument)); - VARIABLE(constructor_function_index_var, - MachineType::PointerRepresentation()); + TVARIABLE(IntPtrT, constructor_function_index_var); GotoIf(TaggedIsSmi(object), &if_smi); - TNode<Map> map = LoadMap(object); + TNode<Map> map = LoadMap(CAST(object)); TNode<Uint16T> instance_type = LoadMapInstanceType(map); GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver); @@ -413,12 +417,12 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { GotoIf(WordEqual(constructor_function_index, IntPtrConstant(Map::kNoConstructorFunctionIndex)), &if_noconstructor); - constructor_function_index_var.Bind(constructor_function_index); + constructor_function_index_var = constructor_function_index; Goto(&if_wrapjs_primitive_wrapper); BIND(&if_smi); - constructor_function_index_var.Bind( - IntPtrConstant(Context::NUMBER_FUNCTION_INDEX)); + constructor_function_index_var = + IntPtrConstant(Context::NUMBER_FUNCTION_INDEX); 
Goto(&if_wrapjs_primitive_wrapper); BIND(&if_wrapjs_primitive_wrapper); @@ -449,7 +453,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { // ES6 section 12.5.5 typeof operator TF_BUILTIN(Typeof, CodeStubAssembler) { - Node* object = Parameter(Descriptor::kObject); + TNode<Object> object = CAST(Parameter(Descriptor::kObject)); Return(Typeof(object)); } diff --git a/chromium/v8/src/builtins/builtins-date-gen.cc b/chromium/v8/src/builtins/builtins-date-gen.cc index 97600efaa49..98c1343d2c8 100644 --- a/chromium/v8/src/builtins/builtins-date-gen.cc +++ b/chromium/v8/src/builtins/builtins-date-gen.cc @@ -18,23 +18,23 @@ class DateBuiltinsAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: - void Generate_DatePrototype_GetField(Node* context, Node* receiver, - int field_index); + void Generate_DatePrototype_GetField(TNode<Context> context, + TNode<Object> receiver, int field_index); }; -void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, - Node* receiver, - int field_index) { +void DateBuiltinsAssembler::Generate_DatePrototype_GetField( + TNode<Context> context, TNode<Object> receiver, int field_index) { Label receiver_not_date(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &receiver_not_date); - TNode<Uint16T> receiver_instance_type = LoadInstanceType(receiver); + TNode<Uint16T> receiver_instance_type = LoadInstanceType(CAST(receiver)); GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_DATE_TYPE), &receiver_not_date); + TNode<JSDate> date_receiver = CAST(receiver); // Load the specified date field, falling back to the runtime as necessary. if (field_index == JSDate::kDateValue) { - Return(LoadObjectField(receiver, JSDate::kValueOffset)); + Return(LoadObjectField(date_receiver, JSDate::kValueOffset)); } else { if (field_index < JSDate::kFirstUncachedField) { Label stamp_mismatch(this, Label::kDeferred); @@ -42,9 +42,9 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, ExternalConstant(ExternalReference::date_cache_stamp(isolate()))); TNode<Object> cache_stamp = - LoadObjectField(receiver, JSDate::kCacheStampOffset); + LoadObjectField(date_receiver, JSDate::kCacheStampOffset); GotoIf(TaggedNotEqual(date_cache_stamp, cache_stamp), &stamp_mismatch); - Return(LoadObjectField(receiver, + Return(LoadObjectField(date_receiver, JSDate::kValueOffset + field_index * kTaggedSize)); BIND(&stamp_mismatch); @@ -53,10 +53,10 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, TNode<Smi> field_index_smi = SmiConstant(field_index); TNode<ExternalReference> function = ExternalConstant(ExternalReference::get_date_field_function()); - Node* result = CallCFunction( + TNode<Object> result = CAST(CallCFunction( function, MachineType::AnyTagged(), - std::make_pair(MachineType::AnyTagged(), receiver), - std::make_pair(MachineType::AnyTagged(), field_index_smi)); + std::make_pair(MachineType::AnyTagged(), date_receiver), + std::make_pair(MachineType::AnyTagged(), field_index_smi))); Return(result); } @@ -66,128 +66,128 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, } TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDay); } TF_BUILTIN(DatePrototypeGetDay, DateBuiltinsAssembler) { 
- Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekday); } TF_BUILTIN(DatePrototypeGetFullYear, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kYear); } TF_BUILTIN(DatePrototypeGetHours, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kHour); } TF_BUILTIN(DatePrototypeGetMilliseconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecond); } TF_BUILTIN(DatePrototypeGetMinutes, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMinute); } TF_BUILTIN(DatePrototypeGetMonth, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMonth); } TF_BUILTIN(DatePrototypeGetSeconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kSecond); } TF_BUILTIN(DatePrototypeGetTime, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue); } TF_BUILTIN(DatePrototypeGetTimezoneOffset, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kTimezoneOffset); } TF_BUILTIN(DatePrototypeGetUTCDate, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = 
CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDayUTC); } TF_BUILTIN(DatePrototypeGetUTCDay, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekdayUTC); } TF_BUILTIN(DatePrototypeGetUTCFullYear, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kYearUTC); } TF_BUILTIN(DatePrototypeGetUTCHours, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kHourUTC); } TF_BUILTIN(DatePrototypeGetUTCMilliseconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecondUTC); } TF_BUILTIN(DatePrototypeGetUTCMinutes, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMinuteUTC); } TF_BUILTIN(DatePrototypeGetUTCMonth, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMonthUTC); } TF_BUILTIN(DatePrototypeGetUTCSeconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kSecondUTC); } TF_BUILTIN(DatePrototypeValueOf, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue); } TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); TNode<Object> hint = CAST(Parameter(Descriptor::kHint)); // Check if the {receiver} is actually a JSReceiver. 
Label receiver_is_invalid(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &receiver_is_invalid); - GotoIfNot(IsJSReceiver(receiver), &receiver_is_invalid); + GotoIfNot(IsJSReceiver(CAST(receiver)), &receiver_is_invalid); // Dispatch to the appropriate OrdinaryToPrimitive builtin. Label hint_is_number(this), hint_is_string(this), diff --git a/chromium/v8/src/builtins/builtins-date.cc b/chromium/v8/src/builtins/builtins-date.cc index c3e76018321..258b1022da3 100644 --- a/chromium/v8/src/builtins/builtins-date.cc +++ b/chromium/v8/src/builtins/builtins-date.cc @@ -854,16 +854,18 @@ BUILTIN(DatePrototypeToLocaleDateString) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleDateString); - CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleDateString"); + const char* method = "Date.prototype.toLocaleDateString"; + CHECK_RECEIVER(JSDate, date, method); RETURN_RESULT_OR_FAILURE( isolate, JSDateTimeFormat::ToLocaleDateTime( isolate, - date, // date - args.atOrUndefined(isolate, 1), // locales - args.atOrUndefined(isolate, 2), // options - JSDateTimeFormat::RequiredOption::kDate, // required - JSDateTimeFormat::DefaultsOption::kDate)); // defaults + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kDate, // required + JSDateTimeFormat::DefaultsOption::kDate, // defaults + method)); // method } // ecma402 #sup-date.prototype.tolocalestring @@ -872,16 +874,18 @@ BUILTIN(DatePrototypeToLocaleString) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleString); - CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleString"); + const char* method = "Date.prototype.toLocaleString"; + CHECK_RECEIVER(JSDate, date, method); RETURN_RESULT_OR_FAILURE( isolate, JSDateTimeFormat::ToLocaleDateTime( isolate, - date, // date - args.atOrUndefined(isolate, 1), // locales - args.atOrUndefined(isolate, 2), // options - JSDateTimeFormat::RequiredOption::kAny, // required - JSDateTimeFormat::DefaultsOption::kAll)); // defaults + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kAny, // required + JSDateTimeFormat::DefaultsOption::kAll, // defaults + method)); // method } // ecma402 #sup-date.prototype.tolocaletimestring @@ -890,16 +894,18 @@ BUILTIN(DatePrototypeToLocaleTimeString) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleTimeString); - CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleTimeString"); + const char* method = "Date.prototype.toLocaleTimeString"; + CHECK_RECEIVER(JSDate, date, method); RETURN_RESULT_OR_FAILURE( isolate, JSDateTimeFormat::ToLocaleDateTime( isolate, - date, // date - args.atOrUndefined(isolate, 1), // locales - args.atOrUndefined(isolate, 2), // options - JSDateTimeFormat::RequiredOption::kTime, // required - JSDateTimeFormat::DefaultsOption::kTime)); // defaults + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kTime, // required + JSDateTimeFormat::DefaultsOption::kTime, // defaults + method)); // method } #endif // V8_INTL_SUPPORT diff --git a/chromium/v8/src/builtins/builtins-definitions.h b/chromium/v8/src/builtins/builtins-definitions.h index 95f5273f14f..2489538d192 100644 --- a/chromium/v8/src/builtins/builtins-definitions.h +++ b/chromium/v8/src/builtins/builtins-definitions.h @@ -102,7 +102,6 @@ namespace internal { ASM(ResumeGeneratorTrampoline, 
ResumeGenerator) \ \ /* String helpers */ \ - TFC(StringCharAt, StringAt) \ TFC(StringCodePointAt, StringAt) \ TFC(StringFromCodePointAt, StringAtAsString) \ TFC(StringEqual, Compare) \ @@ -219,9 +218,7 @@ namespace internal { TFH(KeyedLoadIC_Slow, LoadWithVector) \ TFH(KeyedStoreIC_Megamorphic, Store) \ TFH(KeyedStoreIC_Slow, StoreWithVector) \ - TFH(LoadGlobalIC_Slow, LoadWithVector) \ TFH(LoadIC_FunctionPrototype, LoadWithVector) \ - TFH(LoadIC_Slow, LoadWithVector) \ TFH(LoadIC_StringLength, LoadWithVector) \ TFH(LoadIC_StringWrapperLength, LoadWithVector) \ TFH(LoadIC_NoFeedback, Load) \ @@ -230,7 +227,6 @@ namespace internal { TFH(StoreInArrayLiteralIC_Slow, StoreWithVector) \ TFH(KeyedLoadIC_SloppyArguments, LoadWithVector) \ TFH(LoadIndexedInterceptorIC, LoadWithVector) \ - TFH(StoreInterceptorIC, StoreWithVector) \ TFH(KeyedStoreIC_SloppyArguments_Standard, StoreWithVector) \ TFH(KeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW, StoreWithVector) \ TFH(KeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB, StoreWithVector) \ @@ -568,6 +564,9 @@ namespace internal { SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ CPP(AsyncFunctionConstructor) \ \ + /* Iterator Protocol */ \ + TFC(GetIteratorWithFeedbackLazyDeoptContinuation, GetIteratorStackParameter) \ + \ /* Global object */ \ CPP(GlobalDecodeURI) \ CPP(GlobalDecodeURIComponent) \ @@ -616,6 +615,10 @@ namespace internal { TFS(IterableToList, kIterable, kIteratorFn) \ TFS(IterableToListWithSymbolLookup, kIterable) \ TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \ + TFS(IterableToFixedArrayForWasm, kIterable, kExpectedLength) \ + \ + /* #sec-createstringlistfromiterable */ \ + TFS(StringListFromIterable, kIterable) \ \ /* Map */ \ TFS(FindOrderedHashMapEntry, kTable, kKey) \ @@ -845,28 +848,13 @@ namespace internal { CPP(RegExpLeftContextGetter) \ /* ES #sec-regexp.prototype.compile */ \ TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags) \ - /* ES #sec-regexp.prototype.exec */ \ - TFJ(RegExpPrototypeExec, 1, kReceiver, kString) \ - /* https://tc39.github.io/proposal-string-matchall/ */ \ - TFJ(RegExpPrototypeMatchAll, 1, kReceiver, kString) \ - /* ES #sec-regexp.prototype-@@search */ \ - TFJ(RegExpPrototypeSearch, 1, kReceiver, kString) \ CPP(RegExpPrototypeToString) \ CPP(RegExpRightContextGetter) \ \ - /* ES #sec-regexp.prototype-@@split */ \ - TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* RegExp helpers */ \ TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \ TFS(RegExpExecInternal, kRegExp, kString, kLastIndex, kMatchInfo) \ ASM(RegExpInterpreterTrampoline, CCall) \ - TFS(RegExpPrototypeExecSlow, kReceiver, kString) \ - TFS(RegExpSearchFast, kReceiver, kPattern) \ - TFS(RegExpSplit, kRegExp, kString, kLimit) \ - \ - /* RegExp String Iterator */ \ - /* https://tc39.github.io/proposal-string-matchall/ */ \ - TFJ(RegExpStringIteratorPrototypeNext, 0, kReceiver) \ \ /* Set */ \ TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ @@ -1117,7 +1105,6 @@ namespace internal { TFS(SetProperty, kReceiver, kKey, kValue) \ TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \ ASM(MemCopyUint8Uint8, CCall) \ - ASM(MemCopyUint16Uint8, CCall) \ ASM(MemMove, CCall) \ \ /* Trace */ \ @@ -1131,7 +1118,14 @@ namespace internal { CPP(FinalizationGroupRegister) \ CPP(FinalizationGroupUnregister) \ CPP(WeakRefConstructor) \ - CPP(WeakRefDeref) + CPP(WeakRefDeref) \ + \ + /* Async modules */ \ + TFJ(AsyncModuleEvaluate, 
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + \ + /* CallAsyncModule* are spec anonymyous functions */ \ + CPP(CallAsyncModuleFulfilled) \ + CPP(CallAsyncModuleRejected) #ifdef V8_INTL_SUPPORT #define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \ diff --git a/chromium/v8/src/builtins/builtins-function-gen.cc b/chromium/v8/src/builtins/builtins-function-gen.cc index ee1f67d4342..f0853e9bd97 100644 --- a/chromium/v8/src/builtins/builtins-function-gen.cc +++ b/chromium/v8/src/builtins/builtins-function-gen.cc @@ -15,14 +15,12 @@ namespace internal { TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { Label slow(this); - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. TNode<Int32T> argc = UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); - Node* context = Parameter(Descriptor::kContext); - Node* new_target = Parameter(Descriptor::kJSNewTarget); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget)); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); // Check that receiver has instance type of JS_FUNCTION_TYPE TNode<Object> receiver = args.GetReceiver(); @@ -85,21 +83,20 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // Choose the right bound function map based on whether the target is // constructable. Comment("Choose the right bound function map"); - VARIABLE(bound_function_map, MachineRepresentation::kTagged); + TVARIABLE(Map, bound_function_map); { Label with_constructor(this); - VariableList vars({&bound_function_map}, zone()); TNode<NativeContext> native_context = LoadNativeContext(context); - Label map_done(this, vars); + Label map_done(this, &bound_function_map); GotoIf(IsConstructorMap(receiver_map), &with_constructor); - bound_function_map.Bind(LoadContextElement( + bound_function_map = CAST(LoadContextElement( native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX)); Goto(&map_done); BIND(&with_constructor); - bound_function_map.Bind(LoadContextElement( + bound_function_map = CAST(LoadContextElement( native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX)); Goto(&map_done); @@ -115,30 +112,28 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // Allocate the arguments array. 
Comment("Allocate the arguments array"); - VARIABLE(argument_array, MachineRepresentation::kTagged); + TVARIABLE(FixedArray, argument_array); { Label empty_arguments(this); Label arguments_done(this, &argument_array); GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments); TNode<IntPtrT> elements_length = Signed(ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1))))); - TNode<FixedArray> elements = CAST(AllocateFixedArray( - PACKED_ELEMENTS, elements_length, kAllowLargeObjectAllocation)); - VARIABLE(index, MachineType::PointerRepresentation()); - index.Bind(IntPtrConstant(0)); + argument_array = CAST(AllocateFixedArray(PACKED_ELEMENTS, elements_length, + kAllowLargeObjectAllocation)); + TVARIABLE(IntPtrT, index, IntPtrConstant(0)); VariableList foreach_vars({&index}, zone()); args.ForEach( foreach_vars, - [this, elements, &index](Node* arg) { - StoreFixedArrayElement(elements, index.value(), arg); + [&](TNode<Object> arg) { + StoreFixedArrayElement(argument_array.value(), index.value(), arg); Increment(&index); }, IntPtrConstant(1)); - argument_array.Bind(elements); Goto(&arguments_done); BIND(&empty_arguments); - argument_array.Bind(EmptyFixedArrayConstant()); + argument_array = EmptyFixedArrayConstant(); Goto(&arguments_done); BIND(&arguments_done); @@ -146,16 +141,16 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // Determine bound receiver. Comment("Determine bound receiver"); - VARIABLE(bound_receiver, MachineRepresentation::kTagged); + TVARIABLE(Object, bound_receiver); { Label has_receiver(this); Label receiver_done(this, &bound_receiver); GotoIf(Word32NotEqual(argc, Int32Constant(0)), &has_receiver); - bound_receiver.Bind(UndefinedConstant()); + bound_receiver = UndefinedConstant(); Goto(&receiver_done); BIND(&has_receiver); - bound_receiver.Bind(args.AtIndex(0)); + bound_receiver = args.AtIndex(0); Goto(&receiver_done); BIND(&receiver_done); @@ -196,10 +191,10 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // ES6 #sec-function.prototype-@@hasinstance TF_BUILTIN(FunctionPrototypeHasInstance, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* f = Parameter(Descriptor::kReceiver); - Node* v = Parameter(Descriptor::kV); - Node* result = OrdinaryHasInstance(context, f, v); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> f = CAST(Parameter(Descriptor::kReceiver)); + TNode<Object> v = CAST(Parameter(Descriptor::kV)); + TNode<Oddball> result = OrdinaryHasInstance(context, f, v); Return(result); } diff --git a/chromium/v8/src/builtins/builtins-function.cc b/chromium/v8/src/builtins/builtins-function.cc index f75014d0346..f9a356f94bf 100644 --- a/chromium/v8/src/builtins/builtins-function.cc +++ b/chromium/v8/src/builtins/builtins-function.cc @@ -93,17 +93,6 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate, function->shared().set_name_should_print_as_anonymous(true); } - // The spec says that we have to wrap code created via the function - // constructor in e.g. 'function anonymous(' as above, including with extra - // line breaks. Ths is confusing when reporting stack traces from the eval'd - // code as the line number of the error is always reported with 2 extra line - // breaks e.g. line 1 is reported as line 3. We fix this up here by setting - // line_offset which is read by stack trace code. 
- Handle<Script> script(Script::cast(function->shared().script()), isolate); - if (script->line_offset() == 0) { - script->set_line_offset(-2); - } - // If new.target is equal to target then the function created // is already correctly setup and nothing else should be done // here. But if new.target is not equal to target then we are diff --git a/chromium/v8/src/builtins/builtins-generator-gen.cc b/chromium/v8/src/builtins/builtins-generator-gen.cc index d884c417fc0..0a4b3b205b9 100644 --- a/chromium/v8/src/builtins/builtins-generator-gen.cc +++ b/chromium/v8/src/builtins/builtins-generator-gen.cc @@ -19,19 +19,25 @@ class GeneratorBuiltinsAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: + // Currently, AsyncModules in V8 are built on top of JSAsyncFunctionObjects + // with an initial yield. Thus, we need some way to 'resume' the + // underlying JSAsyncFunctionObject owned by an AsyncModule. To support this + // the body of resume is factored out below, and shared by JSGeneratorObject + // prototype methods as well as AsyncModuleEvaluate. The only difference + // between AsyncModuleEvaluate and JSGeneratorObject::PrototypeNext is + // the expected reciever. + void InnerResume(CodeStubArguments* args, Node* receiver, Node* value, + Node* context, JSGeneratorObject::ResumeMode resume_mode, + char const* const method_name); void GeneratorPrototypeResume(CodeStubArguments* args, Node* receiver, Node* value, Node* context, JSGeneratorObject::ResumeMode resume_mode, char const* const method_name); }; -void GeneratorBuiltinsAssembler::GeneratorPrototypeResume( +void GeneratorBuiltinsAssembler::InnerResume( CodeStubArguments* args, Node* receiver, Node* value, Node* context, JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) { - // Check if the {receiver} is actually a JSGeneratorObject. - ThrowIfNotInstanceType(context, receiver, JS_GENERATOR_OBJECT_TYPE, - method_name); - // Check if the {receiver} is running or already closed. TNode<Smi> receiver_continuation = CAST(LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset)); @@ -111,17 +117,46 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume( } } +void GeneratorBuiltinsAssembler::GeneratorPrototypeResume( + CodeStubArguments* args, Node* receiver, Node* value, Node* context, + JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) { + // Check if the {receiver} is actually a JSGeneratorObject. + ThrowIfNotInstanceType(context, receiver, JS_GENERATOR_OBJECT_TYPE, + method_name); + InnerResume(args, receiver, value, context, resume_mode, method_name); +} + +TF_BUILTIN(AsyncModuleEvaluate, GeneratorBuiltinsAssembler) { + const int kValueArg = 0; + + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); + CodeStubArguments args(this, argc); + + TNode<Object> receiver = args.GetReceiver(); + TNode<Object> value = args.GetOptionalArgumentValue(kValueArg); + TNode<Context> context = Cast(Parameter(Descriptor::kContext)); + + // AsyncModules act like JSAsyncFunctions. Thus we check here + // that the {receiver} is a JSAsyncFunction. 
+ char const* const method_name = "[AsyncModule].evaluate"; + ThrowIfNotInstanceType(context, receiver, JS_ASYNC_FUNCTION_OBJECT_TYPE, + method_name); + InnerResume(&args, receiver, value, context, JSGeneratorObject::kNext, + method_name); +} + // ES6 #sec-generator.prototype.next TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) { const int kValueArg = 0; - TNode<IntPtrT> argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); TNode<Object> receiver = args.GetReceiver(); TNode<Object> value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); GeneratorPrototypeResume(&args, receiver, value, context, JSGeneratorObject::kNext, @@ -132,13 +167,13 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) { TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) { const int kValueArg = 0; - TNode<IntPtrT> argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); TNode<Object> receiver = args.GetReceiver(); TNode<Object> value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); GeneratorPrototypeResume(&args, receiver, value, context, JSGeneratorObject::kReturn, @@ -149,13 +184,13 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) { TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) { const int kExceptionArg = 0; - TNode<IntPtrT> argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); TNode<Object> receiver = args.GetReceiver(); TNode<Object> exception = args.GetOptionalArgumentValue(kExceptionArg); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); GeneratorPrototypeResume(&args, receiver, exception, context, JSGeneratorObject::kThrow, diff --git a/chromium/v8/src/builtins/builtins-handler-gen.cc b/chromium/v8/src/builtins/builtins-handler-gen.cc index eae8690f1fa..ef912eabf1f 100644 --- a/chromium/v8/src/builtins/builtins-handler-gen.cc +++ b/chromium/v8/src/builtins/builtins-handler-gen.cc @@ -48,8 +48,8 @@ TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) { } TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) { - Node* value = Parameter(Descriptor::kReceiver); - Node* string = LoadJSPrimitiveWrapperValue(value); + TNode<JSPrimitiveWrapper> value = CAST(Parameter(Descriptor::kReceiver)); + TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(value)); Return(LoadStringLengthAsSmi(string)); } @@ -388,15 +388,6 @@ TF_BUILTIN(StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) { Generate_StoreFastElementIC(STORE_HANDLE_COW); } -TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) { - Node* name = Parameter(Descriptor::kName); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name, slot, vector); -} - 
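
The InnerResume refactoring in builtins-generator-gen.cc above keeps a single shared resume body and lets each entry point differ only in the receiver check it performs before delegating; the diff's own comment notes that async modules are built on JSAsyncFunctionObjects with an initial yield, so AsyncModuleEvaluate resumes with kNext. A minimal standalone C++ sketch of that shape (illustrative names only, not V8's actual types or CodeStubAssembler code) might look like:

    #include <cstring>
    #include <stdexcept>

    enum class ResumeMode { kNext, kReturn, kThrow };

    struct Receiver {
      const char* instance_type;  // stand-in for the object's instance type
    };

    // Shared body: assumes the receiver was already type-checked by the caller.
    void InnerResume(Receiver& receiver, ResumeMode mode) {
      // ... continuation/suspend handling common to all resume entry points ...
    }

    // Generator prototype methods accept JSGeneratorObject receivers.
    void GeneratorPrototypeResume(Receiver& receiver, ResumeMode mode) {
      if (std::strcmp(receiver.instance_type, "JS_GENERATOR_OBJECT_TYPE") != 0)
        throw std::runtime_error("TypeError: receiver is not a generator");
      InnerResume(receiver, mode);
    }

    // AsyncModuleEvaluate expects the JSAsyncFunctionObject backing the module
    // and always resumes with kNext, since the module body begins with an
    // initial yield.
    void AsyncModuleEvaluate(Receiver& receiver) {
      if (std::strcmp(receiver.instance_type,
                      "JS_ASYNC_FUNCTION_OBJECT_TYPE") != 0)
        throw std::runtime_error("TypeError: receiver is not an async function object");
      InnerResume(receiver, ResumeMode::kNext);
    }
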
TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); @@ -411,14 +402,6 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) { TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector); } -TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kGetProperty, context, receiver, name); -} - TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); @@ -491,17 +474,6 @@ TF_BUILTIN(KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW, Generate_KeyedStoreIC_SloppyArguments(); } -TF_BUILTIN(StoreInterceptorIC, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context, value, slot, - vector, receiver, name); -} - TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* key = Parameter(Descriptor::kName); diff --git a/chromium/v8/src/builtins/builtins-internal-gen.cc b/chromium/v8/src/builtins/builtins-internal-gen.cc index 445c8c95173..0625b8affcd 100644 --- a/chromium/v8/src/builtins/builtins-internal-gen.cc +++ b/chromium/v8/src/builtins/builtins-internal-gen.cc @@ -18,9 +18,6 @@ namespace v8 { namespace internal { -template <typename T> -using TNode = compiler::TNode<T>; - // ----------------------------------------------------------------------------- // Stack checks. @@ -32,12 +29,14 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) { // TurboFan support builtins. TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) { - Node* object = Parameter(Descriptor::kObject); + TNode<JSObject> js_object = CAST(Parameter(Descriptor::kObject)); // Load the {object}s elements. 
- TNode<Object> source = LoadObjectField(object, JSObject::kElementsOffset); - Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays); - StoreObjectField(object, JSObject::kElementsOffset, target); + TNode<FixedArrayBase> source = + CAST(LoadObjectField(js_object, JSObject::kElementsOffset)); + TNode<FixedArrayBase> target = + CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays); + StoreObjectField(js_object, JSObject::kElementsOffset, target); Return(target); } @@ -47,7 +46,7 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Label runtime(this, Label::kDeferred); - Node* elements = LoadElements(object); + TNode<FixedArrayBase> elements = LoadElements(object); elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS, key, &runtime); Return(elements); @@ -62,7 +61,7 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Label runtime(this, Label::kDeferred); - Node* elements = LoadElements(object); + TNode<FixedArrayBase> elements = LoadElements(object); elements = TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key, &runtime); Return(elements); @@ -274,25 +273,24 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET)); } - void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type, - MachineType arg0_type, - Node* function, Node* arg0, - Node* mode, Label* next) { + void CallCFunction2WithCallerSavedRegistersMode( + MachineType return_type, MachineType arg0_type, MachineType arg1_type, + Node* function, Node* arg0, Node* arg1, Node* mode, Label* next) { Label dont_save_fp(this), save_fp(this); Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp); BIND(&dont_save_fp); { - CallCFunctionWithCallerSavedRegisters(function, return_type, - kDontSaveFPRegs, - std::make_pair(arg0_type, arg0)); + CallCFunctionWithCallerSavedRegisters( + function, return_type, kDontSaveFPRegs, + std::make_pair(arg0_type, arg0), std::make_pair(arg1_type, arg1)); Goto(next); } BIND(&save_fp); { - CallCFunctionWithCallerSavedRegisters(function, return_type, - kSaveFPRegs, - std::make_pair(arg0_type, arg0)); + CallCFunctionWithCallerSavedRegisters(function, return_type, kSaveFPRegs, + std::make_pair(arg0_type, arg0), + std::make_pair(arg1_type, arg1)); Goto(next); } } @@ -321,34 +319,82 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { } } - void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode, - Label* next) { - TNode<ExternalReference> store_buffer_top_addr = - ExternalConstant(ExternalReference::store_buffer_top(this->isolate())); - Node* store_buffer_top = - Load(MachineType::Pointer(), store_buffer_top_addr); - StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top, - slot); - TNode<WordT> new_store_buffer_top = - IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize)); - StoreNoWriteBarrier(MachineType::PointerRepresentation(), - store_buffer_top_addr, new_store_buffer_top); - - TNode<WordT> test = - WordAnd(new_store_buffer_top, - IntPtrConstant(Heap::store_buffer_mask_constant())); - - Label overflow(this); - Branch(IntPtrEqual(test, IntPtrConstant(0)), &overflow, next); - - BIND(&overflow); - { - TNode<ExternalReference> function = - ExternalConstant(ExternalReference::store_buffer_overflow_function()); - CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(), - 
MachineType::Pointer(), - function, isolate, mode, next); - } + void InsertIntoRememberedSetAndGotoSlow(Node* isolate, TNode<IntPtrT> object, + TNode<IntPtrT> slot, Node* mode, + Label* next) { + TNode<IntPtrT> page = PageFromAddress(object); + TNode<ExternalReference> function = + ExternalConstant(ExternalReference::insert_remembered_set_function()); + CallCFunction2WithCallerSavedRegistersMode( + MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(), + function, page, slot, mode, next); + } + + void InsertIntoRememberedSetAndGoto(Node* isolate, TNode<IntPtrT> object, + TNode<IntPtrT> slot, Node* mode, + Label* next) { + Label slow_path(this); + TNode<IntPtrT> page = PageFromAddress(object); + + // Load address of SlotSet + TNode<IntPtrT> slot_set_array = LoadSlotSetArray(page, &slow_path); + TNode<IntPtrT> slot_offset = IntPtrSub(slot, page); + + // Load bucket + TNode<IntPtrT> bucket = LoadBucket(slot_set_array, slot_offset, &slow_path); + + // Update cell + SetBitInCell(bucket, slot_offset); + + Goto(next); + + BIND(&slow_path); + InsertIntoRememberedSetAndGotoSlow(isolate, object, slot, mode, next); + } + + TNode<IntPtrT> LoadSlotSetArray(TNode<IntPtrT> page, Label* slow_path) { + TNode<IntPtrT> slot_set_array = UncheckedCast<IntPtrT>( + Load(MachineType::Pointer(), page, + IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset))); + GotoIf(WordEqual(slot_set_array, IntPtrConstant(0)), slow_path); + + return slot_set_array; + } + + TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set_array, + TNode<WordT> slot_offset, Label* slow_path) { + // Assume here that SlotSet only contains of buckets + DCHECK_EQ(SlotSet::kSize, SlotSet::kBuckets * sizeof(SlotSet::Bucket)); + TNode<WordT> bucket_index = + WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2); + TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>( + Load(MachineType::Pointer(), slot_set_array, + WordShl(bucket_index, kSystemPointerSizeLog2))); + GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path); + return bucket; + } + + void SetBitInCell(TNode<IntPtrT> bucket, TNode<WordT> slot_offset) { + // Load cell value + TNode<WordT> cell_offset = WordAnd( + WordShr(slot_offset, SlotSet::kBitsPerCellLog2 + kTaggedSizeLog2 - + SlotSet::kCellSizeBytesLog2), + IntPtrConstant((SlotSet::kCellsPerBucket - 1) + << SlotSet::kCellSizeBytesLog2)); + TNode<IntPtrT> cell_address = + UncheckedCast<IntPtrT>(IntPtrAdd(bucket, cell_offset)); + TNode<IntPtrT> old_cell_value = + ChangeInt32ToIntPtr(Load<Int32T>(cell_address)); + + // Calculate new cell value + TNode<WordT> bit_index = WordAnd(WordShr(slot_offset, kTaggedSizeLog2), + IntPtrConstant(SlotSet::kBitsPerCell - 1)); + TNode<IntPtrT> new_cell_value = UncheckedCast<IntPtrT>( + WordOr(old_cell_value, WordShl(IntPtrConstant(1), bit_index))); + + // Update cell value + StoreNoWriteBarrier(MachineRepresentation::kWord32, cell_address, + TruncateIntPtrToInt32(new_cell_value)); } }; @@ -399,7 +445,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { TNode<ExternalReference> isolate_constant = ExternalConstant(ExternalReference::isolate_address(isolate())); Node* fp_mode = Parameter(Descriptor::kFPMode); - InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit); + TNode<IntPtrT> object = + BitcastTaggedToWord(Parameter(Descriptor::kObject)); + InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode, + &exit); } BIND(&store_buffer_incremental_wb); @@ -407,8 +456,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { TNode<ExternalReference> 
isolate_constant = ExternalConstant(ExternalReference::isolate_address(isolate())); Node* fp_mode = Parameter(Descriptor::kFPMode); - InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, - &incremental_wb); + TNode<IntPtrT> object = + BitcastTaggedToWord(Parameter(Descriptor::kObject)); + InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode, + &incremental_wb); } } @@ -532,8 +583,8 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) { TNode<Smi> language_mode = CAST(Parameter(Descriptor::kLanguageMode)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged, key); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_index(this), if_unique_name(this), if_notunique(this), if_notfound(this), slow(this), if_proxy(this); @@ -554,8 +605,7 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) { BIND(&if_unique_name); { Comment("key is unique name"); - TNode<Name> unique = CAST(var_unique.value()); - CheckForAssociatedProtector(unique, &slow); + CheckForAssociatedProtector(var_unique.value(), &slow); Label dictionary(this), dont_delete(this); GotoIf(IsDictionaryMap(receiver_map), &dictionary); @@ -570,8 +620,8 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) { TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver))); - DeleteDictionaryProperty(receiver, properties, unique, context, - &dont_delete, &if_notfound); + DeleteDictionaryProperty(receiver, properties, var_unique.value(), + context, &dont_delete, &if_notfound); } BIND(&dont_delete); @@ -587,7 +637,7 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) { { // If the string was not found in the string table, then no object can // have a property with that name. 
- TryInternalizeString(key, &if_index, &var_index, &if_unique_name, + TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name, &var_unique, &if_notfound, &slow); } @@ -719,11 +769,11 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) { } TF_BUILTIN(ForInEnumerate, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* context = Parameter(Descriptor::kContext); + TNode<HeapObject> receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label if_empty(this), if_runtime(this, Label::kDeferred); - Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime); + TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime); Return(receiver_map); BIND(&if_empty); @@ -934,12 +984,6 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) { } #endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) -#ifndef V8_TARGET_ARCH_ARM -void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) { - masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET); -} -#endif // V8_TARGET_ARCH_ARM - #ifndef V8_TARGET_ARCH_IA32 void Builtins::Generate_MemMove(MacroAssembler* masm) { masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET); diff --git a/chromium/v8/src/builtins/builtins-intl-gen.cc b/chromium/v8/src/builtins/builtins-intl-gen.cc index 1a9a3b7fd9a..23305537210 100644 --- a/chromium/v8/src/builtins/builtins-intl-gen.cc +++ b/chromium/v8/src/builtins/builtins-intl-gen.cc @@ -17,9 +17,6 @@ namespace v8 { namespace internal { -template <class T> -using TNode = compiler::TNode<T>; - class IntlBuiltinsAssembler : public CodeStubAssembler { public: explicit IntlBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -30,6 +27,16 @@ class IntlBuiltinsAssembler : public CodeStubAssembler { const char* method_name); TNode<JSArray> AllocateEmptyJSArray(TNode<Context> context); + + TNode<IntPtrT> PointerToSeqStringData(TNode<String> seq_string) { + CSA_ASSERT(this, + IsSequentialStringInstanceType(LoadInstanceType(seq_string))); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == + SeqTwoByteString::kHeaderSize); + return IntPtrAdd( + BitcastTaggedToWord(seq_string), + IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + } }; TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { @@ -61,35 +68,35 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { &call_c); { - Node* const dst_ptr = PointerToSeqStringData(dst); - VARIABLE(var_cursor, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + const TNode<IntPtrT> dst_ptr = PointerToSeqStringData(dst); + TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0)); - TNode<RawPtrT> const start_address = to_direct.PointerToData(&call_c); + TNode<IntPtrT> const start_address = + ReinterpretCast<IntPtrT>(to_direct.PointerToData(&call_c)); TNode<IntPtrT> const end_address = Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length))); TNode<ExternalReference> const to_lower_table_addr = ExternalConstant(ExternalReference::intl_to_latin1_lower_table()); - VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0)); + TVARIABLE(Word32T, var_did_change, Int32Constant(0)); VariableList push_vars({&var_cursor, &var_did_change}, zone()); - BuildFastLoop( + BuildFastLoop<IntPtrT>( push_vars, start_address, end_address, - [=, &var_cursor, &var_did_change](Node* current) { + [&](TNode<IntPtrT> current) { TNode<Uint8T> c = 
Load<Uint8T>(current); TNode<Uint8T> lower = Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c)); StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr, var_cursor.value(), lower); - var_did_change.Bind( - Word32Or(Word32NotEqual(c, lower), var_did_change.value())); + var_did_change = + Word32Or(Word32NotEqual(c, lower), var_did_change.value()); Increment(&var_cursor); }, - kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kCharSize, IndexAdvanceMode::kPost); // Return the original string if it remained unchanged in order to preserve // e.g. internalization and private symbols (such as the preserved object @@ -110,9 +117,9 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { MachineType type_tagged = MachineType::AnyTagged(); - Node* const result = CallCFunction(function_addr, type_tagged, - std::make_pair(type_tagged, src), - std::make_pair(type_tagged, dst)); + const TNode<String> result = CAST(CallCFunction( + function_addr, type_tagged, std::make_pair(type_tagged, src), + std::make_pair(type_tagged, dst))); Return(result); } @@ -142,7 +149,7 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context, TNode<Int32T> argc, Runtime::FunctionId format_func_id, const char* method_name) { - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); // Label has_list(this); // 1. Let lf be this value. @@ -151,32 +158,18 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context, // 3. If lf does not have an [[InitializedListFormat]] internal slot, throw a // TypeError exception. - ThrowIfNotInstanceType(context, receiver, JS_INTL_LIST_FORMAT_TYPE, - method_name); + ThrowIfNotInstanceType(context, receiver, JS_LIST_FORMAT_TYPE, method_name); TNode<JSListFormat> list_format = CAST(receiver); - // 4. If list is not provided or is undefined, then TNode<Object> list = args.GetOptionalArgumentValue(0); - Label has_list(this); - { - GotoIfNot(IsUndefined(list), &has_list); - if (format_func_id == Runtime::kFormatList) { - // a. Return an empty String. - args.PopAndReturn(EmptyStringConstant()); - } else { - DCHECK_EQ(format_func_id, Runtime::kFormatListToParts); - // a. Return an empty Array. - args.PopAndReturn(AllocateEmptyJSArray(context)); - } - } - BIND(&has_list); { - // 5. Let x be ? IterableToList(list). - TNode<Object> x = - CallBuiltin(Builtins::kIterableToListWithSymbolLookup, context, list); + // 4. Let stringList be ? StringListFromIterable(list). + TNode<Object> string_list = + CallBuiltin(Builtins::kStringListFromIterable, context, list); - // 6. Return ? FormatList(lf, x). - args.PopAndReturn(CallRuntime(format_func_id, context, list_format, x)); + // 6. Return ? FormatList(lf, stringList). 
+ args.PopAndReturn( + CallRuntime(format_func_id, context, list_format, string_list)); } } diff --git a/chromium/v8/src/builtins/builtins-intl.cc b/chromium/v8/src/builtins/builtins-intl.cc index ff8e96f4f51..81954a481f0 100644 --- a/chromium/v8/src/builtins/builtins-intl.cc +++ b/chromium/v8/src/builtins/builtins-intl.cc @@ -83,13 +83,8 @@ BUILTIN(NumberFormatPrototypeFormatToParts) { Handle<Object> x; if (args.length() >= 2) { - if (FLAG_harmony_intl_bigint) { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, x, Object::ToNumeric(isolate, args.at(1))); - } else { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, - Object::ToNumber(isolate, args.at(1))); - } + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, + Object::ToNumeric(isolate, args.at(1))); } else { x = isolate->factory()->nan_value(); } @@ -282,8 +277,8 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate, // 3. Perform ? Initialize<T>(Format, locales, options). Handle<T> format; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, format, - T::New(isolate, map, locales, options)); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, format, T::New(isolate, map, locales, options, method)); // 4. Let this be the this value. Handle<Object> receiver = args.receiver(); @@ -367,7 +362,8 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate, * Common code shared by Collator and V8BreakIterator */ template <class T> -Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) { +Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate, + const char* method) { Handle<JSReceiver> new_target; if (args.new_target()->IsUndefined(isolate)) { @@ -386,7 +382,8 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) { ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); - RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options)); + RETURN_RESULT_OR_FAILURE(isolate, + T::New(isolate, map, locales, options, method)); } } // namespace @@ -466,13 +463,8 @@ BUILTIN(NumberFormatInternalFormatNumber) { // 4. Let x be ? ToNumeric(value). 
Handle<Object> numeric_obj; - if (FLAG_harmony_intl_bigint) { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj, - Object::ToNumeric(isolate, value)); - } else { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj, - Object::ToNumber(isolate, value)); - } + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj, + Object::ToNumeric(isolate, value)); icu::number::LocalizedNumberFormatter* icu_localized_number_formatter = number_format->icu_number_formatter().raw(); @@ -884,7 +876,7 @@ BUILTIN(CollatorConstructor) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kCollator); - return CallOrConstructConstructor<JSCollator>(args, isolate); + return CallOrConstructConstructor<JSCollator>(args, isolate, "Intl.Collator"); } BUILTIN(CollatorPrototypeResolvedOptions) { @@ -1069,7 +1061,8 @@ BUILTIN(SegmenterPrototypeSegment) { BUILTIN(V8BreakIteratorConstructor) { HandleScope scope(isolate); - return CallOrConstructConstructor<JSV8BreakIterator>(args, isolate); + return CallOrConstructConstructor<JSV8BreakIterator>(args, isolate, + "Intl.v8BreakIterator"); } BUILTIN(V8BreakIteratorPrototypeResolvedOptions) { diff --git a/chromium/v8/src/builtins/builtins-iterator-gen.cc b/chromium/v8/src/builtins/builtins-iterator-gen.cc index 7bd5acfdcda..2f8761902b5 100644 --- a/chromium/v8/src/builtins/builtins-iterator-gen.cc +++ b/chromium/v8/src/builtins/builtins-iterator-gen.cc @@ -241,6 +241,104 @@ TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) { Return(IterableToList(context, iterable, iterator_fn)); } +TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) { + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable)); + TNode<Smi> expected_length = CAST(Parameter(Descriptor::kExpectedLength)); + + TNode<Object> iterator_fn = GetIteratorMethod(context, iterable); + + IteratorRecord iterator_record = GetIterator(context, iterable, iterator_fn); + + GrowableFixedArray values(state()); + + Variable* vars[] = {values.var_array(), values.var_length(), + values.var_capacity()}; + Label loop_start(this, 3, vars), compare_length(this), done(this); + Goto(&loop_start); + BIND(&loop_start); + { + TNode<JSReceiver> next = + IteratorStep(context, iterator_record, &compare_length); + TNode<Object> next_value = IteratorValue(context, next); + values.Push(next_value); + Goto(&loop_start); + } + + BIND(&compare_length); + GotoIf(WordEqual(SmiUntag(expected_length), values.var_length()->value()), + &done); + Return(CallRuntime( + Runtime::kThrowTypeError, context, + SmiConstant(MessageTemplate::kWasmTrapMultiReturnLengthMismatch))); + + BIND(&done); + Return(values.var_array()->value()); +} + +TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable( + TNode<Context> context, TNode<Object> iterable) { + Label done(this); + GrowableFixedArray list(state()); + // 1. If iterable is undefined, then + // a. Return a new empty List. + GotoIf(IsUndefined(iterable), &done); + + // 2. Let iteratorRecord be ? GetIterator(items). + IteratorRecord iterator_record = GetIterator(context, iterable); + + // 3. Let list be a new empty List. + + Variable* vars[] = {list.var_array(), list.var_length(), list.var_capacity()}; + Label loop_start(this, 3, vars); + Goto(&loop_start); + // 4. Let next be true. + // 5. Repeat, while next is not false + Label if_isnotstringtype(this, Label::kDeferred), + if_exception(this, Label::kDeferred); + BIND(&loop_start); + { + // a. Set next to ? IteratorStep(iteratorRecord). 
+ TNode<JSReceiver> next = IteratorStep(context, iterator_record, &done); + // b. If next is not false, then + // i. Let nextValue be ? IteratorValue(next). + TNode<Object> next_value = IteratorValue(context, next); + // ii. If Type(nextValue) is not String, then + GotoIf(TaggedIsSmi(next_value), &if_isnotstringtype); + TNode<Uint16T> next_value_type = LoadInstanceType(CAST(next_value)); + GotoIfNot(IsStringInstanceType(next_value_type), &if_isnotstringtype); + // iii. Append nextValue to the end of the List list. + list.Push(next_value); + Goto(&loop_start); + // 5.b.ii + BIND(&if_isnotstringtype); + { + // 1. Let error be ThrowCompletion(a newly created TypeError object). + TVARIABLE(Object, var_exception); + TNode<Object> ret = CallRuntime( + Runtime::kThrowTypeError, context, + SmiConstant(MessageTemplate::kIterableYieldedNonString), next_value); + GotoIfException(ret, &if_exception, &var_exception); + Unreachable(); + + // 2. Return ? IteratorClose(iteratorRecord, error). + BIND(&if_exception); + IteratorCloseOnException(context, iterator_record, var_exception.value()); + } + } + + BIND(&done); + // 6. Return list. + return list.ToJSArray(context); +} + +TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) { + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable)); + + Return(StringListFromIterable(context, iterable)); +} + // This builtin always returns a new JSArray and is thus safe to use even in the // presence of code that may call back into user-JS. This builtin will take the // fast path if the iterable is a fast array and the Array prototype and the @@ -354,5 +452,19 @@ TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) { } } +TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation, + IteratorBuiltinsAssembler) { + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode<Smi> callSlot = CAST(Parameter(Descriptor::kCallSlot)); + TNode<FeedbackVector> feedback = CAST(Parameter(Descriptor::kFeedback)); + TNode<Object> iteratorMethod = CAST(Parameter(Descriptor::kResult)); + + TNode<Object> result = + CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver, + iteratorMethod, callSlot, feedback); + Return(result); +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/builtins/builtins-iterator-gen.h b/chromium/v8/src/builtins/builtins-iterator-gen.h index 2a0a510f738..7d6e7d5b811 100644 --- a/chromium/v8/src/builtins/builtins-iterator-gen.h +++ b/chromium/v8/src/builtins/builtins-iterator-gen.h @@ -68,6 +68,11 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn); + // Currently at https://tc39.github.io/proposal-intl-list-format/ + // #sec-createstringlistfromiterable + TNode<JSArray> StringListFromIterable(TNode<Context> context, + TNode<Object> iterable); + void FastIterableToList(TNode<Context> context, TNode<Object> iterable, TVariable<Object>* var_result, Label* slow); }; diff --git a/chromium/v8/src/builtins/builtins-math-gen.cc b/chromium/v8/src/builtins/builtins-math-gen.cc index 42d0162f388..3bae7c06c35 100644 --- a/chromium/v8/src/builtins/builtins-math-gen.cc +++ b/chromium/v8/src/builtins/builtins-math-gen.cc @@ -143,20 +143,18 @@ void MathBuiltinsAssembler::MathRoundingOperation( } void MathBuiltinsAssembler::MathMaxMin( - Node* context, 
Node* argc, + TNode<Context> context, TNode<Int32T> argc, TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>, SloppyTNode<Float64T>), double default_val) { - CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc)); - argc = arguments.GetLength(INTPTR_PARAMETERS); + CodeStubArguments arguments(this, argc); - VARIABLE(result, MachineRepresentation::kFloat64); - result.Bind(Float64Constant(default_val)); + TVARIABLE(Float64T, result, Float64Constant(default_val)); CodeStubAssembler::VariableList vars({&result}, zone()); - arguments.ForEach(vars, [=, &result](Node* arg) { - Node* float_value = TruncateTaggedToFloat64(context, arg); - result.Bind((this->*float64op)(result.value(), float_value)); + arguments.ForEach(vars, [&](TNode<Object> arg) { + TNode<Float64T> float_value = TruncateTaggedToFloat64(context, arg); + result = (this->*float64op)(result.value(), float_value); }); arguments.PopAndReturn(ChangeFloat64ToTagged(result.value())); @@ -181,8 +179,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Node* x = Parameter(Descriptor::kX); Node* y = Parameter(Descriptor::kY); - Node* x_value = TruncateTaggedToWord32(context, x); - Node* y_value = TruncateTaggedToWord32(context, y); + TNode<Word32T> x_value = TruncateTaggedToWord32(context, x); + TNode<Word32T> y_value = TruncateTaggedToWord32(context, y); TNode<Int32T> value = Signed(Int32Mul(x_value, y_value)); TNode<Number> result = ChangeInt32ToTagged(value); Return(result); @@ -191,8 +189,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) { CodeStubAssembler::Node* MathBuiltinsAssembler::MathPow(Node* context, Node* base, Node* exponent) { - Node* base_value = TruncateTaggedToFloat64(context, base); - Node* exponent_value = TruncateTaggedToFloat64(context, exponent); + TNode<Float64T> base_value = TruncateTaggedToFloat64(context, base); + TNode<Float64T> exponent_value = TruncateTaggedToFloat64(context, exponent); TNode<Float64T> value = Float64Pow(base_value, exponent_value); return ChangeFloat64ToTagged(value); } @@ -260,19 +258,17 @@ TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) { // ES6 #sec-math.max TF_BUILTIN(MathMax, MathBuiltinsAssembler) { - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. - Node* context = Parameter(Descriptor::kContext); - Node* argc = Parameter(Descriptor::kJSActualArgumentsCount); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); MathMaxMin(context, argc, &CodeStubAssembler::Float64Max, -1.0 * V8_INFINITY); } // ES6 #sec-math.min TF_BUILTIN(MathMin, MathBuiltinsAssembler) { - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. 
- Node* context = Parameter(Descriptor::kContext); - Node* argc = Parameter(Descriptor::kJSActualArgumentsCount); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); MathMaxMin(context, argc, &CodeStubAssembler::Float64Min, V8_INFINITY); } diff --git a/chromium/v8/src/builtins/builtins-math-gen.h b/chromium/v8/src/builtins/builtins-math-gen.h index 4bb76d96922..4de654fa201 100644 --- a/chromium/v8/src/builtins/builtins-math-gen.h +++ b/chromium/v8/src/builtins/builtins-math-gen.h @@ -21,7 +21,7 @@ class MathBuiltinsAssembler : public CodeStubAssembler { void MathRoundingOperation( Node* context, Node* x, TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>)); - void MathMaxMin(Node* context, Node* argc, + void MathMaxMin(TNode<Context> context, TNode<Int32T> argc, TNode<Float64T> (CodeStubAssembler::*float64op)( SloppyTNode<Float64T>, SloppyTNode<Float64T>), double default_val); diff --git a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc index 427fd6edb65..62aee3b300b 100644 --- a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -14,9 +14,6 @@ namespace v8 { namespace internal { -template <typename T> -using TNode = compiler::TNode<T>; - class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler { public: explicit MicrotaskQueueBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -60,23 +57,20 @@ TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue( TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskRingBuffer( TNode<RawPtrT> microtask_queue) { - return UncheckedCast<RawPtrT>( - Load(MachineType::Pointer(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kRingBufferOffset))); + return Load<RawPtrT>(microtask_queue, + IntPtrConstant(MicrotaskQueue::kRingBufferOffset)); } TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueCapacity( TNode<RawPtrT> microtask_queue) { - return UncheckedCast<IntPtrT>( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kCapacityOffset))); + return Load<IntPtrT>(microtask_queue, + IntPtrConstant(MicrotaskQueue::kCapacityOffset)); } TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueSize( TNode<RawPtrT> microtask_queue) { - return UncheckedCast<IntPtrT>( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kSizeOffset))); + return Load<IntPtrT>(microtask_queue, + IntPtrConstant(MicrotaskQueue::kSizeOffset)); } void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueSize( @@ -87,9 +81,8 @@ void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueSize( TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueStart( TNode<RawPtrT> microtask_queue) { - return UncheckedCast<IntPtrT>( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kStartOffset))); + return Load<IntPtrT>(microtask_queue, + IntPtrConstant(MicrotaskQueue::kStartOffset)); } void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueStart( @@ -125,7 +118,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( TNode<Map> microtask_map = LoadMap(microtask); TNode<Uint16T> microtask_type = LoadMapInstanceType(microtask_map); - VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant()); + TVARIABLE(HeapObject, var_exception, TheHoleConstant()); Label if_exception(this, 
Label::kDeferred); Label is_callable(this), is_callback(this), is_promise_fulfill_reaction_job(this), @@ -295,9 +288,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount( TNode<RawPtrT> microtask_queue) { - TNode<IntPtrT> count = UncheckedCast<IntPtrT>( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset))); + TNode<IntPtrT> count = Load<IntPtrT>( + microtask_queue, + IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset)); TNode<IntPtrT> new_count = IntPtrAdd(count, IntPtrConstant(1)); StoreNoWriteBarrier( MachineType::PointerRepresentation(), microtask_queue, @@ -306,6 +299,8 @@ void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount( TNode<Context> MicrotaskQueueBuiltinsAssembler::GetCurrentContext() { auto ref = ExternalReference::Create(kContextAddress, isolate()); + // TODO(delphick): Add a checked cast. For now this is not possible as context + // can actually be Smi(0). return TNode<Context>::UncheckedCast(LoadFullTagged(ExternalConstant(ref))); } @@ -317,15 +312,13 @@ void MicrotaskQueueBuiltinsAssembler::SetCurrentContext( TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetEnteredContextCount() { auto ref = ExternalReference::handle_scope_implementer_address(isolate()); - Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref)); using ContextStack = DetachableVector<Context>; TNode<IntPtrT> size_offset = IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset + ContextStack::kSizeOffset); - TNode<IntPtrT> size = - UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset)); - return size; + return Load<IntPtrT>(hsi, size_offset); } void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( @@ -333,7 +326,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( CSA_ASSERT(this, IsNativeContext(native_context)); auto ref = ExternalReference::handle_scope_implementer_address(isolate()); - Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref)); using ContextStack = DetachableVector<Context>; TNode<IntPtrT> capacity_offset = @@ -343,10 +336,8 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset + ContextStack::kSizeOffset); - TNode<IntPtrT> capacity = - UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, capacity_offset)); - TNode<IntPtrT> size = - UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset)); + TNode<IntPtrT> capacity = Load<IntPtrT>(hsi, capacity_offset); + TNode<IntPtrT> size = Load<IntPtrT>(hsi, size_offset); Label if_append(this), if_grow(this, Label::kDeferred), done(this); Branch(WordEqual(size, capacity), &if_grow, &if_append); @@ -355,7 +346,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( TNode<IntPtrT> data_offset = IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset + ContextStack::kDataOffset); - Node* data = Load(MachineType::Pointer(), hsi, data_offset); + TNode<RawPtrT> data = Load<RawPtrT>(hsi, data_offset); StoreFullTaggedNoWriteBarrier(data, TimesSystemPointerSize(size), native_context); @@ -367,7 +358,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( TNode<IntPtrT> flag_data_offset = IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset + FlagStack::kDataOffset); - Node* flag_data = 
Load(MachineType::Pointer(), hsi, flag_data_offset); + TNode<RawPtrT> flag_data = Load<RawPtrT>(hsi, flag_data_offset); StoreNoWriteBarrier(MachineRepresentation::kWord8, flag_data, size, BoolConstant(true)); StoreNoWriteBarrier( @@ -396,7 +387,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext( TNode<IntPtrT> saved_entered_context_count) { auto ref = ExternalReference::handle_scope_implementer_address(isolate()); - Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref)); using ContextStack = DetachableVector<Context>; TNode<IntPtrT> size_offset = @@ -404,8 +395,7 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext( ContextStack::kSizeOffset); #ifdef ENABLE_VERIFY_CSA - TNode<IntPtrT> size = - UncheckedCast<IntPtrT>(Load(MachineType::IntPtr(), hsi, size_offset)); + TNode<IntPtrT> size = Load<IntPtrT>(hsi, size_offset); CSA_ASSERT(this, IntPtrLessThan(IntPtrConstant(0), size)); CSA_ASSERT(this, IntPtrLessThanOrEqual(saved_entered_context_count, size)); #endif @@ -446,8 +436,7 @@ void MicrotaskQueueBuiltinsAssembler::RunPromiseHook( } TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) { - TNode<Microtask> microtask = - UncheckedCast<Microtask>(Parameter(Descriptor::kMicrotask)); + TNode<Microtask> microtask = CAST(Parameter(Descriptor::kMicrotask)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<NativeContext> native_context = LoadNativeContext(context); TNode<RawPtrT> microtask_queue = GetMicrotaskQueue(native_context); @@ -517,8 +506,7 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) { TNode<IntPtrT> offset = CalculateRingBufferOffset(capacity, start, IntPtrConstant(0)); - TNode<RawPtrT> microtask_pointer = - UncheckedCast<RawPtrT>(Load(MachineType::Pointer(), ring_buffer, offset)); + TNode<RawPtrT> microtask_pointer = Load<RawPtrT>(ring_buffer, offset); TNode<Microtask> microtask = CAST(BitcastWordToTagged(microtask_pointer)); TNode<IntPtrT> new_size = IntPtrSub(size, IntPtrConstant(1)); diff --git a/chromium/v8/src/builtins/builtins-number-gen.cc b/chromium/v8/src/builtins/builtins-number-gen.cc index 2aa996eba0d..fc737b793be 100644 --- a/chromium/v8/src/builtins/builtins-number-gen.cc +++ b/chromium/v8/src/builtins/builtins-number-gen.cc @@ -22,57 +22,58 @@ class NumberBuiltinsAssembler : public CodeStubAssembler { protected: template <typename Descriptor> void EmitBitwiseOp(Operation op) { - Node* left = Parameter(Descriptor::kLeft); - Node* right = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); - - VARIABLE(var_left_word32, MachineRepresentation::kWord32); - VARIABLE(var_right_word32, MachineRepresentation::kWord32); - VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left); - VARIABLE(var_right_bigint, MachineRepresentation::kTagged); + TNode<Object> left = CAST(Parameter(Descriptor::kLeft)); + TNode<Object> right = CAST(Parameter(Descriptor::kRight)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + + TVARIABLE(Word32T, var_left_word32); + TVARIABLE(Word32T, var_right_word32); + TVARIABLE(Object, var_left_maybe_bigint, left); + TVARIABLE(Object, var_right_maybe_bigint); Label if_left_number(this), do_number_op(this); Label if_left_bigint(this), do_bigint_op(this); TaggedToWord32OrBigInt(context, left, &if_left_number, &var_left_word32, - &if_left_bigint, &var_left_bigint); + &if_left_bigint, &var_left_maybe_bigint); 
BIND(&if_left_number); TaggedToWord32OrBigInt(context, right, &do_number_op, &var_right_word32, - &do_bigint_op, &var_right_bigint); + &do_bigint_op, &var_right_maybe_bigint); BIND(&do_number_op); Return(BitwiseOp(var_left_word32.value(), var_right_word32.value(), op)); // BigInt cases. BIND(&if_left_bigint); - TaggedToNumeric(context, right, &do_bigint_op, &var_right_bigint); + TaggedToNumeric(context, right, &do_bigint_op, &var_right_maybe_bigint); BIND(&do_bigint_op); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, - var_left_bigint.value(), var_right_bigint.value(), - SmiConstant(op))); + var_left_maybe_bigint.value(), + var_right_maybe_bigint.value(), SmiConstant(op))); } template <typename Descriptor> void RelationalComparisonBuiltin(Operation op) { - Node* lhs = Parameter(Descriptor::kLeft); - Node* rhs = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); + TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(RelationalComparison(op, lhs, rhs, context)); } template <typename Descriptor> - void UnaryOp(Variable* var_input, Label* do_smi, Label* do_double, - Variable* var_input_double, Label* do_bigint); + void UnaryOp(TVariable<Object>* var_input, Label* do_smi, Label* do_double, + TVariable<Float64T>* var_input_double, Label* do_bigint); template <typename Descriptor> - void BinaryOp(Label* smis, Variable* var_left, Variable* var_right, - Label* doubles, Variable* var_left_double, - Variable* var_right_double, Label* bigints); + void BinaryOp(Label* smis, TVariable<Object>* var_left, + TVariable<Object>* var_right, Label* doubles, + TVariable<Float64T>* var_left_double, + TVariable<Float64T>* var_right_double, Label* bigints); }; // ES6 #sec-number.isfinite TF_BUILTIN(NumberIsFinite, CodeStubAssembler) { - Node* number = Parameter(Descriptor::kNumber); + TNode<Object> number = CAST(Parameter(Descriptor::kNumber)); Label return_true(this), return_false(this); @@ -80,10 +81,11 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) { GotoIf(TaggedIsSmi(number), &return_true); // Check if {number} is a HeapNumber. - GotoIfNot(IsHeapNumber(number), &return_false); + TNode<HeapObject> number_heap_object = CAST(number); + GotoIfNot(IsHeapNumber(number_heap_object), &return_false); // Check if {number} contains a finite, non-NaN value. - TNode<Float64T> number_value = LoadHeapNumberValue(number); + TNode<Float64T> number_value = LoadHeapNumberValue(number_heap_object); BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false, &return_true); @@ -107,7 +109,7 @@ TF_BUILTIN(NumberIsInteger, CodeStubAssembler) { // ES6 #sec-number.isnan TF_BUILTIN(NumberIsNaN, CodeStubAssembler) { - Node* number = Parameter(Descriptor::kNumber); + TNode<Object> number = CAST(Parameter(Descriptor::kNumber)); Label return_true(this), return_false(this); @@ -115,10 +117,11 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) { GotoIf(TaggedIsSmi(number), &return_false); // Check if {number} is a HeapNumber. - GotoIfNot(IsHeapNumber(number), &return_false); + TNode<HeapObject> number_heap_object = CAST(number); + GotoIfNot(IsHeapNumber(number_heap_object), &return_false); // Check if {number} contains a NaN value. 
- TNode<Float64T> number_value = LoadHeapNumberValue(number); + TNode<Float64T> number_value = LoadHeapNumberValue(number_heap_object); BranchIfFloat64IsNaN(number_value, &return_true, &return_false); BIND(&return_true); @@ -136,17 +139,16 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) { // ES6 #sec-number.parsefloat TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); // We might need to loop once for ToString conversion. - VARIABLE(var_input, MachineRepresentation::kTagged, - Parameter(Descriptor::kString)); + TVARIABLE(Object, var_input, CAST(Parameter(Descriptor::kString))); Label loop(this, &var_input); Goto(&loop); BIND(&loop); { // Load the current {input} value. - Node* input = var_input.value(); + TNode<Object> input = var_input.value(); // Check if the {input} is a HeapObject or a Smi. Label if_inputissmi(this), if_inputisnotsmi(this); @@ -161,8 +163,9 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { BIND(&if_inputisnotsmi); { // The {input} is a HeapObject, check if it's already a String. + TNode<HeapObject> input_heap_object = CAST(input); Label if_inputisstring(this), if_inputisnotstring(this); - TNode<Map> input_map = LoadMap(input); + TNode<Map> input_map = LoadMap(input_heap_object); TNode<Uint16T> input_instance_type = LoadMapInstanceType(input_map); Branch(IsStringInstanceType(input_instance_type), &if_inputisstring, &if_inputisnotstring); @@ -172,7 +175,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { // The {input} is already a String, check if {input} contains // a cached array index. Label if_inputcached(this), if_inputnotcached(this); - TNode<Uint32T> input_hash = LoadNameHashField(input); + TNode<Uint32T> input_hash = LoadNameHashField(CAST(input)); Branch(IsClearWord32(input_hash, Name::kDoesNotContainCachedArrayIndexMask), &if_inputcached, &if_inputnotcached); @@ -204,7 +207,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { { // The {input} is already a Number, take care of -0. Label if_inputiszero(this), if_inputisnotzero(this); - TNode<Float64T> input_value = LoadHeapNumberValue(input); + TNode<Float64T> input_value = LoadHeapNumberValue(input_heap_object); Branch(Float64Equal(input_value, Float64Constant(0.0)), &if_inputiszero, &if_inputisnotzero); @@ -219,7 +222,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { { // Need to convert the {input} to String first. // TODO(bmeurer): This could be more efficient if necessary. 
- var_input.Bind(CallBuiltin(Builtins::kToString, context, input)); + var_input = CallBuiltin(Builtins::kToString, context, input); Goto(&loop); } } @@ -309,9 +312,9 @@ TF_BUILTIN(ParseInt, CodeStubAssembler) { // ES6 #sec-number.parseint TF_BUILTIN(NumberParseInt, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kString); - Node* radix = Parameter(Descriptor::kRadix); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> input = CAST(Parameter(Descriptor::kString)); + TNode<Object> radix = CAST(Parameter(Descriptor::kRadix)); Return(CallBuiltin(Builtins::kParseInt, context, input, radix)); } @@ -331,27 +334,29 @@ class AddStubAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: - void ConvertReceiverAndLoop(Variable* var_value, Label* loop, Node* context) { + TNode<Object> ConvertReceiver(TNode<JSReceiver> js_receiver, + TNode<Context> context) { // Call ToPrimitive explicitly without hint (whereas ToNumber // would pass a "number" hint). Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); - var_value->Bind(CallStub(callable, context, var_value->value())); - Goto(loop); + return CallStub(callable, context, js_receiver); } - void ConvertNonReceiverAndLoop(Variable* var_value, Label* loop, - Node* context) { - var_value->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, - var_value->value())); + void ConvertNonReceiverAndLoop(TVariable<Object>* var_value, Label* loop, + TNode<Context> context) { + *var_value = + CallBuiltin(Builtins::kNonNumberToNumeric, context, var_value->value()); Goto(loop); } - void ConvertAndLoop(Variable* var_value, Node* instance_type, Label* loop, - Node* context) { + void ConvertAndLoop(TVariable<Object>* var_value, + TNode<Uint16T> instance_type, Label* loop, + TNode<Context> context) { Label is_not_receiver(this, Label::kDeferred); GotoIfNot(IsJSReceiverInstanceType(instance_type), &is_not_receiver); - ConvertReceiverAndLoop(var_value, loop, context); + *var_value = ConvertReceiver(CAST(var_value->value()), context); + Goto(loop); BIND(&is_not_receiver); ConvertNonReceiverAndLoop(var_value, loop, context); @@ -359,30 +364,26 @@ class AddStubAssembler : public CodeStubAssembler { }; TF_BUILTIN(Add, AddStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_left, MachineRepresentation::kTagged, - Parameter(Descriptor::kLeft)); - VARIABLE(var_right, MachineRepresentation::kTagged, - Parameter(Descriptor::kRight)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TVARIABLE(Object, var_left, CAST(Parameter(Descriptor::kLeft))); + TVARIABLE(Object, var_right, CAST(Parameter(Descriptor::kRight))); // Shared entry for floating point addition. Label do_double_add(this); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); // We might need to loop several times due to ToPrimitive, ToString and/or // ToNumeric conversions. 
- VARIABLE(var_result, MachineRepresentation::kTagged); - Variable* loop_vars[2] = {&var_left, &var_right}; - Label loop(this, 2, loop_vars), + Label loop(this, {&var_left, &var_right}), string_add_convert_left(this, Label::kDeferred), string_add_convert_right(this, Label::kDeferred), do_bigint_add(this, Label::kDeferred); Goto(&loop); BIND(&loop); { - Node* left = var_left.value(); - Node* right = var_right.value(); + TNode<Object> left = var_left.value(); + TNode<Object> right = var_right.value(); Label if_left_smi(this), if_left_heapobject(this); Branch(TaggedIsSmi(left), &if_left_smi, &if_left_heapobject); @@ -395,27 +396,30 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&if_right_smi); { Label if_overflow(this); - TNode<Smi> result = TrySmiAdd(CAST(left), CAST(right), &if_overflow); + TNode<Smi> left_smi = CAST(left); + TNode<Smi> right_smi = CAST(right); + TNode<Smi> result = TrySmiAdd(left_smi, right_smi, &if_overflow); Return(result); BIND(&if_overflow); { - var_left_double.Bind(SmiToFloat64(left)); - var_right_double.Bind(SmiToFloat64(right)); + var_left_double = SmiToFloat64(left_smi); + var_right_double = SmiToFloat64(right_smi); Goto(&do_double_add); } } // if_right_smi BIND(&if_right_heapobject); { - TNode<Map> right_map = LoadMap(right); + TNode<HeapObject> right_heap_object = CAST(right); + TNode<Map> right_map = LoadMap(right_heap_object); Label if_right_not_number(this, Label::kDeferred); GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number); // {right} is a HeapNumber. - var_left_double.Bind(SmiToFloat64(left)); - var_right_double.Bind(LoadHeapNumberValue(right)); + var_left_double = SmiToFloat64(CAST(left)); + var_right_double = LoadHeapNumberValue(right_heap_object); Goto(&do_double_add); BIND(&if_right_not_number); @@ -431,7 +435,8 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&if_left_heapobject); { - TNode<Map> left_map = LoadMap(left); + TNode<HeapObject> left_heap_object = CAST(left); + TNode<Map> left_map = LoadMap(left_heap_object); Label if_right_smi(this), if_right_heapobject(this); Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject); @@ -441,8 +446,8 @@ TF_BUILTIN(Add, AddStubAssembler) { GotoIfNot(IsHeapNumberMap(left_map), &if_left_not_number); // {left} is a HeapNumber, {right} is a Smi. - var_left_double.Bind(LoadHeapNumberValue(left)); - var_right_double.Bind(SmiToFloat64(right)); + var_left_double = LoadHeapNumberValue(left_heap_object); + var_right_double = SmiToFloat64(CAST(right)); Goto(&do_double_add); BIND(&if_left_not_number); @@ -458,7 +463,8 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&if_right_heapobject); { - TNode<Map> right_map = LoadMap(right); + TNode<HeapObject> right_heap_object = CAST(right); + TNode<Map> right_map = LoadMap(right_heap_object); Label if_left_number(this), if_left_not_number(this, Label::kDeferred); Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number); @@ -469,8 +475,8 @@ TF_BUILTIN(Add, AddStubAssembler) { GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number); // Both {left} and {right} are HeapNumbers. - var_left_double.Bind(LoadHeapNumberValue(left)); - var_right_double.Bind(LoadHeapNumberValue(right)); + var_left_double = LoadHeapNumberValue(CAST(left)); + var_right_double = LoadHeapNumberValue(right_heap_object); Goto(&do_double_add); BIND(&if_right_not_number); @@ -499,7 +505,8 @@ TF_BUILTIN(Add, AddStubAssembler) { GotoIfNot(IsJSReceiverInstanceType(left_instance_type), &if_left_not_receiver); // {left} is a JSReceiver, convert it first. 
- ConvertReceiverAndLoop(&var_left, &loop, context); + var_left = ConvertReceiver(CAST(var_left.value()), context); + Goto(&loop); BIND(&if_left_bigint); { @@ -515,7 +522,8 @@ TF_BUILTIN(Add, AddStubAssembler) { &if_right_not_receiver); // {left} is a Primitive, but {right} is a JSReceiver, so convert // {right} with priority. - ConvertReceiverAndLoop(&var_right, &loop, context); + var_right = ConvertReceiver(CAST(var_right.value()), context); + Goto(&loop); BIND(&if_right_not_receiver); // Neither {left} nor {right} are JSReceivers. @@ -553,54 +561,46 @@ TF_BUILTIN(Add, AddStubAssembler) { } template <typename Descriptor> -void NumberBuiltinsAssembler::UnaryOp(Variable* var_input, Label* do_smi, - Label* do_double, - Variable* var_input_double, +void NumberBuiltinsAssembler::UnaryOp(TVariable<Object>* var_input, + Label* do_smi, Label* do_double, + TVariable<Float64T>* var_input_double, Label* do_bigint) { - DCHECK_EQ(var_input->rep(), MachineRepresentation::kTagged); - DCHECK_IMPLIES(var_input_double != nullptr, - var_input_double->rep() == MachineRepresentation::kFloat64); - - Node* context = Parameter(Descriptor::kContext); - var_input->Bind(Parameter(Descriptor::kValue)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + *var_input = CAST(Parameter(Descriptor::kValue)); // We might need to loop for ToNumeric conversion. Label loop(this, {var_input}); Goto(&loop); BIND(&loop); - Node* input = var_input->value(); + TNode<Object> input = var_input->value(); Label not_number(this); GotoIf(TaggedIsSmi(input), do_smi); - GotoIfNot(IsHeapNumber(input), &not_number); + TNode<HeapObject> input_heap_object = CAST(input); + GotoIfNot(IsHeapNumber(input_heap_object), &not_number); if (var_input_double != nullptr) { - var_input_double->Bind(LoadHeapNumberValue(input)); + *var_input_double = LoadHeapNumberValue(input_heap_object); } Goto(do_double); BIND(&not_number); - GotoIf(IsBigInt(input), do_bigint); - var_input->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, input)); + GotoIf(IsBigInt(input_heap_object), do_bigint); + *var_input = CallBuiltin(Builtins::kNonNumberToNumeric, context, input); Goto(&loop); } template <typename Descriptor> -void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left, - Variable* var_right, Label* doubles, - Variable* var_left_double, - Variable* var_right_double, +void NumberBuiltinsAssembler::BinaryOp(Label* smis, TVariable<Object>* var_left, + TVariable<Object>* var_right, + Label* doubles, + TVariable<Float64T>* var_left_double, + TVariable<Float64T>* var_right_double, Label* bigints) { - DCHECK_EQ(var_left->rep(), MachineRepresentation::kTagged); - DCHECK_EQ(var_right->rep(), MachineRepresentation::kTagged); - DCHECK_IMPLIES(var_left_double != nullptr, - var_left_double->rep() == MachineRepresentation::kFloat64); - DCHECK_IMPLIES(var_right_double != nullptr, - var_right_double->rep() == MachineRepresentation::kFloat64); DCHECK_EQ(var_left_double == nullptr, var_right_double == nullptr); - Node* context = Parameter(Descriptor::kContext); - var_left->Bind(Parameter(Descriptor::kLeft)); - var_right->Bind(Parameter(Descriptor::kRight)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + *var_left = CAST(Parameter(Descriptor::kLeft)); + *var_right = CAST(Parameter(Descriptor::kRight)); // We might need to loop for ToNumeric conversions.
Label loop(this, {var_left, var_right}); @@ -613,32 +613,36 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left, GotoIf(TaggedIsSmi(var_right->value()), smis); // At this point, var_left is a Smi but var_right is not. - GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number); + TNode<Smi> var_left_smi = CAST(var_left->value()); + TNode<HeapObject> var_right_heap_object = CAST(var_right->value()); + GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number); if (var_left_double != nullptr) { - var_left_double->Bind(SmiToFloat64(var_left->value())); - var_right_double->Bind(LoadHeapNumberValue(var_right->value())); + *var_left_double = SmiToFloat64(var_left_smi); + *var_right_double = LoadHeapNumberValue(var_right_heap_object); } Goto(doubles); BIND(&left_not_smi); { - GotoIfNot(IsHeapNumber(var_left->value()), &left_not_number); + TNode<HeapObject> var_left_heap_object = CAST(var_left->value()); + GotoIfNot(IsHeapNumber(var_left_heap_object), &left_not_number); GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi); // At this point, var_left is a HeapNumber and var_right is a Smi. if (var_left_double != nullptr) { - var_left_double->Bind(LoadHeapNumberValue(var_left->value())); - var_right_double->Bind(SmiToFloat64(var_right->value())); + *var_left_double = LoadHeapNumberValue(var_left_heap_object); + *var_right_double = SmiToFloat64(CAST(var_right->value())); } Goto(doubles); } BIND(&right_not_smi); { - GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number); + TNode<HeapObject> var_right_heap_object = CAST(var_right->value()); + GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number); if (var_left_double != nullptr) { - var_left_double->Bind(LoadHeapNumberValue(var_left->value())); - var_right_double->Bind(LoadHeapNumberValue(var_right->value())); + *var_left_double = LoadHeapNumberValue(CAST(var_left->value())); + *var_right_double = LoadHeapNumberValue(var_right_heap_object); } Goto(doubles); } @@ -646,37 +650,38 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left, BIND(&left_not_number); { Label left_bigint(this); - GotoIf(IsBigInt(var_left->value()), &left_bigint); - var_left->Bind( - CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value())); + GotoIf(IsBigInt(CAST(var_left->value())), &left_bigint); + *var_left = + CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value()); Goto(&loop); BIND(&left_bigint); { // Jump to {bigints} if {var_right} is already a Numeric. 
GotoIf(TaggedIsSmi(var_right->value()), bigints); - GotoIf(IsBigInt(var_right->value()), bigints); - GotoIf(IsHeapNumber(var_right->value()), bigints); - var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, - var_right->value())); + TNode<HeapObject> var_right_heap_object = CAST(var_right->value()); + GotoIf(IsBigInt(var_right_heap_object), bigints); + GotoIf(IsHeapNumber(var_right_heap_object), bigints); + *var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context, + var_right->value()); Goto(&loop); } } BIND(&right_not_number); { - GotoIf(IsBigInt(var_right->value()), bigints); - var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, - var_right->value())); + GotoIf(IsBigInt(CAST(var_right->value())), bigints); + *var_right = + CallBuiltin(Builtins::kNonNumberToNumeric, context, var_right->value()); Goto(&loop); } } TF_BUILTIN(Subtract, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_sub(this), do_double_sub(this), do_bigint_sub(this); BinaryOp<Descriptor>(&do_smi_sub, &var_left, &var_right, &do_double_sub, @@ -685,14 +690,15 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) { BIND(&do_smi_sub); { Label if_overflow(this); - TNode<Smi> result = TrySmiSub(CAST(var_left.value()), - CAST(var_right.value()), &if_overflow); + TNode<Smi> var_left_smi = CAST(var_left.value()); + TNode<Smi> var_right_smi = CAST(var_right.value()); + TNode<Smi> result = TrySmiSub(var_left_smi, var_right_smi, &if_overflow); Return(result); BIND(&if_overflow); { - var_left_double.Bind(SmiToFloat64(var_left.value())); - var_right_double.Bind(SmiToFloat64(var_right.value())); + var_left_double = SmiToFloat64(var_left_smi); + var_right_double = SmiToFloat64(var_right_smi); Goto(&do_double_sub); } } @@ -706,15 +712,15 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) { BIND(&do_bigint_sub); { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kSubtract))); } } TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_input, MachineRepresentation::kTagged); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TVARIABLE(Object, var_input); Label do_number(this), do_bigint(this); UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint); @@ -733,8 +739,8 @@ TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) { } TF_BUILTIN(Decrement, NumberBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_input, MachineRepresentation::kTagged); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TVARIABLE(Object, var_input); Label do_number(this), do_bigint(this); UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint); @@ -753,8 +759,8 @@ TF_BUILTIN(Decrement, NumberBuiltinsAssembler) { } TF_BUILTIN(Increment, NumberBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_input, MachineRepresentation::kTagged); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + 
TVARIABLE(Object, var_input); Label do_number(this), do_bigint(this); UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint); @@ -772,8 +778,8 @@ TF_BUILTIN(Increment, NumberBuiltinsAssembler) { } TF_BUILTIN(Negate, NumberBuiltinsAssembler) { - VARIABLE(var_input, MachineRepresentation::kTagged); - VARIABLE(var_input_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_input); + TVARIABLE(Float64T, var_input_double); Label do_smi(this), do_double(this), do_bigint(this); UnaryOp<Descriptor>(&var_input, &do_smi, &do_double, &var_input_double, @@ -791,17 +797,17 @@ TF_BUILTIN(Negate, NumberBuiltinsAssembler) { BIND(&do_bigint); { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(), SmiConstant(Operation::kNegate))); } } TF_BUILTIN(Multiply, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_mul(this), do_double_mul(this), do_bigint_mul(this); BinaryOp<Descriptor>(&do_smi_mul, &var_left, &var_right, &do_double_mul, @@ -818,17 +824,17 @@ TF_BUILTIN(Multiply, NumberBuiltinsAssembler) { BIND(&do_bigint_mul); { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kMultiply))); } } TF_BUILTIN(Divide, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_div(this), do_double_div(this), do_bigint_div(this); BinaryOp<Descriptor>(&do_smi_div, &var_left, &var_right, &do_double_div, @@ -889,8 +895,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) { // division. 
BIND(&bailout); { - var_left_double.Bind(SmiToFloat64(dividend)); - var_right_double.Bind(SmiToFloat64(divisor)); + var_left_double = SmiToFloat64(dividend); + var_right_double = SmiToFloat64(divisor); Goto(&do_double_div); } } @@ -904,17 +910,17 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) { BIND(&do_bigint_div); { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kDivide))); } } TF_BUILTIN(Modulus, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_mod(this), do_double_mod(this), do_bigint_mod(this); BinaryOp<Descriptor>(&do_smi_mod, &var_left, &var_right, &do_double_mod, @@ -930,17 +936,17 @@ TF_BUILTIN(Modulus, NumberBuiltinsAssembler) { BIND(&do_bigint_mod); { - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kModulus))); } } TF_BUILTIN(Exponentiate, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); Label do_number_exp(this), do_bigint_exp(this); - Node* context = Parameter(Descriptor::kContext); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); BinaryOp<Descriptor>(&do_number_exp, &var_left, &var_right, &do_number_exp, nullptr, nullptr, &do_bigint_exp); @@ -997,9 +1003,9 @@ TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) { } TF_BUILTIN(Equal, CodeStubAssembler) { - Node* lhs = Parameter(Descriptor::kLeft); - Node* rhs = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); + TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(Equal(lhs, rhs, context)); } diff --git a/chromium/v8/src/builtins/builtins-number.cc b/chromium/v8/src/builtins/builtins-number.cc index d2fb0ff74c3..49e7ff27b85 100644 --- a/chromium/v8/src/builtins/builtins-number.cc +++ b/chromium/v8/src/builtins/builtins-number.cc @@ -111,6 +111,7 @@ BUILTIN(NumberPrototypeToFixed) { // ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] ) BUILTIN(NumberPrototypeToLocaleString) { HandleScope scope(isolate); + const char* method = "Number.prototype.toLocaleString"; isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberToLocaleString); @@ -123,17 +124,17 @@ BUILTIN(NumberPrototypeToLocaleString) { // 1. Let x be ? 
thisNumberValue(this value) if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kNotGeneric, - isolate->factory()->NewStringFromAsciiChecked( - "Number.prototype.toLocaleString"), - isolate->factory()->Number_string())); + isolate, + NewTypeError(MessageTemplate::kNotGeneric, + isolate->factory()->NewStringFromAsciiChecked(method), + isolate->factory()->Number_string())); } #ifdef V8_INTL_SUPPORT RETURN_RESULT_OR_FAILURE( isolate, Intl::NumberToLocaleString(isolate, value, args.atOrUndefined(isolate, 1), - args.atOrUndefined(isolate, 2))); + args.atOrUndefined(isolate, 2), method)); #else // Turn the {value} into a String. return *isolate->factory()->NumberToString(value); diff --git a/chromium/v8/src/builtins/builtins-object-gen.cc b/chromium/v8/src/builtins/builtins-object-gen.cc index db9d4ed6579..a35990e2f57 100644 --- a/chromium/v8/src/builtins/builtins-object-gen.cc +++ b/chromium/v8/src/builtins/builtins-object-gen.cc @@ -22,29 +22,35 @@ namespace internal { // ----------------------------------------------------------------------------- // ES6 section 19.1 Object Objects -using Node = compiler::Node; -template <class T> -using TNode = CodeStubAssembler::TNode<T>; - class ObjectBuiltinsAssembler : public CodeStubAssembler { public: explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} protected: - void ReturnToStringFormat(Node* context, Node* string); + void ReturnToStringFormat(TNode<Context> context, TNode<String> string); void AddToDictionaryIf(TNode<BoolT> condition, TNode<NameDictionary> name_dictionary, Handle<Name> name, TNode<Object> value, Label* bailout); - Node* FromPropertyDescriptor(Node* context, Node* desc); - Node* FromPropertyDetails(Node* context, Node* raw_value, Node* details, - Label* if_bailout); - Node* ConstructAccessorDescriptor(Node* context, Node* getter, Node* setter, - Node* enumerable, Node* configurable); - Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable, - Node* enumerable, Node* configurable); - Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout); + TNode<JSObject> FromPropertyDescriptor(TNode<Context> context, + TNode<FixedArray> desc); + TNode<JSObject> FromPropertyDetails(TNode<Context> context, + TNode<Object> raw_value, + TNode<Word32T> details, + Label* if_bailout); + TNode<JSObject> ConstructAccessorDescriptor(TNode<Context> context, + TNode<Object> getter, + TNode<Object> setter, + TNode<BoolT> enumerable, + TNode<BoolT> configurable); + TNode<JSObject> ConstructDataDescriptor(TNode<Context> context, + TNode<Object> value, + TNode<BoolT> writable, + TNode<BoolT> enumerable, + TNode<BoolT> configurable); + TNode<HeapObject> GetAccessorOrUndefined(TNode<HeapObject> accessor, + Label* if_bailout); }; class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { @@ -79,8 +85,8 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty); }; -void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context, - Node* string) { +void ObjectBuiltinsAssembler::ReturnToStringFormat(TNode<Context> context, + TNode<String> string) { TNode<String> lhs = StringConstant("[object "); TNode<String> rhs = StringConstant("]"); @@ -90,11 +96,9 @@ void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context, rhs)); } -Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context, - Node* getter, - Node* setter, - Node* 
enumerable, - Node* configurable) { +TNode<JSObject> ObjectBuiltinsAssembler::ConstructAccessorDescriptor( + TNode<Context> context, TNode<Object> getter, TNode<Object> setter, + TNode<BoolT> enumerable, TNode<BoolT> configurable) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Map> map = CAST(LoadContextElement( native_context, Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX)); @@ -114,11 +118,9 @@ Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context, return js_desc; } -Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context, - Node* value, - Node* writable, - Node* enumerable, - Node* configurable) { +TNode<JSObject> ObjectBuiltinsAssembler::ConstructDataDescriptor( + TNode<Context> context, TNode<Object> value, TNode<BoolT> writable, + TNode<BoolT> enumerable, TNode<BoolT> configurable) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Map> map = CAST(LoadContextElement( native_context, Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX)); @@ -260,10 +262,10 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries( TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0)); TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0)); - Variable* vars[] = {&var_descriptor_number, &var_result_index}; + VariableList vars({&var_descriptor_number, &var_result_index}, zone()); // Let desc be ? O.[[GetOwnProperty]](key). TNode<DescriptorArray> descriptors = LoadMapDescriptors(map); - Label loop(this, 2, vars), after_loop(this), next_descriptor(this); + Label loop(this, vars), after_loop(this), next_descriptor(this); Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length), &after_loop, &loop); @@ -309,11 +311,10 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries( if (collect_type == CollectType::kEntries) { // Let entry be CreateArrayFromList(« key, value »). 
- Node* array = nullptr; - Node* elements = nullptr; + TNode<JSArray> array; + TNode<FixedArrayBase> elements; std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr, - IntPtrConstant(2)); + PACKED_ELEMENTS, array_map, SmiConstant(2), {}, IntPtrConstant(2)); StoreFixedArrayElement(CAST(elements), 0, next_key, SKIP_WRITE_BARRIER); StoreFixedArrayElement(CAST(elements), 1, value, SKIP_WRITE_BARRIER); value = TNode<JSArray>::UncheckedCast(array); @@ -321,12 +322,12 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries( StoreFixedArrayElement(values_or_entries, var_result_index.value(), value); - Increment(&var_result_index, 1); + Increment(&var_result_index); Goto(&next_descriptor); BIND(&next_descriptor); { - Increment(&var_descriptor_number, 1); + Increment(&var_descriptor_number); Branch(IntPtrEqual(var_result_index.value(), object_enum_length), &after_loop, &loop); } @@ -366,9 +367,9 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) { } TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> object = CAST(Parameter(Descriptor::kReceiver)); + TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label call_runtime(this), return_true(this), return_false(this), to_primitive(this); @@ -379,12 +380,12 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { Branch(TaggedIsSmi(object), &to_primitive, &if_objectisnotsmi); BIND(&if_objectisnotsmi); - TNode<Map> map = LoadMap(object); + TNode<Map> map = LoadMap(CAST(object)); TNode<Uint16T> instance_type = LoadMapInstanceType(map); { - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_index(this), if_unique_name(this), if_notunique_name(this); TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique, @@ -407,7 +408,7 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { BIND(&if_notunique_name); { Label not_in_string_table(this); - TryInternalizeString(key, &if_index, &var_index, &if_unique_name, + TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name, &var_unique, &not_in_string_table, &call_runtime); BIND(&not_in_string_table); @@ -422,7 +423,7 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { } BIND(&to_primitive); GotoIf(IsNumber(key), &return_false); - Branch(IsName(key), &return_false, &call_runtime); + Branch(IsName(CAST(key)), &return_false, &call_runtime); BIND(&return_true); Return(TrueConstant()); @@ -454,7 +455,7 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) { // second argument. // 4.
For each element nextSource of sources, in ascending index order, args.ForEach( - [=](Node* next_source) { + [=](TNode<Object> next_source) { CallBuiltin(Builtins::kSetDataProperties, context, to, next_source); }, IntPtrConstant(1)); @@ -467,17 +468,18 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) { // ES #sec-object.keys TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> object = CAST(Parameter(Descriptor::kObject)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - VARIABLE(var_length, MachineRepresentation::kTagged); - VARIABLE(var_elements, MachineRepresentation::kTagged); + TVARIABLE(Smi, var_length); + TVARIABLE(FixedArrayBase, var_elements); Label if_empty(this, Label::kDeferred), if_empty_elements(this), if_fast(this), if_slow(this, Label::kDeferred), if_join(this); // Check if the {object} has a usable enum cache. GotoIf(TaggedIsSmi(object), &if_slow); - TNode<Map> object_map = LoadMap(object); + + TNode<Map> object_map = LoadMap(CAST(object)); TNode<Uint32T> object_bit_field3 = LoadMapBitField3(object_map); TNode<UintPtrT> object_enum_length = DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3); @@ -487,7 +489,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { // Ensure that the {object} doesn't have any elements. CSA_ASSERT(this, IsJSObjectMap(object_map)); - TNode<FixedArrayBase> object_elements = LoadElements(object); + TNode<FixedArrayBase> object_elements = LoadElements(CAST(object)); GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, &if_slow); @@ -500,20 +502,20 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { { // The {object} has a usable enum cache, use that. TNode<DescriptorArray> object_descriptors = LoadMapDescriptors(object_map); - TNode<EnumCache> object_enum_cache = CAST( - LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset)); + TNode<EnumCache> object_enum_cache = LoadObjectField<EnumCache>( + object_descriptors, DescriptorArray::kEnumCacheOffset); TNode<Object> object_enum_keys = LoadObjectField(object_enum_cache, EnumCache::kKeysOffset); // Allocate a JSArray and copy the elements from the {object_enum_keys}. - Node* array = nullptr; - Node* elements = nullptr; + TNode<JSArray> array; + TNode<FixedArrayBase> elements; TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); TNode<Smi> array_length = SmiTag(Signed(object_enum_length)); std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length, + PACKED_ELEMENTS, array_map, array_length, {}, object_enum_length, INTPTR_PARAMETERS); CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements, object_enum_length, SKIP_WRITE_BARRIER); @@ -523,8 +525,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { BIND(&if_empty); { // The {object} doesn't have any enumerable keys. - var_length.Bind(SmiConstant(0)); - var_elements.Bind(EmptyFixedArrayConstant()); + var_length = SmiConstant(0); + var_elements = EmptyFixedArrayConstant(); Goto(&if_join); } @@ -533,8 +535,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { // Let the runtime compute the elements. 
TNode<FixedArray> elements = CAST(CallRuntime(Runtime::kObjectKeys, context, object)); - var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset)); - var_elements.Bind(elements); + var_length = LoadObjectField<Smi>(elements, FixedArray::kLengthOffset); + var_elements = elements; Goto(&if_join); } @@ -544,19 +546,19 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - TNode<JSArray> array = AllocateJSArray( - array_map, CAST(var_elements.value()), CAST(var_length.value())); + TNode<JSArray> array = + AllocateJSArray(array_map, var_elements.value(), var_length.value()); Return(array); } } // ES #sec-object.getOwnPropertyNames TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> object = CAST(Parameter(Descriptor::kObject)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - VARIABLE(var_length, MachineRepresentation::kTagged); - VARIABLE(var_elements, MachineRepresentation::kTagged); + TVARIABLE(Smi, var_length); + TVARIABLE(FixedArrayBase, var_elements); Label if_empty(this, Label::kDeferred), if_empty_elements(this), if_fast(this), try_fast(this, Label::kDeferred), if_slow(this, Label::kDeferred), if_join(this); @@ -564,10 +566,11 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { // Take the slow path if the {object} IsCustomElementsReceiverInstanceType or // has any elements. GotoIf(TaggedIsSmi(object), &if_slow); - TNode<Map> object_map = LoadMap(object); + + TNode<Map> object_map = LoadMap(CAST(object)); TNode<Uint16T> instance_type = LoadMapInstanceType(object_map); GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow); - TNode<FixedArrayBase> object_elements = LoadElements(object); + TNode<FixedArrayBase> object_elements = LoadElements(CAST(object)); GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, &if_slow); @@ -600,14 +603,14 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { LoadObjectField(object_enum_cache, EnumCache::kKeysOffset); // Allocate a JSArray and copy the elements from the {object_enum_keys}. - Node* array = nullptr; - Node* elements = nullptr; TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); TNode<Smi> array_length = SmiTag(Signed(object_enum_length)); + TNode<JSArray> array; + TNode<FixedArrayBase> elements; std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length, + PACKED_ELEMENTS, array_map, array_length, {}, object_enum_length, INTPTR_PARAMETERS); CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements, object_enum_length, SKIP_WRITE_BARRIER); @@ -619,16 +622,16 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { // Let the runtime compute the elements and try initializing enum cache. 
TNode<FixedArray> elements = CAST(CallRuntime( Runtime::kObjectGetOwnPropertyNamesTryFast, context, object)); - var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset)); - var_elements.Bind(elements); + var_length = LoadObjectField<Smi>(elements, FixedArray::kLengthOffset); + var_elements = elements; Goto(&if_join); } BIND(&if_empty); { // The {object} doesn't have any enumerable keys. - var_length.Bind(SmiConstant(0)); - var_elements.Bind(EmptyFixedArrayConstant()); + var_length = SmiConstant(0); + var_elements = EmptyFixedArrayConstant(); Goto(&if_join); } @@ -637,8 +640,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { // Let the runtime compute the elements. TNode<FixedArray> elements = CAST(CallRuntime(Runtime::kObjectGetOwnPropertyNames, context, object)); - var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset)); - var_elements.Bind(elements); + var_length = LoadObjectField<Smi>(elements, FixedArray::kLengthOffset); + var_elements = elements; Goto(&if_join); } @@ -648,8 +651,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - TNode<JSArray> array = AllocateJSArray( - array_map, CAST(var_elements.value()), CAST(var_length.value())); + TNode<JSArray> array = + AllocateJSArray(array_map, var_elements.value(), var_length.value()); Return(array); } } @@ -672,9 +675,9 @@ TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) { // ES #sec-object.prototype.isprototypeof TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* value = Parameter(Descriptor::kValue); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label if_receiverisnullorundefined(this, Label::kDeferred), if_valueisnotreceiver(this, Label::kDeferred); @@ -685,31 +688,35 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) { // immediately aborts and returns false anyways. GotoIf(TaggedIsSmi(value), &if_valueisnotreceiver); - // Check if {receiver} is either null or undefined and in that case, - // invoke the ToObject builtin, which raises the appropriate error. - // Otherwise we don't need to invoke ToObject, since {receiver} is - // either already a JSReceiver, in which case ToObject is a no-op, - // or it's a Primitive and ToObject would allocate a fresh JSPrimitiveWrapper - // wrapper, which wouldn't be identical to any existing JSReceiver - // found in the prototype chain of {value}, hence it will return - // false no matter if we search for the Primitive {receiver} or - // a newly allocated JSPrimitiveWrapper wrapper for {receiver}. - GotoIf(IsNull(receiver), &if_receiverisnullorundefined); - GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined); - - // Loop through the prototype chain looking for the {receiver}. - Return(HasInPrototypeChain(context, value, receiver)); - - BIND(&if_receiverisnullorundefined); { - // If {value} is a primitive HeapObject, we need to return - // false instead of throwing an exception per order of the - // steps in the specification, so check that first here. - GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver); - - // Simulate the ToObject invocation on {receiver}. 
- ToObject(context, receiver); - Unreachable(); + TNode<HeapObject> value_heap_object = CAST(value); + + // Check if {receiver} is either null or undefined and in that case, + // invoke the ToObject builtin, which raises the appropriate error. + // Otherwise we don't need to invoke ToObject, since {receiver} is + // either already a JSReceiver, in which case ToObject is a no-op, + // or it's a Primitive and ToObject would allocate a fresh + // JSPrimitiveWrapper wrapper, which wouldn't be identical to any existing + // JSReceiver found in the prototype chain of {value}, hence it will return + // false no matter if we search for the Primitive {receiver} or + // a newly allocated JSPrimitiveWrapper wrapper for {receiver}. + GotoIf(IsNull(receiver), &if_receiverisnullorundefined); + GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined); + + // Loop through the prototype chain looking for the {receiver}. + Return(HasInPrototypeChain(context, value_heap_object, receiver)); + + BIND(&if_receiverisnullorundefined); + { + // If {value} is a primitive HeapObject, we need to return + // false instead of throwing an exception per order of the + // steps in the specification, so check that first here. + GotoIfNot(IsJSReceiver(value_heap_object), &if_valueisnotreceiver); + + // Simulate the ToObject invocation on {receiver}. + ToObject(context, receiver); + Unreachable(); + } } BIND(&if_valueisnotreceiver); @@ -731,14 +738,18 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { if_regexp(this), if_string(this), if_symbol(this, Label::kDeferred), if_value(this), if_bigint(this, Label::kDeferred); - Node* receiver = Parameter(Descriptor::kReceiver); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + + TVARIABLE(String, var_default); + TVARIABLE(HeapObject, var_holder); // This is arranged to check the likely cases first. - VARIABLE(var_default, MachineRepresentation::kTagged); - VARIABLE(var_holder, MachineRepresentation::kTagged, receiver); GotoIf(TaggedIsSmi(receiver), &if_number); - TNode<Map> receiver_map = LoadMap(receiver); + + TNode<HeapObject> reciever_heap_object = CAST(receiver); + TNode<Map> receiver_map = LoadMap(reciever_heap_object); + var_holder = reciever_heap_object; TNode<Uint16T> receiver_instance_type = LoadMapInstanceType(receiver_map); GotoIf(IsPrimitiveInstanceType(receiver_instance_type), &if_primitive); const struct { @@ -747,8 +758,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { } kJumpTable[] = {{JS_OBJECT_TYPE, &if_object}, {JS_ARRAY_TYPE, &if_array}, {JS_FUNCTION_TYPE, &if_function}, - {JS_REGEXP_TYPE, &if_regexp}, - {JS_ARGUMENTS_TYPE, &if_arguments}, + {JS_REG_EXP_TYPE, &if_regexp}, + {JS_ARGUMENTS_OBJECT_TYPE, &if_arguments}, {JS_DATE_TYPE, &if_date}, {JS_BOUND_FUNCTION_TYPE, &if_function}, {JS_API_OBJECT_TYPE, &if_apiobject}, @@ -769,30 +780,31 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { BIND(&if_apiobject); { // Lookup the @@toStringTag property on the {receiver}. 
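
Aside: the ObjectPrototypeIsPrototypeOf hunk above only introduces a typed value_heap_object handle; the observable behaviour is unchanged. A non-object value yields false even when the receiver is null or undefined, a null or undefined receiver otherwise throws via ToObject, and everything else walks value's prototype chain. A minimal standalone sketch of that control flow, with invented types and nullptr standing in for both "not an object" and "null or undefined":

// sketch_isprototypeof.cc -- standalone illustration of the control flow in
// the ObjectPrototypeIsPrototypeOf hunk above; types and names are invented
// for the sketch, not V8's.
#include <iostream>
#include <stdexcept>

struct JSObject {
  const JSObject* prototype = nullptr;  // the [[Prototype]] link
};

// Mirrors the builtin's order of checks: a non-object value can never have
// the receiver in its prototype chain (return false first, per spec order),
// a null/undefined receiver then throws (the ToObject step), otherwise walk
// value's prototype chain looking for the receiver.
bool IsPrototypeOf(const JSObject* receiver, const JSObject* value) {
  if (value == nullptr) return false;  // value is not a receiver
  if (receiver == nullptr) {
    throw std::runtime_error("TypeError: receiver is null or undefined");
  }
  for (const JSObject* p = value->prototype; p != nullptr; p = p->prototype) {
    if (p == receiver) return true;  // HasInPrototypeChain
  }
  return false;
}

int main() {
  JSObject proto;  // e.g. Object.prototype
  JSObject obj;    // e.g. an object created with proto as its prototype
  obj.prototype = &proto;

  std::cout << std::boolalpha
            << IsPrototypeOf(&proto, &obj) << "\n"    // true
            << IsPrototypeOf(&obj, &proto) << "\n";   // false
  return 0;
}
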
- VARIABLE(var_tag, MachineRepresentation::kTagged, - GetProperty(context, receiver, - isolate()->factory()->to_string_tag_symbol())); + TVARIABLE(Object, var_tag, + GetProperty(context, receiver, + isolate()->factory()->to_string_tag_symbol())); Label if_tagisnotstring(this), if_tagisstring(this); GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring); - Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring); + Branch(IsString(CAST(var_tag.value())), &if_tagisstring, + &if_tagisnotstring); BIND(&if_tagisnotstring); { - var_tag.Bind(CallRuntime(Runtime::kClassOf, context, receiver)); + var_tag = CallRuntime(Runtime::kClassOf, context, receiver); Goto(&if_tagisstring); } BIND(&if_tagisstring); - ReturnToStringFormat(context, var_tag.value()); + ReturnToStringFormat(context, CAST(var_tag.value())); } BIND(&if_arguments); { - var_default.Bind(ArgumentsToStringConstant()); + var_default = ArgumentsToStringConstant(); Goto(&checkstringtag); } BIND(&if_array); { - var_default.Bind(ArrayToStringConstant()); + var_default = ArrayToStringConstant(); Goto(&checkstringtag); } @@ -801,30 +813,30 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSFunction> boolean_constructor = CAST( LoadContextElement(native_context, Context::BOOLEAN_FUNCTION_INDEX)); - TNode<Map> boolean_initial_map = CAST(LoadObjectField( - boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode<Object> boolean_prototype = - LoadObjectField(boolean_initial_map, Map::kPrototypeOffset); - var_default.Bind(BooleanToStringConstant()); - var_holder.Bind(boolean_prototype); + TNode<Map> boolean_initial_map = LoadObjectField<Map>( + boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode<HeapObject> boolean_prototype = + LoadObjectField<HeapObject>(boolean_initial_map, Map::kPrototypeOffset); + var_default = BooleanToStringConstant(); + var_holder = boolean_prototype; Goto(&checkstringtag); } BIND(&if_date); { - var_default.Bind(DateToStringConstant()); + var_default = DateToStringConstant(); Goto(&checkstringtag); } BIND(&if_error); { - var_default.Bind(ErrorToStringConstant()); + var_default = ErrorToStringConstant(); Goto(&checkstringtag); } BIND(&if_function); { - var_default.Bind(FunctionToStringConstant()); + var_default = FunctionToStringConstant(); Goto(&checkstringtag); } @@ -833,19 +845,19 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSFunction> number_constructor = CAST( LoadContextElement(native_context, Context::NUMBER_FUNCTION_INDEX)); - TNode<Map> number_initial_map = CAST(LoadObjectField( - number_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode<Object> number_prototype = - LoadObjectField(number_initial_map, Map::kPrototypeOffset); - var_default.Bind(NumberToStringConstant()); - var_holder.Bind(number_prototype); + TNode<Map> number_initial_map = LoadObjectField<Map>( + number_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode<HeapObject> number_prototype = + LoadObjectField<HeapObject>(number_initial_map, Map::kPrototypeOffset); + var_default = NumberToStringConstant(); + var_holder = number_prototype; Goto(&checkstringtag); } BIND(&if_object); { - CSA_ASSERT(this, IsJSReceiver(receiver)); - var_default.Bind(ObjectToStringConstant()); + CSA_ASSERT(this, IsJSReceiver(CAST(receiver))); + var_default = ObjectToStringConstant(); Goto(&checkstringtag); } @@ -885,24 +897,25 @@ 
TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { }); // Lookup the @@toStringTag property on the {receiver}. - VARIABLE(var_tag, MachineRepresentation::kTagged, - GetProperty(context, receiver, - isolate()->factory()->to_string_tag_symbol())); + TVARIABLE(Object, var_tag, + GetProperty(context, receiver, + isolate()->factory()->to_string_tag_symbol())); Label if_tagisnotstring(this), if_tagisstring(this); GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring); - Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring); + Branch(IsString(CAST(var_tag.value())), &if_tagisstring, + &if_tagisnotstring); BIND(&if_tagisnotstring); { - var_tag.Bind(builtin_tag); + var_tag = builtin_tag; Goto(&if_tagisstring); } BIND(&if_tagisstring); - ReturnToStringFormat(context, var_tag.value()); + ReturnToStringFormat(context, CAST(var_tag.value())); } BIND(&if_regexp); { - var_default.Bind(RegexpToStringConstant()); + var_default = RegexpToStringConstant(); Goto(&checkstringtag); } @@ -911,12 +924,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSFunction> string_constructor = CAST( LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX)); - TNode<Map> string_initial_map = CAST(LoadObjectField( - string_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode<Object> string_prototype = - LoadObjectField(string_initial_map, Map::kPrototypeOffset); - var_default.Bind(StringToStringConstant()); - var_holder.Bind(string_prototype); + TNode<Map> string_initial_map = LoadObjectField<Map>( + string_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode<HeapObject> string_prototype = + LoadObjectField<HeapObject>(string_initial_map, Map::kPrototypeOffset); + var_default = StringToStringConstant(); + var_holder = string_prototype; Goto(&checkstringtag); } @@ -925,12 +938,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSFunction> symbol_constructor = CAST( LoadContextElement(native_context, Context::SYMBOL_FUNCTION_INDEX)); - TNode<Map> symbol_initial_map = CAST(LoadObjectField( - symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode<Object> symbol_prototype = - LoadObjectField(symbol_initial_map, Map::kPrototypeOffset); - var_default.Bind(ObjectToStringConstant()); - var_holder.Bind(symbol_prototype); + TNode<Map> symbol_initial_map = LoadObjectField<Map>( + symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode<HeapObject> symbol_prototype = + LoadObjectField<HeapObject>(symbol_initial_map, Map::kPrototypeOffset); + var_default = ObjectToStringConstant(); + var_holder = symbol_prototype; Goto(&checkstringtag); } @@ -939,12 +952,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSFunction> bigint_constructor = CAST( LoadContextElement(native_context, Context::BIGINT_FUNCTION_INDEX)); - TNode<Map> bigint_initial_map = CAST(LoadObjectField( - bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode<Object> bigint_prototype = - LoadObjectField(bigint_initial_map, Map::kPrototypeOffset); - var_default.Bind(ObjectToStringConstant()); - var_holder.Bind(bigint_prototype); + TNode<Map> bigint_initial_map = LoadObjectField<Map>( + bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode<HeapObject> bigint_prototype = + LoadObjectField<HeapObject>(bigint_initial_map, 
Map::kPrototypeOffset); + var_default = ObjectToStringConstant(); + var_holder = bigint_prototype; Goto(&checkstringtag); } @@ -956,12 +969,13 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { if_value_is_bigint(this, Label::kDeferred), if_value_is_string(this, Label::kDeferred); - Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); + TNode<Object> receiver_value = + LoadJSPrimitiveWrapperValue(CAST(reciever_heap_object)); // We need to start with the object to see if the value was a subclass // which might have interesting properties. - var_holder.Bind(receiver); + var_holder = reciever_heap_object; GotoIf(TaggedIsSmi(receiver_value), &if_value_is_number); - TNode<Map> receiver_value_map = LoadMap(receiver_value); + TNode<Map> receiver_value_map = LoadMap(CAST(receiver_value)); GotoIf(IsHeapNumberMap(receiver_value_map), &if_value_is_number); GotoIf(IsBooleanMap(receiver_value_map), &if_value_is_boolean); GotoIf(IsSymbolMap(receiver_value_map), &if_value_is_symbol); @@ -974,31 +988,31 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { BIND(&if_value_is_number); { - var_default.Bind(NumberToStringConstant()); + var_default = NumberToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_boolean); { - var_default.Bind(BooleanToStringConstant()); + var_default = BooleanToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_string); { - var_default.Bind(StringToStringConstant()); + var_default = StringToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_bigint); { - var_default.Bind(ObjectToStringConstant()); + var_default = ObjectToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_symbol); { - var_default.Bind(ObjectToStringConstant()); + var_default = ObjectToStringConstant(); Goto(&checkstringtag); } } @@ -1013,13 +1027,13 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { Goto(&loop); BIND(&loop); { - Node* holder = var_holder.value(); + TNode<HeapObject> holder = var_holder.value(); GotoIf(IsNull(holder), &return_default); TNode<Map> holder_map = LoadMap(holder); TNode<Uint32T> holder_bit_field3 = LoadMapBitField3(holder_map); GotoIf(IsSetWord32<Map::MayHaveInterestingSymbolsBit>(holder_bit_field3), &return_generic); - var_holder.Bind(LoadMapPrototype(holder_map)); + var_holder = LoadMapPrototype(holder_map); Goto(&loop); } @@ -1029,7 +1043,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { ToStringTagSymbolConstant()); GotoIf(TaggedIsSmi(tag), &return_default); GotoIfNot(IsString(CAST(tag)), &return_default); - ReturnToStringFormat(context, tag); + ReturnToStringFormat(context, CAST(tag)); } BIND(&return_default); @@ -1058,28 +1072,28 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) { BranchIfJSReceiver(prototype, &prototype_jsreceiver, &call_runtime); } - VARIABLE(map, MachineRepresentation::kTagged); - VARIABLE(properties, MachineRepresentation::kTagged); + TVARIABLE(Map, map); + TVARIABLE(HeapObject, properties); Label instantiate_map(this); BIND(&prototype_null); { Comment("Prototype is null"); - map.Bind(LoadContextElement(native_context, - Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); - properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity)); + map = CAST(LoadContextElement( + native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); + properties = AllocateNameDictionary(NameDictionary::kInitialCapacity); Goto(&instantiate_map); } BIND(&prototype_jsreceiver); { Comment("Prototype is JSReceiver"); - properties.Bind(EmptyFixedArrayConstant()); + properties = 
EmptyFixedArrayConstant(); TNode<HeapObject> object_function = CAST( LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); - TNode<Object> object_function_map = LoadObjectField( + TNode<Map> object_function_map = LoadObjectField<Map>( object_function, JSFunction::kPrototypeOrInitialMapOffset); - map.Bind(object_function_map); + map = object_function_map; GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())), &instantiate_map); Comment("Try loading the prototype info"); @@ -1087,8 +1101,8 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) { LoadMapPrototypeInfo(LoadMap(CAST(prototype)), &call_runtime); TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField( prototype_info, PrototypeInfo::kObjectCreateMapOffset); - GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), &call_runtime); - map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); + GotoIf(TaggedEqual(maybe_map, UndefinedConstant()), &call_runtime); + map = CAST(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); Goto(&instantiate_map); } @@ -1153,28 +1167,28 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { // Create a new object with the given prototype. BIND(&no_properties); { - VARIABLE(map, MachineRepresentation::kTagged); - VARIABLE(properties, MachineRepresentation::kTagged); + TVARIABLE(Map, map); + TVARIABLE(HeapObject, properties); Label non_null_proto(this), instantiate_map(this), good(this); Branch(IsNull(prototype), &good, &non_null_proto); BIND(&good); { - map.Bind(LoadContextElement( + map = CAST(LoadContextElement( context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); - properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity)); + properties = AllocateNameDictionary(NameDictionary::kInitialCapacity); Goto(&instantiate_map); } BIND(&non_null_proto); { - properties.Bind(EmptyFixedArrayConstant()); + properties = EmptyFixedArrayConstant(); TNode<HeapObject> object_function = CAST(LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX)); - TNode<Object> object_function_map = LoadObjectField( + TNode<Map> object_function_map = LoadObjectField<Map>( object_function, JSFunction::kPrototypeOrInitialMapOffset); - map.Bind(object_function_map); + map = object_function_map; GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())), &instantiate_map); // Try loading the prototype info. 
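
Aside: the CreateObjectWithoutProperties hunk above (and the parallel ObjectCreate hunk that follows) replaces IsStrongReferenceTo(maybe_map, UndefinedConstant()) with TaggedEqual and then CASTs the weakly held ObjectCreateMap out of PrototypeInfo, bailing to the runtime when the cache slot is undefined or the weak reference has been cleared. A rough standalone analogy using std::weak_ptr, with invented names rather than V8's heap machinery:

// sketch_weak_map_cache.cc -- standalone analogy for the ObjectCreateMap
// lookup above: the cached map is held weakly, and a missing or cleared
// entry falls back to the runtime path.
#include <iostream>
#include <memory>
#include <string>

struct Map { std::string debug_name; };

struct PrototypeInfo {
  std::weak_ptr<Map> object_create_map;  // weak: GC may clear it
};

std::shared_ptr<Map> SlowRuntimePath() {
  std::cout << "falling back to the runtime\n";
  return std::make_shared<Map>(Map{"fresh map from runtime"});
}

// Mirrors: GotoIf(TaggedEqual(maybe_map, UndefinedConstant()), &call_runtime);
//          map = CAST(GetHeapObjectAssumeWeak(maybe_map, &call_runtime));
std::shared_ptr<Map> GetObjectCreateMap(PrototypeInfo& info) {
  if (std::shared_ptr<Map> cached = info.object_create_map.lock()) {
    return cached;           // weak reference still alive: fast path
  }
  return SlowRuntimePath();  // never set, or cleared in the meantime
}

int main() {
  PrototypeInfo info;
  auto miss = GetObjectCreateMap(info);  // cache empty -> runtime

  auto cached = std::make_shared<Map>(Map{"cached map"});
  info.object_create_map = cached;
  std::cout << GetObjectCreateMap(info)->debug_name << "\n";  // fast path

  cached.reset();                             // simulate GC clearing the map
  auto cleared = GetObjectCreateMap(info);    // weak ref dead -> runtime
  (void)miss;
  (void)cleared;
  return 0;
}
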
@@ -1183,9 +1197,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { Comment("Load ObjectCreateMap from PrototypeInfo"); TNode<MaybeObject> maybe_map = LoadMaybeWeakObjectField( prototype_info, PrototypeInfo::kObjectCreateMapOffset); - GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), - &call_runtime); - map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); + GotoIf(TaggedEqual(maybe_map, UndefinedConstant()), &call_runtime); + map = CAST(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); Goto(&instantiate_map); } @@ -1207,8 +1220,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { // ES #sec-object.is TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) { - Node* const left = Parameter(Descriptor::kLeft); - Node* const right = Parameter(Descriptor::kRight); + TNode<Object> const left = CAST(Parameter(Descriptor::kLeft)); + TNode<Object> const right = CAST(Parameter(Descriptor::kRight)); Label return_true(this), return_false(this); BranchIfSameValue(left, right, &return_true, &return_false); @@ -1221,9 +1234,9 @@ TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) { } TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) { - Node* const value = Parameter(Descriptor::kValue); - Node* const done = Parameter(Descriptor::kDone); - Node* const context = Parameter(Descriptor::kContext); + TNode<Object> const value = CAST(Parameter(Descriptor::kValue)); + TNode<Oddball> const done = CAST(Parameter(Descriptor::kDone)); + TNode<Context> const context = CAST(Parameter(Descriptor::kContext)); TNode<NativeContext> const native_context = LoadNativeContext(context); TNode<Map> const map = CAST( @@ -1238,53 +1251,53 @@ TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) { } TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) { - Node* key = Parameter(Descriptor::kKey); - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> key = CAST(Parameter(Descriptor::kKey)); + TNode<Object> object = CAST(Parameter(Descriptor::kObject)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(HasProperty(context, object, key, kHasProperty)); } TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kLeft); - Node* callable = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> object = CAST(Parameter(Descriptor::kLeft)); + TNode<Object> callable = CAST(Parameter(Descriptor::kRight)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(InstanceOf(object, callable, context)); } // ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) { - Node* constructor = Parameter(Descriptor::kLeft); - Node* object = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> constructor = CAST(Parameter(Descriptor::kLeft)); + TNode<Object> object = CAST(Parameter(Descriptor::kRight)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(OrdinaryHasInstance(context, constructor, object)); } TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode<JSFunction> object = CAST(Parameter(Descriptor::kObject)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(GetSuperConstructor(context, object)); } TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { - Node* closure = 
Parameter(Descriptor::kClosure); - Node* receiver = Parameter(Descriptor::kReceiver); - Node* context = Parameter(Descriptor::kContext); + TNode<JSFunction> closure = CAST(Parameter(Descriptor::kClosure)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); // Get the initial map from the function, jumping to the runtime if we don't // have one. Label done(this), runtime(this); GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime); - TNode<HeapObject> maybe_map = - CAST(LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset)); + TNode<HeapObject> maybe_map = LoadObjectField<HeapObject>( + closure, JSFunction::kPrototypeOrInitialMapOffset); GotoIf(DoesntHaveInstanceType(maybe_map, MAP_TYPE), &runtime); TNode<Map> map = CAST(maybe_map); - TNode<SharedFunctionInfo> shared = - CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset)); + TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>( + closure, JSFunction::kSharedFunctionInfoOffset); TNode<BytecodeArray> bytecode_array = LoadSharedFunctionInfoBytecodeArray(shared); @@ -1293,7 +1306,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { MachineType::Uint16())); TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField( bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32())); - TNode<WordT> size = + TNode<IntPtrT> size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)), formal_parameter_count); TNode<FixedArrayBase> parameters_and_registers = @@ -1337,16 +1350,17 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { // ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P ) TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { - Node* argc = Parameter(Descriptor::kJSActualArgumentsCount); - Node* context = Parameter(Descriptor::kContext); + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget))); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode<Object> object_input = args.GetOptionalArgumentValue(0); TNode<Object> key = args.GetOptionalArgumentValue(1); // 1. Let obj be ? ToObject(O). - TNode<JSReceiver> object = ToObject_Inline(CAST(context), object_input); + TNode<JSReceiver> object = ToObject_Inline(context, object_input); // 2. Let key be ? ToPropertyKey(P). 
key = CallBuiltin(Builtins::kToName, context, key); @@ -1359,9 +1373,8 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { TNode<Uint16T> instance_type = LoadMapInstanceType(map); GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime); { - VARIABLE(var_index, MachineType::PointerRepresentation(), - IntPtrConstant(0)); - VARIABLE(var_name, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); + TVARIABLE(Name, var_name); TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_name, &call_runtime, &if_notunique_name); @@ -1369,8 +1382,9 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { BIND(&if_notunique_name); { Label not_in_string_table(this); - TryInternalizeString(key, &if_keyisindex, &var_index, &if_iskeyunique, - &var_name, ¬_in_string_table, &call_runtime); + TryInternalizeString(CAST(key), &if_keyisindex, &var_index, + &if_iskeyunique, &var_name, ¬_in_string_table, + &call_runtime); BIND(¬_in_string_table); { @@ -1384,9 +1398,9 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { { Label if_found_value(this), return_empty(this), if_not_found(this); - VARIABLE(var_value, MachineRepresentation::kTagged); - VARIABLE(var_details, MachineRepresentation::kWord32); - VARIABLE(var_raw_value, MachineRepresentation::kTagged); + TVARIABLE(Object, var_value); + TVARIABLE(Word32T, var_details); + TVARIABLE(Object, var_raw_value); TryGetOwnProperty(context, object, object, map, instance_type, var_name.value(), &if_found_value, &var_value, @@ -1394,13 +1408,13 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { &if_not_found, kReturnAccessorPair); BIND(&if_found_value); - // 4. Return FromPropertyDescriptor(desc). - Node* js_desc = FromPropertyDetails(context, var_value.value(), - var_details.value(), &call_runtime); + // 4. Return FromPropertyDetails(desc). + TNode<JSObject> js_desc = FromPropertyDetails( + context, var_value.value(), var_details.value(), &call_runtime); args.PopAndReturn(js_desc); BIND(&return_empty); - var_value.Bind(UndefinedConstant()); + var_value = UndefinedConstant(); args.PopAndReturn(UndefinedConstant()); BIND(&if_not_found); @@ -1421,7 +1435,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { TNode<FixedArray> desc_array = CAST(desc); // 4. Return FromPropertyDescriptor(desc). 
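
Aside: FromPropertyDetails, whose call site is typed to TNode<JSObject> in the hunk above and whose body appears a little further down, derives the descriptor fields by negating the packed attribute bits (kAttributesReadOnlyMask, kAttributesDontEnumMask, kAttributesDontDeleteMask). A standalone sketch of that bit-to-descriptor translation, with an invented bit layout rather than V8's real PropertyDetails encoding:

// sketch_property_details.cc -- standalone illustration of turning a packed
// "details" word into a data descriptor, as FromPropertyDetails does below.
// The bit layout is invented for the example; only the idea that attribute
// bits are stored inverted (DONT_*) follows the diff.
#include <cstdint>
#include <iostream>

enum Attribute : uint32_t {
  kReadOnly   = 1u << 0,  // cf. PropertyDetails::kAttributesReadOnlyMask
  kDontEnum   = 1u << 1,  // cf. kAttributesDontEnumMask
  kDontDelete = 1u << 2,  // cf. kAttributesDontDeleteMask
};

struct DataDescriptor {
  int value;
  bool writable;
  bool enumerable;
  bool configurable;
};

// Mirrors ConstructDataDescriptor(..., IsNotSetWord32(details, ...), ...):
// each descriptor field is the negation of the corresponding DONT_* bit.
DataDescriptor FromPropertyDetails(int value, uint32_t details) {
  return DataDescriptor{
      value,
      (details & kReadOnly) == 0,
      (details & kDontEnum) == 0,
      (details & kDontDelete) == 0,
  };
}

int main() {
  DataDescriptor d = FromPropertyDetails(42, kReadOnly | kDontEnum);
  std::cout << std::boolalpha << "value=" << d.value
            << " writable=" << d.writable          // false
            << " enumerable=" << d.enumerable      // false
            << " configurable=" << d.configurable  // true
            << "\n";
  return 0;
}
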
- Node* js_desc = FromPropertyDescriptor(context, desc_array); + TNode<JSObject> js_desc = FromPropertyDescriptor(context, desc_array); args.PopAndReturn(js_desc); } BIND(&return_undefined); @@ -1440,14 +1454,14 @@ void ObjectBuiltinsAssembler::AddToDictionaryIf( BIND(&done); } -Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, - Node* desc) { - VARIABLE(js_descriptor, MachineRepresentation::kTagged); +TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDescriptor( + TNode<Context> context, TNode<FixedArray> desc) { + TVARIABLE(JSObject, js_descriptor); TNode<Int32T> flags = LoadAndUntagToWord32ObjectField( desc, PropertyDescriptorObject::kFlagsOffset); - TNode<Word32T> has_flags = + TNode<Int32T> has_flags = Word32And(flags, Int32Constant(PropertyDescriptorObject::kHasMask)); Label if_accessor_desc(this), if_data_desc(this), if_generic_desc(this), @@ -1465,21 +1479,21 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, BIND(&if_accessor_desc); { - js_descriptor.Bind(ConstructAccessorDescriptor( + js_descriptor = ConstructAccessorDescriptor( context, LoadObjectField(desc, PropertyDescriptorObject::kGetOffset), LoadObjectField(desc, PropertyDescriptorObject::kSetOffset), IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags), - IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags))); + IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)); Goto(&return_desc); } BIND(&if_data_desc); { - js_descriptor.Bind(ConstructDataDescriptor( + js_descriptor = ConstructDataDescriptor( context, LoadObjectField(desc, PropertyDescriptorObject::kValueOffset), IsSetWord32<PropertyDescriptorObject::IsWritableBit>(flags), IsSetWord32<PropertyDescriptorObject::IsEnumerableBit>(flags), - IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags))); + IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)); Goto(&return_desc); } @@ -1529,7 +1543,7 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, IsSetWord32<PropertyDescriptorObject::IsConfigurableBit>(flags)), &bailout); - js_descriptor.Bind(js_desc); + js_descriptor = js_desc; Goto(&return_desc); BIND(&bailout); @@ -1541,36 +1555,36 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, return js_descriptor.value(); } -Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context, - Node* raw_value, - Node* details, - Label* if_bailout) { - VARIABLE(js_descriptor, MachineRepresentation::kTagged); +TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDetails( + TNode<Context> context, TNode<Object> raw_value, TNode<Word32T> details, + Label* if_bailout) { + TVARIABLE(JSObject, js_descriptor); Label if_accessor_desc(this), if_data_desc(this), return_desc(this); BranchIfAccessorPair(raw_value, &if_accessor_desc, &if_data_desc); BIND(&if_accessor_desc); { - TNode<Object> getter = - LoadObjectField(raw_value, AccessorPair::kGetterOffset); - TNode<Object> setter = - LoadObjectField(raw_value, AccessorPair::kSetterOffset); - js_descriptor.Bind(ConstructAccessorDescriptor( + TNode<AccessorPair> accessor_pair_value = CAST(raw_value); + TNode<HeapObject> getter = LoadObjectField<HeapObject>( + accessor_pair_value, AccessorPair::kGetterOffset); + TNode<HeapObject> setter = LoadObjectField<HeapObject>( + accessor_pair_value, AccessorPair::kSetterOffset); + js_descriptor = ConstructAccessorDescriptor( context, GetAccessorOrUndefined(getter, if_bailout), GetAccessorOrUndefined(setter, if_bailout), IsNotSetWord32(details, 
PropertyDetails::kAttributesDontEnumMask), - IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask))); + IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)); Goto(&return_desc); } BIND(&if_data_desc); { - js_descriptor.Bind(ConstructDataDescriptor( + js_descriptor = ConstructDataDescriptor( context, raw_value, IsNotSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), IsNotSetWord32(details, PropertyDetails::kAttributesDontEnumMask), - IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask))); + IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)); Goto(&return_desc); } @@ -1578,20 +1592,20 @@ Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context, return js_descriptor.value(); } -Node* ObjectBuiltinsAssembler::GetAccessorOrUndefined(Node* accessor, - Label* if_bailout) { +TNode<HeapObject> ObjectBuiltinsAssembler::GetAccessorOrUndefined( + TNode<HeapObject> accessor, Label* if_bailout) { Label bind_undefined(this, Label::kDeferred), return_result(this); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(HeapObject, result); GotoIf(IsNull(accessor), &bind_undefined); - result.Bind(accessor); + result = accessor; TNode<Map> map = LoadMap(accessor); // TODO(ishell): probe template instantiations cache. GotoIf(IsFunctionTemplateInfoMap(map), if_bailout); Goto(&return_result); BIND(&bind_undefined); - result.Bind(UndefinedConstant()); + result = UndefinedConstant(); Goto(&return_result); BIND(&return_result); diff --git a/chromium/v8/src/builtins/builtins-promise-gen.cc b/chromium/v8/src/builtins/builtins-promise-gen.cc index a1da55e0d93..b20b288c3d6 100644 --- a/chromium/v8/src/builtins/builtins-promise-gen.cc +++ b/chromium/v8/src/builtins/builtins-promise-gen.cc @@ -21,11 +21,10 @@ namespace v8 { namespace internal { using Node = compiler::Node; -template <class T> -using TNode = CodeStubAssembler::TNode<T>; using IteratorRecord = TorqueStructIteratorRecord; -Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) { +TNode<JSPromise> PromiseBuiltinsAssembler::AllocateJSPromise( + TNode<Context> context) { TNode<NativeContext> const native_context = LoadNativeContext(context); TNode<JSFunction> const promise_fun = CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX)); @@ -39,7 +38,7 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) { RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(promise, JSPromise::kElementsOffset, RootIndex::kEmptyFixedArray); - return promise; + return CAST(promise); } void PromiseBuiltinsAssembler::PromiseInit(Node* promise) { @@ -54,13 +53,14 @@ void PromiseBuiltinsAssembler::PromiseInit(Node* promise) { } } -Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context) { +TNode<JSPromise> PromiseBuiltinsAssembler::AllocateAndInitJSPromise( + TNode<Context> context) { return AllocateAndInitJSPromise(context, UndefinedConstant()); } -Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context, - Node* parent) { - Node* const instance = AllocateJSPromise(context); +TNode<JSPromise> PromiseBuiltinsAssembler::AllocateAndInitJSPromise( + TNode<Context> context, TNode<Object> parent) { + const TNode<JSPromise> instance = AllocateJSPromise(context); PromiseInit(instance); Label out(this); @@ -72,11 +72,12 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context, return instance; } -Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise( - Node* context, v8::Promise::PromiseState status, Node* result) 
{ +TNode<JSPromise> PromiseBuiltinsAssembler::AllocateAndSetJSPromise( + TNode<Context> context, v8::Promise::PromiseState status, + TNode<Object> result) { DCHECK_NE(Promise::kPending, status); - Node* const instance = AllocateJSPromise(context); + const TNode<JSPromise> instance = AllocateJSPromise(context); StoreObjectFieldNoWriteBarrier(instance, JSPromise::kReactionsOrResultOffset, result); STATIC_ASSERT(JSPromise::kStatusShift == 0); @@ -97,22 +98,23 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise( return instance; } -std::pair<Node*, Node*> +std::pair<TNode<JSFunction>, TNode<JSFunction>> PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions( - Node* promise, Node* debug_event, Node* native_context) { - Node* const promise_context = CreatePromiseResolvingFunctionsContext( + TNode<JSPromise> promise, TNode<Object> debug_event, + TNode<NativeContext> native_context) { + const TNode<Context> promise_context = CreatePromiseResolvingFunctionsContext( promise, debug_event, native_context); - TNode<Object> const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<Object> const resolve_info = LoadContextElement( + const TNode<Map> map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode<SharedFunctionInfo> resolve_info = CAST(LoadContextElement( native_context, - Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX); - Node* const resolve = + Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX)); + const TNode<JSFunction> resolve = AllocateFunctionWithMapAndContext(map, resolve_info, promise_context); - TNode<Object> const reject_info = LoadContextElement( + const TNode<SharedFunctionInfo> reject_info = CAST(LoadContextElement( native_context, - Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX); - Node* const reject = + Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX)); + const TNode<JSFunction> reject = AllocateFunctionWithMapAndContext(map, reject_info, promise_context); return std::make_pair(resolve, reject); } @@ -196,7 +198,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) { BIND(&if_fast_promise_capability); { - Node* promise = + TNode<JSPromise> promise = AllocateAndInitJSPromise(native_context, UndefinedConstant()); Node* resolve = nullptr; @@ -226,14 +228,15 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) { StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset, RootIndex::kUndefinedValue); - Node* executor_context = - CreatePromiseGetCapabilitiesExecutorContext(capability, native_context); - TNode<Object> executor_info = LoadContextElement( - native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN); - TNode<Object> function_map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<JSFunction> executor = CAST(AllocateFunctionWithMapAndContext( - function_map, executor_info, executor_context)); + TNode<Context> executor_context = + CAST(CreatePromiseGetCapabilitiesExecutorContext(capability, + native_context)); + TNode<SharedFunctionInfo> executor_info = CAST(LoadContextElement( + native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN)); + TNode<Map> function_map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + TNode<JSFunction> executor = AllocateFunctionWithMapAndContext( + function_map, executor_info, executor_context); TNode<JSReceiver> 
promise = Construct(native_context, CAST(constructor), executor); @@ -258,14 +261,14 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) { ThrowTypeError(context, MessageTemplate::kPromiseNonCallable); } -Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context, - int slots) { +TNode<Context> PromiseBuiltinsAssembler::CreatePromiseContext( + TNode<NativeContext> native_context, int slots) { DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS); TNode<HeapObject> const context = AllocateInNewSpace(FixedArray::SizeFor(slots)); InitializeFunctionContext(native_context, context, slots); - return context; + return CAST(context); } Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext( @@ -278,8 +281,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext( TNode<JSArray> values_array = AllocateJSArray( PACKED_ELEMENTS, array_map, IntPtrConstant(0), SmiConstant(0)); - Node* const context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseAllResolveElementLength); + TNode<Context> const context = CreatePromiseContext( + CAST(native_context), PromiseBuiltins::kPromiseAllResolveElementLength); StoreContextElementNoWriteBarrier( context, PromiseBuiltins::kPromiseAllResolveElementRemainingSlot, SmiConstant(1)); @@ -301,12 +304,12 @@ PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction( index, SmiConstant(PropertyArray::HashField::kMax))); CSA_ASSERT(this, IsNativeContext(native_context)); - TNode<Object> const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<Object> const resolve_info = - LoadContextElement(native_context, slot_index); + const TNode<Map> map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode<SharedFunctionInfo> resolve_info = + CAST(LoadContextElement(native_context, slot_index)); TNode<JSFunction> resolve = - Cast(AllocateFunctionWithMapAndContext(map, resolve_info, context)); + AllocateFunctionWithMapAndContext(map, resolve_info, CAST(context)); STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0); StoreObjectFieldNoWriteBarrier(resolve, JSFunction::kPropertiesOrHashOffset, @@ -315,9 +318,10 @@ PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction( return resolve; } -Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext( - Node* promise, Node* debug_event, Node* native_context) { - Node* const context = CreatePromiseContext( +TNode<Context> PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext( + TNode<JSPromise> promise, TNode<Object> debug_event, + TNode<NativeContext> native_context) { + const TNode<Context> context = CreatePromiseContext( native_context, PromiseBuiltins::kPromiseContextLength); StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kPromiseSlot, promise); @@ -331,7 +335,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext( Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext( Node* promise_capability, Node* native_context) { int kContextLength = PromiseBuiltins::kCapabilitiesContextLength; - Node* context = CreatePromiseContext(native_context, kContextLength); + TNode<Context> context = + CreatePromiseContext(CAST(native_context), kContextLength); StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kCapabilitySlot, promise_capability); return context; @@ -386,14 +391,12 @@ void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) { // ES 
#sec-performpromisethen void PromiseBuiltinsAssembler::PerformPromiseThen( - Node* context, Node* promise, Node* on_fulfilled, Node* on_rejected, - Node* result_promise_or_capability) { - CSA_ASSERT(this, TaggedIsNotSmi(promise)); - CSA_ASSERT(this, IsJSPromise(promise)); + TNode<Context> context, TNode<JSPromise> promise, + TNode<HeapObject> on_fulfilled, TNode<HeapObject> on_rejected, + TNode<HeapObject> result_promise_or_capability) { CSA_ASSERT(this, Word32Or(IsCallable(on_fulfilled), IsUndefined(on_fulfilled))); CSA_ASSERT(this, Word32Or(IsCallable(on_rejected), IsUndefined(on_rejected))); - CSA_ASSERT(this, TaggedIsNotSmi(result_promise_or_capability)); CSA_ASSERT( this, Word32Or(Word32Or(IsJSPromise(result_promise_or_capability), @@ -411,9 +414,9 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( // PromiseReaction holding both the onFulfilled and onRejected callbacks. // Once the {promise} is resolved we decide on the concrete handler to // push onto the microtask queue. - TNode<Object> const promise_reactions = + const TNode<Object> promise_reactions = LoadObjectField(promise, JSPromise::kReactionsOrResultOffset); - Node* const reaction = + const TNode<PromiseReaction> reaction = AllocatePromiseReaction(promise_reactions, result_promise_or_capability, on_fulfilled, on_rejected); StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction); @@ -422,10 +425,9 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( BIND(&if_notpending); { - VARIABLE(var_map, MachineRepresentation::kTagged); - VARIABLE(var_handler, MachineRepresentation::kTagged); - VARIABLE(var_handler_context, MachineRepresentation::kTagged, - UndefinedConstant()); + TVARIABLE(Map, var_map); + TVARIABLE(HeapObject, var_handler); + TVARIABLE(Object, var_handler_context, UndefinedConstant()); Label if_fulfilled(this), if_rejected(this, Label::kDeferred), enqueue(this); Branch(IsPromiseStatus(status, v8::Promise::kFulfilled), &if_fulfilled, @@ -433,15 +435,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( BIND(&if_fulfilled); { - var_map.Bind(PromiseFulfillReactionJobTaskMapConstant()); - var_handler.Bind(on_fulfilled); + var_map = PromiseFulfillReactionJobTaskMapConstant(); + var_handler = on_fulfilled; Label use_fallback(this, Label::kDeferred), done(this); ExtractHandlerContext(on_fulfilled, &var_handler_context); Branch(IsUndefined(var_handler_context.value()), &use_fallback, &done); BIND(&use_fallback); - var_handler_context.Bind(context); + var_handler_context = context; ExtractHandlerContext(on_rejected, &var_handler_context); Goto(&done); @@ -452,15 +454,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( BIND(&if_rejected); { CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected)); - var_map.Bind(PromiseRejectReactionJobTaskMapConstant()); - var_handler.Bind(on_rejected); + var_map = PromiseRejectReactionJobTaskMapConstant(); + var_handler = on_rejected; Label use_fallback(this, Label::kDeferred), done(this); ExtractHandlerContext(on_rejected, &var_handler_context); Branch(IsUndefined(var_handler_context.value()), &use_fallback, &done); BIND(&use_fallback); - var_handler_context.Bind(context); + var_handler_context = context; ExtractHandlerContext(on_fulfilled, &var_handler_context); Goto(&done); BIND(&done); @@ -474,8 +476,8 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( { TNode<Object> argument = LoadObjectField(promise, JSPromise::kReactionsOrResultOffset); - Node* microtask = AllocatePromiseReactionJobTask( - var_map.value(), var_handler_context.value(), argument, 
+ TNode<PromiseReactionJobTask> microtask = AllocatePromiseReactionJobTask( + var_map.value(), CAST(var_handler_context.value()), argument, var_handler.value(), result_promise_or_capability); CallBuiltin(Builtins::kEnqueueMicrotask, var_handler_context.value(), microtask); @@ -489,13 +491,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( // ES #sec-performpromisethen TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const promise = Parameter(Descriptor::kPromise); - Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled); - Node* const on_rejected = Parameter(Descriptor::kOnRejected); - Node* const result_promise = Parameter(Descriptor::kResultPromise); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + const TNode<JSPromise> promise = CAST(Parameter(Descriptor::kPromise)); + const TNode<HeapObject> on_fulfilled = + CAST(Parameter(Descriptor::kOnFulfilled)); + const TNode<HeapObject> on_rejected = + CAST(Parameter(Descriptor::kOnRejected)); + const TNode<HeapObject> result_promise = + CAST(Parameter(Descriptor::kResultPromise)); - CSA_ASSERT(this, TaggedIsNotSmi(result_promise)); CSA_ASSERT( this, Word32Or(IsJSPromise(result_promise), IsUndefined(result_promise))); @@ -504,9 +508,9 @@ TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) { Return(result_promise); } -Node* PromiseBuiltinsAssembler::AllocatePromiseReaction( - Node* next, Node* promise_or_capability, Node* fulfill_handler, - Node* reject_handler) { +TNode<PromiseReaction> PromiseBuiltinsAssembler::AllocatePromiseReaction( + TNode<Object> next, TNode<HeapObject> promise_or_capability, + TNode<HeapObject> fulfill_handler, TNode<HeapObject> reject_handler) { TNode<HeapObject> const reaction = Allocate(PromiseReaction::kSize); StoreMapNoWriteBarrier(reaction, RootIndex::kPromiseReactionMap); StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next); @@ -517,12 +521,13 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReaction( reaction, PromiseReaction::kFulfillHandlerOffset, fulfill_handler); StoreObjectFieldNoWriteBarrier( reaction, PromiseReaction::kRejectHandlerOffset, reject_handler); - return reaction; + return CAST(reaction); } -Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask( - Node* map, Node* context, Node* argument, Node* handler, - Node* promise_or_capability) { +TNode<PromiseReactionJobTask> +PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask( + TNode<Map> map, TNode<Context> context, TNode<Object> argument, + TNode<HeapObject> handler, TNode<HeapObject> promise_or_capability) { TNode<HeapObject> const microtask = Allocate(PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks); StoreMapNoWriteBarrier(microtask, map); @@ -535,12 +540,14 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask( StoreObjectFieldNoWriteBarrier( microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset, promise_or_capability); - return microtask; + return CAST(microtask); } -Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask( - Node* promise_to_resolve, Node* then, Node* thenable, Node* context) { - TNode<HeapObject> const microtask = +TNode<PromiseResolveThenableJobTask> +PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask( + TNode<JSPromise> promise_to_resolve, TNode<JSReceiver> then, + TNode<JSReceiver> thenable, TNode<Context> context) { + const TNode<HeapObject> microtask = Allocate(PromiseResolveThenableJobTask::kSize); 
StoreMapNoWriteBarrier(microtask, RootIndex::kPromiseResolveThenableJobTaskMap); @@ -553,7 +560,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask( microtask, PromiseResolveThenableJobTask::kThenOffset, then); StoreObjectFieldNoWriteBarrier( microtask, PromiseResolveThenableJobTask::kThenableOffset, thenable); - return microtask; + return CAST(microtask); } // ES #sec-triggerpromisereactions @@ -1003,7 +1010,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) { BIND(&if_targetisnotmodified); { - Node* const instance = AllocateAndInitJSPromise(context); + TNode<JSPromise> const instance = AllocateAndInitJSPromise(context); var_result.Bind(instance); Goto(&debug_push); } @@ -1035,7 +1042,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) { Node *resolve, *reject; std::tie(resolve, reject) = CreatePromiseResolvingFunctions( - var_result.value(), TrueConstant(), native_context); + CAST(var_result.value()), TrueConstant(), native_context); Node* const maybe_exception = CallJS( CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined), @@ -1080,8 +1087,8 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) { // V8 Extras: v8.createPromise(parent) TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) { - Node* const parent = Parameter(Descriptor::kParent); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> parent = CAST(Parameter(Descriptor::kParent)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Return(AllocateAndInitJSPromise(context, parent)); } @@ -1127,14 +1134,15 @@ TF_BUILTIN(PromiseInternalResolve, PromiseBuiltinsAssembler) { // Promise.prototype.then ( onFulfilled, onRejected ) TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { // 1. Let promise be the this value. - Node* const promise = Parameter(Descriptor::kReceiver); - Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled); - Node* const on_rejected = Parameter(Descriptor::kOnRejected); - Node* const context = Parameter(Descriptor::kContext); + const TNode<Object> maybe_promise = CAST(Parameter(Descriptor::kReceiver)); + const TNode<Object> on_fulfilled = CAST(Parameter(Descriptor::kOnFulfilled)); + const TNode<Object> on_rejected = CAST(Parameter(Descriptor::kOnRejected)); + const TNode<Context> context = CAST(Parameter(Descriptor::kContext)); // 2. If IsPromise(promise) is false, throw a TypeError exception. - ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE, + ThrowIfNotInstanceType(context, maybe_promise, JS_PROMISE_TYPE, "Promise.prototype.then"); + TNode<JSPromise> js_promise = CAST(maybe_promise); // 3. Let C be ? SpeciesConstructor(promise, %Promise%). 
Label fast_promise_capability(this), slow_constructor(this, Label::kDeferred), @@ -1142,26 +1150,27 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { TNode<NativeContext> const native_context = LoadNativeContext(context); TNode<JSFunction> promise_fun = CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX)); - TNode<Map> const promise_map = LoadMap(promise); + TNode<Map> const promise_map = LoadMap(js_promise); BranchIfPromiseSpeciesLookupChainIntact( native_context, promise_map, &fast_promise_capability, &slow_constructor); BIND(&slow_constructor); TNode<JSReceiver> constructor = - SpeciesConstructor(native_context, promise, promise_fun); + SpeciesConstructor(native_context, js_promise, promise_fun); Branch(TaggedEqual(constructor, promise_fun), &fast_promise_capability, &slow_promise_capability); // 4. Let resultCapability be ? NewPromiseCapability(C). Label perform_promise_then(this); - VARIABLE(var_result_promise, MachineRepresentation::kTagged); - VARIABLE(var_result_promise_or_capability, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result_promise); + TVARIABLE(HeapObject, var_result_promise_or_capability); BIND(&fast_promise_capability); { - Node* const result_promise = AllocateAndInitJSPromise(context, promise); - var_result_promise_or_capability.Bind(result_promise); - var_result_promise.Bind(result_promise); + const TNode<JSPromise> result_promise = + AllocateAndInitJSPromise(context, js_promise); + var_result_promise_or_capability = result_promise; + var_result_promise = result_promise; Goto(&perform_promise_then); } @@ -1170,9 +1179,9 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { TNode<Oddball> const debug_event = TrueConstant(); TNode<PromiseCapability> const capability = CAST(CallBuiltin( Builtins::kNewPromiseCapability, context, constructor, debug_event)); - var_result_promise.Bind( - LoadObjectField(capability, PromiseCapability::kPromiseOffset)); - var_result_promise_or_capability.Bind(capability); + var_result_promise = + LoadObjectField(capability, PromiseCapability::kPromiseOffset); + var_result_promise_or_capability = capability; Goto(&perform_promise_then); } @@ -1187,30 +1196,30 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { // 3. If IsCallable(onFulfilled) is false, then // a. Set onFulfilled to undefined. - VARIABLE(var_on_fulfilled, MachineRepresentation::kTagged, on_fulfilled); + TVARIABLE(Object, var_on_fulfilled, on_fulfilled); Label if_fulfilled_done(this), if_fulfilled_notcallable(this); GotoIf(TaggedIsSmi(on_fulfilled), &if_fulfilled_notcallable); - Branch(IsCallable(on_fulfilled), &if_fulfilled_done, + Branch(IsCallable(CAST(on_fulfilled)), &if_fulfilled_done, &if_fulfilled_notcallable); BIND(&if_fulfilled_notcallable); - var_on_fulfilled.Bind(UndefinedConstant()); + var_on_fulfilled = UndefinedConstant(); Goto(&if_fulfilled_done); BIND(&if_fulfilled_done); // 4. If IsCallable(onRejected) is false, then // a. Set onRejected to undefined. 
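
Aside: the PromisePrototypeThen hunk above keeps the existing flow while typing it: non-callable onFulfilled/onRejected handlers are normalized to undefined, and PerformPromiseThen (converted earlier in this file) either records a PromiseReaction on a still-pending promise or immediately enqueues the matching reaction job when the promise has already settled. A toy standalone model of that branching, with invented types and std::function handlers standing in for JSFunctions:

// sketch_perform_promise_then.cc -- a toy model of the control flow in
// PromisePrototypeThen / PerformPromiseThen from the hunks above: pending
// promises accumulate reactions, settled promises enqueue a reaction job.
#include <functional>
#include <iostream>
#include <queue>
#include <vector>

enum class Status { kPending, kFulfilled, kRejected };
using Handler = std::function<void(int)>;

struct Reaction { Handler on_fulfilled, on_rejected; };

struct ToyPromise {
  Status status = Status::kPending;
  int result = 0;
  std::vector<Reaction> reactions;  // cf. JSPromise::kReactionsOrResultOffset
};

std::queue<std::function<void()>> microtask_queue;

void PerformPromiseThen(ToyPromise& p, Handler on_fulfilled,
                        Handler on_rejected) {
  if (p.status == Status::kPending) {
    // Pending: remember both handlers; decide which one runs when settled.
    p.reactions.push_back({std::move(on_fulfilled), std::move(on_rejected)});
  } else {
    // Already settled: enqueue the matching reaction job right away.
    Handler h = (p.status == Status::kFulfilled) ? on_fulfilled : on_rejected;
    int arg = p.result;
    microtask_queue.push([h, arg] { h(arg); });
  }
}

int main() {
  ToyPromise settled;
  settled.status = Status::kFulfilled;
  settled.result = 7;
  PerformPromiseThen(
      settled, [](int v) { std::cout << "fulfilled with " << v << "\n"; },
      [](int v) { std::cout << "rejected with " << v << "\n"; });
  while (!microtask_queue.empty()) {  // drain, like the microtask queue consumer
    microtask_queue.front()();
    microtask_queue.pop();
  }
  return 0;
}
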
- VARIABLE(var_on_rejected, MachineRepresentation::kTagged, on_rejected); + TVARIABLE(Object, var_on_rejected, on_rejected); Label if_rejected_done(this), if_rejected_notcallable(this); GotoIf(TaggedIsSmi(on_rejected), &if_rejected_notcallable); - Branch(IsCallable(on_rejected), &if_rejected_done, + Branch(IsCallable(CAST(on_rejected)), &if_rejected_done, &if_rejected_notcallable); BIND(&if_rejected_notcallable); - var_on_rejected.Bind(UndefinedConstant()); + var_on_rejected = UndefinedConstant(); Goto(&if_rejected_done); BIND(&if_rejected_done); - PerformPromiseThen(context, promise, var_on_fulfilled.value(), - var_on_rejected.value(), + PerformPromiseThen(context, js_promise, CAST(var_on_fulfilled.value()), + CAST(var_on_rejected.value()), var_result_promise_or_capability.value()); Return(var_result_promise.value()); } @@ -1522,7 +1531,7 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) { // create NewPromiseCapability. BIND(&if_nativepromise); { - Node* const result = AllocateAndInitJSPromise(context); + TNode<JSPromise> const result = AllocateAndInitJSPromise(context); CallBuiltin(Builtins::kResolvePromise, context, result, value); Return(result); } @@ -1592,7 +1601,7 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) { BIND(&if_nativepromise); { - Node* const promise = + TNode<JSPromise> const promise = AllocateAndSetJSPromise(context, v8::Promise::kRejected, reason); CallRuntime(Runtime::kPromiseRejectEventFromStack, context, promise, reason); @@ -1621,21 +1630,21 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) { std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions( Node* on_finally, Node* constructor, Node* native_context) { - Node* const promise_context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseFinallyContextLength); + const TNode<Context> promise_context = CreatePromiseContext( + CAST(native_context), PromiseBuiltins::kPromiseFinallyContextLength); StoreContextElementNoWriteBarrier( promise_context, PromiseBuiltins::kOnFinallySlot, on_finally); StoreContextElementNoWriteBarrier( promise_context, PromiseBuiltins::kConstructorSlot, constructor); - TNode<Object> const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<Object> const then_finally_info = LoadContextElement( - native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN); - Node* const then_finally = AllocateFunctionWithMapAndContext( + const TNode<Map> map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode<SharedFunctionInfo> then_finally_info = CAST(LoadContextElement( + native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN)); + TNode<JSFunction> const then_finally = AllocateFunctionWithMapAndContext( map, then_finally_info, promise_context); - TNode<Object> const catch_finally_info = LoadContextElement( - native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN); - Node* const catch_finally = AllocateFunctionWithMapAndContext( + const TNode<SharedFunctionInfo> catch_finally_info = CAST(LoadContextElement( + native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN)); + TNode<JSFunction> const catch_finally = AllocateFunctionWithMapAndContext( map, catch_finally_info, promise_context); return std::make_pair(then_finally, catch_finally); } @@ -1650,15 +1659,16 @@ TF_BUILTIN(PromiseValueThunkFinally, PromiseBuiltinsAssembler) { Node* PromiseBuiltinsAssembler::CreateValueThunkFunction(Node* value, Node* native_context) { - Node* const 
value_thunk_context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); + const TNode<Context> value_thunk_context = CreatePromiseContext( + CAST(native_context), + PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); StoreContextElementNoWriteBarrier(value_thunk_context, PromiseBuiltins::kValueSlot, value); - TNode<Object> const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<Object> const value_thunk_info = LoadContextElement( - native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN); - Node* const value_thunk = AllocateFunctionWithMapAndContext( + const TNode<Map> map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode<SharedFunctionInfo> value_thunk_info = CAST(LoadContextElement( + native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN)); + TNode<JSFunction> const value_thunk = AllocateFunctionWithMapAndContext( map, value_thunk_info, value_thunk_context); return value_thunk; } @@ -1711,15 +1721,16 @@ TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) { Node* PromiseBuiltinsAssembler::CreateThrowerFunction(Node* reason, Node* native_context) { - Node* const thrower_context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); + const TNode<Context> thrower_context = CreatePromiseContext( + CAST(native_context), + PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); StoreContextElementNoWriteBarrier(thrower_context, PromiseBuiltins::kValueSlot, reason); - TNode<Object> const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<Object> const thrower_info = LoadContextElement( - native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN); - Node* const thrower = + const TNode<Map> map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode<SharedFunctionInfo> thrower_info = CAST(LoadContextElement( + native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN)); + TNode<JSFunction> const thrower = AllocateFunctionWithMapAndContext(map, thrower_info, thrower_context); return thrower; } @@ -1919,7 +1930,7 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { Label do_enqueue(this), if_fulfill(this), if_reject(this, Label::kDeferred), if_runtime(this, Label::kDeferred); TVARIABLE(Object, var_reason); - TVARIABLE(Object, var_then); + TVARIABLE(JSReceiver, var_then); // If promise hook is enabled or the debugger is active, let // the runtime handle this operation, which greatly reduces @@ -1955,7 +1966,8 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { BIND(&if_fast); { // The {resolution} is a native Promise in this case. - var_then = LoadContextElement(native_context, Context::PROMISE_THEN_INDEX); + var_then = + CAST(LoadContextElement(native_context, Context::PROMISE_THEN_INDEX)); Goto(&do_enqueue); } @@ -1987,7 +1999,7 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { GotoIf(TaggedIsSmi(then), &if_fulfill); TNode<Map> const then_map = LoadMap(CAST(then)); GotoIfNot(IsCallableMap(then_map), &if_fulfill); - var_then = then; + var_then = CAST(then); Goto(&do_enqueue); } @@ -1995,8 +2007,9 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { { // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob, // «promise, resolution, thenAction»). 
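The finally, value-thunk and thrower hunks above all load context slots through the same recipe: LoadContextElement() still yields a generic TNode<Object>, and the caller CASTs it to the concrete type the slot is known to hold before passing it on. A condensed sketch of that recipe, reusing the slot names from the hunks (the surrounding function and its context arguments are assumed, not shown):

  const TNode<Map> map = CAST(LoadContextElement(
      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
  const TNode<SharedFunctionInfo> info = CAST(LoadContextElement(
      native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN));
  // AllocateFunctionWithMapAndContext() now takes and returns typed nodes.
  const TNode<JSFunction> fn =
      AllocateFunctionWithMapAndContext(map, info, promise_context);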
- Node* const task = AllocatePromiseResolveThenableJobTask( - promise, var_then.value(), resolution, native_context); + const TNode<PromiseResolveThenableJobTask> task = + AllocatePromiseResolveThenableJobTask(promise, var_then.value(), + CAST(resolution), native_context); TailCallBuiltin(Builtins::kEnqueueMicrotask, native_context, task); } @@ -2150,8 +2163,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // Register the PromiseReaction immediately on the {next_value}, not // passing any chained promise since neither async_hooks nor DevTools // are enabled, so there's no use of the resulting promise. - PerformPromiseThen(native_context, next_value, resolve_element_fun, - reject_element_fun, UndefinedConstant()); + PerformPromiseThen(native_context, CAST(next_value), + CAST(resolve_element_fun), CAST(reject_element_fun), + UndefinedConstant()); Goto(&loop); } diff --git a/chromium/v8/src/builtins/builtins-promise-gen.h b/chromium/v8/src/builtins/builtins-promise-gen.h index 633e3321aa1..b2ae8fe8765 100644 --- a/chromium/v8/src/builtins/builtins-promise-gen.h +++ b/chromium/v8/src/builtins/builtins-promise-gen.h @@ -22,29 +22,34 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { // // This uses undefined as the parent promise for the promise init // hook. - Node* AllocateAndInitJSPromise(Node* context); + TNode<JSPromise> AllocateAndInitJSPromise(TNode<Context> context); // This uses the given parent as the parent promise for the promise // init hook. - Node* AllocateAndInitJSPromise(Node* context, Node* parent); + TNode<JSPromise> AllocateAndInitJSPromise(TNode<Context> context, + TNode<Object> parent); // This allocates and initializes a promise with the given state and // fields. - Node* AllocateAndSetJSPromise(Node* context, v8::Promise::PromiseState status, - Node* result); + TNode<JSPromise> AllocateAndSetJSPromise(TNode<Context> context, + v8::Promise::PromiseState status, + TNode<Object> result); - Node* AllocatePromiseReaction(Node* next, Node* promise_or_capability, - Node* fulfill_handler, Node* reject_handler); + TNode<PromiseReaction> AllocatePromiseReaction( + TNode<Object> next, TNode<HeapObject> promise_or_capability, + TNode<HeapObject> fulfill_handler, TNode<HeapObject> reject_handler); - Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument, - Node* handler, - Node* promise_or_capability); - Node* AllocatePromiseResolveThenableJobTask(Node* promise_to_resolve, - Node* then, Node* thenable, - Node* context); + TNode<PromiseReactionJobTask> AllocatePromiseReactionJobTask( + TNode<Map> map, TNode<Context> context, TNode<Object> argument, + TNode<HeapObject> handler, TNode<HeapObject> promise_or_capability); - std::pair<Node*, Node*> CreatePromiseResolvingFunctions(Node* promise, - Node* debug_event, - Node* native_context); + TNode<PromiseResolveThenableJobTask> AllocatePromiseResolveThenableJobTask( + TNode<JSPromise> promise_to_resolve, TNode<JSReceiver> then, + TNode<JSReceiver> thenable, TNode<Context> context); + + std::pair<TNode<JSFunction>, TNode<JSFunction>> + CreatePromiseResolvingFunctions(TNode<JSPromise> promise, + TNode<Object> debug_event, + TNode<NativeContext> native_context); Node* PromiseHasHandler(Node* promise); @@ -62,8 +67,9 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { Node* native_context, int slot_index); - Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event, - Node* native_context); + TNode<Context> 
CreatePromiseResolvingFunctionsContext( + TNode<JSPromise> promise, TNode<Object> debug_event, + TNode<NativeContext> native_context); Node* CreatePromiseGetCapabilitiesExecutorContext(Node* promise_capability, Node* native_context); @@ -74,11 +80,13 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { void PromiseSetHasHandler(Node* promise); void PromiseSetHandledHint(Node* promise); - void PerformPromiseThen(Node* context, Node* promise, Node* on_fulfilled, - Node* on_rejected, - Node* result_promise_or_capability); + void PerformPromiseThen(TNode<Context> context, TNode<JSPromise> promise, + TNode<HeapObject> on_fulfilled, + TNode<HeapObject> on_rejected, + TNode<HeapObject> result_promise_or_capability); - Node* CreatePromiseContext(Node* native_context, int slots); + TNode<Context> CreatePromiseContext(TNode<NativeContext> native_context, + int slots); Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result, PromiseReaction::Type type); @@ -161,7 +169,7 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { v8::Promise::PromiseState expected); void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status); - Node* AllocateJSPromise(Node* context); + TNode<JSPromise> AllocateJSPromise(TNode<Context> context); void ExtractHandlerContext(Node* handler, Variable* var_context); void Generate_PromiseAll( diff --git a/chromium/v8/src/builtins/builtins-proxy-gen.cc b/chromium/v8/src/builtins/builtins-proxy-gen.cc index bb1137735cd..71d4e8226f5 100644 --- a/chromium/v8/src/builtins/builtins-proxy-gen.cc +++ b/chromium/v8/src/builtins/builtins-proxy-gen.cc @@ -14,7 +14,7 @@ namespace v8 { namespace internal { -compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy( +TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy( TNode<Context> context, TNode<JSReceiver> target, TNode<JSReceiver> handler) { VARIABLE(map, MachineRepresentation::kTagged); @@ -59,7 +59,8 @@ compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy( } Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( - Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode) { + Node* context, const CodeStubArguments& args, Node* argc, + ParameterMode mode) { Comment("AllocateJSArrayForCodeStubArguments"); Label if_empty_array(this), allocate_js_array(this); @@ -80,7 +81,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)), &if_large_object); - args.ForEach(list, [=, &offset](Node* arg) { + args.ForEach(list, [&](TNode<Object> arg) { StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements, offset.value(), arg); Increment(&offset, kTaggedSize); @@ -89,7 +90,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( BIND(&if_large_object); { - args.ForEach(list, [=, &offset](Node* arg) { + args.ForEach(list, [&](TNode<Object> arg) { Store(allocated_elements, offset.value(), arg); Increment(&offset, kTaggedSize); }); @@ -124,20 +125,19 @@ Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( return context; } -compiler::TNode<JSFunction> -ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode<Context> context, - TNode<JSProxy> proxy) { +TNode<JSFunction> ProxiesCodeStubAssembler::AllocateProxyRevokeFunction( + TNode<Context> context, TNode<JSProxy> proxy) { TNode<NativeContext> const native_context = LoadNativeContext(context); - Node* const proxy_context = - 
CreateProxyRevokeFunctionContext(proxy, native_context); - TNode<Object> const revoke_map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode<Object> const revoke_info = - LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN); + const TNode<Context> proxy_context = + CAST(CreateProxyRevokeFunctionContext(proxy, native_context)); + const TNode<Map> revoke_map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode<SharedFunctionInfo> revoke_info = CAST( + LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN)); - return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info, - proxy_context)); + return AllocateFunctionWithMapAndContext(revoke_map, revoke_info, + proxy_context); } TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) { diff --git a/chromium/v8/src/builtins/builtins-proxy-gen.h b/chromium/v8/src/builtins/builtins-proxy-gen.h index cb51faf5755..03b3749bf5d 100644 --- a/chromium/v8/src/builtins/builtins-proxy-gen.h +++ b/chromium/v8/src/builtins/builtins-proxy-gen.h @@ -39,10 +39,9 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { kProxyContextLength, }; - Node* AllocateJSArrayForCodeStubArguments( - Node* context, - CodeStubArguments& args, // NOLINT(runtime/references) - Node* argc, ParameterMode mode); + Node* AllocateJSArrayForCodeStubArguments(Node* context, + const CodeStubArguments& args, + Node* argc, ParameterMode mode); private: Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context); diff --git a/chromium/v8/src/builtins/builtins-reflect-gen.cc b/chromium/v8/src/builtins/builtins-reflect-gen.cc index 744a443ecc2..6cffd6ed55b 100644 --- a/chromium/v8/src/builtins/builtins-reflect-gen.cc +++ b/chromium/v8/src/builtins/builtins-reflect-gen.cc @@ -11,12 +11,12 @@ namespace internal { // ES section #sec-reflect.has TF_BUILTIN(ReflectHas, CodeStubAssembler) { - Node* target = Parameter(Descriptor::kTarget); - Node* key = Parameter(Descriptor::kKey); + TNode<Object> target = CAST(Parameter(Descriptor::kTarget)); + TNode<Object> key = CAST(Parameter(Descriptor::kKey)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - ThrowIfNotJSReceiver(context, CAST(target), - MessageTemplate::kCalledOnNonObject, "Reflect.has"); + ThrowIfNotJSReceiver(context, target, MessageTemplate::kCalledOnNonObject, + "Reflect.has"); Return(CallBuiltin(Builtins::kHasProperty, context, target, key)); } diff --git a/chromium/v8/src/builtins/builtins-regexp-gen.cc b/chromium/v8/src/builtins/builtins-regexp-gen.cc index f879d70c676..4bc0b6ad74f 100644 --- a/chromium/v8/src/builtins/builtins-regexp-gen.cc +++ b/chromium/v8/src/builtins/builtins-regexp-gen.cc @@ -23,8 +23,6 @@ namespace v8 { namespace internal { using compiler::Node; -template <class T> -using TNode = compiler::TNode<T>; // Tail calls the regular expression interpreter. 
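The ReflectHas hunk above shows the idiom these builtins now use for typing their inputs: Parameter() still produces an untyped node, and CAST() converts it to a typed TNode on entry. A hedged sketch of that shape, assuming a builtin whose Descriptor declares kTarget and kContext; the builtin name is hypothetical and not part of the patch:

TF_BUILTIN(ExampleHas, CodeStubAssembler) {
  // CAST() converts the untyped Node* parameter to a typed TNode.
  TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  Return(target);
}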
// static @@ -80,7 +78,8 @@ TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<Code> code) { TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult( TNode<Context> context, TNode<Smi> length, TNode<Smi> index, - TNode<String> input, TNode<FixedArray>* elements_out) { + TNode<String> input, TNode<RegExpMatchInfo> match_info, + TNode<FixedArray>* elements_out) { CSA_ASSERT(this, SmiLessThanOrEqual( length, SmiConstant(JSArray::kMaxFastArrayLength))); CSA_ASSERT(this, SmiGreaterThan(length, SmiConstant(0))); @@ -90,9 +89,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult( const ElementsKind elements_kind = PACKED_ELEMENTS; TNode<Map> map = CAST(LoadContextElement(LoadNativeContext(context), Context::REGEXP_RESULT_MAP_INDEX)); - Node* no_allocation_site = nullptr; + TNode<AllocationSite> no_allocation_site = {}; TNode<IntPtrT> length_intptr = SmiUntag(length); - TNode<IntPtrT> capacity = length_intptr; // Note: The returned `elements` may be in young large object space, but // `array` is guaranteed to be in new space so we could skip write barriers @@ -100,18 +98,29 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult( TNode<JSArray> array; TNode<FixedArrayBase> elements; std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - elements_kind, map, length, no_allocation_site, capacity, + elements_kind, map, length, no_allocation_site, length_intptr, INTPTR_PARAMETERS, kAllowLargeObjectAllocation, JSRegExpResult::kSize); // Finish result initialization. TNode<JSRegExpResult> result = CAST(array); + // Load undefined value once here to avoid multiple LoadRoots. + TNode<Oddball> undefined_value = UncheckedCast<Oddball>( + CodeAssembler::LoadRoot(RootIndex::kUndefinedValue)); + StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index); // TODO(jgruber,tebbi): Could skip barrier but the MemoryOptimizer complains. StoreObjectField(result, JSRegExpResult::kInputOffset, input); StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset, - UndefinedConstant()); + undefined_value); + StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kNamesOffset, + undefined_value); + + // Stash match_info in order to build JSRegExpResultIndices lazily when the + // 'indices' property is accessed. + StoreObjectField(result, JSRegExpResult::kCachedIndicesOrMatchInfoOffset, + match_info); // Finish elements initialization. @@ -213,7 +222,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TNode<FixedArray> result_elements; TNode<JSRegExpResult> result = AllocateRegExpResult( - context, num_results, start, string, &result_elements); + context, num_results, start, string, match_info, &result_elements); UnsafeStoreFixedArrayElement(result_elements, 0, first); @@ -228,8 +237,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2)); TVARIABLE(IntPtrT, var_to_cursor, IntPtrConstant(1)); - Variable* vars[] = {&var_from_cursor, &var_to_cursor}; - Label loop(this, 2, vars); + Label loop(this, {&var_from_cursor, &var_to_cursor}); Goto(&loop); BIND(&loop); @@ -289,6 +297,9 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TNode<IntPtrT> names_length = LoadAndUntagFixedArrayBaseLength(names); CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero())); + // Stash names in case we need them to build the indices array later. 
+ StoreObjectField(result, JSRegExpResult::kNamesOffset, names); + // Allocate a new object to store the named capture properties. // TODO(jgruber): Could be optimized by adding the object map to the heap // root list. @@ -305,9 +316,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TVARIABLE(IntPtrT, var_i, IntPtrZero()); - Variable* vars[] = {&var_i}; - const int vars_count = sizeof(vars) / sizeof(vars[0]); - Label loop(this, vars_count, vars); + Label loop(this, &var_i); Goto(&loop); BIND(&loop); @@ -355,9 +364,10 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( } void RegExpBuiltinsAssembler::GetStringPointers( - Node* const string_data, Node* const offset, Node* const last_index, - Node* const string_length, String::Encoding encoding, - Variable* var_string_start, Variable* var_string_end) { + TNode<RawPtrT> string_data, TNode<IntPtrT> offset, + TNode<IntPtrT> last_index, TNode<IntPtrT> string_length, + String::Encoding encoding, TVariable<RawPtrT>* var_string_start, + TVariable<RawPtrT>* var_string_end) { DCHECK_EQ(var_string_start->rep(), MachineType::PointerRepresentation()); DCHECK_EQ(var_string_end->rep(), MachineType::PointerRepresentation()); @@ -365,13 +375,14 @@ void RegExpBuiltinsAssembler::GetStringPointers( ? UINT8_ELEMENTS : UINT16_ELEMENTS; - TNode<IntPtrT> const from_offset = ElementOffsetFromIndex( - IntPtrAdd(offset, last_index), kind, INTPTR_PARAMETERS); - var_string_start->Bind(IntPtrAdd(string_data, from_offset)); + TNode<IntPtrT> from_offset = + ElementOffsetFromIndex(IntPtrAdd(offset, last_index), kind); + *var_string_start = + ReinterpretCast<RawPtrT>(IntPtrAdd(string_data, from_offset)); - TNode<IntPtrT> const to_offset = ElementOffsetFromIndex( - IntPtrAdd(offset, string_length), kind, INTPTR_PARAMETERS); - var_string_end->Bind(IntPtrAdd(string_data, to_offset)); + TNode<IntPtrT> to_offset = + ElementOffsetFromIndex(IntPtrAdd(offset, string_length), kind); + *var_string_end = ReinterpretCast<RawPtrT>(IntPtrAdd(string_data, to_offset)); } TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal( @@ -507,27 +518,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal( GotoIf(TaggedIsSmi(var_code.value()), &runtime); TNode<Code> code = CAST(var_code.value()); - // Tier-up in runtime if ticks are non-zero and tier-up hasn't happened yet - // and ensure that a RegExp stack is allocated when using compiled Irregexp. + // Ensure that a RegExp stack is allocated when using compiled Irregexp. + // TODO(jgruber): Guarantee an allocated stack and remove this check. { - Label next(this), check_tier_up(this); - GotoIfNot(TaggedIsSmi(var_bytecode.value()), &check_tier_up); + Label next(this); + GotoIfNot(TaggedIsSmi(var_bytecode.value()), &next); CSA_ASSERT(this, SmiEqual(CAST(var_bytecode.value()), SmiConstant(JSRegExp::kUninitializedValue))); - // Ensure RegExp stack is allocated. TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>( Load(MachineType::IntPtr(), regexp_stack_memory_size_address)); - GotoIf(IntPtrEqual(stack_size, IntPtrZero()), &runtime); - Goto(&next); - - // Check if tier-up is requested. 
- BIND(&check_tier_up); - TNode<Smi> ticks = CAST( - UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpTierUpTicksIndex)); - GotoIf(SmiToInt32(ticks), &runtime); + Branch(IntPtrEqual(stack_size, IntPtrZero()), &runtime, &next); - Goto(&next); BIND(&next); } @@ -656,18 +658,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal( // Fill match and capture offsets in match_info. { - TNode<IntPtrT> limit_offset = ElementOffsetFromIndex( - register_count, INT32_ELEMENTS, SMI_PARAMETERS, 0); + TNode<IntPtrT> limit_offset = + ElementOffsetFromIndex(register_count, INT32_ELEMENTS, 0); TNode<IntPtrT> to_offset = ElementOffsetFromIndex( IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), PACKED_ELEMENTS, - INTPTR_PARAMETERS, RegExpMatchInfo::kHeaderSize - kHeapObjectTag); + RegExpMatchInfo::kHeaderSize - kHeapObjectTag); TVARIABLE(IntPtrT, var_to_offset, to_offset); VariableList vars({&var_to_offset}, zone()); - BuildFastLoop( + BuildFastLoop<IntPtrT>( vars, IntPtrZero(), limit_offset, - [=, &var_to_offset](Node* offset) { + [&](TNode<IntPtrT> offset) { TNode<Int32T> value = UncheckedCast<Int32T>(Load( MachineType::Int32(), static_offsets_vector_address, offset)); TNode<Smi> smi_value = SmiFromInt32(value); @@ -675,7 +677,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal( var_to_offset.value(), smi_value); Increment(&var_to_offset, kTaggedSize); }, - kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kInt32Size, IndexAdvanceMode::kPost); } var_result = match_info; @@ -733,7 +735,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult( TNode<Context> context, TNode<JSReceiver> maybe_regexp, TNode<String> string, Label* if_didnotmatch, const bool is_fastpath) { if (!is_fastpath) { - ThrowIfNotInstanceType(context, maybe_regexp, JS_REGEXP_TYPE, + ThrowIfNotInstanceType(context, maybe_regexp, JS_REG_EXP_TYPE, "RegExp.prototype.exec"); } @@ -894,14 +896,13 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsReceiverInitialRegExpPrototype( return TaggedEqual(receiver, initial_prototype); } -Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( - SloppyTNode<Context> context, SloppyTNode<Object> object, - SloppyTNode<Map> map) { +TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( + TNode<Context> context, TNode<Object> object, TNode<Map> map) { Label out(this); - VARIABLE(var_result, MachineRepresentation::kWord32); + TVARIABLE(BoolT, var_result); #ifdef V8_ENABLE_FORCE_SLOW_PATH - var_result.Bind(Int32Constant(0)); + var_result = Int32FalseConstant(); GotoIfForceSlowPath(&out); #endif @@ -912,13 +913,13 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset); TNode<BoolT> const has_initialmap = TaggedEqual(map, initial_map); - var_result.Bind(has_initialmap); + var_result = has_initialmap; GotoIfNot(has_initialmap, &out); // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. 
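The offset-filling loop above illustrates the new typed BuildFastLoop: the loop is instantiated for an index type, the body lambda receives a TNode<IntPtrT>, and the separate ParameterMode argument is gone. A reduced sketch of that shape, with names taken from the hunk and the body collapsed to a placeholder:

  TVARIABLE(IntPtrT, var_to_offset, to_offset);
  VariableList vars({&var_to_offset}, zone());
  BuildFastLoop<IntPtrT>(
      vars, IntPtrZero(), limit_offset,
      [&](TNode<IntPtrT> offset) {
        // ... body reads at `offset` and advances var_to_offset ...
        Increment(&var_to_offset, kTaggedSize);
      },
      kInt32Size, IndexAdvanceMode::kPost);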
TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object)); - var_result.Bind(TaggedIsPositiveSmi(last_index)); + var_result = TaggedIsPositiveSmi(last_index); Goto(&out); BIND(&out); @@ -939,7 +940,7 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec( GotoIfForceSlowPath(&out); #endif - TNode<BoolT> is_regexp = HasInstanceType(object, JS_REGEXP_TYPE); + TNode<BoolT> is_regexp = HasInstanceType(object, JS_REG_EXP_TYPE); var_result = is_regexp; GotoIfNot(is_regexp, &out); @@ -970,8 +971,8 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec( return var_result.value(); } -Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( - SloppyTNode<Context> context, SloppyTNode<Object> object) { +TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( + TNode<Context> context, TNode<Object> object) { CSA_ASSERT(this, TaggedIsNotSmi(object)); return IsFastRegExpNoPrototype(context, object, LoadMap(CAST(object))); } @@ -1046,10 +1047,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp_Permissive( if_isunmodified, if_ismodified); } -void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context, - Node* const object, - Label* if_isunmodified, - Label* if_ismodified) { +void RegExpBuiltinsAssembler::BranchIfFastRegExpResult( + const TNode<Context> context, const TNode<Object> object, + Label* if_isunmodified, Label* if_ismodified) { // Could be a Smi. TNode<Map> const map = LoadReceiverMap(object); @@ -1061,15 +1061,6 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context, if_ismodified); } -// Slow path stub for RegExpPrototypeExec to decrease code size. -TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) { - TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kReceiver)); - TNode<String> string = CAST(Parameter(Descriptor::kString)); - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - - Return(RegExpPrototypeExecBody(context, regexp, string, false)); -} - // Fast path stub for ATOM regexps. String matching is done by StringIndexOf, // and {match_info} is updated on success. // The slow path is implemented in RegExp::AtomExec. @@ -1149,33 +1140,6 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) { Return(RegExpExecInternal(context, regexp, string, last_index, match_info)); } -// ES#sec-regexp.prototype.exec -// RegExp.prototype.exec ( string ) -TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) { - TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString)); - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - - // Ensure {maybe_receiver} is a JSRegExp. - ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE, - "RegExp.prototype.exec"); - TNode<JSRegExp> receiver = CAST(maybe_receiver); - - // Convert {maybe_string} to a String. 
- TNode<String> string = ToString_Inline(context, maybe_string); - - Label if_isfastpath(this), if_isslowpath(this); - Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath, - &if_isslowpath); - - BIND(&if_isfastpath); - Return(RegExpPrototypeExecBody(context, receiver, string, true)); - - BIND(&if_isslowpath); - Return(CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context, receiver, - string)); -} - TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context, TNode<Object> regexp, bool is_fastpath) { @@ -1246,8 +1210,8 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context, { TNode<String> const result = AllocateSeqOneByteString(var_length.value()); - VARIABLE(var_offset, MachineType::PointerRepresentation(), - IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + TVARIABLE(IntPtrT, var_offset, + IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); #define CASE_FOR_FLAG(FLAG, CHAR) \ do { \ @@ -1256,7 +1220,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context, TNode<Int32T> const value = Int32Constant(CHAR); \ StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \ var_offset.value(), value); \ - var_offset.Bind(IntPtrAdd(var_offset.value(), int_one)); \ + var_offset = IntPtrAdd(var_offset.value(), int_one); \ Goto(&next); \ BIND(&next); \ } while (false) @@ -1273,64 +1237,11 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context, } } -// ES#sec-isregexp IsRegExp ( argument ) -TNode<BoolT> RegExpBuiltinsAssembler::IsRegExp(TNode<Context> context, - TNode<Object> maybe_receiver) { - Label out(this), if_isregexp(this); - - TVARIABLE(BoolT, var_result, Int32FalseConstant()); - - GotoIf(TaggedIsSmi(maybe_receiver), &out); - GotoIfNot(IsJSReceiver(CAST(maybe_receiver)), &out); - - TNode<JSReceiver> receiver = CAST(maybe_receiver); - - // Check @@match. - { - TNode<Object> value = - GetProperty(context, receiver, isolate()->factory()->match_symbol()); - - Label match_isundefined(this), match_isnotundefined(this); - Branch(IsUndefined(value), &match_isundefined, &match_isnotundefined); - - BIND(&match_isundefined); - Branch(IsJSRegExp(receiver), &if_isregexp, &out); - - BIND(&match_isnotundefined); - Label match_istrueish(this), match_isfalseish(this); - BranchIfToBooleanIsTrue(value, &match_istrueish, &match_isfalseish); - - // The common path. Symbol.match exists, equals the RegExpPrototypeMatch - // function (and is thus trueish), and the receiver is a JSRegExp. 
- BIND(&match_istrueish); - GotoIf(IsJSRegExp(receiver), &if_isregexp); - CallRuntime(Runtime::kIncrementUseCounter, context, - SmiConstant(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp)); - Goto(&if_isregexp); - - BIND(&match_isfalseish); - GotoIfNot(IsJSRegExp(receiver), &out); - CallRuntime(Runtime::kIncrementUseCounter, context, - SmiConstant(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp)); - Goto(&out); - } - - BIND(&if_isregexp); - var_result = Int32TrueConstant(); - Goto(&out); - - BIND(&out); - return var_result.value(); -} - // ES#sec-regexpinitialize // Runtime Semantics: RegExpInitialize ( obj, pattern, flags ) -Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context, - Node* const regexp, - Node* const maybe_pattern, - Node* const maybe_flags) { - CSA_ASSERT(this, IsJSRegExp(regexp)); - +TNode<Object> RegExpBuiltinsAssembler::RegExpInitialize( + const TNode<Context> context, const TNode<JSRegExp> regexp, + const TNode<Object> maybe_pattern, const TNode<Object> maybe_flags) { // Normalize pattern. TNode<Object> const pattern = Select<Object>( IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); }, @@ -1437,7 +1348,7 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) { // Allocate. - VARIABLE(var_regexp, MachineRepresentation::kTagged); + TVARIABLE(JSRegExp, var_regexp); { Label allocate_jsregexp(this), allocate_generic(this, Label::kDeferred), next(this); @@ -1448,25 +1359,23 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) { { TNode<Map> const initial_map = CAST(LoadObjectField( regexp_function, JSFunction::kPrototypeOrInitialMapOffset)); - TNode<JSObject> const regexp = AllocateJSObjectFromMap(initial_map); - var_regexp.Bind(regexp); + var_regexp = CAST(AllocateJSObjectFromMap(initial_map)); Goto(&next); } BIND(&allocate_generic); { ConstructorBuiltinsAssembler constructor_assembler(this->state()); - TNode<JSObject> const regexp = constructor_assembler.EmitFastNewObject( - context, regexp_function, CAST(var_new_target.value())); - var_regexp.Bind(regexp); + var_regexp = CAST(constructor_assembler.EmitFastNewObject( + context, regexp_function, CAST(var_new_target.value()))); Goto(&next); } BIND(&next); } - Node* const result = RegExpInitialize(context, var_regexp.value(), - var_pattern.value(), var_flags.value()); + const TNode<Object> result = RegExpInitialize( + context, var_regexp.value(), var_pattern.value(), var_flags.value()); Return(result); } @@ -1478,12 +1387,12 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) { TNode<Object> maybe_flags = CAST(Parameter(Descriptor::kFlags)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE, + ThrowIfNotInstanceType(context, maybe_receiver, JS_REG_EXP_TYPE, "RegExp.prototype.compile"); - Node* const receiver = maybe_receiver; + const TNode<JSRegExp> receiver = CAST(maybe_receiver); - VARIABLE(var_flags, MachineRepresentation::kTagged, maybe_flags); - VARIABLE(var_pattern, MachineRepresentation::kTagged, maybe_pattern); + TVARIABLE(Object, var_flags, maybe_flags); + TVARIABLE(Object, var_pattern, maybe_pattern); // Handle a JSRegExp pattern. { @@ -1492,8 +1401,6 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) { GotoIf(TaggedIsSmi(maybe_pattern), &next); GotoIfNot(IsJSRegExp(CAST(maybe_pattern)), &next); - Node* const pattern = maybe_pattern; - // {maybe_flags} must be undefined in this case, otherwise throw. 
{ Label next(this); @@ -1504,19 +1411,20 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) { BIND(&next); } - TNode<String> const new_flags = FlagsGetter(context, CAST(pattern), true); + const TNode<JSRegExp> pattern = CAST(maybe_pattern); + TNode<String> const new_flags = FlagsGetter(context, pattern, true); TNode<Object> const new_pattern = LoadObjectField(pattern, JSRegExp::kSourceOffset); - var_flags.Bind(new_flags); - var_pattern.Bind(new_pattern); + var_flags = new_flags; + var_pattern = new_pattern; Goto(&next); BIND(&next); } - Node* const result = RegExpInitialize(context, receiver, var_pattern.value(), - var_flags.value()); + const TNode<Object> result = RegExpInitialize( + context, receiver, var_pattern.value(), var_flags.value()); Return(result); } @@ -1586,54 +1494,6 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context, : SlowFlagGetter(context, regexp, flag); } -// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S ) -TNode<Object> RegExpBuiltinsAssembler::RegExpExec(TNode<Context> context, - Node* regexp, Node* string) { - TVARIABLE(Object, var_result); - Label out(this); - - // Take the slow path of fetching the exec property, calling it, and - // verifying its return value. - - // Get the exec property. - TNode<Object> const exec = - GetProperty(context, regexp, isolate()->factory()->exec_string()); - - // Is {exec} callable? - Label if_iscallable(this), if_isnotcallable(this); - - GotoIf(TaggedIsSmi(exec), &if_isnotcallable); - - TNode<Map> const exec_map = LoadMap(CAST(exec)); - Branch(IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable); - - BIND(&if_iscallable); - { - Callable call_callable = CodeFactory::Call(isolate()); - var_result = CAST(CallJS(call_callable, context, exec, regexp, string)); - - GotoIf(IsNull(var_result.value()), &out); - - ThrowIfNotJSReceiver(context, var_result.value(), - MessageTemplate::kInvalidRegExpExecResult, ""); - - Goto(&out); - } - - BIND(&if_isnotcallable); - { - ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE, - "RegExp.prototype.exec"); - - var_result = CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context, - regexp, string); - Goto(&out); - } - - BIND(&out); - return var_result.value(); -} - TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex( SloppyTNode<String> string, SloppyTNode<Number> index, SloppyTNode<BoolT> is_unicode, bool is_fastpath) { @@ -1717,7 +1577,7 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( { var_result = is_fastpath ? RegExpPrototypeExecBody(context, CAST(regexp), string, true) - : RegExpExec(context, regexp, string); + : RegExpExec(context, CAST(regexp), string); Goto(&done); } @@ -1735,9 +1595,9 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( // Loop preparations. Within the loop, collect results from RegExpExec // and store match strings in the array. - Variable* vars[] = {array.var_array(), array.var_length(), - array.var_capacity()}; - Label loop(this, 3, vars), out(this); + Label loop(this, + {array.var_array(), array.var_length(), array.var_capacity()}), + out(this); // Check if the regexp is an ATOM type. If then, keep the literal string to // search for so that we can avoid calling substring in the loop below. 
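A smaller cleanup that recurs in the match and split hunks here: loop labels that carry live variables now take a braced list of variable pointers instead of a separate counted array. A before/after sketch using the variable set from the hunk above; only one of the two forms would appear in real code:

  // Before: explicit array plus element count.
  Variable* vars[] = {array.var_array(), array.var_length(),
                      array.var_capacity()};
  Label loop(this, 3, vars);

  // After: initializer list passed straight to the Label constructor.
  Label loop(this,
             {array.var_array(), array.var_length(), array.var_capacity()});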
@@ -1758,7 +1618,7 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( BIND(&loop); { - VARIABLE(var_match, MachineRepresentation::kTagged); + TVARIABLE(String, var_match); Label if_didmatch(this), if_didnotmatch(this); if (is_fastpath) { @@ -1776,24 +1636,24 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( match_indices, RegExpMatchInfo::kFirstCaptureIndex); TNode<Object> const match_to = UnsafeLoadFixedArrayElement( match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1); - var_match.Bind(CallBuiltin(Builtins::kSubString, context, string, - match_from, match_to)); + var_match = CAST(CallBuiltin(Builtins::kSubString, context, string, + match_from, match_to)); Goto(&if_didmatch); } BIND(&donotsubstring); - var_match.Bind(var_search_string.value()); + var_match = var_search_string.value(); Goto(&if_didmatch); } else { DCHECK(!is_fastpath); - TNode<Object> const result = RegExpExec(context, regexp, string); + TNode<Object> const result = RegExpExec(context, CAST(regexp), string); Label load_match(this); Branch(IsNull(result), &if_didnotmatch, &load_match); BIND(&load_match); - var_match.Bind( - ToString_Inline(context, GetProperty(context, result, SmiZero()))); + var_match = + ToString_Inline(context, GetProperty(context, result, SmiZero())); Goto(&if_didmatch); } @@ -1807,11 +1667,11 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( BIND(&if_didmatch); { - Node* match = var_match.value(); + TNode<String> match = var_match.value(); // Store the match, growing the fixed array if needed. - array.Push(CAST(match)); + array.Push(match); // Advance last index if the match is the empty string. @@ -1855,128 +1715,11 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( return var_result.value(); } -void RegExpMatchAllAssembler::Generate(TNode<Context> context, - TNode<Context> native_context, - TNode<Object> receiver, - TNode<Object> maybe_string) { - // 1. Let R be the this value. - // 2. If Type(R) is not Object, throw a TypeError exception. - ThrowIfNotJSReceiver(context, receiver, - MessageTemplate::kIncompatibleMethodReceiver, - "RegExp.prototype.@@matchAll"); - - // 3. Let S be ? ToString(O). - TNode<String> string = ToString_Inline(context, maybe_string); - - TVARIABLE(Object, var_matcher); - TVARIABLE(BoolT, var_global); - TVARIABLE(BoolT, var_unicode); - Label create_iterator(this), if_fast_regexp(this), - if_slow_regexp(this, Label::kDeferred); - - // Strict, because following code uses the flags property. - // TODO(jgruber): Handle slow flag accesses on the fast path and make this - // permissive. - BranchIfFastRegExp_Strict(context, CAST(receiver), &if_fast_regexp, - &if_slow_regexp); - - BIND(&if_fast_regexp); - { - TNode<JSRegExp> fast_regexp = CAST(receiver); - TNode<Object> source = - LoadObjectField(fast_regexp, JSRegExp::kSourceOffset); - - // 4. Let C be ? SpeciesConstructor(R, %RegExp%). - // 5. Let flags be ? ToString(? Get(R, "flags")). - // 6. Let matcher be ? Construct(C, « R, flags »). - TNode<String> flags = FlagsGetter(context, fast_regexp, true); - var_matcher = RegExpCreate(context, native_context, source, flags); - CSA_ASSERT(this, - IsFastRegExpPermissive(context, CAST(var_matcher.value()))); - - // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). - // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). - FastStoreLastIndex(CAST(var_matcher.value()), - FastLoadLastIndex(fast_regexp)); - - // 9. If flags contains "g", let global be true. - // 10. Else, let global be false. 
- var_global = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kGlobal); - - // 11. If flags contains "u", let fullUnicode be true. - // 12. Else, let fullUnicode be false. - var_unicode = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kUnicode); - Goto(&create_iterator); - } - - BIND(&if_slow_regexp); - { - // 4. Let C be ? SpeciesConstructor(R, %RegExp%). - TNode<JSFunction> regexp_fun = CAST( - LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX)); - TNode<JSReceiver> species_constructor = - SpeciesConstructor(native_context, receiver, regexp_fun); - - // 5. Let flags be ? ToString(? Get(R, "flags")). - TNode<Object> flags = - GetProperty(context, receiver, isolate()->factory()->flags_string()); - TNode<String> flags_string = ToString_Inline(context, flags); - - // 6. Let matcher be ? Construct(C, « R, flags »). - var_matcher = - Construct(context, species_constructor, receiver, flags_string); - - // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). - TNode<Number> last_index = - ToLength_Inline(context, SlowLoadLastIndex(context, receiver)); - - // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). - SlowStoreLastIndex(context, var_matcher.value(), last_index); - - // 9. If flags contains "g", let global be true. - // 10. Else, let global be false. - TNode<String> global_char_string = StringConstant("g"); - TNode<Smi> global_ix = - CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string, - global_char_string, SmiZero())); - var_global = SmiNotEqual(global_ix, SmiConstant(-1)); - - // 11. If flags contains "u", let fullUnicode be true. - // 12. Else, let fullUnicode be false. - TNode<String> unicode_char_string = StringConstant("u"); - TNode<Smi> unicode_ix = - CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string, - unicode_char_string, SmiZero())); - var_unicode = SmiNotEqual(unicode_ix, SmiConstant(-1)); - Goto(&create_iterator); - } - - BIND(&create_iterator); - { - { - // UseCounter for matchAll with non-g RegExp. - // https://crbug.com/v8/9551 - Label next(this); - GotoIf(var_global.value(), &next); - CallRuntime(Runtime::kIncrementUseCounter, context, - SmiConstant(v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp)); - Goto(&next); - BIND(&next); - } - - // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode). 
- TNode<Object> iterator = - CreateRegExpStringIterator(native_context, var_matcher.value(), string, - var_global.value(), var_unicode.value()); - Return(iterator); - } -} - // ES#sec-createregexpstringiterator // CreateRegExpStringIterator ( R, S, global, fullUnicode ) TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator( - TNode<Context> native_context, TNode<Object> regexp, TNode<String> string, - TNode<BoolT> global, TNode<BoolT> full_unicode) { + TNode<NativeContext> native_context, TNode<Object> regexp, + TNode<String> string, TNode<BoolT> global, TNode<BoolT> full_unicode) { TNode<Map> map = CAST(LoadContextElement( native_context, Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX)); @@ -2016,164 +1759,11 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator( return iterator; } -// https://tc39.github.io/proposal-string-matchall/ -// RegExp.prototype [ @@matchAll ] ( string ) -TF_BUILTIN(RegExpPrototypeMatchAll, RegExpMatchAllAssembler) { - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - TNode<NativeContext> native_context = LoadNativeContext(context); - TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString)); - Generate(context, native_context, receiver, maybe_string); -} - -void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast( - TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string) { - CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp)); - - // Grab the initial value of last index. - TNode<Smi> previous_last_index = FastLoadLastIndex(regexp); - - // Ensure last index is 0. - FastStoreLastIndex(regexp, SmiZero()); - - // Call exec. - Label if_didnotmatch(this); - TNode<RegExpMatchInfo> match_indices = RegExpPrototypeExecBodyWithoutResult( - context, regexp, string, &if_didnotmatch, true); - - // Successful match. - { - // Reset last index. - FastStoreLastIndex(regexp, previous_last_index); - - // Return the index of the match. - TNode<Object> const index = LoadFixedArrayElement( - match_indices, RegExpMatchInfo::kFirstCaptureIndex); - Return(index); - } - - BIND(&if_didnotmatch); - { - // Reset last index and return -1. - FastStoreLastIndex(regexp, previous_last_index); - Return(SmiConstant(-1)); - } -} - -void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow( - TNode<Context> context, Node* const regexp, Node* const string) { - CSA_ASSERT(this, IsJSReceiver(regexp)); - CSA_ASSERT(this, IsString(string)); - - Isolate* const isolate = this->isolate(); - - TNode<Smi> const smi_zero = SmiZero(); - - // Grab the initial value of last index. - TNode<Object> const previous_last_index = - SlowLoadLastIndex(context, CAST(regexp)); - - // Ensure last index is 0. - { - Label next(this), slow(this, Label::kDeferred); - BranchIfSameValue(previous_last_index, smi_zero, &next, &slow); - - BIND(&slow); - SlowStoreLastIndex(context, regexp, smi_zero); - Goto(&next); - BIND(&next); - } - - // Call exec. - TNode<Object> const exec_result = RegExpExec(context, regexp, string); - - // Reset last index if necessary. - { - Label next(this), slow(this, Label::kDeferred); - TNode<Object> const current_last_index = - SlowLoadLastIndex(context, CAST(regexp)); - - BranchIfSameValue(current_last_index, previous_last_index, &next, &slow); - - BIND(&slow); - SlowStoreLastIndex(context, regexp, previous_last_index); - Goto(&next); - BIND(&next); - } - - // Return -1 if no match was found. 
- { - Label next(this); - GotoIfNot(IsNull(exec_result), &next); - Return(SmiConstant(-1)); - BIND(&next); - } - - // Return the index of the match. - { - Label fast_result(this), slow_result(this, Label::kDeferred); - BranchIfFastRegExpResult(context, exec_result, &fast_result, &slow_result); - - BIND(&fast_result); - { - TNode<Object> const index = - LoadObjectField(CAST(exec_result), JSRegExpResult::kIndexOffset); - Return(index); - } - - BIND(&slow_result); - { - Return(GetProperty(context, exec_result, - isolate->factory()->index_string())); - } - } -} - -// ES#sec-regexp.prototype-@@search -// RegExp.prototype [ @@search ] ( string ) -TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) { - TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString)); - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - - // Ensure {maybe_receiver} is a JSReceiver. - ThrowIfNotJSReceiver(context, maybe_receiver, - MessageTemplate::kIncompatibleMethodReceiver, - "RegExp.prototype.@@search"); - TNode<JSReceiver> receiver = CAST(maybe_receiver); - - // Convert {maybe_string} to a String. - TNode<String> const string = ToString_Inline(context, maybe_string); - - Label fast_path(this), slow_path(this); - BranchIfFastRegExp_Permissive(context, receiver, &fast_path, &slow_path); - - BIND(&fast_path); - // TODO(pwong): Could be optimized to remove the overhead of calling the - // builtin (at the cost of a larger builtin). - Return(CallBuiltin(Builtins::kRegExpSearchFast, context, receiver, string)); - - BIND(&slow_path); - RegExpPrototypeSearchBodySlow(context, receiver, string); -} - -// Helper that skips a few initial checks. and assumes... -// 1) receiver is a "fast" RegExp -// 2) pattern is a string -TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) { - TNode<JSRegExp> receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode<String> string = CAST(Parameter(Descriptor::kPattern)); - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - - RegExpPrototypeSearchBodyFast(context, receiver, string); -} - // Generates the fast path for @@split. {regexp} is an unmodified, non-sticky // JSRegExp, {string} is a String, and {limit} is a Smi. -void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context, - TNode<JSRegExp> regexp, - TNode<String> string, - TNode<Smi> const limit) { +TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( + TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string, + TNode<Smi> const limit) { CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp)); CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky))); @@ -2182,11 +1772,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context, const ElementsKind kind = PACKED_ELEMENTS; const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS; - Node* const allocation_site = nullptr; + TNode<AllocationSite> allocation_site = {}; TNode<NativeContext> const native_context = LoadNativeContext(context); TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context); Label return_empty_array(this, Label::kDeferred); + TVARIABLE(JSArray, var_result); + Label done(this); // If limit is zero, return an empty array. 
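The split-body hunks here and continuing below convert RegExpPrototypeSplitBody from a helper that Return()s out of the builtin at several points into one that yields a TNode<JSArray>: every result is written to a TVARIABLE and control joins at a shared done label. A compressed sketch of that control-flow shape; the final return statement is presumed from the new signature rather than shown in the hunks:

  TVARIABLE(JSArray, var_result);
  Label done(this);

  // Each former Return(result) becomes an assignment plus a jump:
  var_result = AllocateJSArray(kind, array_map, capacity, length,
                               allocation_site, mode);
  Goto(&done);

  BIND(&done);
  return var_result.value();  // the calling builtin decides how to use it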
{ @@ -2220,13 +1812,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context, { TNode<Smi> length = SmiConstant(1); TNode<IntPtrT> capacity = IntPtrConstant(1); - TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, - length, allocation_site, mode); + var_result = AllocateJSArray(kind, array_map, capacity, length, + allocation_site, mode); - TNode<FixedArray> fixed_array = CAST(LoadElements(result)); + TNode<FixedArray> fixed_array = CAST(LoadElements(var_result.value())); UnsafeStoreFixedArrayElement(fixed_array, 0, string); - Return(result); + Goto(&done); } } @@ -2240,11 +1832,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context, TVARIABLE(Smi, var_last_matched_until, SmiZero()); TVARIABLE(Smi, var_next_search_from, SmiZero()); - Variable* vars[] = {array.var_array(), array.var_length(), - array.var_capacity(), &var_last_matched_until, - &var_next_search_from}; - const int vars_count = sizeof(vars) / sizeof(vars[0]); - Label loop(this, vars_count, vars), push_suffix_and_out(this), out(this); + Label loop(this, {array.var_array(), array.var_length(), array.var_capacity(), + &var_last_matched_until, &var_next_search_from}), + push_suffix_and_out(this), out(this); Goto(&loop); BIND(&loop); @@ -2321,19 +1911,17 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context, match_indices, RegExpMatchInfo::kNumberOfCapturesIndex)); TNode<IntPtrT> const int_num_registers = SmiUntag(num_registers); - VARIABLE(var_reg, MachineType::PointerRepresentation()); - var_reg.Bind(IntPtrConstant(2)); + TVARIABLE(IntPtrT, var_reg, IntPtrConstant(2)); - Variable* vars[] = {array.var_array(), array.var_length(), - array.var_capacity(), &var_reg}; - const int vars_count = sizeof(vars) / sizeof(vars[0]); - Label nested_loop(this, vars_count, vars), nested_loop_out(this); + Label nested_loop(this, {array.var_array(), array.var_length(), + array.var_capacity(), &var_reg}), + nested_loop_out(this); Branch(IntPtrLessThan(var_reg.value(), int_num_registers), &nested_loop, &nested_loop_out); BIND(&nested_loop); { - Node* const reg = var_reg.value(); + const TNode<IntPtrT> reg = var_reg.value(); TNode<Object> const from = LoadFixedArrayElement( match_indices, reg, RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode); @@ -2342,30 +1930,30 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context, (RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize, mode)); Label select_capture(this), select_undefined(this), store_value(this); - VARIABLE(var_value, MachineRepresentation::kTagged); + TVARIABLE(Object, var_value); Branch(SmiEqual(to, SmiConstant(-1)), &select_undefined, &select_capture); BIND(&select_capture); { - var_value.Bind( - CallBuiltin(Builtins::kSubString, context, string, from, to)); + var_value = + CallBuiltin(Builtins::kSubString, context, string, from, to); Goto(&store_value); } BIND(&select_undefined); { - var_value.Bind(UndefinedConstant()); + var_value = UndefinedConstant(); Goto(&store_value); } BIND(&store_value); { - array.Push(CAST(var_value.value())); + array.Push(var_value.value()); GotoIf(WordEqual(array.length(), int_limit), &out); - TNode<WordT> const new_reg = IntPtrAdd(reg, IntPtrConstant(2)); - var_reg.Bind(new_reg); + const TNode<IntPtrT> new_reg = IntPtrAdd(reg, IntPtrConstant(2)); + var_reg = new_reg; Branch(IntPtrLessThan(new_reg, int_num_registers), &nested_loop, &nested_loop_out); @@ -2382,316 +1970,29 @@ void 
RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode<Context> context, BIND(&push_suffix_and_out); { - TNode<Smi> const from = var_last_matched_until.value(); - Node* const to = string_length; + const TNode<Smi> from = var_last_matched_until.value(); + const TNode<Smi> to = string_length; array.Push(CallBuiltin(Builtins::kSubString, context, string, from, to)); Goto(&out); } BIND(&out); { - TNode<JSArray> const result = array.ToJSArray(context); - Return(result); + var_result = array.ToJSArray(context); + Goto(&done); } BIND(&return_empty_array); { TNode<Smi> length = SmiZero(); TNode<IntPtrT> capacity = IntPtrZero(); - TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, length, - allocation_site, mode); - Return(result); - } -} - -// Helper that skips a few initial checks. -TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) { - TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp)); - TNode<String> string = CAST(Parameter(Descriptor::kString)); - TNode<Object> maybe_limit = CAST(Parameter(Descriptor::kLimit)); - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - - CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) { - BranchIfFastRegExp_Strict(context, regexp, ok, not_ok); - }); - - // Verify {maybe_limit}. - - VARIABLE(var_limit, MachineRepresentation::kTagged, maybe_limit); - Label if_limitissmimax(this), runtime(this, Label::kDeferred); - - { - Label next(this); - - GotoIf(IsUndefined(maybe_limit), &if_limitissmimax); - Branch(TaggedIsPositiveSmi(maybe_limit), &next, &runtime); - - // We need to be extra-strict and require the given limit to be either - // undefined or a positive smi. We can't call ToUint32(maybe_limit) since - // that might move us onto the slow path, resulting in ordering spec - // violations (see https://crbug.com/801171). - - BIND(&if_limitissmimax); - { - // TODO(jgruber): In this case, we can probably avoid generation of limit - // checks in Generate_RegExpPrototypeSplitBody. - var_limit.Bind(SmiConstant(Smi::kMaxValue)); - Goto(&next); - } - - BIND(&next); - } - - // Due to specific shortcuts we take on the fast path (specifically, we don't - // allocate a new regexp instance as specced), we need to ensure that the - // given regexp is non-sticky to avoid invalid results. See crbug.com/v8/6706. - - GotoIf(FastFlagGetter(regexp, JSRegExp::kSticky), &runtime); - - // We're good to go on the fast path, which is inlined here. - - RegExpPrototypeSplitBody(context, regexp, string, CAST(var_limit.value())); - - BIND(&runtime); - Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string, - var_limit.value())); -} - -// ES#sec-regexp.prototype-@@split -// RegExp.prototype [ @@split ] ( string, limit ) -TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) { - const int kStringArg = 0; - const int kLimitArg = 1; - - TNode<IntPtrT> argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); - CodeStubArguments args(this, argc); - - TNode<Object> maybe_receiver = args.GetReceiver(); - TNode<Object> maybe_string = args.GetOptionalArgumentValue(kStringArg); - TNode<Object> maybe_limit = args.GetOptionalArgumentValue(kLimitArg); - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - - // Ensure {maybe_receiver} is a JSReceiver. - ThrowIfNotJSReceiver(context, maybe_receiver, - MessageTemplate::kIncompatibleMethodReceiver, - "RegExp.prototype.@@split"); - TNode<JSReceiver> receiver = CAST(maybe_receiver); - - // Convert {maybe_string} to a String. 
- TNode<String> string = ToString_Inline(context, maybe_string); - - // Strict: Reads the flags property. - // TODO(jgruber): Handle slow flag accesses on the fast path and make this - // permissive. - Label stub(this), runtime(this, Label::kDeferred); - BranchIfFastRegExp_Strict(context, receiver, &stub, &runtime); - - BIND(&stub); - args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context, receiver, - string, maybe_limit)); - - BIND(&runtime); - args.PopAndReturn(CallRuntime(Runtime::kRegExpSplit, context, receiver, - string, maybe_limit)); -} - -class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler { - public: - explicit RegExpStringIteratorAssembler(compiler::CodeAssemblerState* state) - : RegExpBuiltinsAssembler(state) {} - - protected: - TNode<Smi> LoadFlags(TNode<HeapObject> iterator) { - return LoadObjectField<Smi>(iterator, JSRegExpStringIterator::kFlagsOffset); - } - - TNode<BoolT> HasDoneFlag(TNode<Smi> flags) { - return UncheckedCast<BoolT>( - IsSetSmi(flags, 1 << JSRegExpStringIterator::kDoneBit)); - } - - TNode<BoolT> HasGlobalFlag(TNode<Smi> flags) { - return UncheckedCast<BoolT>( - IsSetSmi(flags, 1 << JSRegExpStringIterator::kGlobalBit)); - } - - TNode<BoolT> HasUnicodeFlag(TNode<Smi> flags) { - return UncheckedCast<BoolT>( - IsSetSmi(flags, 1 << JSRegExpStringIterator::kUnicodeBit)); - } - - void SetDoneFlag(TNode<HeapObject> iterator, TNode<Smi> flags) { - TNode<Smi> new_flags = - SmiOr(flags, SmiConstant(1 << JSRegExpStringIterator::kDoneBit)); - StoreObjectFieldNoWriteBarrier( - iterator, JSRegExpStringIterator::kFlagsOffset, new_flags); - } -}; - -// https://tc39.github.io/proposal-string-matchall/ -// %RegExpStringIteratorPrototype%.next ( ) -TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) { - const char* method_name = "%RegExpStringIterator%.prototype.next"; - TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver)); - - Label if_match(this), if_no_match(this, Label::kDeferred), - return_empty_done_result(this, Label::kDeferred); - - // 1. Let O be the this value. - // 2. If Type(O) is not Object, throw a TypeError exception. - // 3. If O does not have all of the internal slots of a RegExp String Iterator - // Object Instance (see 5.3), throw a TypeError exception. - ThrowIfNotInstanceType(context, maybe_receiver, - JS_REGEXP_STRING_ITERATOR_TYPE, method_name); - TNode<HeapObject> receiver = CAST(maybe_receiver); - - // 4. If O.[[Done]] is true, then - // a. Return ! CreateIterResultObject(undefined, true). - TNode<Smi> flags = LoadFlags(receiver); - GotoIf(HasDoneFlag(flags), &return_empty_done_result); - - // 5. Let R be O.[[IteratingRegExp]]. - TNode<JSReceiver> iterating_regexp = CAST(LoadObjectField( - receiver, JSRegExpStringIterator::kIteratingRegExpOffset)); - - // For extra safety, also check the type in release mode. - CSA_CHECK(this, IsJSReceiver(iterating_regexp)); - - // 6. Let S be O.[[IteratedString]]. - TNode<String> iterating_string = CAST( - LoadObjectField(receiver, JSRegExpStringIterator::kIteratedStringOffset)); - - // 7. Let global be O.[[Global]]. - // See if_match. - - // 8. Let fullUnicode be O.[[Unicode]]. - // See if_global. - - // 9. Let match be ? RegExpExec(R, S). 
- TVARIABLE(Object, var_match); - TVARIABLE(BoolT, var_is_fast_regexp); - { - Label if_fast(this), if_slow(this, Label::kDeferred); - BranchIfFastRegExp_Permissive(context, iterating_regexp, &if_fast, - &if_slow); - - BIND(&if_fast); - { - TNode<RegExpMatchInfo> match_indices = - RegExpPrototypeExecBodyWithoutResult( - context, iterating_regexp, iterating_string, &if_no_match, true); - var_match = ConstructNewResultFromMatchInfo( - context, iterating_regexp, match_indices, iterating_string); - var_is_fast_regexp = Int32TrueConstant(); - Goto(&if_match); - } - - BIND(&if_slow); - { - var_match = RegExpExec(context, iterating_regexp, iterating_string); - var_is_fast_regexp = Int32FalseConstant(); - Branch(IsNull(var_match.value()), &if_no_match, &if_match); - } - } - - // 10. If match is null, then - BIND(&if_no_match); - { - // a. Set O.[[Done]] to true. - SetDoneFlag(receiver, flags); - - // b. Return ! CreateIterResultObject(undefined, true). - Goto(&return_empty_done_result); + var_result = AllocateJSArray(kind, array_map, capacity, length, + allocation_site, mode); + Goto(&done); } - // 11. Else, - BIND(&if_match); - { - Label if_global(this), if_not_global(this, Label::kDeferred), - return_result(this); - - // a. If global is true, - Branch(HasGlobalFlag(flags), &if_global, &if_not_global); - BIND(&if_global); - { - Label if_fast(this), if_slow(this, Label::kDeferred); - // ii. If matchStr is the empty string, - Branch(var_is_fast_regexp.value(), &if_fast, &if_slow); - BIND(&if_fast); - { - // i. Let matchStr be ? ToString(? Get(match, "0")). - CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) { - BranchIfFastRegExpResult(context, var_match.value(), ok, not_ok); - }); - CSA_ASSERT(this, - SmiNotEqual(LoadFastJSArrayLength(CAST(var_match.value())), - SmiZero())); - TNode<FixedArray> result_fixed_array = - CAST(LoadElements(CAST(var_match.value()))); - TNode<String> match_str = - CAST(LoadFixedArrayElement(result_fixed_array, 0)); - - // When iterating_regexp is fast, we assume it stays fast even after - // accessing the first match from the RegExp result. - CSA_ASSERT(this, IsFastRegExpPermissive(context, iterating_regexp)); - GotoIfNot(IsEmptyString(match_str), &return_result); - - // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). - TNode<Smi> this_index = FastLoadLastIndex(CAST(iterating_regexp)); - - // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode<Smi> next_index = AdvanceStringIndexFast( - iterating_string, this_index, HasUnicodeFlag(flags)); - - // 3. Perform ? Set(R, "lastIndex", nextIndex, true). - FastStoreLastIndex(CAST(iterating_regexp), next_index); - - // iii. Return ! CreateIterResultObject(match, false). - Goto(&return_result); - } - BIND(&if_slow); - { - // i. Let matchStr be ? ToString(? Get(match, "0")). - TNode<String> match_str = ToString_Inline( - context, GetProperty(context, var_match.value(), SmiZero())); - - GotoIfNot(IsEmptyString(match_str), &return_result); - - // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). - TNode<Object> last_index = SlowLoadLastIndex(context, iterating_regexp); - TNode<Number> this_index = ToLength_Inline(context, last_index); - - // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode<Number> next_index = AdvanceStringIndex( - iterating_string, this_index, HasUnicodeFlag(flags), false); - - // 3. Perform ? Set(R, "lastIndex", nextIndex, true). - SlowStoreLastIndex(context, iterating_regexp, next_index); - - // iii. Return ! CreateIterResultObject(match, false). 
- Goto(&return_result); - } - } - // b. Else, - BIND(&if_not_global); - { - // i. Set O.[[Done]] to true. - SetDoneFlag(receiver, flags); - - // ii. Return ! CreateIterResultObject(match, false). - Goto(&return_result); - } - BIND(&return_result); - { - Return(AllocateJSIteratorResult(context, var_match.value(), - FalseConstant())); - } - } - BIND(&return_empty_done_result); - Return( - AllocateJSIteratorResult(context, UndefinedConstant(), TrueConstant())); + BIND(&done); + return var_result.value(); } } // namespace internal diff --git a/chromium/v8/src/builtins/builtins-regexp-gen.h b/chromium/v8/src/builtins/builtins-regexp-gen.h index de841f57b29..c6de458ef2a 100644 --- a/chromium/v8/src/builtins/builtins-regexp-gen.h +++ b/chromium/v8/src/builtins/builtins-regexp-gen.h @@ -25,8 +25,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { TNode<Object> RegExpCreate(TNode<Context> context, TNode<Map> initial_map, TNode<Object> regexp_string, TNode<String> flags); - TNode<BoolT> IsRegExp(TNode<Context> context, TNode<Object> maybe_receiver); - TNode<Smi> SmiZero(); TNode<IntPtrT> IntPtrZero(); @@ -37,7 +35,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { // and input string. TNode<JSRegExpResult> AllocateRegExpResult( TNode<Context> context, TNode<Smi> length, TNode<Smi> index, - TNode<String> input, TNode<FixedArray>* elements_out = nullptr); + TNode<String> input, TNode<RegExpMatchInfo> match_info, + TNode<FixedArray>* elements_out = nullptr); TNode<Object> FastLoadLastIndexBeforeSmiCheck(TNode<JSRegExp> regexp); TNode<Smi> FastLoadLastIndex(TNode<JSRegExp> regexp) { @@ -56,10 +55,12 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { // Loads {var_string_start} and {var_string_end} with the corresponding // offsets into the given {string_data}. - void GetStringPointers(Node* const string_data, Node* const offset, - Node* const last_index, Node* const string_length, - String::Encoding encoding, Variable* var_string_start, - Variable* var_string_end); + void GetStringPointers(TNode<RawPtrT> string_data, TNode<IntPtrT> offset, + TNode<IntPtrT> last_index, + TNode<IntPtrT> string_length, + String::Encoding encoding, + TVariable<RawPtrT>* var_string_start, + TVariable<RawPtrT>* var_string_end); // Low level logic around the actual call into pattern matching code. TNode<HeapObject> RegExpExecInternal(TNode<Context> context, @@ -136,17 +137,17 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { // Performs fast path checks on the given object itself, but omits prototype // checks. - Node* IsFastRegExpNoPrototype(SloppyTNode<Context> context, - SloppyTNode<Object> object); - Node* IsFastRegExpNoPrototype(SloppyTNode<Context> context, - SloppyTNode<Object> object, - SloppyTNode<Map> map); + TNode<BoolT> IsFastRegExpNoPrototype(TNode<Context> context, + TNode<Object> object); + TNode<BoolT> IsFastRegExpNoPrototype(TNode<Context> context, + TNode<Object> object, TNode<Map> map); // For debugging only. Uses a slow GetProperty call to fetch object.exec. 
TNode<BoolT> IsFastRegExpWithOriginalExec(TNode<Context> context, TNode<JSRegExp> object); - void BranchIfFastRegExpResult(Node* const context, Node* const object, + void BranchIfFastRegExpResult(const TNode<Context> context, + const TNode<Object> object, Label* if_isunmodified, Label* if_ismodified); TNode<String> FlagsGetter(TNode<Context> context, TNode<Object> regexp, @@ -164,10 +165,10 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { TNode<BoolT> FlagGetter(TNode<Context> context, TNode<Object> regexp, JSRegExp::Flag flag, bool is_fastpath); - Node* RegExpInitialize(Node* const context, Node* const regexp, - Node* const maybe_pattern, Node* const maybe_flags); - - TNode<Object> RegExpExec(TNode<Context> context, Node* regexp, Node* string); + TNode<Object> RegExpInitialize(const TNode<Context> context, + const TNode<JSRegExp> regexp, + const TNode<Object> maybe_pattern, + const TNode<Object> maybe_flags); TNode<Number> AdvanceStringIndex(SloppyTNode<String> string, SloppyTNode<Number> index, @@ -179,20 +180,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { return CAST(AdvanceStringIndex(string, index, is_unicode, true)); } + TNode<Smi> AdvanceStringIndexSlow(TNode<String> string, TNode<Number> index, + TNode<BoolT> is_unicode) { + return CAST(AdvanceStringIndex(string, index, is_unicode, false)); + } + TNode<Object> RegExpPrototypeMatchBody(TNode<Context> context, TNode<Object> regexp, TNode<String> const string, const bool is_fastpath); - void RegExpPrototypeSearchBodyFast(TNode<Context> context, - TNode<JSRegExp> regexp, - TNode<String> string); - void RegExpPrototypeSearchBodySlow(TNode<Context> context, Node* const regexp, - Node* const string); - - void RegExpPrototypeSplitBody(TNode<Context> context, TNode<JSRegExp> regexp, - TNode<String> const string, - TNode<Smi> const limit); + TNode<JSArray> RegExpPrototypeSplitBody(TNode<Context> context, + TNode<JSRegExp> regexp, + TNode<String> const string, + TNode<Smi> const limit); }; class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler { @@ -200,13 +201,11 @@ class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler { explicit RegExpMatchAllAssembler(compiler::CodeAssemblerState* state) : RegExpBuiltinsAssembler(state) {} - TNode<Object> CreateRegExpStringIterator(TNode<Context> native_context, + TNode<Object> CreateRegExpStringIterator(TNode<NativeContext> native_context, TNode<Object> regexp, TNode<String> string, TNode<BoolT> global, TNode<BoolT> full_unicode); - void Generate(TNode<Context> context, TNode<Context> native_context, - TNode<Object> receiver, TNode<Object> maybe_string); }; } // namespace internal diff --git a/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc index 8ae89187ecb..85cb4f10f77 100644 --- a/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +++ b/chromium/v8/src/builtins/builtins-sharedarraybuffer-gen.cc @@ -11,8 +11,6 @@ namespace v8 { namespace internal { using compiler::Node; -template <typename T> -using TNode = compiler::TNode<T>; class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { public: @@ -255,7 +253,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64); TNode<Number> value_integer = ToInteger_Inline(CAST(context), CAST(value)); - Node* value_word32 = TruncateTaggedToWord32(context, value_integer); + TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, 
value_integer); #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); @@ -338,7 +336,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); #endif - Node* value_word32 = TruncateTaggedToWord32(context, value_integer); + TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer); int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, @@ -444,8 +442,10 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); #endif - Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer); - Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer); + TNode<Word32T> old_value_word32 = + TruncateTaggedToWord32(context, old_value_integer); + TNode<Word32T> new_value_word32 = + TruncateTaggedToWord32(context, new_value_integer); int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, @@ -571,7 +571,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); #endif - Node* value_word32 = TruncateTaggedToWord32(context, value_integer); + TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer); int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, diff --git a/chromium/v8/src/builtins/builtins-string-gen.cc b/chromium/v8/src/builtins/builtins-string-gen.cc index fc2745ed0a4..425ffc46d29 100644 --- a/chromium/v8/src/builtins/builtins-string-gen.cc +++ b/chromium/v8/src/builtins/builtins-string-gen.cc @@ -8,8 +8,10 @@ #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/codegen/code-factory.h" +#include "src/execution/protectors.h" #include "src/heap/factory-inl.h" #include "src/heap/heap-inl.h" +#include "src/logging/counters.h" #include "src/objects/objects.h" #include "src/objects/property-cell.h" @@ -17,8 +19,6 @@ namespace v8 { namespace internal { using Node = compiler::Node; -template <class T> -using TNode = compiler::TNode<T>; Node* StringBuiltinsAssembler::DirectStringData(Node* string, Node* string_instance_type) { @@ -120,14 +120,14 @@ Node* StringBuiltinsAssembler::CallSearchStringRaw(Node* const subject_ptr, return result; } -TNode<IntPtrT> StringBuiltinsAssembler::PointerToStringDataAtIndex( - Node* const string_data, Node* const index, String::Encoding encoding) { +TNode<RawPtrT> StringBuiltinsAssembler::PointerToStringDataAtIndex( + TNode<RawPtrT> string_data, TNode<IntPtrT> index, + String::Encoding encoding) { const ElementsKind kind = (encoding == String::ONE_BYTE_ENCODING) ? 
UINT8_ELEMENTS : UINT16_ELEMENTS; - TNode<IntPtrT> const offset_in_bytes = - ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS); - return Signed(IntPtrAdd(string_data, offset_in_bytes)); + TNode<IntPtrT> offset_in_bytes = ElementOffsetFromIndex(index, kind); + return RawPtrAdd(string_data, offset_in_bytes); } void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left, @@ -289,6 +289,262 @@ void StringBuiltinsAssembler::StringEqual_Loop( } } +TNode<String> StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint( + TNode<Int32T> codepoint) { + VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant()); + + Label if_isword16(this), if_isword32(this), return_result(this); + + Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16, + &if_isword32); + + BIND(&if_isword16); + { + var_result.Bind(StringFromSingleCharCode(codepoint)); + Goto(&return_result); + } + + BIND(&if_isword32); + { + TNode<String> value = AllocateSeqTwoByteString(2); + StoreNoWriteBarrier( + MachineRepresentation::kWord32, value, + IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), + codepoint); + var_result.Bind(value); + Goto(&return_result); + } + + BIND(&return_result); + return CAST(var_result.value()); +} + +TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length, + TNode<String> left, + TNode<String> right) { + // Added string can be a cons string. + Comment("Allocating ConsString"); + TNode<Int32T> left_instance_type = LoadInstanceType(left); + TNode<Int32T> right_instance_type = LoadInstanceType(right); + + // Determine the resulting ConsString map to use depending on whether + // any of {left} or {right} has two byte encoding. + STATIC_ASSERT(kOneByteStringTag != 0); + STATIC_ASSERT(kTwoByteStringTag == 0); + TNode<Int32T> combined_instance_type = + Word32And(left_instance_type, right_instance_type); + TNode<Map> result_map = CAST(Select<Object>( + IsSetWord32(combined_instance_type, kStringEncodingMask), + [=] { return ConsOneByteStringMapConstant(); }, + [=] { return ConsStringMapConstant(); })); + TNode<HeapObject> result = AllocateInNewSpace(ConsString::kSize); + StoreMapNoWriteBarrier(result, result_map); + StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length, + MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left); + StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right); + return CAST(result); +} + +TNode<String> StringBuiltinsAssembler::StringAdd(Node* context, + TNode<String> left, + TNode<String> right) { + TVARIABLE(String, result); + Label check_right(this), runtime(this, Label::kDeferred), cons(this), + done(this, &result), done_native(this, &result); + Counters* counters = isolate()->counters(); + + TNode<Uint32T> left_length = LoadStringLengthAsWord32(left); + GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right); + result = right; + Goto(&done_native); + + BIND(&check_right); + TNode<Uint32T> right_length = LoadStringLengthAsWord32(right); + GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons); + result = left; + Goto(&done_native); + + BIND(&cons); + { + TNode<Uint32T> new_length = Uint32Add(left_length, right_length); + + // If new length is greater than String::kMaxLength, goto runtime to + // throw. 
Note: we also need to invalidate the string length protector, so + // can't just throw here directly. + GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)), + &runtime); + + TVARIABLE(String, var_left, left); + TVARIABLE(String, var_right, right); + Variable* input_vars[2] = {&var_left, &var_right}; + Label non_cons(this, 2, input_vars); + Label slow(this, Label::kDeferred); + GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)), + &non_cons); + + result = + AllocateConsString(new_length, var_left.value(), var_right.value()); + Goto(&done_native); + + BIND(&non_cons); + + Comment("Full string concatenate"); + TNode<Int32T> left_instance_type = LoadInstanceType(var_left.value()); + TNode<Int32T> right_instance_type = LoadInstanceType(var_right.value()); + // Compute intersection and difference of instance types. + + TNode<Int32T> ored_instance_types = + Word32Or(left_instance_type, right_instance_type); + TNode<Word32T> xored_instance_types = + Word32Xor(left_instance_type, right_instance_type); + + // Check if both strings have the same encoding and both are sequential. + GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime); + GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow); + + TNode<IntPtrT> word_left_length = Signed(ChangeUint32ToWord(left_length)); + TNode<IntPtrT> word_right_length = Signed(ChangeUint32ToWord(right_length)); + + Label two_byte(this); + GotoIf(Word32Equal(Word32And(ored_instance_types, + Int32Constant(kStringEncodingMask)), + Int32Constant(kTwoByteStringTag)), + &two_byte); + // One-byte sequential string case + result = AllocateSeqOneByteString(new_length); + CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), + IntPtrConstant(0), word_left_length, + String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); + CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), + word_left_length, word_right_length, + String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); + Goto(&done_native); + + BIND(&two_byte); + { + // Two-byte sequential string case + result = AllocateSeqTwoByteString(new_length); + CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), + IntPtrConstant(0), word_left_length, + String::TWO_BYTE_ENCODING, + String::TWO_BYTE_ENCODING); + CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), + word_left_length, word_right_length, + String::TWO_BYTE_ENCODING, + String::TWO_BYTE_ENCODING); + Goto(&done_native); + } + + BIND(&slow); + { + // Try to unwrap indirect strings, restart the above attempt on success. + MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right, + right_instance_type, &non_cons); + Goto(&runtime); + } + } + BIND(&runtime); + { + result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right)); + Goto(&done); + } + + BIND(&done_native); + { + IncrementCounter(counters->string_add_native(), 1); + Goto(&done); + } + + BIND(&done); + return result.value(); +} + +void StringBuiltinsAssembler::BranchIfCanDerefIndirectString( + TNode<String> string, TNode<Int32T> instance_type, Label* can_deref, + Label* cannot_deref) { + TNode<Int32T> representation = + Word32And(instance_type, Int32Constant(kStringRepresentationMask)); + GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref); + GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)), + cannot_deref); + // Cons string. 
+ TNode<String> rhs = + LoadObjectField<String>(string, ConsString::kSecondOffset); + GotoIf(IsEmptyString(rhs), can_deref); + Goto(cannot_deref); +} + +void StringBuiltinsAssembler::DerefIndirectString(TVariable<String>* var_string, + TNode<Int32T> instance_type) { +#ifdef DEBUG + Label can_deref(this), cannot_deref(this); + BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref, + &cannot_deref); + BIND(&cannot_deref); + DebugBreak(); // Should be able to dereference string. + Goto(&can_deref); + BIND(&can_deref); +#endif // DEBUG + + STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) == + static_cast<int>(ConsString::kFirstOffset)); + *var_string = + LoadObjectField<String>(var_string->value(), ThinString::kActualOffset); +} + +void StringBuiltinsAssembler::MaybeDerefIndirectString( + TVariable<String>* var_string, TNode<Int32T> instance_type, + Label* did_deref, Label* cannot_deref) { + Label deref(this); + BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref, + cannot_deref); + + BIND(&deref); + { + DerefIndirectString(var_string, instance_type); + Goto(did_deref); + } +} + +void StringBuiltinsAssembler::MaybeDerefIndirectStrings( + TVariable<String>* var_left, TNode<Int32T> left_instance_type, + TVariable<String>* var_right, TNode<Int32T> right_instance_type, + Label* did_something) { + Label did_nothing_left(this), did_something_left(this), + didnt_do_anything(this); + MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left, + &did_nothing_left); + + BIND(&did_something_left); + { + MaybeDerefIndirectString(var_right, right_instance_type, did_something, + did_something); + } + + BIND(&did_nothing_left); + { + MaybeDerefIndirectString(var_right, right_instance_type, did_something, + &didnt_do_anything); + } + + BIND(&didnt_do_anything); + // Fall through if neither string was an indirect string. +} + +TNode<String> StringBuiltinsAssembler::DerefIndirectString( + TNode<String> string, TNode<Int32T> instance_type, Label* cannot_deref) { + Label deref(this); + BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref); + BIND(&deref); + STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) == + static_cast<int>(ConsString::kFirstOffset)); + return LoadObjectField<String>(string, ThinString::kActualOffset); +} + TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) { TNode<String> left = CAST(Parameter(Descriptor::kLeft)); TNode<String> right = CAST(Parameter(Descriptor::kRight)); @@ -504,19 +760,6 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) { Operation::kGreaterThanOrEqual); } -TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) { - TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode<IntPtrT> position = - UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition)); - - // Load the character code at the {position} from the {receiver}. 
- TNode<Int32T> code = StringCharCodeAt(receiver, position); - - // And return the single character string with only that {code} - TNode<String> result = StringFromSingleCharCode(code); - Return(result); -} - TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* position = Parameter(Descriptor::kPosition); @@ -551,14 +794,14 @@ TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) { // ES6 section 21.1 String Objects // ES6 #sec-string.fromcharcode -TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { +TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) { // TODO(ishell): use constants from Descriptor once the JSFunction linkage // arguments are reordered. TNode<Int32T> argc = UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); Node* context = Parameter(Descriptor::kContext); - CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments arguments(this, argc); // Check if we have exactly one argument (plus the implicit receiver), i.e. // if the parent frame is not an arguments adaptor frame. Label if_oneargument(this), if_notoneargument(this); @@ -571,7 +814,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // for one-byte code units, or fall back to creating a single character // string on the fly otherwise. TNode<Object> code = arguments.AtIndex(0); - Node* code32 = TruncateTaggedToWord32(context, code); + TNode<Word32T> code32 = TruncateTaggedToWord32(context, code); TNode<Int32T> code16 = Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit))); TNode<String> result = StringFromSingleCharCode(code16); @@ -585,16 +828,14 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // Assume that the resulting string contains only one-byte characters. TNode<String> one_byte_result = AllocateSeqOneByteString(Unsigned(argc)); - TVARIABLE(IntPtrT, var_max_index); - var_max_index = IntPtrConstant(0); + TVARIABLE(IntPtrT, var_max_index, IntPtrConstant(0)); // Iterate over the incoming arguments, converting them to 8-bit character // codes. Stop if any of the conversions generates a code that doesn't fit // in 8 bits. CodeStubAssembler::VariableList vars({&var_max_index}, zone()); - arguments.ForEach(vars, [this, context, &two_byte, &var_max_index, &code16, - one_byte_result](Node* arg) { - Node* code32 = TruncateTaggedToWord32(context, arg); + arguments.ForEach(vars, [&](TNode<Object> arg) { + TNode<Word32T> code32 = TruncateTaggedToWord32(context, arg); code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)); GotoIf( @@ -604,7 +845,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // The {code16} fits into the SeqOneByteString {one_byte_result}. TNode<IntPtrT> offset = ElementOffsetFromIndex( var_max_index.value(), UINT8_ELEMENTS, - CodeStubAssembler::INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize - kHeapObjectTag); StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result, offset, code16); @@ -629,7 +869,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // Write the character that caused the 8-bit to 16-bit fault. TNode<IntPtrT> max_index_offset = ElementOffsetFromIndex(var_max_index.value(), UINT16_ELEMENTS, - CodeStubAssembler::INTPTR_PARAMETERS, SeqTwoByteString::kHeaderSize - kHeapObjectTag); StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result, max_index_offset, code16); @@ -640,14 +879,13 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // using a 16-bit representation. 
arguments.ForEach( vars, - [this, context, two_byte_result, &var_max_index](Node* arg) { - Node* code32 = TruncateTaggedToWord32(context, arg); + [&](TNode<Object> arg) { + TNode<Word32T> code32 = TruncateTaggedToWord32(context, arg); TNode<Word32T> code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)); TNode<IntPtrT> offset = ElementOffsetFromIndex( var_max_index.value(), UINT16_ELEMENTS, - CodeStubAssembler::INTPTR_PARAMETERS, SeqTwoByteString::kHeaderSize - kHeapObjectTag); StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result, offset, code16); @@ -723,9 +961,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&one_one); { - TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::ONE_BYTE_ENCODING); - TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::ONE_BYTE_ENCODING); Label direct_memchr_call(this), generic_fast_path(this); @@ -736,8 +974,8 @@ void StringBuiltinsAssembler::StringIndexOf( // search strings. BIND(&direct_memchr_call); { - TNode<IntPtrT> const string_addr = - IntPtrAdd(adjusted_subject_ptr, start_position); + TNode<RawPtrT> const string_addr = + RawPtrAdd(adjusted_subject_ptr, start_position); TNode<IntPtrT> const search_length = IntPtrSub(subject_length, start_position); TNode<IntPtrT> const search_byte = @@ -745,14 +983,14 @@ void StringBuiltinsAssembler::StringIndexOf( TNode<ExternalReference> const memchr = ExternalConstant(ExternalReference::libc_memchr_function()); - TNode<IntPtrT> const result_address = UncheckedCast<IntPtrT>( + TNode<RawPtrT> const result_address = UncheckedCast<RawPtrT>( CallCFunction(memchr, MachineType::Pointer(), std::make_pair(MachineType::Pointer(), string_addr), std::make_pair(MachineType::IntPtr(), search_byte), std::make_pair(MachineType::UintPtr(), search_length))); GotoIf(WordEqual(result_address, int_zero), &return_minus_1); TNode<IntPtrT> const result_index = - IntPtrAdd(IntPtrSub(result_address, string_addr), start_position); + IntPtrAdd(RawPtrSub(result_address, string_addr), start_position); f_return(SmiTag(result_index)); } @@ -767,9 +1005,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&one_two); { - TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::ONE_BYTE_ENCODING); - TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::TWO_BYTE_ENCODING); Node* const result = CallSearchStringRaw<onebyte_t, twobyte_t>( @@ -780,9 +1018,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&two_one); { - TNode<IntPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::TWO_BYTE_ENCODING); - TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::ONE_BYTE_ENCODING); Node* const result = CallSearchStringRaw<twobyte_t, onebyte_t>( @@ -793,9 +1031,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&two_two); { - TNode<IntPtrT> const adjusted_subject_ptr = 
PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::TWO_BYTE_ENCODING); - TNode<IntPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode<RawPtrT> const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::TWO_BYTE_ENCODING); Node* const result = CallSearchStringRaw<twobyte_t, twobyte_t>( @@ -1300,8 +1538,8 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { // maybe_regexp is a fast regexp and receiver is a string. TNode<String> s = CAST(receiver); - RegExpMatchAllAssembler regexp_asm(state()); - regexp_asm.Generate(context, native_context, maybe_regexp, s); + Return( + RegExpPrototypeMatchAllImpl(context, native_context, maybe_regexp, s)); }; auto if_generic_call = [=](Node* fn) { Callable call_callable = CodeFactory::Call(isolate()); @@ -1368,9 +1606,9 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray( TNode<IntPtrT> string_data_offset = to_direct.offset(); TNode<FixedArray> cache = SingleCharacterStringCacheConstant(); - BuildFastLoop( + BuildFastLoop<IntPtrT>( IntPtrConstant(0), length, - [&](Node* index) { + [&](TNode<IntPtrT> index) { // TODO(jkummerow): Implement a CSA version of DisallowHeapAllocation // and use that to guard ToDirectStringAssembler.PointerToData(). CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime), @@ -1387,7 +1625,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray( StoreFixedArrayElement(elements, index, entry); }, - 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context); result_array = AllocateJSArray(array_map, elements, length_smi); @@ -1614,7 +1852,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) { } } -TF_BUILTIN(StringSubstring, CodeStubAssembler) { +TF_BUILTIN(StringSubstring, StringBuiltinsAssembler) { TNode<String> string = CAST(Parameter(Descriptor::kString)); TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom)); TNode<IntPtrT> to = UncheckedCast<IntPtrT>(Parameter(Descriptor::kTo)); @@ -1870,9 +2108,248 @@ void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration( DCHECK(isolate()->heap()->string_iterator_protector().IsPropertyCell()); Branch( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), if_true, if_false); } +void StringBuiltinsAssembler::CopyStringCharacters( + Node* from_string, Node* to_string, TNode<IntPtrT> from_index, + TNode<IntPtrT> to_index, TNode<IntPtrT> character_count, + String::Encoding from_encoding, String::Encoding to_encoding) { + // Cannot assert IsString(from_string) and IsString(to_string) here because + // SubString can pass in faked sequential strings when handling external + // subject strings. + bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING; + bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING; + DCHECK_IMPLIES(to_one_byte, from_one_byte); + Comment("CopyStringCharacters ", + from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ", + to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING"); + + ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; + ElementsKind to_kind = to_one_byte ? 
UINT8_ELEMENTS : UINT16_ELEMENTS; + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag; + TNode<IntPtrT> from_offset = + ElementOffsetFromIndex(from_index, from_kind, header_size); + TNode<IntPtrT> to_offset = + ElementOffsetFromIndex(to_index, to_kind, header_size); + TNode<IntPtrT> byte_count = + ElementOffsetFromIndex(character_count, from_kind); + TNode<IntPtrT> limit_offset = IntPtrAdd(from_offset, byte_count); + + // Prepare the fast loop + MachineType type = + from_one_byte ? MachineType::Uint8() : MachineType::Uint16(); + MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8 + : MachineRepresentation::kWord16; + int from_increment = 1 << ElementsKindToShiftSize(from_kind); + int to_increment = 1 << ElementsKindToShiftSize(to_kind); + + TVARIABLE(IntPtrT, current_to_offset, to_offset); + VariableList vars({&current_to_offset}, zone()); + int to_index_constant = 0, from_index_constant = 0; + bool index_same = (from_encoding == to_encoding) && + (from_index == to_index || + (ToInt32Constant(from_index, &from_index_constant) && + ToInt32Constant(to_index, &to_index_constant) && + from_index_constant == to_index_constant)); + BuildFastLoop<IntPtrT>( + vars, from_offset, limit_offset, + [&](TNode<IntPtrT> offset) { + Node* value = Load(type, from_string, offset); + StoreNoWriteBarrier(rep, to_string, + index_same ? offset : current_to_offset.value(), + value); + if (!index_same) { + Increment(&current_to_offset, to_increment); + } + }, + from_increment, IndexAdvanceMode::kPost); +} + +// A wrapper around CopyStringCharacters which determines the correct string +// encoding, allocates a corresponding sequential string, and then copies the +// given character range using CopyStringCharacters. +// |from_string| must be a sequential string. +// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length. +TNode<String> StringBuiltinsAssembler::AllocAndCopyStringCharacters( + Node* from, Node* from_instance_type, TNode<IntPtrT> from_index, + TNode<IntPtrT> character_count) { + Label end(this), one_byte_sequential(this), two_byte_sequential(this); + TVARIABLE(String, var_result); + + Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential, + &two_byte_sequential); + + // The subject string is a sequential one-byte string. + BIND(&one_byte_sequential); + { + TNode<String> result = AllocateSeqOneByteString( + Unsigned(TruncateIntPtrToInt32(character_count))); + CopyStringCharacters(from, result, from_index, IntPtrConstant(0), + character_count, String::ONE_BYTE_ENCODING, + String::ONE_BYTE_ENCODING); + var_result = result; + Goto(&end); + } + + // The subject string is a sequential two-byte string. 
+ BIND(&two_byte_sequential); + { + TNode<String> result = AllocateSeqTwoByteString( + Unsigned(TruncateIntPtrToInt32(character_count))); + CopyStringCharacters(from, result, from_index, IntPtrConstant(0), + character_count, String::TWO_BYTE_ENCODING, + String::TWO_BYTE_ENCODING); + var_result = result; + Goto(&end); + } + + BIND(&end); + return var_result.value(); +} + +TNode<String> StringBuiltinsAssembler::SubString(TNode<String> string, + TNode<IntPtrT> from, + TNode<IntPtrT> to) { + TVARIABLE(String, var_result); + ToDirectStringAssembler to_direct(state(), string); + Label end(this), runtime(this); + + TNode<IntPtrT> const substr_length = IntPtrSub(to, from); + TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string); + + // Begin dispatching based on substring length. + + Label original_string_or_invalid_length(this); + GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length), + &original_string_or_invalid_length); + + // A real substring (substr_length < string_length). + Label empty(this); + GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty); + + Label single_char(this); + GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char); + + // Deal with different string types: update the index if necessary + // and extract the underlying string. + + TNode<String> direct_string = to_direct.TryToDirect(&runtime); + TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset()); + TNode<Int32T> const instance_type = to_direct.instance_type(); + + // The subject string can only be external or sequential string of either + // encoding at this point. + Label external_string(this); + { + if (FLAG_string_slices) { + Label next(this); + + // Short slice. Copy instead of slicing. + GotoIf(IntPtrLessThan(substr_length, + IntPtrConstant(SlicedString::kMinLength)), + &next); + + // Allocate new sliced string. + + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + Label one_byte_slice(this), two_byte_slice(this); + Branch(IsOneByteStringInstanceType(to_direct.instance_type()), + &one_byte_slice, &two_byte_slice); + + BIND(&one_byte_slice); + { + var_result = AllocateSlicedOneByteString( + Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, + SmiTag(offset)); + Goto(&end); + } + + BIND(&two_byte_slice); + { + var_result = AllocateSlicedTwoByteString( + Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, + SmiTag(offset)); + Goto(&end); + } + + BIND(&next); + } + + // The subject string can only be external or sequential string of either + // encoding at this point. + GotoIf(to_direct.is_external(), &external_string); + + var_result = AllocAndCopyStringCharacters(direct_string, instance_type, + offset, substr_length); + + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + Goto(&end); + } + + // Handle external string. + BIND(&external_string); + { + TNode<RawPtrT> const fake_sequential_string = + to_direct.PointerToString(&runtime); + + var_result = AllocAndCopyStringCharacters( + fake_sequential_string, instance_type, offset, substr_length); + + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + Goto(&end); + } + + BIND(&empty); + { + var_result = EmptyStringConstant(); + Goto(&end); + } + + // Substrings of length 1 are generated through CharCodeAt and FromCharCode. 
+ BIND(&single_char); + { + TNode<Int32T> char_code = StringCharCodeAt(string, from); + var_result = StringFromSingleCharCode(char_code); + Goto(&end); + } + + BIND(&original_string_or_invalid_length); + { + CSA_ASSERT(this, IntPtrEqual(substr_length, string_length)); + + // Equal length - check if {from, to} == {0, str.length}. + GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime); + + // Return the original string (substr_length == string_length). + + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + var_result = string; + Goto(&end); + } + + // Fall back to a runtime call. + BIND(&runtime); + { + var_result = + CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string, + SmiTag(from), SmiTag(to))); + Goto(&end); + } + + BIND(&end); + return var_result.value(); +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/builtins/builtins-string-gen.h b/chromium/v8/src/builtins/builtins-string-gen.h index 64d5a77615d..0dfcf88a8c7 100644 --- a/chromium/v8/src/builtins/builtins-string-gen.h +++ b/chromium/v8/src/builtins/builtins-string-gen.h @@ -33,6 +33,25 @@ class StringBuiltinsAssembler : public CodeStubAssembler { SloppyTNode<IntPtrT> index, UnicodeEncoding encoding); + TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint); + + // Return a new string object which holds a substring containing the range + // [from,to[ of string. + TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from, + TNode<IntPtrT> to); + + // Copies |character_count| elements from |from_string| to |to_string| + // starting at the |from_index|'th character. |from_string| and |to_string| + // can either be one-byte strings or two-byte strings, although if + // |from_string| is two-byte, then |to_string| must be two-byte. + // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <= + // |from_index| <= |from_index| + |character_count| <= from_string.length and + // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length. + V8_EXPORT_PRIVATE void CopyStringCharacters( + Node* from_string, Node* to_string, TNode<IntPtrT> from_index, + TNode<IntPtrT> to_index, TNode<IntPtrT> character_count, + String::Encoding from_encoding, String::Encoding to_encoding); + protected: void StringEqual_Loop(Node* lhs, Node* lhs_instance_type, MachineType lhs_type, Node* rhs, @@ -51,8 +70,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler { Node* const search_ptr, Node* const search_length, Node* const start_position); - TNode<IntPtrT> PointerToStringDataAtIndex(Node* const string_data, - Node* const index, + TNode<RawPtrT> PointerToStringDataAtIndex(TNode<RawPtrT> string_data, + TNode<IntPtrT> index, String::Encoding encoding); // substr and slice have a common way of handling the {start} argument. @@ -82,6 +101,38 @@ class StringBuiltinsAssembler : public CodeStubAssembler { return SmiLessThan(value, SmiConstant(0)); } + TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left, + TNode<String> right); + + TNode<String> StringAdd(Node* context, TNode<String> left, + TNode<String> right); + + // Check if |string| is an indirect (thin or flat cons) string type that can + // be dereferenced by DerefIndirectString. 
+ void BranchIfCanDerefIndirectString(TNode<String> string, + TNode<Int32T> instance_type, + Label* can_deref, Label* cannot_deref); + // Allocate an appropriate one- or two-byte ConsString with the first and + // second parts specified by |left| and |right|. + // Unpack an indirect (thin or flat cons) string type. + void DerefIndirectString(TVariable<String>* var_string, + TNode<Int32T> instance_type); + // Check if |var_string| has an indirect (thin or flat cons) string type, and + // unpack it if so. + void MaybeDerefIndirectString(TVariable<String>* var_string, + TNode<Int32T> instance_type, Label* did_deref, + Label* cannot_deref); + // Check if |var_left| or |var_right| has an indirect (thin or flat cons) + // string type, and unpack it/them if so. Fall through if nothing was done. + void MaybeDerefIndirectStrings(TVariable<String>* var_left, + TNode<Int32T> left_instance_type, + TVariable<String>* var_right, + TNode<Int32T> right_instance_type, + Label* did_something); + TNode<String> DerefIndirectString(TNode<String> string, + TNode<Int32T> instance_type, + Label* cannot_deref); + // Implements boilerplate logic for {match, split, replace, search} of the // form: // @@ -103,6 +154,12 @@ class StringBuiltinsAssembler : public CodeStubAssembler { Handle<Symbol> symbol, DescriptorIndexNameValue additional_property_to_check, const NodeFunction0& regexp_call, const NodeFunction1& generic_call); + + private: + TNode<String> AllocAndCopyStringCharacters(Node* from, + Node* from_instance_type, + TNode<IntPtrT> from_index, + TNode<IntPtrT> character_count); }; class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler { diff --git a/chromium/v8/src/builtins/builtins-string.cc b/chromium/v8/src/builtins/builtins-string.cc index 04a96c7e46d..ba2346d661c 100644 --- a/chromium/v8/src/builtins/builtins-string.cc +++ b/chromium/v8/src/builtins/builtins-string.cc @@ -136,20 +136,21 @@ BUILTIN(StringPrototypeLocaleCompare) { HandleScope handle_scope(isolate); isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringLocaleCompare); + const char* method = "String.prototype.localeCompare"; #ifdef V8_INTL_SUPPORT - TO_THIS_STRING(str1, "String.prototype.localeCompare"); + TO_THIS_STRING(str1, method); Handle<String> str2; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, str2, Object::ToString(isolate, args.atOrUndefined(isolate, 1))); RETURN_RESULT_OR_FAILURE( - isolate, Intl::StringLocaleCompare(isolate, str1, str2, - args.atOrUndefined(isolate, 2), - args.atOrUndefined(isolate, 3))); + isolate, Intl::StringLocaleCompare( + isolate, str1, str2, args.atOrUndefined(isolate, 2), + args.atOrUndefined(isolate, 3), method)); #else DCHECK_EQ(2, args.length()); - TO_THIS_STRING(str1, "String.prototype.localeCompare"); + TO_THIS_STRING(str1, method); Handle<String> str2; ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2, Object::ToString(isolate, args.at(1))); diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.cc b/chromium/v8/src/builtins/builtins-typed-array-gen.cc index 448ff66603f..c69034e813b 100644 --- a/chromium/v8/src/builtins/builtins-typed-array-gen.cc +++ b/chromium/v8/src/builtins/builtins-typed-array-gen.cc @@ -8,6 +8,7 @@ #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/builtins/growable-fixed-array-gen.h" +#include "src/execution/protectors.h" #include "src/handles/handles-inl.h" #include "src/heap/factory-inl.h" @@ -15,8 +16,6 @@ namespace v8 { namespace internal { using compiler::Node; -template <class T> -using TNode = 
compiler::TNode<T>; // ----------------------------------------------------------------------------- // ES6 section 22.2 TypedArray Objects @@ -117,8 +116,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) { // ES6 #sec-get-%typedarray%.prototype.bytelength TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { const char* const kMethodName = "get TypedArray.prototype.byteLength"; - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); @@ -135,8 +134,8 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { // ES6 #sec-get-%typedarray%.prototype.byteoffset TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) { const char* const kMethodName = "get TypedArray.prototype.byteOffset"; - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); @@ -153,8 +152,8 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) { // ES6 #sec-get-%typedarray%.prototype.length TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) { const char* const kMethodName = "get TypedArray.prototype.length"; - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); @@ -318,8 +317,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource( // Grab pointers and byte lengths we need later on. - TNode<RawPtrT> target_data_ptr = LoadJSTypedArrayBackingStore(target); - TNode<RawPtrT> source_data_ptr = LoadJSTypedArrayBackingStore(source); + TNode<RawPtrT> target_data_ptr = LoadJSTypedArrayDataPtr(target); + TNode<RawPtrT> source_data_ptr = LoadJSTypedArrayDataPtr(source); TNode<Int32T> source_el_kind = LoadElementsKind(source); TNode<Int32T> target_el_kind = LoadElementsKind(target); @@ -538,13 +537,83 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::IsSharedArrayBuffer( return IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield); } +void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr( + TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) { + offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag), + offset); + if (COMPRESS_POINTERS_BOOL) { + TNode<IntPtrT> full_base = Signed(BitcastTaggedToWord(base)); + TNode<Int32T> compressed_base = TruncateIntPtrToInt32(full_base); + // TODO(v8:9706): Add a way to directly use kRootRegister value. + TNode<IntPtrT> isolate_root = + IntPtrSub(full_base, ChangeInt32ToIntPtr(compressed_base)); + // Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset. 
+ DCHECK_EQ( + isolate()->isolate_root(), + JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate())); + // See JSTypedArray::SetOnHeapDataPtr() for details. + offset = Unsigned(IntPtrAdd(offset, isolate_root)); + } + + StoreObjectField(holder, JSTypedArray::kBasePointerOffset, base); + StoreObjectFieldNoWriteBarrier<UintPtrT>( + holder, JSTypedArray::kExternalPointerOffset, offset); +} + +void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr( + TNode<JSTypedArray> holder, TNode<RawPtrT> base, TNode<UintPtrT> offset) { + StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kBasePointerOffset, + SmiConstant(0)); + + base = RawPtrAdd(base, Signed(offset)); + StoreObjectFieldNoWriteBarrier<RawPtrT>( + holder, JSTypedArray::kExternalPointerOffset, base); +} + +void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged( + TNode<Context> context, TNode<JSTypedArray> typed_array, + TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) { + TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array); + switch (elements_kind) { + case UINT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + case INT8_ELEMENTS: + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, SmiToInt32(CAST(value)), + SMI_PARAMETERS); + break; + case UINT32_ELEMENTS: + case INT32_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + TruncateTaggedToWord32(context, value), SMI_PARAMETERS); + break; + case FLOAT32_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))), + SMI_PARAMETERS); + break; + case FLOAT64_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS); + break; + case BIGUINT64_ELEMENTS: + case BIGINT64_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + UncheckedCast<BigInt>(value), SMI_PARAMETERS); + break; + default: + UNREACHABLE(); + } +} + // ES #sec-get-%typedarray%.prototype.set TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) { const char* method_name = "%TypedArray%.prototype.set"; + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); - CodeStubArguments args( - this, - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount))); + CodeStubArguments args(this, argc); Label if_source_is_typed_array(this), if_source_is_fast_jsarray(this), if_offset_is_out_of_bounds(this, Label::kDeferred), @@ -618,7 +687,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) { // ES #sec-get-%typedarray%.prototype-@@tostringtag TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); Label if_receiverisheapobject(this), return_undefined(this); Branch(TaggedIsSmi(receiver), &return_undefined, &if_receiverisheapobject); @@ -645,12 +714,12 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) { #undef TYPED_ARRAY_CASE }; - // We offset the dispatch by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, so - // that this can be turned into a non-sparse table switch for ideal - // performance. + // We offset the dispatch by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, so that + // this can be turned into a non-sparse table switch for ideal performance. 
BIND(&if_receiverisheapobject); + TNode<HeapObject> receiver_heap_object = CAST(receiver); TNode<Int32T> elements_kind = - Int32Sub(LoadElementsKind(receiver), + Int32Sub(LoadElementsKind(receiver_heap_object), Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); Switch(elements_kind, &return_undefined, elements_kinds, elements_kind_labels, kTypedElementsKindCount); @@ -710,8 +779,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { TNode<IntPtrT> length = ChangeInt32ToIntPtr( UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount))); // 2. Let items be the List of arguments passed to this function. - CodeStubArguments args(this, length, nullptr, INTPTR_PARAMETERS, - CodeStubArguments::ReceiverMode::kHasReceiver); + CodeStubArguments args(this, length); Label if_not_constructor(this, Label::kDeferred), if_detached(this, Label::kDeferred); @@ -737,10 +805,10 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { DispatchTypedArrayByElementsKind( elements_kind, [&](ElementsKind kind, int size, int typed_array_fun_index) { - BuildFastLoop( + BuildFastLoop<IntPtrT>( IntPtrConstant(0), length, - [&](Node* index) { - TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS); + [&](TNode<IntPtrT> index) { + TNode<Object> item = args.AtIndex(index); Node* value = PrepareValueForWriteToTypedArray(item, kind, context); @@ -752,12 +820,11 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { // GC may move backing store in ToNumber, thus load backing // store everytime in this loop. - TNode<RawPtrT> backing_store = - LoadJSTypedArrayBackingStore(new_typed_array); - StoreElement(backing_store, kind, index, value, - INTPTR_PARAMETERS); + TNode<RawPtrT> data_ptr = + LoadJSTypedArrayDataPtr(new_typed_array); + StoreElement(data_ptr, kind, index, value, INTPTR_PARAMETERS); }, - 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); }); // 8. Return newObj. @@ -773,6 +840,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { // ES6 #sec-%typedarray%.from TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { + TNode<Int32T> argc = + UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label check_iterator(this), from_array_like(this), fast_path(this), @@ -782,9 +851,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { if_iterator_fn_not_callable(this, Label::kDeferred), if_detached(this, Label::kDeferred); - CodeStubArguments args( - this, - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount))); + CodeStubArguments args(this, argc); TNode<Object> source = args.GetOptionalArgumentValue(0); // 5. If thisArg is present, let T be thisArg; else let T be undefined. @@ -866,7 +933,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { TNode<PropertyCell> protector_cell = ArrayIteratorProtectorConstant(); GotoIfNot( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), &check_iterator); // Source is a TypedArray with unmodified iterator behavior. 
Use the @@ -950,15 +1017,15 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { TNode<Int32T> elements_kind = LoadElementsKind(target_obj.value()); // 7e/13 : Copy the elements - BuildFastLoop( + BuildFastLoop<Smi>( SmiConstant(0), final_length.value(), - [&](Node* index) { + [&](TNode<Smi> index) { TNode<Object> const k_value = GetProperty(context, final_source.value(), index); TNode<Object> const mapped_value = - CAST(CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg, - k_value, index)); + CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg, + k_value, index); DispatchTypedArrayByElementsKind( elements_kind, @@ -974,13 +1041,12 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { // GC may move backing store in map_fn, thus load backing // store in each iteration of this loop. - TNode<RawPtrT> backing_store = - LoadJSTypedArrayBackingStore(target_obj.value()); - StoreElement(backing_store, kind, index, final_value, - SMI_PARAMETERS); + TNode<RawPtrT> data_ptr = + LoadJSTypedArrayDataPtr(target_obj.value()); + StoreElement(data_ptr, kind, index, final_value, SMI_PARAMETERS); }); }, - 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); args.PopAndReturn(target_obj.value()); diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.h b/chromium/v8/src/builtins/builtins-typed-array-gen.h index d637bc9c6b6..10a2cb608c6 100644 --- a/chromium/v8/src/builtins/builtins-typed-array-gen.h +++ b/chromium/v8/src/builtins/builtins-typed-array-gen.h @@ -111,6 +111,18 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function); TNode<BoolT> IsSharedArrayBuffer(TNode<JSArrayBuffer> buffer); + + void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder, + TNode<ByteArray> base, + TNode<UintPtrT> offset); + void SetJSTypedArrayOffHeapDataPtr(TNode<JSTypedArray> holder, + TNode<RawPtrT> base, + TNode<UintPtrT> offset); + void StoreJSTypedArrayElementFromTagged(TNode<Context> context, + TNode<JSTypedArray> typed_array, + TNode<Smi> index_node, + TNode<Object> value, + ElementsKind elements_kind); }; } // namespace internal diff --git a/chromium/v8/src/builtins/builtins-utils-inl.h b/chromium/v8/src/builtins/builtins-utils-inl.h index c9d15f09dd2..c8c9a2522c9 100644 --- a/chromium/v8/src/builtins/builtins-utils-inl.h +++ b/chromium/v8/src/builtins/builtins-utils-inl.h @@ -12,20 +12,21 @@ namespace v8 { namespace internal { -Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate, int index) { +Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate, + int index) const { if (index >= length()) { return isolate->factory()->undefined_value(); } return at<Object>(index); } -Handle<Object> BuiltinArguments::receiver() { return at<Object>(0); } +Handle<Object> BuiltinArguments::receiver() const { return at<Object>(0); } -Handle<JSFunction> BuiltinArguments::target() { +Handle<JSFunction> BuiltinArguments::target() const { return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset); } -Handle<HeapObject> BuiltinArguments::new_target() { +Handle<HeapObject> BuiltinArguments::new_target() const { return Arguments::at<HeapObject>(Arguments::length() - 1 - kNewTargetOffset); } diff --git a/chromium/v8/src/builtins/builtins-utils.h b/chromium/v8/src/builtins/builtins-utils.h index 822f9df6ecd..601dfd58131 100644 --- a/chromium/v8/src/builtins/builtins-utils.h +++ b/chromium/v8/src/builtins/builtins-utils.h @@ -23,13 +23,13 @@ 
class BuiltinArguments : public Arguments { DCHECK_LE(1, this->length()); } - Object operator[](int index) { + Object operator[](int index) const { DCHECK_LT(index, length()); return Arguments::operator[](index); } template <class S = Object> - Handle<S> at(int index) { + Handle<S> at(int index) const { DCHECK_LT(index, length()); return Arguments::at<S>(index); } @@ -42,10 +42,10 @@ class BuiltinArguments : public Arguments { static constexpr int kNumExtraArgs = 4; static constexpr int kNumExtraArgsWithReceiver = 5; - inline Handle<Object> atOrUndefined(Isolate* isolate, int index); - inline Handle<Object> receiver(); - inline Handle<JSFunction> target(); - inline Handle<HeapObject> new_target(); + inline Handle<Object> atOrUndefined(Isolate* isolate, int index) const; + inline Handle<Object> receiver() const; + inline Handle<JSFunction> target() const; + inline Handle<HeapObject> new_target() const; // Gets the total number of arguments including the receiver (but // excluding extra arguments). @@ -77,7 +77,7 @@ class BuiltinArguments : public Arguments { RuntimeCallCounterId::kBuiltin_##name); \ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \ "V8.Builtin_" #name); \ - return Builtin_Impl_##name(args, isolate).ptr(); \ + return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \ } \ \ V8_WARN_UNUSED_RESULT Address Builtin_##name( \ @@ -87,7 +87,7 @@ class BuiltinArguments : public Arguments { return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \ } \ BuiltinArguments args(args_length, args_object); \ - return Builtin_Impl_##name(args, isolate).ptr(); \ + return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \ } \ \ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ diff --git a/chromium/v8/src/builtins/builtins-wasm-gen.cc b/chromium/v8/src/builtins/builtins-wasm-gen.cc index 12270495c13..d6346fb9aa4 100644 --- a/chromium/v8/src/builtins/builtins-wasm-gen.cc +++ b/chromium/v8/src/builtins/builtins-wasm-gen.cc @@ -121,18 +121,19 @@ TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) { TNode<Code> centry = LoadCEntryFromInstance(instance); TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber); + TNode<Object> context = LoadContextFromInstance(instance); // TODO(aseemgarg): Use SMIs if possible for address and count TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address)); TNode<HeapNumber> count_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(count_heap, ChangeUint32ToFloat64(count)); TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry( - Runtime::kWasmAtomicNotify, centry, NoContextConstant(), instance, + Runtime::kWasmAtomicNotify, centry, context, instance, address_heap, count_heap)); ReturnRaw(SmiToInt32(result_smi)); } @@ -149,23 +150,24 @@ TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) { TNode<Code> centry = LoadCEntryFromInstance(instance); TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber); + TNode<Object> context = LoadContextFromInstance(instance); // TODO(aseemgarg): Use SMIs if possible for address and expected_value TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, 
NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address)); TNode<HeapNumber> expected_value_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(expected_value_heap, ChangeInt32ToFloat64(expected_value)); TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(timeout_heap, timeout); TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry( - Runtime::kWasmI32AtomicWait, centry, NoContextConstant(), instance, + Runtime::kWasmI32AtomicWait, centry, context, instance, address_heap, expected_value_heap, timeout_heap)); ReturnRaw(SmiToInt32(result_smi)); } @@ -184,28 +186,29 @@ TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) { TNode<Code> centry = LoadCEntryFromInstance(instance); TNode<Code> target = LoadBuiltinFromFrame(Builtins::kAllocateHeapNumber); + TNode<Object> context = LoadContextFromInstance(instance); // TODO(aseemgarg): Use SMIs if possible for address and expected_value TNode<HeapNumber> address_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(address_heap, ChangeUint32ToFloat64(address)); TNode<HeapNumber> expected_value_high_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(expected_value_high_heap, ChangeUint32ToFloat64(expected_value_high)); TNode<HeapNumber> expected_value_low_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(expected_value_low_heap, ChangeUint32ToFloat64(expected_value_low)); TNode<HeapNumber> timeout_heap = UncheckedCast<HeapNumber>( - CallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant())); + CallStub(AllocateHeapNumberDescriptor(), target, context)); StoreHeapNumberValue(timeout_heap, timeout); TNode<Smi> result_smi = UncheckedCast<Smi>(CallRuntimeWithCEntry( - Runtime::kWasmI64AtomicWait, centry, NoContextConstant(), instance, + Runtime::kWasmI64AtomicWait, centry, context, instance, address_heap, expected_value_high_heap, expected_value_low_heap, timeout_heap)); ReturnRaw(SmiToInt32(result_smi)); diff --git a/chromium/v8/src/builtins/builtins.cc b/chromium/v8/src/builtins/builtins.cc index e5829dd1b34..e0750a732c6 100644 --- a/chromium/v8/src/builtins/builtins.cc +++ b/chromium/v8/src/builtins/builtins.cc @@ -88,14 +88,16 @@ const BuiltinMetadata builtin_metadata[] = {BUILTIN_LIST( } // namespace BailoutId Builtins::GetContinuationBailoutId(Name name) { - DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC); + DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC || + Builtins::KindOf(name) == TFS); return BailoutId(BailoutId::kFirstBuiltinContinuationId + name); } Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) { int builtin_index = id.ToInt() - BailoutId::kFirstBuiltinContinuationId; DCHECK(Builtins::KindOf(builtin_index) == TFJ || - Builtins::KindOf(builtin_index) == TFC); 
+ Builtins::KindOf(builtin_index) == TFC || + Builtins::KindOf(builtin_index) == TFS); return static_cast<Name>(builtin_index); } @@ -204,7 +206,7 @@ void Builtins::PrintBuiltinCode() { CStrVector(FLAG_print_builtin_code_filter))) { CodeTracer::Scope trace_scope(isolate_->GetCodeTracer()); OFStream os(trace_scope.file()); - code->Disassemble(builtin_name, os); + code->Disassemble(builtin_name, os, isolate_); os << "\n"; } } diff --git a/chromium/v8/src/builtins/frames.tq b/chromium/v8/src/builtins/frames.tq index 7467381690e..79f2a0ae010 100644 --- a/chromium/v8/src/builtins/frames.tq +++ b/chromium/v8/src/builtins/frames.tq @@ -24,8 +24,8 @@ Cast<FrameType>(o: Object): FrameType labels CastError { if (TaggedIsNotSmi(o)) goto CastError; assert( - (Convert<uintptr>(BitcastTaggedToWord(o)) >>> kSmiTagSize) < - kFrameTypeCount); + Convert<int32>(BitcastTaggedToWordForTagAndSmiBits(o)) < + Convert<int32>(kFrameTypeCount << kSmiTagSize)); return %RawDownCast<FrameType>(o); } diff --git a/chromium/v8/src/builtins/growable-fixed-array-gen.h b/chromium/v8/src/builtins/growable-fixed-array-gen.h index 42f2afb281d..8f72429a97e 100644 --- a/chromium/v8/src/builtins/growable-fixed-array-gen.h +++ b/chromium/v8/src/builtins/growable-fixed-array-gen.h @@ -10,8 +10,6 @@ namespace v8 { namespace internal { -template <class T> -using TNode = compiler::TNode<T>; // Utility class implementing a growable fixed array through CSA. class GrowableFixedArray : public CodeStubAssembler { diff --git a/chromium/v8/src/builtins/ia32/builtins-ia32.cc b/chromium/v8/src/builtins/ia32/builtins-ia32.cc index feabac3b66a..0885b6e6337 100644 --- a/chromium/v8/src/builtins/ia32/builtins-ia32.cc +++ b/chromium/v8/src/builtins/ia32/builtins-ia32.cc @@ -5,7 +5,7 @@ #if V8_TARGET_ARCH_IA32 #include "src/api/api-arguments.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/code-factory.h" #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" @@ -785,103 +785,75 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register scratch) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry) { // ----------- S t a t e ------------- // -- edx : new target (preserved for callee if needed, and caller) // -- edi : target function (preserved for callee if needed, and caller) - // -- ecx : feedback vector (also used as scratch, value is not preserved) // ----------------------------------- - DCHECK(!AreAliased(edx, edi, scratch)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(edx, edi, optimized_code_entry)); Register closure = edi; - // Scratch contains feedback_vector. - Register feedback_vector = scratch; - // Load the optimized code from the feedback vector and re-use the register. - Register optimized_code_entry = scratch; - __ mov(optimized_code_entry, - FieldOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); + __ push(edx); + + // Check if the optimized code is marked for deopt. If it is, bailout to a + // given label. 
+ Label found_deoptimized_code; + __ mov(eax, + FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset), + Immediate(1 << Code::kMarkedForDeoptimizationBit)); + __ j(not_zero, &found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx, + eax); + static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch"); + __ LoadCodeObjectEntry(ecx, optimized_code_entry); + __ pop(edx); + __ jmp(ecx); - { - // Optimized code slot is an optimization marker. - - // Fall through if no optimization trigger. - __ cmp(optimized_code_entry, - Immediate(Smi::FromEnum(OptimizationMarker::kNone))); - __ j(equal, &fallthrough); - - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + __ pop(edx); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ cmp( - optimized_code_entry, - Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); - } - __ jmp(&fallthrough); - } - } +static void MaybeOptimizeCode(MacroAssembler* masm, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- edx : new target (preserved for callee if needed, and caller) + // -- edi : target function (preserved for callee if needed, and caller) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(edx, edi, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, &fallthrough); - - __ push(edx); - - // Check if the optimized code is marked for deopt. If it is, bailout to a - // given label. 
- Label found_deoptimized_code; - __ mov(eax, - FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); - __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset), - Immediate(1 << Code::kMarkedForDeoptimizationBit)); - __ j(not_zero, &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - edx, eax); - static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch"); - __ LoadCodeObjectEntry(ecx, optimized_code_entry); - __ pop(edx); - __ jmp(ecx); - - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - __ pop(edx); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ cmp( + optimization_marker, + Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); + __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); + } } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -912,20 +884,21 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide)); __ cmp(bytecode, Immediate(0x3)); __ j(above, &process_bytecode, Label::kNear); + // The code to load the next bytecode is common to both wide and extra wide. + // We can hoist them up here. inc has to happen before test since it + // modifies the ZF flag. + __ inc(bytecode_offset); __ test(bytecode, Immediate(0x1)); + __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); __ j(not_equal, &extra_wide, Label::kNear); // Load the next bytecode and update table to the wide scaled table. - __ inc(bytecode_offset); - __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); __ add(bytecode_size_table, Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount)); __ jmp(&process_bytecode, Label::kNear); __ bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. - __ inc(bytecode_offset); - __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); + // Update table to the extra wide scaled table. __ add(bytecode_size_table, Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); @@ -982,9 +955,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE); __ j(not_equal, &push_stack_frame); - // Read off the optimized code slot in the closure's feedback vector, and if - // there is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, ecx); + // Read off the optimized code slot in the feedback vector. + // Load the optimized code from the feedback vector and re-use the register. + Register optimized_code_entry = ecx; + __ mov(optimized_code_entry, + FieldOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + + // Check if the optimized code slot is not empty. 
+ Label optimized_code_slot_not_empty; + __ cmp(optimized_code_entry, + Immediate(Smi::FromEnum(OptimizationMarker::kNone))); + __ j(not_equal, &optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(&not_optimized); // Load the feedback vector and increment the invocation count. __ mov(feedback_vector, @@ -1035,6 +1020,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag))); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. Register frame_size = ecx; @@ -1042,22 +1028,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ mov(eax, esp); __ sub(eax, frame_size); __ CompareRealStackLimit(eax); - __ j(above_equal, &ok); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ j(below, &stack_overflow); // If ok, push undefined as the initial value for all register file entries. Label loop_header; Label loop_check; - __ Move(eax, masm->isolate()->factory()->undefined_value()); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ jmp(&loop_check); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. - __ push(eax); + __ push(kInterpreterAccumulatorRegister); // Continue loop if not done. __ bind(&loop_check); __ sub(frame_size, Immediate(kSystemPointerSize)); @@ -1067,12 +1050,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If the bytecode array has a valid incoming new target or generator object // register, initialize it with incoming value which was passed in edx. Label no_incoming_new_target_or_generator_register; - __ mov(eax, FieldOperand( + __ mov(ecx, FieldOperand( kInterpreterBytecodeArrayRegister, BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); - __ test(eax, eax); + __ test(ecx, ecx); __ j(zero, &no_incoming_new_target_or_generator_register); - __ mov(Operand(ebp, eax, times_system_pointer_size, 0), edx); + __ mov(Operand(ebp, ecx, times_system_pointer_size, 0), edx); __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator and bytecode offset into registers. @@ -1117,8 +1100,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LeaveInterpreterFrame(masm, edx, ecx); __ ret(0); + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code as opposed to an optimization marker. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, optimized_code_entry); + // Fall through if there's no runnable optimized code. + __ jmp(&not_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, &not_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry); + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); __ int3(); // Should not return.
} @@ -2601,14 +2602,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(eax); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } diff --git a/chromium/v8/src/builtins/internal-coverage.tq b/chromium/v8/src/builtins/internal-coverage.tq index 41ec0c36e42..ebedbdce75b 100644 --- a/chromium/v8/src/builtins/internal-coverage.tq +++ b/chromium/v8/src/builtins/internal-coverage.tq @@ -28,8 +28,6 @@ namespace internal_coverage { return UnsafeCast<CoverageInfo>(debugInfo.coverage_info); } - @export // Silence unused warning on release builds. SlotCount is only used - // in an assert. TODO(szuend): Remove once macros and asserts work. macro SlotCount(coverageInfo: CoverageInfo): Smi { assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below. assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask)); diff --git a/chromium/v8/src/builtins/iterator.tq b/chromium/v8/src/builtins/iterator.tq index 06e8ea539c0..e662e4e75e5 100644 --- a/chromium/v8/src/builtins/iterator.tq +++ b/chromium/v8/src/builtins/iterator.tq @@ -37,22 +37,45 @@ namespace iterator { extern macro IteratorBuiltinsAssembler::IterableToList( implicit context: Context)(JSAny, JSAny): JSArray; + extern macro IteratorBuiltinsAssembler::StringListFromIterable( + implicit context: Context)(JSAny): JSArray; + extern builtin IterableToListMayPreserveHoles(implicit context: Context)(JSAny, JSAny); extern builtin IterableToListWithSymbolLookup(implicit context: Context)(JSAny); transitioning builtin GetIteratorWithFeedback( - context: Context, receiver: JSAny, feedbackSlot: Smi, + context: Context, receiver: JSAny, loadSlot: Smi, callSlot: Smi, feedback: Undefined | FeedbackVector): JSAny { + let iteratorMethod: JSAny; typeswitch (feedback) { case (Undefined): { - return GetProperty(receiver, IteratorSymbolConstant()); + iteratorMethod = GetProperty(receiver, IteratorSymbolConstant()); } case (feedback: FeedbackVector): { - return LoadIC( - context, receiver, IteratorSymbolConstant(), feedbackSlot, - feedback); + iteratorMethod = LoadIC( + context, receiver, IteratorSymbolConstant(), loadSlot, feedback); + } + } + return CallIteratorWithFeedback( + context, receiver, iteratorMethod, callSlot, feedback); + } + + transitioning builtin CallIteratorWithFeedback( + context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi, + feedback: Undefined | FeedbackVector): JSAny { + const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot)); + CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged); + const iteratorCallable: Callable = Cast<Callable>(iteratorMethod) + otherwise ThrowCalledNonCallable(iteratorMethod); + const iterator: JSAny = Call(context, iteratorCallable, receiver); + typeswitch (iterator) { + case (JSReceiver): { + return iterator; + } + case (JSPrimitive): { + ThrowSymbolIteratorInvalid(); } } } diff --git a/chromium/v8/src/builtins/mips/builtins-mips.cc b/chromium/v8/src/builtins/mips/builtins-mips.cc index d3237a1c381..ecfb224fb27 100644 --- a/chromium/v8/src/builtins/mips/builtins-mips.cc +++ b/chromium/v8/src/builtins/mips/builtins-mips.cc @@ -1085,18 +1085,16 @@ void 
Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, t0); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ Subu(t1, sp, Operand(t0)); LoadRealStackLimit(masm, a2); - __ Branch(&ok, hs, t1, Operand(a2)); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ Branch(&stack_overflow, lo, t1, Operand(a2)); // If ok, push undefined as the initial value for all register file entries. Label loop_header; @@ -1169,6 +1167,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); // Unreachable code. __ break_(0xCC); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); } static void Generate_InterpreterPushArgs(MacroAssembler* masm, @@ -1525,14 +1528,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(a0); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2131,7 +2128,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- a1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(a1, &non_callable); __ bind(&non_smi); __ GetObjectType(a1, t1, t2); @@ -2146,12 +2143,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ Branch(&non_callable, eq, t1, Operand(zero_reg)); // Check if target is a proxy and call CallProxy external builtin - __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE)); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), + RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver with the (original) target. __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2); __ sw(a1, MemOperand(kScratchReg)); diff --git a/chromium/v8/src/builtins/mips64/builtins-mips64.cc b/chromium/v8/src/builtins/mips64/builtins-mips64.cc index 7cb66470a34..47dbc340020 100644 --- a/chromium/v8/src/builtins/mips64/builtins-mips64.cc +++ b/chromium/v8/src/builtins/mips64/builtins-mips64.cc @@ -1103,18 +1103,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, a4); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size (word) from the BytecodeArray object. __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. 
- Label ok; __ Dsubu(a5, sp, Operand(a4)); LoadRealStackLimit(masm, a2); - __ Branch(&ok, hs, a5, Operand(a2)); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ Branch(&stack_overflow, lo, a5, Operand(a2)); // If ok, push undefined as the initial value for all register file entries. Label loop_header; @@ -1188,6 +1186,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); // Unreachable code. __ break_(0xCC); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); } static void Generate_InterpreterPushArgs(MacroAssembler* masm, @@ -1542,14 +1545,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(a0); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2170,7 +2167,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- a1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(a1, &non_callable); __ bind(&non_smi); __ GetObjectType(a1, t1, t2); @@ -2184,12 +2181,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ And(t1, t1, Operand(Map::IsCallableBit::kMask)); __ Branch(&non_callable, eq, t1, Operand(zero_reg)); - __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE)); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), + RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver with the (original) target. __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2); __ Sd(a1, MemOperand(kScratchReg)); diff --git a/chromium/v8/src/builtins/ppc/builtins-ppc.cc b/chromium/v8/src/builtins/ppc/builtins-ppc.cc index 485b7933952..ab0c7900d59 100644 --- a/chromium/v8/src/builtins/ppc/builtins-ppc.cc +++ b/chromium/v8/src/builtins/ppc/builtins-ppc.cc @@ -863,9 +863,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); } -static void ReplaceClosureCodeWithOptimizedCode( - MacroAssembler* masm, Register optimized_code, Register closure, - Register scratch1, Register scratch2, Register scratch3) { +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { // Store code entry in the closure. 
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset), r0); @@ -902,100 +904,73 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, Register scratch2, - Register scratch3) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch) { // ----------- S t a t e ------------- // -- r6 : new target (preserved for callee if needed, and caller) // -- r4 : target function (preserved for callee if needed, and caller) - // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch)); Register closure = r4; - Register optimized_code_entry = scratch1; - - __ LoadP( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. - __ CmpSmiLiteral(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kNone), r0); - __ beq(&fallthrough); - - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ CmpSmiLiteral( - optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ b(&fallthrough); - } - } + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadP(scratch, FieldMemOperand(optimized_code_entry, + Code::kCodeDataContainerOffset)); + __ LoadWordArith( + scratch, + FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0); + __ bne(&found_deoptimized_code, cr0); + + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, + scratch, r8); + static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); + __ LoadCodeObjectEntry(r5, optimized_code_entry); + __ Jump(r5); - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); - - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. 
- Label found_deoptimized_code; - __ LoadP(scratch2, FieldMemOperand(optimized_code_entry, - Code::kCodeDataContainerOffset)); - __ LoadWordArith( - scratch2, - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0); - __ bne(&found_deoptimized_code, cr0); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - scratch2, scratch3, feedback_vector); - static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); - __ LoadCodeObjectEntry(r5, optimized_code_entry); - __ Jump(r5); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- r6 : new target (preserved for callee if needed, and caller) + // -- r4 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ CmpSmiLiteral(optimization_marker, + Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), + r0); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1104,9 +1079,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE)); __ bne(&push_stack_frame); - // Read off the optimized code slot in the feedback vector, and if there - // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8); + Register optimized_code_entry = r7; + + // Read off the optimized code slot in the feedback vector. 
+ __ LoadP(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + // Check if the optimized code slot is not empty. + Label optimized_code_slot_not_empty; + __ CmpSmiLiteral(optimized_code_entry, + Smi::FromEnum(OptimizationMarker::kNone), r0); + __ bne(&optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(&not_optimized); // Increment invocation count for the function. __ LoadWord( @@ -1149,29 +1135,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, r3); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size (word) from the BytecodeArray object. __ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ sub(r8, sp, r5); LoadRealStackLimit(masm, r0); __ cmpl(r8, r0); - __ bge(&ok); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ blt(&stack_overflow); // If ok, push undefined as the initial value for all register file entries. // TODO(rmcilroy): Consider doing more than one push per loop iteration. Label loop, no_args; - __ LoadRoot(r8, RootIndex::kUndefinedValue); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC); __ beq(&no_args, cr0); __ mtctr(r5); __ bind(&loop); - __ push(r8); + __ push(kInterpreterAccumulatorRegister); __ bdnz(&loop); __ bind(&no_args); } @@ -1189,8 +1173,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ StorePX(r6, MemOperand(fp, r8)); __ bind(&no_incoming_new_target_or_generator_register); - // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + // The accumulator is already loaded with undefined. + // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. Label do_dispatch; @@ -1231,8 +1215,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LeaveInterpreterFrame(masm, r5); __ blr(); + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); + // Fall through if there's no runnable optimized code. + __ jmp(&not_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9); + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); __ bkpt(0); // Should not return. } @@ -1596,14 +1598,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame.
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset)); - { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(r3); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2260,7 +2256,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- r4 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(r4, &non_callable); __ bind(&non_smi); __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE); @@ -2277,12 +2273,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target is a proxy and call CallProxy external builtin __ cmpi(r8, Operand(JS_PROXY_TYPE)); - __ bne(&non_function); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver the (original) target. __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2)); __ StorePX(r4, MemOperand(sp, r8)); diff --git a/chromium/v8/src/builtins/regexp-exec.tq b/chromium/v8/src/builtins/regexp-exec.tq new file mode 100644 index 00000000000..b2ca9de10b5 --- /dev/null +++ b/chromium/v8/src/builtins/regexp-exec.tq @@ -0,0 +1,45 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-regexp-gen.h' + +namespace regexp { + + extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeExecBody( + implicit context: Context)(JSReceiver, String, constexpr bool): JSAny; + + transitioning macro RegExpPrototypeExecBodyFast(implicit context: Context)( + receiver: JSReceiver, string: String): JSAny { + return RegExpPrototypeExecBody(receiver, string, true); + } + + transitioning macro RegExpPrototypeExecBodySlow(implicit context: Context)( + receiver: JSReceiver, string: String): JSAny { + return RegExpPrototypeExecBody(receiver, string, false); + } + + // Slow path stub for RegExpPrototypeExec to decrease code size. + transitioning builtin + RegExpPrototypeExecSlow(implicit context: Context)( + regexp: JSRegExp, string: String): JSAny { + return RegExpPrototypeExecBodySlow(regexp, string); + } + + extern macro RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( + implicit context: Context)(Object): bool; + + // ES#sec-regexp.prototype.exec + // RegExp.prototype.exec ( string ) + transitioning javascript builtin RegExpPrototypeExec( + js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny { + // Ensure {receiver} is a JSRegExp. + const receiver = Cast<JSRegExp>(receiver) otherwise ThrowTypeError( + kIncompatibleMethodReceiver, 'RegExp.prototype.exec', receiver); + const string = ToString_Inline(context, string); + + return IsFastRegExpNoPrototype(receiver) ? 
+ RegExpPrototypeExecBodyFast(receiver, string) : + RegExpPrototypeExecSlow(receiver, string); + } +} diff --git a/chromium/v8/src/builtins/regexp-match-all.tq b/chromium/v8/src/builtins/regexp-match-all.tq new file mode 100644 index 00000000000..1be6e69afce --- /dev/null +++ b/chromium/v8/src/builtins/regexp-match-all.tq @@ -0,0 +1,258 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-regexp-gen.h' + +namespace regexp { + + extern transitioning macro RegExpBuiltinsAssembler::RegExpCreate( + implicit context: Context)(Context, Object, String): Object; + + extern transitioning macro + RegExpMatchAllAssembler::CreateRegExpStringIterator( + NativeContext, Object, String, bool, bool): JSAny; + + @export + transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)( + nativeContext: NativeContext, receiver: JSAny, string: JSAny): JSAny { + // 1. Let R be the this value. + // 2. If Type(R) is not Object, throw a TypeError exception. + ThrowIfNotJSReceiver( + receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@matchAll'); + const receiver = UnsafeCast<JSReceiver>(receiver); + + // 3. Let S be ? ToString(O). + const string: String = ToString_Inline(context, string); + + let matcher: Object; + let global: bool; + let unicode: bool; + + // 'FastJSRegExp' uses the strict fast path check because following code + // uses the flags property. + // TODO(jgruber): Handle slow flag accesses on the fast path and make this + // permissive. + typeswitch (receiver) { + case (fastRegExp: FastJSRegExp): { + const source = fastRegExp.source; + + // 4. Let C be ? SpeciesConstructor(R, %RegExp%). + // 5. Let flags be ? ToString(? Get(R, "flags")). + // 6. Let matcher be ? Construct(C, « R, flags »). + const flags: String = FastFlagsGetter(fastRegExp); + matcher = RegExpCreate(nativeContext, source, flags); + const matcherRegExp = UnsafeCast<JSRegExp>(matcher); + assert(IsFastRegExpPermissive(matcherRegExp)); + + // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). + // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). + const fastRegExp = UnsafeCast<FastJSRegExp>(receiver); + FastStoreLastIndex(matcherRegExp, fastRegExp.lastIndex); + + // 9. If flags contains "g", let global be true. + // 10. Else, let global be false. + global = FastFlagGetter(matcherRegExp, kGlobal); + + // 11. If flags contains "u", let fullUnicode be true. + // 12. Else, let fullUnicode be false. + unicode = FastFlagGetter(matcherRegExp, kUnicode); + } + case (Object): { + // 4. Let C be ? SpeciesConstructor(R, %RegExp%). + const regexpFun = + UnsafeCast<JSFunction>(nativeContext[REGEXP_FUNCTION_INDEX]); + const speciesConstructor = + UnsafeCast<Constructor>(SpeciesConstructor(receiver, regexpFun)); + + // 5. Let flags be ? ToString(? Get(R, "flags")). + const flags = GetProperty(receiver, 'flags'); + const flagsString = ToString_Inline(context, flags); + + // 6. Let matcher be ? Construct(C, « R, flags »). + matcher = Construct(speciesConstructor, receiver, flagsString); + + // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). + const lastIndex: Number = + ToLength_Inline(context, SlowLoadLastIndex(receiver)); + + // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). + SlowStoreLastIndex(UnsafeCast<JSReceiver>(matcher), lastIndex); + + // 9. If flags contains "g", let global be true. + // 10. Else, let global be false. 
+ const globalCharString: String = StringConstant('g'); + const globalIndex: Smi = + StringIndexOf(flagsString, globalCharString, 0); + global = globalIndex != -1; + + // 11. If flags contains "u", let fullUnicode be true. + // 12. Else, let fullUnicode be false. + const unicodeCharString = StringConstant('u'); + const unicodeIndex: Smi = + StringIndexOf(flagsString, unicodeCharString, 0); + unicode = unicodeIndex != -1; + } + } + + // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode). + return CreateRegExpStringIterator( + nativeContext, matcher, string, global, unicode); + } + + // https://tc39.github.io/proposal-string-matchall/ + // RegExp.prototype [ @@matchAll ] ( string ) + transitioning javascript builtin RegExpPrototypeMatchAll( + js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny { + const nativeContext: NativeContext = LoadNativeContext(context); + return RegExpPrototypeMatchAllImpl(nativeContext, receiver, string); + } + + const kJSRegExpStringIteratorDone: + constexpr int31 generates '1 << JSRegExpStringIterator::kDoneBit'; + const kJSRegExpStringIteratorGlobal: constexpr int31 + generates '1 << JSRegExpStringIterator::kGlobalBit'; + const kJSRegExpStringIteratorUnicode: constexpr int31 + generates '1 << JSRegExpStringIterator::kUnicodeBit'; + + extern macro IsSetSmi(Smi, constexpr int31): bool; + + macro HasDoneFlag(flags: Smi): bool { + return IsSetSmi(flags, kJSRegExpStringIteratorDone); + } + + macro HasGlobalFlag(flags: Smi): bool { + return IsSetSmi(flags, kJSRegExpStringIteratorGlobal); + } + + macro HasUnicodeFlag(flags: Smi): bool { + return IsSetSmi(flags, kJSRegExpStringIteratorUnicode); + } + + macro SetDoneFlag(iterator: JSRegExpStringIterator, flags: Smi) { + const newFlags: Smi = flags | kJSRegExpStringIteratorDone; + iterator.flags = newFlags; + } + + extern macro RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( + implicit context: Context)(JSReceiver, RegExpMatchInfo, String): + JSRegExpResult; + + // https://tc39.github.io/proposal-string-matchall/ + // %RegExpStringIteratorPrototype%.next ( ) + transitioning javascript builtin RegExpStringIteratorPrototypeNext( + js-implicit context: Context, receiver: JSAny)(): JSAny { + // 1. Let O be the this value. + // 2. If Type(O) is not Object, throw a TypeError exception. + // 3. If O does not have all of the internal slots of a RegExp String + // Iterator Object Instance (see 5.3), throw a TypeError exception. + const methodName: constexpr string = + '%RegExpStringIterator%.prototype.next'; + const receiver = Cast<JSRegExpStringIterator>(receiver) otherwise + ThrowTypeError(kIncompatibleMethodReceiver, methodName, receiver); + + try { + // 4. If O.[[Done]] is true, then + // a. Return ! CreateIterResultObject(undefined, true). + const flags: Smi = receiver.flags; + if (HasDoneFlag(flags)) goto ReturnEmptyDoneResult; + + // 5. Let R be O.[[iteratingRegExp]]. + const iteratingRegExp: JSReceiver = receiver.iterating_reg_exp; + + // 6. Let S be O.[[IteratedString]]. + const iteratingString: String = receiver.iterated_string; + + // 7. Let global be O.[[Global]]. + // 8. Let fullUnicode be O.[[Unicode]]. + // 9. Let match be ? RegExpExec(R, S). 
+ let match: Object; + let isFastRegExp: bool = false; + try { + if (IsFastRegExpPermissive(iteratingRegExp)) { + const matchIndices: RegExpMatchInfo = + RegExpPrototypeExecBodyWithoutResultFast( + UnsafeCast<JSRegExp>(iteratingRegExp), iteratingString) + otherwise IfNoMatch; + match = ConstructNewResultFromMatchInfo( + iteratingRegExp, matchIndices, iteratingString); + isFastRegExp = true; + } else { + match = RegExpExec(iteratingRegExp, iteratingString); + if (match == Null) { + goto IfNoMatch; + } + } + // 11. Else, + // b. Else, handle non-global case first. + if (!HasGlobalFlag(flags)) { + // i. Set O.[[Done]] to true. + SetDoneFlag(receiver, flags); + + // ii. Return ! CreateIterResultObject(match, false). + return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False); + } + // a. If global is true, + assert(HasGlobalFlag(flags)); + if (isFastRegExp) { + // i. Let matchStr be ? ToString(? Get(match, "0")). + const match = UnsafeCast<FastJSRegExpResult>(match); + const resultFixedArray = UnsafeCast<FixedArray>(match.elements); + const matchStr = UnsafeCast<String>(resultFixedArray.objects[0]); + + // When iterating_regexp is fast, we assume it stays fast even after + // accessing the first match from the RegExp result. + assert(IsFastRegExpPermissive(iteratingRegExp)); + const iteratingRegExp = UnsafeCast<JSRegExp>(iteratingRegExp); + if (matchStr == kEmptyString) { + // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). + const thisIndex: Smi = FastLoadLastIndex(iteratingRegExp); + + // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, + // fullUnicode). + const nextIndex: Smi = AdvanceStringIndexFast( + iteratingString, thisIndex, HasUnicodeFlag(flags)); + + // 3. Perform ? Set(R, "lastIndex", nextIndex, true). + FastStoreLastIndex(iteratingRegExp, nextIndex); + } + + // iii. Return ! CreateIterResultObject(match, false). + return AllocateJSIteratorResult(match, False); + } + assert(!isFastRegExp); + // i. Let matchStr be ? ToString(? Get(match, "0")). + const match = UnsafeCast<JSAny>(match); + const matchStr = + ToString_Inline(context, GetProperty(match, SmiConstant(0))); + + if (matchStr == kEmptyString) { + // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). + const lastIndex: JSAny = SlowLoadLastIndex(iteratingRegExp); + const thisIndex: Number = ToLength_Inline(context, lastIndex); + + // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, + // fullUnicode). + const nextIndex: Number = AdvanceStringIndexSlow( + iteratingString, thisIndex, HasUnicodeFlag(flags)); + + // 3. Perform ? Set(R, "lastIndex", nextIndex, true). + SlowStoreLastIndex(iteratingRegExp, nextIndex); + } + // iii. Return ! CreateIterResultObject(match, false). + return AllocateJSIteratorResult(match, False); + } + // 10. If match is null, then + label IfNoMatch { + // a. Set O.[[Done]] to true. + SetDoneFlag(receiver, flags); + + // b. Return ! CreateIterResultObject(undefined, true). 
+ goto ReturnEmptyDoneResult; + } + } + label ReturnEmptyDoneResult { + return AllocateJSIteratorResult(Undefined, True); + } + } +} diff --git a/chromium/v8/src/builtins/regexp-replace.tq b/chromium/v8/src/builtins/regexp-replace.tq index f13724b476c..1333ce97fb9 100644 --- a/chromium/v8/src/builtins/regexp-replace.tq +++ b/chromium/v8/src/builtins/regexp-replace.tq @@ -7,8 +7,6 @@ namespace regexp { extern builtin - StringIndexOf(implicit context: Context)(String, String, Smi): Smi; - extern builtin SubString(implicit context: Context)(String, Smi, Smi): String; extern runtime RegExpExecMultiple(implicit context: Context)( @@ -21,9 +19,6 @@ namespace regexp { StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)( String, JSRegExp, Callable): String; - extern macro - RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi; - transitioning macro RegExpReplaceCallableNoExplicitCaptures(implicit context: Context)( matchesElements: FixedArray, matchesLength: intptr, string: String, diff --git a/chromium/v8/src/builtins/regexp-search.tq b/chromium/v8/src/builtins/regexp-search.tq new file mode 100644 index 00000000000..3c4e57d734e --- /dev/null +++ b/chromium/v8/src/builtins/regexp-search.tq @@ -0,0 +1,105 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-regexp-gen.h' + +namespace regexp { + + transitioning macro + RegExpPrototypeSearchBodyFast(implicit context: Context)( + regexp: JSRegExp, string: String): JSAny { + assert(IsFastRegExpPermissive(regexp)); + + // Grab the initial value of last index. + const previousLastIndex: Smi = FastLoadLastIndex(regexp); + + // Ensure last index is 0. + FastStoreLastIndex(regexp, 0); + + // Call exec. + try { + const matchIndices: RegExpMatchInfo = + RegExpPrototypeExecBodyWithoutResultFast(regexp, string) + otherwise DidNotMatch; + + // Successful match. + // Reset last index. + FastStoreLastIndex(regexp, previousLastIndex); + + // Return the index of the match. + return UnsafeCast<Smi>( + matchIndices.objects[kRegExpMatchInfoFirstCaptureIndex]); + } + label DidNotMatch { + // Reset last index and return -1. + FastStoreLastIndex(regexp, previousLastIndex); + return SmiConstant(-1); + } + } + + extern macro RegExpBuiltinsAssembler::BranchIfFastRegExpResult( + implicit context: Context)(Object): never labels IsUnmodified, + IsModified; + + macro + IsFastRegExpResult(implicit context: Context)(execResult: HeapObject): bool { + BranchIfFastRegExpResult(execResult) otherwise return true, return false; + } + + transitioning macro RegExpPrototypeSearchBodySlow(implicit context: Context)( + regexp: JSReceiver, string: String): JSAny { + // Grab the initial value of last index. + const previousLastIndex = SlowLoadLastIndex(regexp); + const smiZero: Smi = 0; + + // Ensure last index is 0. + if (!SameValue(previousLastIndex, smiZero)) { + SlowStoreLastIndex(regexp, smiZero); + } + + // Call exec. + const execResult = RegExpExec(regexp, string); + + // Reset last index if necessary. + const currentLastIndex = SlowLoadLastIndex(regexp); + if (!SameValue(currentLastIndex, previousLastIndex)) { + SlowStoreLastIndex(regexp, previousLastIndex); + } + + // Return -1 if no match was found. + if (execResult == Null) { + return SmiConstant(-1); + } + + // Return the index of the match. 
+ const fastExecResult = Cast<FastJSRegExpResult>(execResult) + otherwise return GetProperty(execResult, 'index'); + return fastExecResult.index; + } + + // Helper that skips a few initial checks. and assumes... + // 1) receiver is a "fast permissive" RegExp + // 2) pattern is a string + transitioning builtin RegExpSearchFast(implicit context: Context)( + receiver: JSRegExp, string: String): JSAny { + return RegExpPrototypeSearchBodyFast(receiver, string); + } + + // ES#sec-regexp.prototype-@@search + // RegExp.prototype [ @@search ] ( string ) + transitioning javascript builtin RegExpPrototypeSearch( + js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny { + ThrowIfNotJSReceiver( + receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@search'); + const receiver = UnsafeCast<JSReceiver>(receiver); + const string: String = ToString_Inline(context, string); + + if (IsFastRegExpPermissive(receiver)) { + // TODO(pwong): Could be optimized to remove the overhead of calling the + // builtin (at the cost of a larger builtin). + return RegExpSearchFast(UnsafeCast<JSRegExp>(receiver), string); + } + return RegExpPrototypeSearchBodySlow(receiver, string); + } +} diff --git a/chromium/v8/src/builtins/regexp-source.tq b/chromium/v8/src/builtins/regexp-source.tq index c1ce1c5e9a6..266c9e7472f 100644 --- a/chromium/v8/src/builtins/regexp-source.tq +++ b/chromium/v8/src/builtins/regexp-source.tq @@ -6,9 +6,6 @@ namespace regexp { - const kRegExpPrototypeSourceGetter: constexpr int31 - generates 'v8::Isolate::kRegExpPrototypeSourceGetter'; - // ES6 21.2.5.10. // ES #sec-get-regexp.prototype.source transitioning javascript builtin RegExpPrototypeSourceGetter( diff --git a/chromium/v8/src/builtins/regexp-split.tq b/chromium/v8/src/builtins/regexp-split.tq new file mode 100644 index 00000000000..8a9a30a7e90 --- /dev/null +++ b/chromium/v8/src/builtins/regexp-split.tq @@ -0,0 +1,72 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-regexp-gen.h' + +namespace runtime { + extern transitioning runtime + RegExpSplit(implicit context: Context)(JSReceiver, String, Object): JSAny; +} // namespace runtime + +namespace regexp { + + const kMaxValueSmi: constexpr int31 + generates 'Smi::kMaxValue'; + + extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( + implicit context: Context)(JSRegExp, String, Smi): JSArray; + + // Helper that skips a few initial checks. + transitioning builtin + RegExpSplit(implicit context: Context)( + regexp: FastJSRegExp, string: String, limit: JSAny): JSAny { + let sanitizedLimit: Smi; + + // We need to be extra-strict and require the given limit to be either + // undefined or a positive smi. We can't call ToUint32(maybe_limit) since + // that might move us onto the slow path, resulting in ordering spec + // violations (see https://crbug.com/801171). + + if (limit == Undefined) { + // TODO(jgruber): In this case, we can probably avoid generation of limit + // checks in Generate_RegExpPrototypeSplitBody. 
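+ // A hedged JS-level illustration of the hazard described above: ToUint32
+ // can invoke user code, e.g.
+ //   "abc".split(/b/, { valueOf() { return 2; } });
+ // so the fast path only accepts undefined or a positive Smi here and
+ // defers anything else to the runtime, which converts the limit in the
+ // spec-mandated order.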
+ sanitizedLimit = SmiConstant(kMaxValueSmi); + } else if (!TaggedIsPositiveSmi(limit)) { + return runtime::RegExpSplit(regexp, string, limit); + } else { + sanitizedLimit = UnsafeCast<Smi>(limit); + } + + // Due to specific shortcuts we take on the fast path (specifically, we + // don't allocate a new regexp instance as specced), we need to ensure that + // the given regexp is non-sticky to avoid invalid results. See + // crbug.com/v8/6706. + + if (FastFlagGetter(regexp, kSticky)) { + return runtime::RegExpSplit(regexp, string, sanitizedLimit); + } + + // We're good to go on the fast path, which is inlined here. + return RegExpPrototypeSplitBody(regexp, string, sanitizedLimit); + } + + // ES#sec-regexp.prototype-@@split + // RegExp.prototype [ @@split ] ( string, limit ) + transitioning javascript builtin RegExpPrototypeSplit( + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { + ThrowIfNotJSReceiver( + receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@split'); + const receiver = UnsafeCast<JSReceiver>(receiver); + const string: String = ToString_Inline(context, arguments[0]); + const limit = arguments[1]; + + // Strict: Reads the flags property. + // TODO(jgruber): Handle slow flag accesses on the fast path and make this + // permissive. + const fastRegExp = Cast<FastJSRegExp>(receiver) + otherwise return runtime::RegExpSplit(receiver, string, limit); + return RegExpSplit(fastRegExp, string, limit); + } + +} diff --git a/chromium/v8/src/builtins/regexp-test.tq b/chromium/v8/src/builtins/regexp-test.tq index 938dfa51f39..f2ebb7c2597 100644 --- a/chromium/v8/src/builtins/regexp-test.tq +++ b/chromium/v8/src/builtins/regexp-test.tq @@ -20,7 +20,7 @@ namespace regexp { otherwise return False; return True; } - const matchIndices = RegExpExec(context, receiver, str); + const matchIndices = RegExpExec(receiver, str); return SelectBooleanConstant(matchIndices != Null); } diff --git a/chromium/v8/src/builtins/regexp.tq b/chromium/v8/src/builtins/regexp.tq index 7352d2738fa..e48e7c584de 100644 --- a/chromium/v8/src/builtins/regexp.tq +++ b/chromium/v8/src/builtins/regexp.tq @@ -22,8 +22,34 @@ namespace regexp { BranchIfFastRegExp_Permissive(o) otherwise return true, return false; } - extern macro RegExpBuiltinsAssembler::RegExpExec(Context, Object, Object): - Object; + const kInvalidRegExpExecResult: constexpr MessageTemplate + generates 'MessageTemplate::kInvalidRegExpExecResult'; + + // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S ) + @export + transitioning macro RegExpExec(implicit context: Context)( + receiver: JSReceiver, string: String): JSAny { + // Take the slow path of fetching the exec property, calling it, and + // verifying its return value. + + const exec = GetProperty(receiver, 'exec'); + + // Is {exec} callable? 
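+ // As a rough sketch (illustrative only), the typeswitch below implements
+ // approximately the following JS semantics, with `R` the receiver and `S`
+ // the subject string:
+ //   const exec = R.exec;
+ //   if (typeof exec === 'function') {
+ //     const result = exec.call(R, S);
+ //     if (result !== null && Object(result) !== result) throw new TypeError();
+ //     return result;
+ //   }
+ //   // Otherwise R must itself be a RegExp; fall back to the built-in exec.
+ //   return RegExp.prototype.exec.call(R, S);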
+ typeswitch (exec) { + case (execCallable: Callable): { + const result = Call(context, execCallable, receiver, string); + if (result != Null) { + ThrowIfNotJSReceiver(result, kInvalidRegExpExecResult, ''); + } + return result; + } + case (Object): { + const regexp = Cast<JSRegExp>(receiver) otherwise ThrowTypeError( + kIncompatibleMethodReceiver, 'RegExp.prototype.exec', receiver); + return RegExpPrototypeExecSlow(regexp, string); + } + } + } extern macro RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast( @@ -161,4 +187,59 @@ namespace regexp { otherwise return SlowFlagsGetter(receiver); return FastFlagsGetter(fastRegexp); } + + extern transitioning macro RegExpBuiltinsAssembler::SlowLoadLastIndex( + implicit context: Context)(JSAny): JSAny; + extern transitioning macro RegExpBuiltinsAssembler::SlowStoreLastIndex( + implicit context: Context)(JSAny, JSAny): void; + + extern macro RegExpBuiltinsAssembler::FastLoadLastIndex(JSRegExp): Smi; + extern macro RegExpBuiltinsAssembler::FastStoreLastIndex(JSRegExp, Smi): void; + + extern builtin + StringIndexOf(implicit context: Context)(String, String, Smi): Smi; + + extern macro + RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi; + extern macro + RegExpBuiltinsAssembler::AdvanceStringIndexSlow(String, Number, bool): Smi; + + type UseCounterFeature extends int31 + constexpr 'v8::Isolate::UseCounterFeature'; + const kRegExpMatchIsTrueishOnNonJSRegExp: constexpr UseCounterFeature + generates 'v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp'; + const kRegExpMatchIsFalseishOnJSRegExp: constexpr UseCounterFeature + generates 'v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp'; + const kRegExpPrototypeSourceGetter: constexpr UseCounterFeature + generates 'v8::Isolate::kRegExpPrototypeSourceGetter'; + + // ES#sec-isregexp IsRegExp ( argument ) + @export + transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool { + const receiver = Cast<JSReceiver>(obj) otherwise return false; + + // Check @match. + const value = GetProperty(receiver, MatchSymbolConstant()); + if (value == Undefined) { + return Is<JSRegExp>(receiver); + } + + assert(value != Undefined); + // The common path. Symbol.match exists, equals the RegExpPrototypeMatch + // function (and is thus trueish), and the receiver is a JSRegExp. + if (ToBoolean(value)) { + if (!Is<JSRegExp>(receiver)) { + IncrementUseCounter( + context, SmiConstant(kRegExpMatchIsTrueishOnNonJSRegExp)); + } + return true; + } + + assert(!ToBoolean(value)); + if (Is<JSRegExp>(receiver)) { + IncrementUseCounter( + context, SmiConstant(kRegExpMatchIsFalseishOnJSRegExp)); + } + return false; + } } diff --git a/chromium/v8/src/builtins/s390/builtins-s390.cc b/chromium/v8/src/builtins/s390/builtins-s390.cc index 7dca12d17e4..7fc6b91ba37 100644 --- a/chromium/v8/src/builtins/s390/builtins-s390.cc +++ b/chromium/v8/src/builtins/s390/builtins-s390.cc @@ -103,7 +103,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // here which will cause scratch to become negative. __ SubP(scratch, sp, scratch); // Check if the arguments will overflow the stack. - __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r0, num_args, Operand(kSystemPointerSizeLog2)); __ CmpP(scratch, r0); __ ble(stack_overflow); // Signed comparison. 
} @@ -147,11 +147,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // sp[2]: number of arguments (smi-tagged) Label loop, no_args; __ beq(&no_args); - __ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(scratch, r2, Operand(kSystemPointerSizeLog2)); __ SubP(sp, sp, scratch); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(scratch, MemOperand(scratch, -kPointerSize)); + __ lay(scratch, MemOperand(scratch, -kSystemPointerSize)); __ LoadP(r0, MemOperand(scratch, r6)); __ StoreP(r0, MemOperand(scratch, sp)); __ BranchOnCount(r1, &loop); @@ -177,7 +177,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ SmiToPtrArrayOffset(scratch, scratch); __ AddP(sp, sp, scratch); - __ AddP(sp, sp, Operand(kPointerSize)); + __ AddP(sp, sp, Operand(kSystemPointerSize)); __ Ret(); __ bind(&stack_overflow); @@ -213,11 +213,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(r5); // ----------- S t a t e ------------- - // -- sp[0*kPointerSize]: new target - // -- sp[1*kPointerSize]: padding - // -- r3 and sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments (tagged) - // -- sp[4*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: new target + // -- sp[1*kSystemPointerSize]: padding + // -- r3 and sp[2*kSystemPointerSize]: constructor function + // -- sp[3*kSystemPointerSize]: number of arguments (tagged) + // -- sp[4*kSystemPointerSize]: context // ----------------------------------- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); @@ -239,11 +239,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r2: receiver - // -- Slot 4 / sp[0*kPointerSize]: new target - // -- Slot 3 / sp[1*kPointerSize]: padding - // -- Slot 2 / sp[2*kPointerSize]: constructor function - // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) - // -- Slot 0 / sp[4*kPointerSize]: context + // -- Slot 4 / sp[0*kSystemPointerSize]: new target + // -- Slot 3 / sp[1*kSystemPointerSize]: padding + // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function + // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged) + // -- Slot 0 / sp[4*kSystemPointerSize]: context // ----------------------------------- // Deoptimizer enters here. masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( @@ -259,12 +259,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r5: new target - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: implicit receiver - // -- sp[2*kPointerSize]: padding - // -- sp[3*kPointerSize]: constructor function - // -- sp[4*kPointerSize]: number of arguments (tagged) - // -- sp[5*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: implicit receiver + // -- sp[1*kSystemPointerSize]: implicit receiver + // -- sp[2*kSystemPointerSize]: padding + // -- sp[3*kSystemPointerSize]: constructor function + // -- sp[4*kSystemPointerSize]: number of arguments (tagged) + // -- sp[5*kSystemPointerSize]: context // ----------------------------------- // Restore constructor function and argument count. 
@@ -295,21 +295,21 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // -- r5: new target // -- r6: pointer to last argument // -- cr0: condition indicating whether r2 is zero - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: implicit receiver - // -- sp[2*kPointerSize]: padding - // -- r3 and sp[3*kPointerSize]: constructor function - // -- sp[4*kPointerSize]: number of arguments (tagged) - // -- sp[5*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: implicit receiver + // -- sp[1*kSystemPointerSize]: implicit receiver + // -- sp[2*kSystemPointerSize]: padding + // -- r3 and sp[3*kSystemPointerSize]: constructor function + // -- sp[4*kSystemPointerSize]: number of arguments (tagged) + // -- sp[5*kSystemPointerSize]: context // ----------------------------------- __ ltgr(r2, r2); __ beq(&no_args); - __ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r8, r2, Operand(kSystemPointerSizeLog2)); __ SubP(sp, sp, r8); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(r8, MemOperand(r8, -kPointerSize)); + __ lay(r8, MemOperand(r8, -kSystemPointerSize)); __ LoadP(r0, MemOperand(r8, r6)); __ StoreP(r0, MemOperand(r8, sp)); __ BranchOnCount(r1, &loop); @@ -321,11 +321,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0: constructor result - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: padding - // -- sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments - // -- sp[4*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: implicit receiver + // -- sp[1*kSystemPointerSize]: padding + // -- sp[2*kSystemPointerSize]: constructor function + // -- sp[3*kSystemPointerSize]: number of arguments + // -- sp[4*kSystemPointerSize]: context // ----------------------------------- // Store offset of return address for deoptimizer. 
@@ -376,7 +376,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ SmiToPtrArrayOffset(r3, r3); __ AddP(sp, sp, r3); - __ AddP(sp, sp, Operand(kPointerSize)); + __ AddP(sp, sp, Operand(kSystemPointerSize)); __ Ret(); } @@ -465,16 +465,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { r3, JSGeneratorObject::kParametersAndRegistersOffset)); { Label loop, done_loop; - __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2)); __ SubP(sp, r5); // ip = stack offset // r5 = parameter array offset __ LoadImmP(ip, Operand::Zero()); - __ SubP(r5, Operand(kPointerSize)); + __ SubP(r5, Operand(kSystemPointerSize)); __ blt(&done_loop); - __ lgfi(r1, Operand(-kPointerSize)); + __ lgfi(r1, Operand(-kSystemPointerSize)); __ bind(&loop); @@ -483,7 +483,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ StoreP(r0, MemOperand(sp, ip)); // update offsets - __ lay(ip, MemOperand(ip, kPointerSize)); + __ lay(ip, MemOperand(ip, kSystemPointerSize)); __ BranchRelativeOnIdxHighP(r5, r1, &loop); @@ -550,9 +550,9 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { namespace { constexpr int kPushedStackSpace = - (kNumCalleeSaved + 2) * kPointerSize + - kNumCalleeSavedDoubles * kDoubleSize + 5 * kPointerSize + - EntryFrameConstants::kCallerFPOffset - kPointerSize; + (kNumCalleeSaved + 2) * kSystemPointerSize + + kNumCalleeSavedDoubles * kDoubleSize + 5 * kSystemPointerSize + + EntryFrameConstants::kCallerFPOffset - kSystemPointerSize; // Called with the native C calling convention. The corresponding function // signature is either: @@ -607,9 +607,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // Requires us to save the callee-preserved registers r6-r13 // General convention is to also save r14 (return addr) and // sp/r15 as well in a single STM/STMG - __ lay(sp, MemOperand(sp, -10 * kPointerSize)); + __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize)); __ StoreMultipleP(r6, sp, MemOperand(sp, 0)); - pushed_stack_space += (kNumCalleeSaved + 2) * kPointerSize; + pushed_stack_space += (kNumCalleeSaved + 2) * kSystemPointerSize; // Initialize the root register. // C calling convention. The first argument is passed in r2. @@ -625,8 +625,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // SMI Marker // kCEntryFPAddress // Frame type - __ lay(sp, MemOperand(sp, -5 * kPointerSize)); - pushed_stack_space += 5 * kPointerSize; + __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize)); + pushed_stack_space += 5 * kSystemPointerSize; // Push a bad frame pointer to fail if it is used. __ LoadImmP(r9, Operand(-1)); @@ -637,16 +637,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ Move(r6, ExternalReference::Create( IsolateAddressId::kCEntryFPAddress, masm->isolate())); __ LoadP(r6, MemOperand(r6)); - __ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize)); + __ StoreMultipleP(r6, r9, MemOperand(sp, kSystemPointerSize)); Register scrach = r8; // Set up frame pointer for the frame to be pushed. - // Need to add kPointerSize, because sp has one extra + // Need to add kSystemPointerSize, because sp has one extra // frame already for the frame type being pushed later. 
- __ lay(fp, MemOperand( - sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize)); - pushed_stack_space += EntryFrameConstants::kCallerFPOffset - kPointerSize; + __ lay(fp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + + kSystemPointerSize)); + pushed_stack_space += + EntryFrameConstants::kCallerFPOffset - kSystemPointerSize; // restore r6 __ LoadRR(r6, r1); @@ -736,7 +737,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // Reload callee-saved preserved regs, return address reg (r14) and sp __ LoadMultipleP(r6, sp, MemOperand(sp, 0)); - __ la(sp, MemOperand(sp, 10 * kPointerSize)); + __ la(sp, MemOperand(sp, 10 * kSystemPointerSize)); // saving floating point registers #if V8_TARGET_ARCH_S390X @@ -790,7 +791,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, // here which will cause scratch1 to become negative. __ SubP(scratch1, sp, scratch1); // Check if the arguments will overflow the stack. - __ ShiftLeftP(scratch2, argc, Operand(kPointerSizeLog2)); + __ ShiftLeftP(scratch2, argc, Operand(kSystemPointerSizeLog2)); __ CmpP(scratch1, scratch2); __ bgt(&okay); // Signed comparison. @@ -807,7 +808,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r4: function // r5: receiver // r6: argc - // [fp + kPushedStackSpace + 20 * kPointerSize]: argv + // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv // r0,r2,r7-r9, cp may be clobbered // Enter an internal frame. @@ -831,7 +832,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r3: new.target // r4: function // r6: argc - // [fp + kPushedStackSpace + 20 * kPointerSize]: argv + // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv // r0,r2,r5,r7-r9, cp may be clobbered // Setup new.target, argc and function. @@ -862,15 +863,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r9: scratch reg to hold index into argv Label argLoop, argExit; intptr_t zero = 0; - __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2)); __ SubRR(sp, r7); // Buy the stack frame to fit args __ LoadImmP(r9, Operand(zero)); // Initialize argv index __ bind(&argLoop); __ CmpPH(r7, Operand(zero)); __ beq(&argExit, Label::kNear); - __ lay(r7, MemOperand(r7, -kPointerSize)); + __ lay(r7, MemOperand(r7, -kSystemPointerSize)); __ LoadP(r8, MemOperand(r9, r6)); // read next parameter - __ la(r9, MemOperand(r9, kPointerSize)); // r9++; + __ la(r9, MemOperand(r9, kSystemPointerSize)); // r9++; __ LoadP(r0, MemOperand(r8)); // dereference handle __ StoreP(r0, MemOperand(r7, sp)); // push parameter __ b(&argLoop); @@ -920,9 +921,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); } -static void ReplaceClosureCodeWithOptimizedCode( - MacroAssembler* masm, Register optimized_code, Register closure, - Register scratch1, Register scratch2, Register scratch3) { +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { // Store code entry in the closure. 
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset), r0); @@ -960,100 +963,72 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, Register scratch2, - Register scratch3) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch) { // ----------- S t a t e ------------- // -- r5 : new target (preserved for callee if needed, and caller) // -- r3 : target function (preserved for callee if needed, and caller) - // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, r3, r5, scratch1, scratch2, scratch3)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch)); Register closure = r3; - Register optimized_code_entry = scratch1; - - __ LoadP( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. - __ CmpSmiLiteral(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kNone), r0); - __ beq(&fallthrough); - - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ CmpSmiLiteral( - optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ b(&fallthrough, Label::kNear); - } - } + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadP(scratch, FieldMemOperand(optimized_code_entry, + Code::kCodeDataContainerOffset)); + __ LoadW(scratch, FieldMemOperand( + scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0); + __ bne(&found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, + scratch, r7); + static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); + __ LoadCodeObjectEntry(r4, optimized_code_entry); + __ Jump(r4); - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); - - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. 
- Label found_deoptimized_code; - __ LoadP(scratch2, FieldMemOperand(optimized_code_entry, - Code::kCodeDataContainerOffset)); - __ LoadW( - scratch2, - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0); - __ bne(&found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - scratch2, scratch3, feedback_vector); - static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); - __ LoadCodeObjectEntry(r4, optimized_code_entry); - __ Jump(r4); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- r5 : new target (preserved for callee if needed, and caller) + // -- r3 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ CmpSmiLiteral(optimization_marker, + Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), + r0); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1163,9 +1138,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE)); __ bne(&push_stack_frame); - // Read off the optimized code slot in the feedback vector, and if there - // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7); + Register optimized_code_entry = r6; + + // Read off the optimized code slot in the feedback vector. 
+ __ LoadP(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + + // Check if the optimized code slot is not empty. + Label optimized_code_slot_not_empty; + __ CmpSmiLiteral(optimized_code_entry, + Smi::FromEnum(OptimizationMarker::kNone), r0); + __ bne(&optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(&not_optimized); // Increment invocation count for the function. __ LoadW(r1, FieldMemOperand(feedback_vector, @@ -1202,29 +1189,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, r4); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size (word) from the BytecodeArray object. __ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ SubP(r8, sp, r4); __ CmpLogicalP(r8, RealStackLimitAsMemOperand(masm)); - __ bge(&ok); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ blt(&stack_overflow); // If ok, push undefined as the initial value for all register file entries. // TODO(rmcilroy): Consider doing more than one push per loop iteration. Label loop, no_args; - __ LoadRoot(r8, RootIndex::kUndefinedValue); - __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2)); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + __ ShiftRightP(r4, r4, Operand(kSystemPointerSizeLog2)); __ LoadAndTestP(r4, r4); __ beq(&no_args); __ LoadRR(r1, r4); __ bind(&loop); - __ push(r8); + __ push(kInterpreterAccumulatorRegister); __ SubP(r1, Operand(1)); __ bne(&loop); __ bind(&no_args); @@ -1238,12 +1223,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); __ CmpP(r8, Operand::Zero()); __ beq(&no_incoming_new_target_or_generator_register); - __ ShiftLeftP(r8, r8, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r8, r8, Operand(kSystemPointerSizeLog2)); __ StoreP(r5, MemOperand(fp, r8)); __ bind(&no_incoming_new_target_or_generator_register); - // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + // The accumulator is already loaded with undefined. + + // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. Label do_dispatch; @@ -1254,7 +1239,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister)); - __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2)); __ LoadP(kJavaScriptCallCodeStartRegister, MemOperand(kInterpreterDispatchTableRegister, r5)); __ Call(kJavaScriptCallCodeStartRegister); @@ -1285,8 +1270,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LeaveInterpreterFrame(masm, r4); __ Ret(); + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); + // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8); + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); __ bkpt(0); // Should not return. } @@ -1296,11 +1299,11 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, Label loop, skip; __ CmpP(count, Operand::Zero()); __ beq(&skip); - __ AddP(index, index, Operand(kPointerSize)); // Bias up for LoadPU + __ AddP(index, index, Operand(kSystemPointerSize)); // Bias up for LoadPU __ LoadRR(r0, count); __ bind(&loop); - __ LoadP(scratch, MemOperand(index, -kPointerSize)); - __ lay(index, MemOperand(index, -kPointerSize)); + __ LoadP(scratch, MemOperand(index, -kSystemPointerSize)); + __ lay(index, MemOperand(index, -kSystemPointerSize)); __ push(scratch); __ SubP(r0, Operand(1)); __ bne(&loop); @@ -1474,7 +1477,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { Register scratch = temps.Acquire(); __ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister)); - __ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2)); + __ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2)); __ LoadP(kJavaScriptCallCodeStartRegister, MemOperand(kInterpreterDispatchTableRegister, scratch)); __ Jump(kJavaScriptCallCodeStartRegister); @@ -1540,7 +1543,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { } for (int i = j - 1; i >= 0; --i) { __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset + - i * kPointerSize)); + i * kSystemPointerSize)); __ push(r6); } for (int i = 0; i < 3 - j; ++i) { @@ -1589,9 +1592,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. __ StoreP( - r2, MemOperand( - sp, config->num_allocatable_general_registers() * kPointerSize + - BuiltinContinuationFrameConstants::kFixedFrameSize)); + r2, + MemOperand(sp, config->num_allocatable_general_registers() * + kSystemPointerSize + + BuiltinContinuationFrameConstants::kFixedFrameSize)); } for (int i = allocatable_register_count - 1; i >= 0; --i) { int code = config->GetAllocatableGeneralCode(i); @@ -1647,14 +1651,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument.
- __ push(r2); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -1707,16 +1705,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Register arg_size = r7; Register new_sp = r5; Register scratch = r6; - __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ LoadRR(r4, scratch); __ LoadP(r3, MemOperand(new_sp, 0)); // receiver - __ CmpP(arg_size, Operand(kPointerSize)); + __ CmpP(arg_size, Operand(kSystemPointerSize)); __ blt(&skip); - __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg + __ LoadP(scratch, MemOperand(new_sp, 1 * -kSystemPointerSize)); // thisArg __ beq(&skip); - __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argArray + __ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argArray __ bind(&skip); __ LoadRR(sp, new_sp); __ StoreP(scratch, MemOperand(sp, 0)); @@ -1765,7 +1763,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // r2: actual number of arguments // 2. Get the callable to call (passed as receiver) from the stack. - __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r4, r2, Operand(kSystemPointerSizeLog2)); __ LoadP(r3, MemOperand(sp, r4)); // 3. Shift arguments and return address one slot down on the stack @@ -1780,9 +1778,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { __ AddP(r4, sp, r4); __ bind(&loop); - __ LoadP(scratch, MemOperand(r4, -kPointerSize)); + __ LoadP(scratch, MemOperand(r4, -kSystemPointerSize)); __ StoreP(scratch, MemOperand(r4)); - __ SubP(r4, Operand(kPointerSize)); + __ SubP(r4, Operand(kSystemPointerSize)); __ CmpP(r4, sp); __ bne(&loop); // Adjust the actual number of arguments and remove the top element @@ -1812,19 +1810,20 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Register arg_size = r7; Register new_sp = r5; Register scratch = r6; - __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); __ LoadRoot(r3, RootIndex::kUndefinedValue); __ LoadRR(scratch, r3); __ LoadRR(r4, r3); - __ CmpP(arg_size, Operand(kPointerSize)); + __ CmpP(arg_size, Operand(kSystemPointerSize)); __ blt(&skip); - __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target + __ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target __ beq(&skip); - __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument - __ CmpP(arg_size, Operand(2 * kPointerSize)); + __ LoadP(scratch, + MemOperand(new_sp, 2 * -kSystemPointerSize)); // thisArgument + __ CmpP(arg_size, Operand(2 * kSystemPointerSize)); __ beq(&skip); - __ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList + __ LoadP(r4, MemOperand(new_sp, 3 * -kSystemPointerSize)); // argumentsList __ bind(&skip); __ LoadRR(sp, new_sp); __ StoreP(scratch, MemOperand(sp, 0)); @@ -1862,21 +1861,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Label skip; Register arg_size = r7; Register new_sp = r6; - __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); __ LoadRoot(r3, RootIndex::kUndefinedValue); __ LoadRR(r4, r3); __ LoadRR(r5, r3); __ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined) - __ CmpP(arg_size, Operand(kPointerSize)); + __ CmpP(arg_size, 
Operand(kSystemPointerSize)); __ blt(&skip); - __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target + __ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target __ LoadRR(r5, r3); // new.target defaults to target __ beq(&skip); - __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList - __ CmpP(arg_size, Operand(2 * kPointerSize)); + __ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argumentsList + __ CmpP(arg_size, Operand(2 * kSystemPointerSize)); __ beq(&skip); - __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target + __ LoadP(r5, MemOperand(new_sp, 3 * -kSystemPointerSize)); // new.target __ bind(&skip); __ LoadRR(sp, new_sp); } @@ -1912,15 +1911,15 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { // Function // ArgC as SMI // Padding <--- New SP - __ lay(sp, MemOperand(sp, -5 * kPointerSize)); + __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize)); // Cleanse the top nibble of 31-bit pointers. __ CleanseP(r14); - __ StoreP(r14, MemOperand(sp, 4 * kPointerSize)); - __ StoreP(fp, MemOperand(sp, 3 * kPointerSize)); - __ StoreP(r6, MemOperand(sp, 2 * kPointerSize)); - __ StoreP(r3, MemOperand(sp, 1 * kPointerSize)); - __ StoreP(r2, MemOperand(sp, 0 * kPointerSize)); + __ StoreP(r14, MemOperand(sp, 4 * kSystemPointerSize)); + __ StoreP(fp, MemOperand(sp, 3 * kSystemPointerSize)); + __ StoreP(r6, MemOperand(sp, 2 * kSystemPointerSize)); + __ StoreP(r3, MemOperand(sp, 1 * kSystemPointerSize)); + __ StoreP(r2, MemOperand(sp, 0 * kSystemPointerSize)); __ Push(Smi::zero()); // Padding. __ la(fp, MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp)); @@ -1933,7 +1932,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { // Get the number of arguments passed (as a smi), tear down the frame and // then tear down the parameters. __ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - int stack_adjustment = kPointerSize; // adjust for receiver + int stack_adjustment = kSystemPointerSize; // adjust for receiver __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment); __ SmiToPtrArrayOffset(r3, r3); __ lay(sp, MemOperand(sp, r3)); @@ -1981,12 +1980,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Label loop, no_args, skip; __ CmpP(r6, Operand::Zero()); __ beq(&no_args); - __ AddP(r4, r4, - Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); + __ AddP( + r4, r4, + Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize)); __ LoadRR(r1, r6); __ bind(&loop); - __ LoadP(scratch, MemOperand(r4, kPointerSize)); - __ la(r4, MemOperand(r4, kPointerSize)); + __ LoadP(scratch, MemOperand(r4, kSystemPointerSize)); + __ la(r4, MemOperand(r4, kSystemPointerSize)); __ CompareRoot(scratch, RootIndex::kTheHoleValue); __ bne(&skip, Label::kNear); __ LoadRoot(scratch, RootIndex::kUndefinedValue); @@ -2070,11 +2070,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Forward the arguments from the caller frame. 
{ Label loop; - __ AddP(r6, r6, Operand(kPointerSize)); + __ AddP(r6, r6, Operand(kSystemPointerSize)); __ AddP(r2, r2, r7); __ bind(&loop); { - __ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2)); + __ ShiftLeftP(scratch, r7, Operand(kSystemPointerSizeLog2)); __ LoadP(scratch, MemOperand(r6, scratch)); __ push(scratch); __ SubP(r7, r7, Operand(1)); @@ -2132,7 +2132,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ LoadGlobalProxy(r5); } else { Label convert_to_object, convert_receiver; - __ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r5, r2, Operand(kSystemPointerSizeLog2)); __ LoadP(r5, MemOperand(sp, r5)); __ JumpIfSmi(r5, &convert_to_object); STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); @@ -2169,7 +2169,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); __ bind(&convert_receiver); } - __ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r6, r2, Operand(kSystemPointerSizeLog2)); __ StoreP(r5, MemOperand(sp, r6)); } __ bind(&done_convert); @@ -2226,7 +2226,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { { Label done; __ LoadRR(scratch, sp); // preserve previous stack pointer - __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r9, r6, Operand(kSystemPointerSizeLog2)); __ SubP(sp, sp, r9); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack @@ -2256,7 +2256,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ bind(&loop); __ LoadP(r0, MemOperand(scratch, r7)); __ StoreP(r0, MemOperand(sp, r7)); - __ AddP(r7, r7, Operand(kPointerSize)); + __ AddP(r7, r7, Operand(kSystemPointerSize)); __ BranchOnCount(r1, &loop); __ bind(&skip); } @@ -2268,10 +2268,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ AddP(r4, r4, r9); __ LoadRR(r1, r6); __ bind(&loop); - __ LoadP(r0, MemOperand(r4, -kPointerSize)); - __ lay(r4, MemOperand(r4, -kPointerSize)); + __ LoadP(r0, MemOperand(r4, -kSystemPointerSize)); + __ lay(r4, MemOperand(r4, -kSystemPointerSize)); __ StoreP(r0, MemOperand(sp, r7)); - __ AddP(r7, r7, Operand(kPointerSize)); + __ AddP(r7, r7, Operand(kSystemPointerSize)); __ BranchOnCount(r1, &loop); __ AddP(r2, r2, r6); } @@ -2291,7 +2291,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // Patch the receiver to [[BoundThis]]. __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); - __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2)); __ StoreP(r5, MemOperand(sp, r1)); // Push the [[BoundArguments]] onto the stack. @@ -2311,7 +2311,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- r3 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(r3, &non_callable); __ bind(&non_smi); __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE); @@ -2328,14 +2328,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target is a proxy and call CallProxy external builtin __ CmpP(r7, Operand(JS_PROXY_TYPE)); - __ bne(&non_function); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); // 2. 
Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver the (original) target. - __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2)); __ StoreP(r3, MemOperand(sp, r7)); // Let the "call_as_function_delegate" take care of the rest. __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3); @@ -2449,7 +2447,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ bind(&non_proxy); { // Overwrite the original receiver with the (original) target. - __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2)); __ StoreP(r3, MemOperand(sp, r7)); // Let the "call_as_constructor_delegate" take care of the rest. __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3); @@ -2504,8 +2502,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ SmiToPtrArrayOffset(r2, r2); __ AddP(r2, fp); // adjust for return address and receiver - __ AddP(r2, r2, Operand(2 * kPointerSize)); - __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2)); + __ AddP(r2, r2, Operand(2 * kSystemPointerSize)); + __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2)); __ SubP(r6, r2, r6); // Copy the arguments (including the receiver) to the new stack frame. @@ -2520,7 +2518,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ LoadP(r0, MemOperand(r2, 0)); __ push(r0); __ CmpP(r2, r6); // Compare before moving to next argument. - __ lay(r2, MemOperand(r2, -kPointerSize)); + __ lay(r2, MemOperand(r2, -kSystemPointerSize)); __ bne(&copy); __ b(&invoke); @@ -2548,22 +2546,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { Label copy; __ bind(&copy); // Adjust load for return address and receiver. - __ LoadP(r0, MemOperand(r2, 2 * kPointerSize)); + __ LoadP(r0, MemOperand(r2, 2 * kSystemPointerSize)); __ push(r0); __ CmpP(r2, fp); // Compare before moving to next argument. - __ lay(r2, MemOperand(r2, -kPointerSize)); + __ lay(r2, MemOperand(r2, -kSystemPointerSize)); __ bne(&copy); // Fill the remaining expected arguments with undefined. // r3: function // r4: expected number of argumentus __ LoadRoot(r0, RootIndex::kUndefinedValue); - __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2)); __ SubP(r6, fp, r6); // Adjust for frame. __ SubP(r6, r6, Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp + - kPointerSize)); + kSystemPointerSize)); Label fill; __ bind(&fill); @@ -2608,7 +2606,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Remove superfluous parameters from the stack. __ SubP(r6, r2, r4); __ lgr(r2, r4); - __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r6, r6, Operand(kSystemPointerSizeLog2)); __ lay(sp, MemOperand(sp, r6)); __ b(&dont_adapt_arguments); } @@ -2708,8 +2706,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ LoadRR(r3, r4); } else { // Compute the argv pointer. - __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2)); - __ lay(r3, MemOperand(r3, sp, -kPointerSize)); + __ ShiftLeftP(r3, r2, Operand(kSystemPointerSizeLog2)); + __ lay(r3, MemOperand(r3, sp, -kSystemPointerSize)); } // Enter the exit frame that transitions from JavaScript to C++. @@ -2751,7 +2749,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // by one register each.
__ LoadRR(r4, r3); __ LoadRR(r3, r2); - __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); + __ la(r2, + MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize)); isolate_reg = r5; // Clang doesn't preserve r2 (result buffer) // write to r8 (preserved) before entry @@ -2765,7 +2764,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // If return value is on the stack, pop it to registers. if (needs_return_buffer) { __ LoadRR(r2, r8); - __ LoadP(r3, MemOperand(r2, kPointerSize)); + __ LoadP(r3, MemOperand(r2, kSystemPointerSize)); __ LoadP(r2, MemOperand(r2)); } @@ -2870,7 +2869,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Push(result_reg, scratch); // Account for saved regs. - int argument_offset = 2 * kPointerSize; + int argument_offset = 2 * kSystemPointerSize; // Load double input. __ LoadDouble(double_scratch, MemOperand(sp, argument_offset)); @@ -2884,7 +2883,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Push(scratch_high, scratch_low); // Account for saved regs. - argument_offset += 2 * kPointerSize; + argument_offset += 2 * kSystemPointerSize; __ LoadlW(scratch_high, MemOperand(sp, argument_offset + Register::kExponentOffset)); @@ -2958,7 +2957,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ bind(&done); __ Pop(scratch_high, scratch_low); - argument_offset -= 2 * kPointerSize; + argument_offset -= 2 * kSystemPointerSize; __ bind(&fastpath_done); __ StoreP(result_reg, MemOperand(sp, argument_offset)); @@ -3159,33 +3158,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Set up FunctionCallbackInfo's implicit_args on the stack as follows: // // Target state: - // sp[0 * kPointerSize]: kHolder - // sp[1 * kPointerSize]: kIsolate - // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) - // sp[3 * kPointerSize]: undefined (kReturnValue) - // sp[4 * kPointerSize]: kData - // sp[5 * kPointerSize]: undefined (kNewTarget) + // sp[0 * kSystemPointerSize]: kHolder + // sp[1 * kSystemPointerSize]: kIsolate + // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue) + // sp[3 * kSystemPointerSize]: undefined (kReturnValue) + // sp[4 * kSystemPointerSize]: kData + // sp[5 * kSystemPointerSize]: undefined (kNewTarget) // Reserve space on the stack. - __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kPointerSize))); + __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize))); // kHolder. - __ StoreP(holder, MemOperand(sp, 0 * kPointerSize)); + __ StoreP(holder, MemOperand(sp, 0 * kSystemPointerSize)); // kIsolate. __ Move(scratch, ExternalReference::isolate_address(masm->isolate())); - __ StoreP(scratch, MemOperand(sp, 1 * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, 1 * kSystemPointerSize)); // kReturnValueDefaultValue and kReturnValue. __ LoadRoot(scratch, RootIndex::kUndefinedValue); - __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize)); - __ StoreP(scratch, MemOperand(sp, 3 * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, 2 * kSystemPointerSize)); + __ StoreP(scratch, MemOperand(sp, 3 * kSystemPointerSize)); // kData. - __ StoreP(call_data, MemOperand(sp, 4 * kPointerSize)); + __ StoreP(call_data, MemOperand(sp, 4 * kSystemPointerSize)); // kNewTarget. - __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, 5 * kSystemPointerSize)); // Keep a pointer to kHolder (= implicit_args) in a scratch register. // We use it below to set up the FunctionCallbackInfo object. 
@@ -3207,33 +3206,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). // Arguments are after the return address (pushed by EnterExitFrame()). - __ StoreP(scratch, - MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * + kSystemPointerSize)); // FunctionCallbackInfo::values_ (points at the first varargs argument passed // on the stack). - __ AddP(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize)); - __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2)); + __ AddP(scratch, scratch, + Operand((FCA::kArgsLength - 1) * kSystemPointerSize)); + __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2)); __ AddP(scratch, scratch, r1); - __ StoreP(scratch, - MemOperand(sp, (kStackFrameExtraParamSlot + 2) * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) * + kSystemPointerSize)); // FunctionCallbackInfo::length_. - __ StoreW(argc, - MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kPointerSize)); + __ StoreW(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) * + kSystemPointerSize)); // We also store the number of bytes to drop from the stack after returning // from the API function here. __ mov(scratch, - Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize)); - __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2)); + Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize)); + __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2)); __ AddP(scratch, r1); - __ StoreP(scratch, - MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) * + kSystemPointerSize)); // v8::InvocationCallback's argument. __ lay(r2, - MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); + MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize)); ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); @@ -3241,11 +3241,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // TODO(jgruber): Document what these arguments are. static constexpr int kStackSlotsAboveFCA = 2; MemOperand return_value_operand( - fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); + fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize); static constexpr int kUseStackSpaceOperand = 0; MemOperand stack_space_operand( - sp, (kStackFrameExtraParamSlot + 4) * kPointerSize); + sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize); AllowExternalCallThatCantCauseGC scope(masm); CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, @@ -3293,7 +3293,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { // Load address of v8::PropertyAccessorInfo::args_ array and name handle. 
__ LoadRR(r2, sp); // r2 = Handle<Name> - __ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_ + __ AddP(r3, r2, Operand(1 * kSystemPointerSize)); // r3 = v8::PCI::args_ // If ABI passes Handles (pointer-sized struct) in a register: // @@ -3321,14 +3321,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { if (!ABI_PASSES_HANDLES_IN_REGS) { // pass 1st arg by reference - __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize)); - __ AddP(r2, sp, Operand(arg0Slot * kPointerSize)); + __ StoreP(r2, MemOperand(sp, arg0Slot * kSystemPointerSize)); + __ AddP(r2, sp, Operand(arg0Slot * kSystemPointerSize)); } // Create v8::PropertyCallbackInfo object on the stack and initialize // it's args_ field. - __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize)); - __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize)); + __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize)); + __ AddP(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize)); // r3 = v8::PropertyCallbackInfo& ExternalReference thunk_ref = @@ -3340,7 +3340,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { // +3 is to skip prolog, return address and name handle. MemOperand return_value_operand( - fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); + fp, + (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize); MemOperand* const kUseStackSpaceConstant = nullptr; CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, kStackUnwindSpace, kUseStackSpaceConstant, diff --git a/chromium/v8/src/builtins/setup-builtins-internal.cc b/chromium/v8/src/builtins/setup-builtins-internal.cc index 99ac0d6b1fa..e3f39a0906a 100644 --- a/chromium/v8/src/builtins/setup-builtins-internal.cc +++ b/chromium/v8/src/builtins/setup-builtins-internal.cc @@ -264,13 +264,12 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { namespace { Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index, - const char* name, interpreter::OperandScale operand_scale, interpreter::Bytecode bytecode) { DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)); Handle<Code> code = interpreter::GenerateBytecodeHandler( - isolate, bytecode, operand_scale, builtin_index, - BuiltinAssemblerOptions(isolate, builtin_index)); + isolate, Builtins::name(builtin_index), bytecode, operand_scale, + builtin_index, BuiltinAssemblerOptions(isolate, builtin_index)); return *code; } @@ -314,9 +313,8 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { CallDescriptors::InterfaceDescriptor, #Name); \ AddBuiltin(builtins, index++, code); -#define BUILD_BCH(Name, OperandScale, Bytecode) \ - code = GenerateBytecodeHandler(isolate, index, Builtins::name(index), \ - OperandScale, Bytecode); \ +#define BUILD_BCH(Name, OperandScale, Bytecode) \ + code = GenerateBytecodeHandler(isolate, index, OperandScale, Bytecode); \ AddBuiltin(builtins, index++, code); #define BUILD_ASM(Name, InterfaceDescriptor) \ diff --git a/chromium/v8/src/builtins/string-endswith.tq b/chromium/v8/src/builtins/string-endswith.tq index c3cc7d949b7..9590b853e79 100644 --- a/chromium/v8/src/builtins/string-endswith.tq +++ b/chromium/v8/src/builtins/string-endswith.tq @@ -41,7 +41,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. 
- if (IsRegExp(searchString)) { + if (regexp::IsRegExp(searchString)) { ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } diff --git a/chromium/v8/src/builtins/string-iterator.tq b/chromium/v8/src/builtins/string-iterator.tq index d36a44fa97d..b0bbb8d4a35 100644 --- a/chromium/v8/src/builtins/string-iterator.tq +++ b/chromium/v8/src/builtins/string-iterator.tq @@ -11,7 +11,7 @@ namespace string_iterator { properties_or_hash: kEmptyFixedArray, elements: kEmptyFixedArray, string: string, - next_index: nextIndex + index: nextIndex }; } @@ -31,7 +31,7 @@ namespace string_iterator { kIncompatibleMethodReceiver, 'String Iterator.prototype.next', receiver); const string = iterator.string; - const position: intptr = SmiUntag(iterator.next_index); + const position: intptr = SmiUntag(iterator.index); const length: intptr = string.length_intptr; if (position >= length) { return AllocateJSIteratorResult(Undefined, True); @@ -40,7 +40,7 @@ namespace string_iterator { const encoding = UTF16; const ch = string::LoadSurrogatePairAt(string, length, position, encoding); const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch); - iterator.next_index = SmiTag(position + value.length_intptr); + iterator.index = SmiTag(position + value.length_intptr); return AllocateJSIteratorResult(value, False); } } diff --git a/chromium/v8/src/builtins/string-slice.tq b/chromium/v8/src/builtins/string-slice.tq index 661cc264c50..b5ddbdb2ccb 100644 --- a/chromium/v8/src/builtins/string-slice.tq +++ b/chromium/v8/src/builtins/string-slice.tq @@ -4,7 +4,8 @@ namespace string_slice { - extern macro SubString(String, intptr, intptr): String; + extern macro StringBuiltinsAssembler::SubString(String, intptr, intptr): + String; // ES6 #sec-string.prototype.slice ( start, end ) // https://tc39.github.io/ecma262/#sec-string.prototype.slice diff --git a/chromium/v8/src/builtins/string-startswith.tq b/chromium/v8/src/builtins/string-startswith.tq index 7fa7ec6d5ce..3238f52b86b 100644 --- a/chromium/v8/src/builtins/string-startswith.tq +++ b/chromium/v8/src/builtins/string-startswith.tq @@ -5,9 +5,6 @@ #include 'src/builtins/builtins-regexp-gen.h' namespace string { - extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context: - Context)(Object): bool; - // https://tc39.github.io/ecma262/#sec-string.prototype.startswith transitioning javascript builtin StringPrototypeStartsWith( js-implicit context: Context, receiver: JSAny)(...arguments): Boolean { @@ -23,7 +20,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. 
- if (IsRegExp(searchString)) { + if (regexp::IsRegExp(searchString)) { ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } diff --git a/chromium/v8/src/builtins/string-substring.tq b/chromium/v8/src/builtins/string-substring.tq index c97b294a34f..813dc35ab28 100644 --- a/chromium/v8/src/builtins/string-substring.tq +++ b/chromium/v8/src/builtins/string-substring.tq @@ -4,7 +4,8 @@ namespace string_substring { - extern macro SubString(String, intptr, intptr): String; + extern macro StringBuiltinsAssembler::SubString(String, intptr, intptr): + String; transitioning macro ToSmiBetweenZeroAnd(implicit context: Context)( value: JSAny, limit: Smi): Smi { diff --git a/chromium/v8/src/builtins/string.tq b/chromium/v8/src/builtins/string.tq index 7f007680e93..4f2c342fd55 100644 --- a/chromium/v8/src/builtins/string.tq +++ b/chromium/v8/src/builtins/string.tq @@ -21,7 +21,8 @@ namespace string { extern macro StringBuiltinsAssembler::LoadSurrogatePairAt( String, intptr, intptr, constexpr UnicodeEncoding): int32; - extern macro StringFromSingleUTF16EncodedCodePoint(int32): String; + extern macro StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint( + int32): String; // This function assumes StringPrimitiveWithNoCustomIteration is true. transitioning builtin StringToList(implicit context: Context)(string: String): @@ -187,4 +188,12 @@ namespace string { left: String, right: JSAny): String { return left + ToStringImpl(context, ToPrimitiveDefault(right)); } + + builtin StringCharAt(implicit context: Context)( + receiver: String, position: intptr): String { + // Load the character code at the {position} from the {receiver}. + const code: int32 = StringCharCodeAt(receiver, position); + // And return the single character string with only that {code} + return StringFromSingleCharCode(code); + } } diff --git a/chromium/v8/src/builtins/typed-array-createtypedarray.tq b/chromium/v8/src/builtins/typed-array-createtypedarray.tq index a4767398616..a6bd445e34a 100644 --- a/chromium/v8/src/builtins/typed-array-createtypedarray.tq +++ b/chromium/v8/src/builtins/typed-array-createtypedarray.tq @@ -27,21 +27,16 @@ namespace typed_array_createtypedarray { isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer, byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray { let elements: ByteArray; - let externalPointer: RawPtr; - let basePointer: ByteArray | Smi; if constexpr (isOnHeap) { elements = AllocateByteArray(byteLength); - basePointer = elements; - externalPointer = PointerConstant(kExternalPointerForOnHeapArray); } else { - basePointer = Convert<Smi>(0); + elements = kEmptyByteArray; // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit // platforms are self-limiting, because we can't allocate an array bigger // than our 32-bit arithmetic range anyway. 64 bit platforms could // theoretically have an offset up to 2^35 - 1. - const backingStore: RawPtr = buffer.backing_store; - externalPointer = backingStore + Convert<intptr>(byteOffset); + const backingStore: uintptr = Convert<uintptr>(buffer.backing_store); // Assert no overflow has occurred. Only assert if the mock array buffer // allocator is NOT used. When the mock array buffer is used, impossibly @@ -49,9 +44,7 @@ namespace typed_array_createtypedarray { // and this assertion to fail. 
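// With backingStore held as a uintptr, "(backingStore + byteOffset) >=
// backingStore" is a plain unsigned wrap-around check, whereas the old code
// formed the sum as a RawPtr first and only then compared the addresses.
// A minimal C++ sketch of the same check, assuming plain uintptr_t addresses:
//
//   bool AddDidNotWrap(uintptr_t base, uintptr_t offset) {
//     return base + offset >= base;  // false only if the addition wrapped
//   }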
assert( IsMockArrayBufferAllocatorFlag() || - Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore)); - - elements = kEmptyByteArray; + (backingStore + byteOffset) >= backingStore); } // We can't just build the new object with "new JSTypedArray" here because @@ -64,8 +57,16 @@ namespace typed_array_createtypedarray { typedArray.byte_offset = byteOffset; typedArray.byte_length = byteLength; typedArray.length = length; - typedArray.external_pointer = externalPointer; - typedArray.base_pointer = basePointer; + if constexpr (isOnHeap) { + typed_array::SetJSTypedArrayOnHeapDataPtr( + typedArray, elements, byteOffset); + } else { + typed_array::SetJSTypedArrayOffHeapDataPtr( + typedArray, buffer.backing_store, byteOffset); + assert( + typedArray.data_ptr == + (buffer.backing_store + Convert<intptr>(byteOffset))); + } SetupTypedArrayEmbedderFields(typedArray); return typedArray; } diff --git a/chromium/v8/src/builtins/typed-array-slice.tq b/chromium/v8/src/builtins/typed-array-slice.tq index dc13865590e..d17ff4a3756 100644 --- a/chromium/v8/src/builtins/typed-array-slice.tq +++ b/chromium/v8/src/builtins/typed-array-slice.tq @@ -23,7 +23,7 @@ namespace typed_array_slice { // of src and result array are the same and they are not sharing the // same buffer, use memmove. if (srcKind != destInfo.kind) goto IfSlow; - if (BitcastTaggedToWord(dest.buffer) == BitcastTaggedToWord(src.buffer)) { + if (dest.buffer == src.buffer) { goto IfSlow; } diff --git a/chromium/v8/src/builtins/typed-array.tq b/chromium/v8/src/builtins/typed-array.tq index 59100736a5d..1c901abf752 100644 --- a/chromium/v8/src/builtins/typed-array.tq +++ b/chromium/v8/src/builtins/typed-array.tq @@ -71,12 +71,17 @@ namespace typed_array { ElementsKind): bool; extern macro LoadFixedTypedArrayElementAsTagged( RawPtr, Smi, constexpr ElementsKind): Numeric; - extern macro StoreJSTypedArrayElementFromTagged( + extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged( Context, JSTypedArray, Smi, JSAny, constexpr ElementsKind); type LoadFn = builtin(Context, JSTypedArray, Smi) => JSAny; type StoreFn = builtin(Context, JSTypedArray, Smi, JSAny) => JSAny; + extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr( + JSTypedArray, ByteArray, uintptr): void; + extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr( + JSTypedArray, RawPtr, uintptr): void; + // AttachedJSTypedArray guards that the array's buffer is not detached. 
transient type AttachedJSTypedArray extends JSTypedArray; @@ -198,7 +203,7 @@ namespace typed_array { builtin StoreFixedElement<T: type>( context: Context, typedArray: JSTypedArray, index: Smi, value: JSAny): JSAny { - StoreJSTypedArrayElementFromTagged( + typed_array::StoreJSTypedArrayElementFromTagged( context, typedArray, index, value, KindForArrayType<T>()); return Undefined; } diff --git a/chromium/v8/src/builtins/x64/builtins-x64.cc b/chromium/v8/src/builtins/x64/builtins-x64.cc index b6b407fb332..9679237ff82 100644 --- a/chromium/v8/src/builtins/x64/builtins-x64.cc +++ b/chromium/v8/src/builtins/x64/builtins-x64.cc @@ -5,8 +5,9 @@ #if V8_TARGET_ARCH_X64 #include "src/api/api-arguments.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/code-factory.h" +#include "src/codegen/x64/assembler-x64.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" #include "src/execution/frames.h" @@ -401,13 +402,13 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ pushq(r13); __ pushq(r14); __ pushq(r15); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI. __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI. #endif __ pushq(rbx); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // On Win64 XMM6-XMM15 are callee-save. __ AllocateStackSpace(EntryFrameConstants::kXMMRegistersBlockSize); __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6); @@ -507,7 +508,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, } // Restore callee-saved registers (X64 conventions). -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // On Win64 XMM6-XMM15 are callee-save __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0)); __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1)); @@ -523,7 +524,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, #endif __ popq(rbx); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI. __ popq(rsi); __ popq(rdi); @@ -611,17 +612,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Push(rdi); __ Push(arg_reg_4); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Load the previous frame pointer to access C arguments on stack __ movq(kScratchRegister, Operand(rbp, 0)); // Load the number of arguments and setup pointer to the arguments. __ movq(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset)); __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset)); -#else // _WIN64 +#else // V8_TARGET_OS_WIN // Load the number of arguments and setup pointer to the arguments. __ movq(rax, r8); __ movq(rbx, r9); -#endif // _WIN64 +#endif // V8_TARGET_OS_WIN // Current stack contents: // [rsp + 2 * kSystemPointerSize ... ] : Internal frame @@ -851,10 +852,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // TODO(juliana): if we remove the code below then we don't need all // the parameters. -static void ReplaceClosureCodeWithOptimizedCode( - MacroAssembler* masm, Register optimized_code, Register closure, - Register scratch1, Register scratch2, Register scratch3) { - +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { // Store the optimized code in the closure. 
__ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code); @@ -895,104 +897,71 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, Register scratch2, - Register scratch3) { +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { // ----------- S t a t e ------------- // -- rdx : new target (preserved for callee if needed, and caller) // -- rdi : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, rdx, rdi, scratch1, scratch2, scratch3)); - - Label optimized_code_slot_is_weak_ref, fallthrough; - - Register closure = rdi; - Register optimized_code_entry = scratch1; - Register decompr_scratch = COMPRESS_POINTERS_BOOL ? scratch2 : no_reg; - - __ LoadAnyTaggedField( - optimized_code_entry, - FieldOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset), - decompr_scratch); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. - __ SmiCompare(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kNone)); - __ j(equal, &fallthrough); - - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ SmiCompare(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)); - __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); - } - __ jmp(&fallthrough); - } + DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. 
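// Note that kNone never reaches this helper: the caller only calls
// MaybeOptimizeCode after comparing the slot against OptimizationMarker::kNone,
// and weak code references are routed to TailCallOptimizedCodeSlot instead.
// So if none of the tail calls above fired, the marker should be
// kInOptimizationQueue, which the debug check below asserts.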
+ if (FLAG_debug_code) { + __ SmiCompare(optimization_marker, + Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)); + __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); } +} - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, &fallthrough); +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch1, Register scratch2) { + // ----------- S t a t e ------------- + // -- rdx : new target (preserved for callee if needed, and caller) + // -- rdi : target function (preserved for callee if needed, and caller) + // ----------------------------------- - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - Label found_deoptimized_code; - __ LoadTaggedPointerField( - scratch2, - FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); - __ testl( - FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset), - Immediate(1 << Code::kMarkedForDeoptimizationBit)); - __ j(not_zero, &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - scratch2, scratch3, feedback_vector); - static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); - __ Move(rcx, optimized_code_entry); - __ JumpCodeObject(rcx); + Register closure = rdi; - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); - } + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadTaggedPointerField( + scratch1, + FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ testl(FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset), + Immediate(1 << Code::kMarkedForDeoptimizationBit)); + __ j(not_zero, &found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure into + // the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, + scratch1, scratch2); + static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); + __ Move(rcx, optimized_code_entry); + __ JumpCodeObject(rcx); - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); + // Optimized code slot contains deoptimized code, evict it and re-enter the + // closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1019,20 +988,21 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide)); __ cmpb(bytecode, Immediate(0x3)); __ j(above, &process_bytecode, Label::kNear); + // The code to load the next bytecode is common to both wide and extra wide. + // We can hoist them up here. incl has to happen before testb since it + // modifies the ZF flag. 
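// incl sets ZF from its own result, so placing it between the testb and the
// j(not_equal) below would clobber the flag the branch consumes; movzxbq does
// not touch the flags, which is why the bytecode load can sit between the
// test and the branch.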
+ __ incl(bytecode_offset); __ testb(bytecode, Immediate(0x1)); + __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); __ j(not_equal, &extra_wide, Label::kNear); - // Load the next bytecode and update table to the wide scaled table. - __ incl(bytecode_offset); - __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); + // Update table to the wide scaled table. __ addq(bytecode_size_table, Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount)); __ jmp(&process_bytecode, Label::kNear); __ bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. - __ incl(bytecode_offset); - __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); + // Update table to the extra wide scaled table. __ addq(bytecode_size_table, Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); @@ -1101,7 +1071,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r11, r15); + + Register optimized_code_entry = rcx; + Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg; + + __ LoadAnyTaggedField( + optimized_code_entry, + FieldOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset), + decompr_scratch); + + // Check if the optimized code slot is not empty. + Label optimized_code_slot_not_empty; + __ Cmp(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone)); + __ j(not_equal, &optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(¬_optimized); // Increment invocation count for the function. __ incl( @@ -1137,28 +1123,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(rcx); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. __ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ movq(rax, rsp); __ subq(rax, rcx); __ cmpq(rax, RealStackLimitAsOperand(masm)); - __ j(above_equal, &ok, Label::kNear); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ j(below, &stack_overflow); // If ok, push undefined as the initial value for all register file entries. Label loop_header; Label loop_check; - __ LoadRoot(rax, RootIndex::kUndefinedValue); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ j(always, &loop_check, Label::kNear); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. - __ Push(rax); + __ Push(kInterpreterAccumulatorRegister); // Continue loop if not done. __ bind(&loop_check); __ subq(rcx, Immediate(kSystemPointerSize)); @@ -1169,16 +1153,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // register, initialize it with incoming value which was passed in rdx. 
Label no_incoming_new_target_or_generator_register; __ movsxlq( - rax, + rcx, FieldOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); - __ testl(rax, rax); + __ testl(rcx, rcx); __ j(zero, &no_incoming_new_target_or_generator_register, Label::kNear); - __ movq(Operand(rbp, rax, times_system_pointer_size, 0), rdx); + __ movq(Operand(rbp, rcx, times_system_pointer_size, 0), rdx); __ bind(&no_incoming_new_target_or_generator_register); - // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + // The accumulator is already loaded with undefined. // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -1201,10 +1184,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ movq(kInterpreterBytecodeArrayRegister, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ movq(kInterpreterBytecodeOffsetRegister, - Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeOffsetRegister); + Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Either return, or advance to the next bytecode and dispatch. Label do_return; @@ -1223,6 +1204,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); __ int3(); // Should not return. + + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code as opposed to an optimization marker. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); + // Fall through if there's no runnable optimized code. + __ jmp(¬_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, ¬_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + __ int3(); // Should not return. } static void Generate_InterpreterPushArgs(MacroAssembler* masm, @@ -1425,10 +1425,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { } // Get the target bytecode offset from the frame. - __ movq(kInterpreterBytecodeOffsetRegister, - Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeOffsetRegister); + Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Dispatch to the target bytecode. __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister, @@ -1443,10 +1441,8 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. 
__ movq(kInterpreterBytecodeArrayRegister, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ movq(kInterpreterBytecodeOffsetRegister, - Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeOffsetRegister); + Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Load the current bytecode. __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister, @@ -1459,8 +1455,9 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { &if_return); // Convert new bytecode offset to a Smi and save in the stackframe. - __ SmiTag(rbx, kInterpreterBytecodeOffsetRegister); - __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx); + __ SmiTag(kInterpreterBytecodeOffsetRegister); + __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), + kInterpreterBytecodeOffsetRegister); Generate_InterpreterEnterBytecode(masm); @@ -1485,7 +1482,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { // Preserve argument count for later compare. __ movq(rcx, rax); // Push the number of arguments to the callee. - __ SmiTag(rax, rax); + __ SmiTag(rax); __ Push(rax); // Push a copy of the target function and the new target. __ Push(rdi); @@ -1522,7 +1519,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ Drop(2); __ Pop(rcx); - __ SmiUntag(rcx, rcx); + __ SmiUntag(rcx); scope.GenerateLeaveFrame(); __ PopReturnAddressTo(rbx); @@ -1536,7 +1533,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ Pop(rdx); __ Pop(rdi); __ Pop(rax); - __ SmiUntag(rax, rax); + __ SmiUntag(rax); } // On failure, tail call back to regular js by re-calling the function // which has be reset to the compile lazy builtin. @@ -1563,7 +1560,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, int code = config->GetAllocatableGeneralCode(i); __ popq(Register::from_code(code)); if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { - __ SmiUntag(Register::from_code(code), Register::from_code(code)); + __ SmiUntag(Register::from_code(code)); } } __ movq( @@ -2274,7 +2271,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // TODO(bmeurer): Inline the allocation here to avoid building the frame // in the fast case? (fall back to AllocateInNewSpace?) FrameScope scope(masm, StackFrame::INTERNAL); - __ SmiTag(rax, rax); + __ SmiTag(rax); __ Push(rax); __ Push(rdi); __ movq(rax, rcx); @@ -2285,7 +2282,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ movq(rcx, rax); __ Pop(rdi); __ Pop(rax); - __ SmiUntag(rax, rax); + __ SmiUntag(rax); } __ LoadTaggedPointerField( rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); @@ -2601,14 +2598,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ movq(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ Push(rax); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2647,7 +2638,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was pushed to the stack by the caller as int32. __ Pop(r11); // Convert to Smi for the runtime call. 
- __ SmiTag(r11, r11); + __ SmiTag(r11); { HardAbortScope hard_abort(masm); // Avoid calls to Abort. FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2716,7 +2707,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // If argv_mode == kArgvInRegister: // r15: pointer to the first argument -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the // stack to be aligned to 16 bytes. It only allows a single-word to be // returned in register rax. Larger return sizes must be written to an address @@ -2738,7 +2729,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, const Register kCCallArg3 = rcx; const int kArgExtraStackSpace = 0; const int kMaxRegisterResultSize = 2; -#endif // _WIN64 +#endif // V8_TARGET_OS_WIN // Enter the exit frame that transitions from JavaScript to C++. int arg_stack_space = @@ -2809,7 +2800,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); Operand pending_exception_operand = masm->ExternalReferenceAsOperand(pending_exception_address); - __ cmpq(r14, pending_exception_operand); + __ cmp_tagged(r14, pending_exception_operand); __ j(equal, &okay, Label::kNear); __ int3(); __ bind(&okay); diff --git a/chromium/v8/src/codegen/OWNERS b/chromium/v8/src/codegen/OWNERS index feb2f62f787..64d2d7b97de 100644 --- a/chromium/v8/src/codegen/OWNERS +++ b/chromium/v8/src/codegen/OWNERS @@ -1,6 +1,6 @@ bbudge@chromium.org bmeurer@chromium.org -clemensh@chromium.org +clemensb@chromium.org gdeepti@chromium.org ishell@chromium.org jarin@chromium.org diff --git a/chromium/v8/src/codegen/arm/assembler-arm-inl.h b/chromium/v8/src/codegen/arm/assembler-arm-inl.h index 3fbd679104e..45ec07a3828 100644 --- a/chromium/v8/src/codegen/arm/assembler-arm-inl.h +++ b/chromium/v8/src/codegen/arm/assembler-arm-inl.h @@ -118,7 +118,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/arm/assembler-arm.cc b/chromium/v8/src/codegen/arm/assembler-arm.cc index 9c46063537d..6659960bb80 100644 --- a/chromium/v8/src/codegen/arm/assembler-arm.cc +++ b/chromium/v8/src/codegen/arm/assembler-arm.cc @@ -40,6 +40,7 @@ #include "src/base/bits.h" #include "src/base/cpu.h" +#include "src/base/overflowing-math.h" #include "src/codegen/arm/assembler-arm-inl.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler.h" @@ -452,8 +453,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle<HeapObject> object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: { const StringConstantBase* str = request.string(); @@ -4802,15 +4803,17 @@ void Assembler::GrowBuffer() { int rc_delta = (new_start + new_size) - (buffer_start_ + old_size); size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos(); 
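// The casts through Address below keep the buffer-relocation arithmetic in
// integer space: the adjusted positions land in the new buffer, so computing
// them by offsetting byte* pointers into the old buffer would be out-of-range
// pointer arithmetic.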
MemMove(new_start, buffer_start_, pc_offset()); - MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), - reloc_size); + byte* new_reloc_start = reinterpret_cast<byte*>( + reinterpret_cast<Address>(reloc_info_writer.pos()) + rc_delta); + MemMove(new_reloc_start, reloc_info_writer.pos(), reloc_size); // Switch buffers. buffer_ = std::move(new_buffer); buffer_start_ = new_start; - pc_ += pc_delta; - reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, - reloc_info_writer.last_pc() + pc_delta); + pc_ = reinterpret_cast<byte*>(reinterpret_cast<Address>(pc_) + pc_delta); + byte* new_last_pc = reinterpret_cast<byte*>( + reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta); + reloc_info_writer.Reposition(new_reloc_start, new_last_pc); // None of our relocation types are pc relative pointing outside the code // buffer nor pc absolute pointing inside the code buffer, so there is no need @@ -4831,7 +4834,7 @@ void Assembler::dd(uint32_t data) { // blocked before using dd. DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); - *reinterpret_cast<uint32_t*>(pc_) = data; + base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), data); pc_ += sizeof(uint32_t); } @@ -4840,7 +4843,7 @@ void Assembler::dq(uint64_t value) { // blocked before using dq. DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); - *reinterpret_cast<uint64_t*>(pc_) = value; + base::WriteUnalignedValue(reinterpret_cast<Address>(pc_), value); pc_ += sizeof(uint64_t); } diff --git a/chromium/v8/src/codegen/arm/assembler-arm.h b/chromium/v8/src/codegen/arm/assembler-arm.h index f669943f34e..1d280e5555b 100644 --- a/chromium/v8/src/codegen/arm/assembler-arm.h +++ b/chromium/v8/src/codegen/arm/assembler-arm.h @@ -41,6 +41,7 @@ #define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_ #include <stdio.h> +#include <memory> #include <vector> #include "src/codegen/arm/constants-arm.h" @@ -305,9 +306,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { explicit Assembler(const AssemblerOptions&, std::unique_ptr<AssemblerBuffer> = {}); - virtual ~Assembler(); + ~Assembler() override; - virtual void AbortedCodeGeneration() { pending_32_bit_constants_.clear(); } + void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); } // GetCode emits any pending (non-emitted) code and fills the descriptor desc. 
static constexpr int kNoHandlerTable = 0; diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc index 7f6d82518ec..6f1adfead26 100644 --- a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc @@ -573,7 +573,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, Condition cond) { DCHECK_LT(lsb, 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { - int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); + int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u); and_(dst, src1, Operand(mask), LeaveCC, cond); if (lsb != 0) { mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); @@ -1602,57 +1602,43 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - - ExternalReference debug_hook_active = - ExternalReference::debug_hook_on_function_call_address(isolate()); - Move(r4, debug_hook_active); - ldrsb(r4, MemOperand(r4)); - cmp(r4, Operand(0)); - b(eq, &skip_hook); - - { - // Load receiver to pass it later to DebugOnFunctionCall hook. - if (actual.is_reg()) { - mov(r4, actual.reg()); - } else { - mov(r4, Operand(actual.immediate())); - } - ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2)); - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - if (expected.is_reg()) { - SmiTag(expected.reg()); - Push(expected.reg()); - } - if (actual.is_reg()) { - SmiTag(actual.reg()); - Push(actual.reg()); - } - if (new_target.is_valid()) { - Push(new_target); - } - Push(fun); - Push(fun); - Push(r4); - CallRuntime(Runtime::kDebugOnFunctionCall); - Pop(fun); - if (new_target.is_valid()) { - Pop(new_target); - } - if (actual.is_reg()) { - Pop(actual.reg()); - SmiUntag(actual.reg()); - } - if (expected.is_reg()) { - Pop(expected.reg()); - SmiUntag(expected.reg()); - } +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + // Load receiver to pass it later to DebugOnFunctionCall hook. + if (actual.is_reg()) { + ldr(r4, MemOperand(sp, actual.reg(), LSL, kPointerSizeLog2)); + } else { + ldr(r4, MemOperand(sp, actual.immediate() << kPointerSizeLog2)); + } + FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); + if (expected.is_reg()) { + SmiTag(expected.reg()); + Push(expected.reg()); + } + if (actual.is_reg()) { + SmiTag(actual.reg()); + Push(actual.reg()); + } + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Push(r4); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + if (actual.is_reg()) { + Pop(actual.reg()); + SmiUntag(actual.reg()); + } + if (expected.is_reg()) { + Pop(expected.reg()); + SmiUntag(expected.reg()); } - bind(&skip_hook); } void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, @@ -1665,7 +1651,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(new_target.is_valid(), new_target == r3); // On function call, call into the debugger if necessary. 
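// The debug-hook test itself stays inline (a single byte load and compare),
// but the call into the debugger is now emitted out of line behind the
// deferred debug_hook label, so the common path with no debugger attached
// falls straight through.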
- CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + ExternalReference debug_hook_active = + ExternalReference::debug_hook_on_function_call_address(isolate()); + Move(r4, debug_hook_active); + ldrsb(r4, MemOperand(r4)); + cmp(r4, Operand(0)); + b(ne, &debug_hook); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -1687,11 +1682,17 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK(flag == JUMP_FUNCTION); JumpCodeObject(code); } - - // Continue here if InvokePrologue does handle the invocation due to - // mismatched parameter counts. - bind(&done); } + b(&done); + + // Deferred debug hook. + bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + b(&continue_after_hook); + + // Continue here if InvokePrologue does handle the invocation due to + // mismatched parameter counts. + bind(&done); } void MacroAssembler::InvokeFunction(Register fun, Register new_target, diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.h b/chromium/v8/src/codegen/arm/macro-assembler-arm.h index bbea40b9a62..4807a6d20da 100644 --- a/chromium/v8/src/codegen/arm/macro-assembler-arm.h +++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.h @@ -633,10 +633,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); - // On function call, call into the debugger if necessary. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + // On function call, call into the debugger. + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h index baae106c1c6..ce34da7dc2a 100644 --- a/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -54,14 +54,12 @@ inline bool CPURegister::IsSP() const { } inline void CPURegList::Combine(const CPURegList& other) { - DCHECK(IsValid()); DCHECK(other.type() == type_); DCHECK(other.RegisterSizeInBits() == size_); list_ |= other.list(); } inline void CPURegList::Remove(const CPURegList& other) { - DCHECK(IsValid()); if (other.type() == type_) { list_ &= ~other.list(); } @@ -84,13 +82,12 @@ inline void CPURegList::Remove(const CPURegister& other1, } inline void CPURegList::Combine(int code) { - DCHECK(IsValid()); DCHECK(CPURegister::Create(code, size_, type_).IsValid()); list_ |= (1ULL << code); + DCHECK(IsValid()); } inline void CPURegList::Remove(int code) { - DCHECK(IsValid()); DCHECK(CPURegister::Create(code, size_, type_).IsValid()); list_ &= ~(1ULL << code); } @@ -311,6 +308,18 @@ Operand Operand::ToExtendedRegister() const { return Operand(reg_, reg_.Is64Bits() ? 
UXTX : UXTW, shift_amount_); } +Operand Operand::ToW() const { + if (IsShiftedRegister()) { + DCHECK(reg_.Is64Bits()); + return Operand(reg_.W(), shift(), shift_amount()); + } else if (IsExtendedRegister()) { + DCHECK(reg_.Is64Bits()); + return Operand(reg_.W(), extend(), shift_amount()); + } + DCHECK(IsImmediate()); + return *this; +} + Immediate Operand::immediate_for_heap_object_request() const { DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber && immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) || @@ -711,7 +720,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); } - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64.cc b/chromium/v8/src/codegen/arm64/assembler-arm64.cc index c798d3a8a03..ea2f4696bdb 100644 --- a/chromium/v8/src/codegen/arm64/assembler-arm64.cc +++ b/chromium/v8/src/codegen/arm64/assembler-arm64.cc @@ -63,18 +63,16 @@ void CpuFeatures::PrintFeatures() {} // CPURegList utilities. CPURegister CPURegList::PopLowestIndex() { - DCHECK(IsValid()); if (IsEmpty()) { return NoCPUReg; } - int index = CountTrailingZeros(list_, kRegListSizeInBits); + int index = base::bits::CountTrailingZeros(list_); DCHECK((1LL << index) & list_); Remove(index); return CPURegister::Create(index, size_, type_); } CPURegister CPURegList::PopHighestIndex() { - DCHECK(IsValid()); if (IsEmpty()) { return NoCPUReg; } @@ -369,8 +367,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset(); switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - Handle<HeapObject> object = isolate->factory()->NewHeapNumber( - request.heap_number(), AllocationType::kOld); + Handle<HeapObject> object = + isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); EmbeddedObjectIndex index = AddEmbeddedObject(object); set_embedded_object_index_referenced_from(pc, index); break; @@ -3967,19 +3966,24 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr, bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); } bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) { - bool offset_is_size_multiple = (((offset >> size) << size) == offset); + bool offset_is_size_multiple = + (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) == + offset); return offset_is_size_multiple && is_uint12(offset >> size); } bool Assembler::IsImmLSPair(int64_t offset, unsigned size) { - bool offset_is_size_multiple = (((offset >> size) << size) == offset); + bool offset_is_size_multiple = + (static_cast<int64_t>(static_cast<uint64_t>(offset >> size) << size) == + offset); return offset_is_size_multiple && is_int7(offset >> size); } bool Assembler::IsImmLLiteral(int64_t offset) { int inst_size = static_cast<int>(kInstrSizeLog2); bool offset_is_inst_multiple = - (((offset >> inst_size) << inst_size) == offset); + (static_cast<int64_t>(static_cast<uint64_t>(offset >> inst_size) + << inst_size) == offset); DCHECK_GT(offset, 0); offset >>= kLoadLiteralScaleLog2; return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width); @@ -4178,9 +4182,9 @@ bool Assembler::IsImmLogical(uint64_t value, unsigned 
width, unsigned* n, // 1110ss 4 UInt(ss) // 11110s 2 UInt(s) // - // So we 'or' (-d << 1) with our computed s to form imms. + // So we 'or' (-d * 2) with our computed s to form imms. *n = out_n; - *imm_s = ((-d << 1) | (s - 1)) & 0x3F; + *imm_s = ((-d * 2) | (s - 1)) & 0x3F; *imm_r = r; return true; diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64.h b/chromium/v8/src/codegen/arm64/assembler-arm64.h index 04ee6d8b750..23e8acb1f95 100644 --- a/chromium/v8/src/codegen/arm64/assembler-arm64.h +++ b/chromium/v8/src/codegen/arm64/assembler-arm64.h @@ -8,6 +8,7 @@ #include <deque> #include <list> #include <map> +#include <memory> #include <vector> #include "src/base/optional.h" @@ -105,6 +106,9 @@ class Operand { // which helps in the encoding of instructions that use the stack pointer. inline Operand ToExtendedRegister() const; + // Returns new Operand adapted for using with W registers. + inline Operand ToW() const; + inline Immediate immediate() const; inline int64_t ImmediateValue() const; inline RelocInfo::Mode ImmediateRMode() const; @@ -189,9 +193,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { explicit Assembler(const AssemblerOptions&, std::unique_ptr<AssemblerBuffer> = {}); - virtual ~Assembler(); + ~Assembler() override; - virtual void AbortedCodeGeneration(); + void AbortedCodeGeneration() override; // System functions --------------------------------------------------------- // Start generating code from the beginning of the buffer, discarding any code @@ -375,7 +379,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Instruction set functions ------------------------------------------------ // Branch / Jump instructions. - // For branches offsets are scaled, i.e. they in instrcutions not in bytes. + // For branches offsets are scaled, i.e. in instructions not in bytes. // Branch to register. void br(const Register& xn); diff --git a/chromium/v8/src/codegen/arm64/constants-arm64.h b/chromium/v8/src/codegen/arm64/constants-arm64.h index 914268644a6..ccafae5e14f 100644 --- a/chromium/v8/src/codegen/arm64/constants-arm64.h +++ b/chromium/v8/src/codegen/arm64/constants-arm64.h @@ -33,6 +33,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128; constexpr uint8_t kInstrSize = 4; constexpr uint8_t kInstrSizeLog2 = 2; constexpr uint8_t kLoadLiteralScaleLog2 = 2; +constexpr uint8_t kLoadLiteralScale = 1 << kLoadLiteralScaleLog2; constexpr int kMaxLoadLiteralRange = 1 * MB; const int kNumberOfRegisters = 32; @@ -146,7 +147,8 @@ const unsigned kFloat16ExponentBias = 15; // Actual value of root register is offset from the root array's start // to take advantage of negative displacement values. // TODO(sigurds): Choose best value. -constexpr int kRootRegisterBias = 256; +// TODO(ishell): Choose best value for ptr-compr. +constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 
256 : 0; using float16 = uint16_t; diff --git a/chromium/v8/src/codegen/arm64/instructions-arm64.cc b/chromium/v8/src/codegen/arm64/instructions-arm64.cc index 05f3654da99..ab022affdd1 100644 --- a/chromium/v8/src/codegen/arm64/instructions-arm64.cc +++ b/chromium/v8/src/codegen/arm64/instructions-arm64.cc @@ -71,6 +71,7 @@ static uint64_t RotateRight(uint64_t value, unsigned int rotate, unsigned int width) { DCHECK_LE(width, 64); rotate &= 63; + if (rotate == 0) return value; return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) | (value >> rotate); } @@ -191,16 +192,16 @@ int64_t Instruction::ImmPCOffset() { } else if (BranchType() != UnknownBranchType) { // All PC-relative branches. // Relative branch offsets are instruction-size-aligned. - offset = ImmBranch() << kInstrSizeLog2; + offset = ImmBranch() * kInstrSize; } else if (IsUnresolvedInternalReference()) { // Internal references are always word-aligned. - offset = ImmUnresolvedInternalReference() << kInstrSizeLog2; + offset = ImmUnresolvedInternalReference() * kInstrSize; } else { // Load literal (offset from PC). DCHECK(IsLdrLiteral()); // The offset is always shifted by 2 bits, even for loads to 64-bits // registers. - offset = ImmLLiteral() << kInstrSizeLog2; + offset = ImmLLiteral() * kInstrSize; } return offset; } diff --git a/chromium/v8/src/codegen/arm64/instructions-arm64.h b/chromium/v8/src/codegen/arm64/instructions-arm64.h index 1132ba39db2..7fe732e2baa 100644 --- a/chromium/v8/src/codegen/arm64/instructions-arm64.h +++ b/chromium/v8/src/codegen/arm64/instructions-arm64.h @@ -5,6 +5,7 @@ #ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_ #define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_ +#include "src/base/memory.h" #include "src/codegen/arm64/constants-arm64.h" #include "src/codegen/arm64/register-arm64.h" #include "src/codegen/arm64/utils-arm64.h" @@ -82,11 +83,13 @@ enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister }; class Instruction { public: V8_INLINE Instr InstructionBits() const { - return *reinterpret_cast<const Instr*>(this); + // Usually this is aligned, but when de/serializing that's not guaranteed. + return base::ReadUnalignedValue<Instr>(reinterpret_cast<Address>(this)); } V8_INLINE void SetInstructionBits(Instr new_instr) { - *reinterpret_cast<Instr*>(this) = new_instr; + // Usually this is aligned, but when de/serializing that's not guaranteed. + base::WriteUnalignedValue(reinterpret_cast<Address>(this), new_instr); } int Bit(int pos) const { return (InstructionBits() >> pos) & 1; } @@ -96,7 +99,9 @@ class Instruction { } int32_t SignedBits(int msb, int lsb) const { - int32_t bits = *(reinterpret_cast<const int32_t*>(this)); + // Usually this is aligned, but when de/serializing that's not guaranteed. + int32_t bits = + base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(this)); return signed_bitextract_32(msb, lsb, bits); } @@ -125,7 +130,8 @@ class Instruction { // formed from ImmPCRelLo and ImmPCRelHi. 
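// ImmPCRelHi() is a signed field, so the new code casts it to uint32_t before
// shifting (left-shifting a negative value is undefined in C++) and lets
// signed_bitextract_32 restore the sign of the combined offset afterwards.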
int ImmPCRel() const { DCHECK(IsPCRelAddressing()); - int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo()); + int offset = (static_cast<uint32_t>(ImmPCRelHi()) << ImmPCRelLo_width) | + ImmPCRelLo(); int width = ImmPCRelLo_width + ImmPCRelHi_width; return signed_bitextract_32(width - 1, 0, offset); } @@ -404,7 +410,7 @@ class Instruction { void SetImmLLiteral(Instruction* source); uintptr_t LiteralAddress() { - int offset = ImmLLiteral() << kLoadLiteralScaleLog2; + int offset = ImmLLiteral() * kLoadLiteralScale; return reinterpret_cast<uintptr_t>(this) + offset; } diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index 62bd9c26bfb..261fd1e564a 100644 --- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -93,6 +93,15 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand, } } +void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand, + StatusFlags nzcv, Condition cond) { + if (COMPRESS_POINTERS_BOOL) { + Ccmp(rn.W(), operand.ToW(), nzcv, cond); + } else { + Ccmp(rn, operand, nzcv, cond); + } +} + void MacroAssembler::Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); @@ -157,6 +166,14 @@ void TurboAssembler::Cmp(const Register& rn, const Operand& operand) { Subs(AppropriateZeroRegFor(rn), rn, operand); } +void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) { + if (COMPRESS_POINTERS_BOOL) { + Cmp(rn.W(), operand.ToW()); + } else { + Cmp(rn, operand); + } +} + void TurboAssembler::Neg(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -982,7 +999,12 @@ void TurboAssembler::SmiUntag(Register dst, Register src) { AssertSmi(src); } DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); - Asr(dst, src, kSmiShift); + if (COMPRESS_POINTERS_BOOL) { + Asr(dst.W(), src.W(), kSmiShift); + Sxtw(dst, dst); + } else { + Asr(dst, src, kSmiShift); + } } void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { @@ -1002,11 +1024,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } else { DCHECK(SmiValuesAre31Bits()); -#ifdef V8_COMPRESS_POINTERS - Ldrsw(dst, src); -#else - Ldr(dst, src); -#endif + if (COMPRESS_POINTERS_BOOL) { + Ldr(dst.W(), src); + } else { + Ldr(dst, src); + } SmiUntag(dst); } } @@ -1029,13 +1051,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, } void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { - Cmp(x, y); - B(eq, dest); + CompareAndBranch(x, y, eq, dest); } void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { - Cmp(x, y); - B(lt, dest); + CompareAndBranch(x, y, lt, dest); } void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { @@ -1083,7 +1103,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; DCHECK(base::bits::IsPowerOfTwo(unit_size)); - const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); + const int shift = base::bits::CountTrailingZeros(unit_size); const Operand size(count, LSL, shift); if (size.IsZero()) { @@ -1136,7 +1156,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; DCHECK(base::bits::IsPowerOfTwo(unit_size)); - const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); + const int 
shift = base::bits::CountTrailingZeros(unit_size); const Operand size(count, LSL, shift); if (size.IsZero()) { @@ -1175,7 +1195,7 @@ void TurboAssembler::DropSlots(int64_t count) { void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); } -void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, +void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, Condition cond, Label* label) { if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) && ((cond == eq) || (cond == ne))) { @@ -1190,6 +1210,16 @@ void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, } } +void TurboAssembler::CompareTaggedAndBranch(const Register& lhs, + const Operand& rhs, Condition cond, + Label* label) { + if (COMPRESS_POINTERS_BOOL) { + CompareAndBranch(lhs.W(), rhs.ToW(), cond, label); + } else { + CompareAndBranch(lhs, rhs, cond, label); + } +} + void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, const uint64_t bit_pattern, Label* label) { diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc index 0a721b06474..892458fe8bb 100644 --- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -295,7 +295,9 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) { Handle<HeapObject> x( reinterpret_cast<Address*>(operand.ImmediateValue())); - IndirectLoadConstant(rd, x); + // TODO(v8:9706): Fix-it! This load will always uncompress the value + // even when we are loading a compressed embedded object. + IndirectLoadConstant(rd.X(), x); return; } } @@ -650,7 +652,14 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst, // The move was successful; nothing to do here. } else { // Pre-shift the immediate to the least-significant bits of the register. - int shift_low = CountTrailingZeros(imm, reg_size); + int shift_low; + if (reg_size == 64) { + shift_low = base::bits::CountTrailingZeros(imm); + } else { + DCHECK_EQ(reg_size, 32); + shift_low = base::bits::CountTrailingZeros(static_cast<uint32_t>(imm)); + } + if (mode == kLimitShiftForSP) { // When applied to the stack pointer, the subsequent arithmetic operation // can use the extend form to shift left by a maximum of four bits. Right @@ -1456,15 +1465,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) { MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); } -void MacroAssembler::LoadObject(Register result, Handle<Object> object) { - AllowDeferredHandleDereference heap_object_check; - if (object->IsHeapObject()) { - Mov(result, Handle<HeapObject>::cast(object)); - } else { - Mov(result, Operand(Smi::cast(*object))); - } -} - void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); } void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, @@ -1923,21 +1923,25 @@ void TurboAssembler::Call(ExternalReference target) { } void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { - STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
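// With 32-bit Smis the payload sits in the upper half of the register, so a
// single arithmetic shift right by (kSmiShift - kSystemPointerSizeLog2) turns
// the tagged index straight into a table offset; with 31-bit Smis the scaling
// is folded into the addressing mode instead, using the sign-extended W half
// when pointers are compressed, as the replacement code below shows.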
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) - STATIC_ASSERT(kSmiShiftSize == 0); - Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift); -#else - STATIC_ASSERT(kSmiShiftSize == 31); - Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); -#endif - Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset()); - Ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); + if (SmiValuesAre32Bits()) { + Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); + Add(builtin_index, builtin_index, + IsolateData::builtin_entry_table_offset()); + Ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); + } else { + DCHECK(SmiValuesAre31Bits()); + if (COMPRESS_POINTERS_BOOL) { + Add(builtin_index, kRootRegister, + Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift)); + } else { + Add(builtin_index, kRootRegister, + Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiShift)); + } + Ldr(builtin_index, + MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); + } } void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { @@ -2207,43 +2211,34 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, Bind(®ular_invoke); } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - - Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate())); - Ldrsb(x4, MemOperand(x4)); - Cbz(x4, &skip_hook); - - { - // Load receiver to pass it later to DebugOnFunctionCall hook. - Operand actual_op = actual.is_immediate() ? Operand(actual.immediate()) - : Operand(actual.reg()); - Mov(x4, actual_op); - Ldr(x4, MemOperand(sp, x4, LSL, kSystemPointerSizeLog2)); - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + // Load receiver to pass it later to DebugOnFunctionCall hook. + if (actual.is_reg()) { + Ldr(x4, MemOperand(sp, actual.reg(), LSL, kSystemPointerSizeLog2)); + } else { + Ldr(x4, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2)); + } + FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - Register expected_reg = padreg; - Register actual_reg = padreg; - if (expected.is_reg()) expected_reg = expected.reg(); - if (actual.is_reg()) actual_reg = actual.reg(); - if (!new_target.is_valid()) new_target = padreg; + Register expected_reg = padreg; + Register actual_reg = padreg; + if (expected.is_reg()) expected_reg = expected.reg(); + if (actual.is_reg()) actual_reg = actual.reg(); + if (!new_target.is_valid()) new_target = padreg; - // Save values on stack. - SmiTag(expected_reg); - SmiTag(actual_reg); - Push(expected_reg, actual_reg, new_target, fun); - Push(fun, x4); - CallRuntime(Runtime::kDebugOnFunctionCall); + // Save values on stack. + SmiTag(expected_reg); + SmiTag(actual_reg); + Push(expected_reg, actual_reg, new_target, fun); + Push(fun, x4); + CallRuntime(Runtime::kDebugOnFunctionCall); - // Restore values from stack. - Pop(fun, new_target, actual_reg, expected_reg); - SmiUntag(actual_reg); - SmiUntag(expected_reg); - } - Bind(&skip_hook); + // Restore values from stack. 
+ Pop(fun, new_target, actual_reg, expected_reg); + SmiUntag(actual_reg); + SmiUntag(expected_reg); } void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, @@ -2256,7 +2251,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3)); // On function call, call into the debugger if necessary. - CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate())); + Ldrsb(x4, MemOperand(x4)); + Cbnz(x4, &debug_hook); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -2284,6 +2285,12 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, JumpCodeObject(code); } } + B(&done); + + // Deferred debug hook. + bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + B(&continue_after_hook); // Continue here if InvokePrologue does handle the invocation due to // mismatched parameter counts. @@ -2636,7 +2643,7 @@ void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) { Register temp = temps.AcquireX(); DCHECK(!AreAliased(obj, temp)); LoadRoot(temp, index); - Cmp(obj, temp); + CmpTagged(obj, temp); } void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index, @@ -2669,20 +2676,20 @@ void MacroAssembler::JumpIfIsInRange(const Register& value, void TurboAssembler::LoadTaggedPointerField(const Register& destination, const MemOperand& field_operand) { -#ifdef V8_COMPRESS_POINTERS - DecompressTaggedPointer(destination, field_operand); -#else - Ldr(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DecompressTaggedPointer(destination, field_operand); + } else { + Ldr(destination, field_operand); + } } void TurboAssembler::LoadAnyTaggedField(const Register& destination, const MemOperand& field_operand) { -#ifdef V8_COMPRESS_POINTERS - DecompressAnyTagged(destination, field_operand); -#else - Ldr(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DecompressAnyTagged(destination, field_operand); + } else { + Ldr(destination, field_operand); + } } void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { @@ -2691,33 +2698,31 @@ void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { void TurboAssembler::StoreTaggedField(const Register& value, const MemOperand& dst_field_operand) { -#ifdef V8_COMPRESS_POINTERS - RecordComment("[ StoreTagged"); - Str(value.W(), dst_field_operand); - RecordComment("]"); -#else - Str(value, dst_field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + Str(value.W(), dst_field_operand); + } else { + Str(value, dst_field_operand); + } } void TurboAssembler::DecompressTaggedSigned(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressTaggedSigned"); - Ldrsw(destination, field_operand); + Ldr(destination.W(), field_operand); RecordComment("]"); } void TurboAssembler::DecompressTaggedSigned(const Register& destination, const Register& source) { RecordComment("[ DecompressTaggedSigned"); - Sxtw(destination, source); + Mov(destination.W(), source.W()); RecordComment("]"); } void TurboAssembler::DecompressTaggedPointer(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressTaggedPointer"); - Ldrsw(destination, field_operand); + Ldr(destination.W(), field_operand); Add(destination, 
kRootRegister, destination); RecordComment("]"); } @@ -2725,57 +2730,22 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination, void TurboAssembler::DecompressTaggedPointer(const Register& destination, const Register& source) { RecordComment("[ DecompressTaggedPointer"); - Add(destination, kRootRegister, Operand(source, SXTW)); + Add(destination, kRootRegister, Operand(source, UXTW)); RecordComment("]"); } void TurboAssembler::DecompressAnyTagged(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressAnyTagged"); - Ldrsw(destination, field_operand); - if (kUseBranchlessPtrDecompressionInGeneratedCode) { - UseScratchRegisterScope temps(this); - // Branchlessly compute |masked_root|: - // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; - STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0)); - Register masked_root = temps.AcquireX(); - // Sign extend tag bit to entire register. - Sbfx(masked_root, destination, 0, kSmiTagSize); - And(masked_root, masked_root, kRootRegister); - // Now this add operation will either leave the value unchanged if it is a - // smi or add the isolate root if it is a heap object. - Add(destination, masked_root, destination); - } else { - Label done; - JumpIfSmi(destination, &done); - Add(destination, kRootRegister, destination); - bind(&done); - } + Ldr(destination.W(), field_operand); + Add(destination, kRootRegister, destination); RecordComment("]"); } void TurboAssembler::DecompressAnyTagged(const Register& destination, const Register& source) { RecordComment("[ DecompressAnyTagged"); - if (kUseBranchlessPtrDecompressionInGeneratedCode) { - UseScratchRegisterScope temps(this); - // Branchlessly compute |masked_root|: - // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; - STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0)); - Register masked_root = temps.AcquireX(); - // Sign extend tag bit to entire register. - Sbfx(masked_root, source, 0, kSmiTagSize); - And(masked_root, masked_root, kRootRegister); - // Now this add operation will either leave the value unchanged if it is a - // smi or add the isolate root if it is a heap object. - Add(destination, masked_root, Operand(source, SXTW)); - } else { - Label done; - Sxtw(destination, source); - JumpIfSmi(destination, &done); - Add(destination, kRootRegister, destination); - bind(&done); - } + Add(destination, kRootRegister, Operand(source, UXTW)); RecordComment("]"); } diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h index 94091e86248..cb3b51eb527 100644 --- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -652,6 +652,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { const Operand& operand); inline void Blr(const Register& xn); inline void Cmp(const Register& rn, const Operand& operand); + inline void CmpTagged(const Register& rn, const Operand& operand); inline void Subs(const Register& rd, const Register& rn, const Operand& operand); void Csel(const Register& rd, const Register& rn, const Operand& operand, @@ -843,6 +844,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CheckPageFlag(const Register& object, int mask, Condition cc, Label* condition_met); + // Compare a register with an operand, and branch to label depending on the + // condition. May corrupt the status flags. 
+ inline void CompareAndBranch(const Register& lhs, const Operand& rhs, + Condition cond, Label* label); + inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs, + Condition cond, Label* label); + // Test the bits of register defined by bit_pattern, and branch if ANY of // those bits are set. May corrupt the status flags. inline void TestAndBranchIfAnySet(const Register& reg, @@ -1006,6 +1014,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Conditional macros. inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond); + inline void CcmpTagged(const Register& rn, const Operand& operand, + StatusFlags nzcv, Condition cond); inline void Clz(const Register& rd, const Register& rn); @@ -1597,8 +1607,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { tbx(vd, vn, vn2, vn3, vn4, vm); } - void LoadObject(Register result, Handle<Object> object); - inline void PushSizeRegList( RegList registers, unsigned reg_size, CPURegister::RegisterType type = CPURegister::kRegister) { @@ -1643,11 +1651,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // be aligned to 16 bytes. void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset); - // Compare a register with an operand, and branch to label depending on the - // condition. May corrupt the status flags. - inline void CompareAndBranch(const Register& lhs, const Operand& rhs, - Condition cond, Label* label); - // Insert one or more instructions into the instruction stream that encode // some caller-defined data. The instructions used will be executable with no // side effects. @@ -1767,10 +1770,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& actual, Label* done, InvokeFlag flag, bool* definitely_mismatches); - // On function call, call into the debugger if necessary. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + // On function call, call into the debugger. + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); void InvokeFunctionCode(Register function, Register new_target, const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); diff --git a/chromium/v8/src/codegen/arm64/register-arm64.h b/chromium/v8/src/codegen/arm64/register-arm64.h index 7b938579f40..2bdf0ceea03 100644 --- a/chromium/v8/src/codegen/arm64/register-arm64.h +++ b/chromium/v8/src/codegen/arm64/register-arm64.h @@ -105,7 +105,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> { enum RegisterType { kRegister, kVRegister, kNoRegister }; static constexpr CPURegister no_reg() { - return CPURegister{0, 0, kNoRegister}; + return CPURegister{kCode_no_reg, 0, kNoRegister}; } template <int code, int size, RegisterType type> @@ -595,18 +595,16 @@ class V8_EXPORT_PRIVATE CPURegList { } CPURegister::RegisterType type() const { - DCHECK(IsValid()); return type_; } RegList list() const { - DCHECK(IsValid()); return list_; } inline void set_list(RegList new_list) { - DCHECK(IsValid()); list_ = new_list; + DCHECK(IsValid()); } // Combine another CPURegList into this one. 
Registers that already exist in @@ -654,7 +652,6 @@ class V8_EXPORT_PRIVATE CPURegList { static CPURegList GetSafepointSavedRegisters(); bool IsEmpty() const { - DCHECK(IsValid()); return list_ == 0; } @@ -662,7 +659,6 @@ class V8_EXPORT_PRIVATE CPURegList { const CPURegister& other2 = NoCPUReg, const CPURegister& other3 = NoCPUReg, const CPURegister& other4 = NoCPUReg) const { - DCHECK(IsValid()); RegList list = 0; if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit(); if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit(); @@ -672,12 +668,10 @@ class V8_EXPORT_PRIVATE CPURegList { } int Count() const { - DCHECK(IsValid()); return CountSetBits(list_, kRegListSizeInBits); } int RegisterSizeInBits() const { - DCHECK(IsValid()); return size_; } @@ -688,7 +682,6 @@ class V8_EXPORT_PRIVATE CPURegList { } int TotalSizeInBytes() const { - DCHECK(IsValid()); return RegisterSizeInBytes() * Count(); } diff --git a/chromium/v8/src/codegen/arm64/utils-arm64.cc b/chromium/v8/src/codegen/arm64/utils-arm64.cc index 2f972ce5027..dba2eeb7e10 100644 --- a/chromium/v8/src/codegen/arm64/utils-arm64.cc +++ b/chromium/v8/src/codegen/arm64/utils-arm64.cc @@ -89,15 +89,6 @@ int CountLeadingSignBits(int64_t value, int width) { } } -int CountTrailingZeros(uint64_t value, int width) { - DCHECK((width == 32) || (width == 64)); - if (width == 64) { - return static_cast<int>(base::bits::CountTrailingZeros64(value)); - } - return static_cast<int>(base::bits::CountTrailingZeros32( - static_cast<uint32_t>(value & 0xFFFFFFFFF))); -} - int CountSetBits(uint64_t value, int width) { DCHECK((width == 32) || (width == 64)); if (width == 64) { @@ -109,7 +100,7 @@ int CountSetBits(uint64_t value, int width) { int LowestSetBitPosition(uint64_t value) { DCHECK_NE(value, 0U); - return CountTrailingZeros(value, 64) + 1; + return base::bits::CountTrailingZeros(value) + 1; } int HighestSetBitPosition(uint64_t value) { @@ -118,12 +109,14 @@ int HighestSetBitPosition(uint64_t value) { } uint64_t LargestPowerOf2Divisor(uint64_t value) { - return value & (-(int64_t)value); + // Simulate two's complement (instead of casting to signed and negating) to + // avoid undefined behavior on signed overflow. + return value & ((~value) + 1); } int MaskToBit(uint64_t mask) { DCHECK_EQ(CountSetBits(mask, 64), 1); - return CountTrailingZeros(mask, 64); + return base::bits::CountTrailingZeros(mask); } #undef __ diff --git a/chromium/v8/src/codegen/arm64/utils-arm64.h b/chromium/v8/src/codegen/arm64/utils-arm64.h index 6bddce6fff2..182d781d55d 100644 --- a/chromium/v8/src/codegen/arm64/utils-arm64.h +++ b/chromium/v8/src/codegen/arm64/utils-arm64.h @@ -33,7 +33,6 @@ int float16classify(float16 value); // Bit counting. 
int CountLeadingZeros(uint64_t value, int width); int CountLeadingSignBits(int64_t value, int width); -V8_EXPORT_PRIVATE int CountTrailingZeros(uint64_t value, int width); V8_EXPORT_PRIVATE int CountSetBits(uint64_t value, int width); int LowestSetBitPosition(uint64_t value); int HighestSetBitPosition(uint64_t value); @@ -61,7 +60,7 @@ T ReverseBytes(T value, int block_bytes_log2) { static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1}, {4, 5, 6, 7, 0, 1, 2, 3}, {0, 1, 2, 3, 4, 5, 6, 7}}; - T result = 0; + typename std::make_unsigned<T>::type result = 0; for (int i = 0; i < 8; i++) { result <<= 8; result |= bytes[permute_table[block_bytes_log2 - 1][i]]; diff --git a/chromium/v8/src/codegen/assembler.cc b/chromium/v8/src/codegen/assembler.cc index 498afb03206..4e354d9e54b 100644 --- a/chromium/v8/src/codegen/assembler.cc +++ b/chromium/v8/src/codegen/assembler.cc @@ -92,7 +92,7 @@ class DefaultAssemblerBuffer : public AssemblerBuffer { std::unique_ptr<AssemblerBuffer> Grow(int new_size) override { DCHECK_LT(size(), new_size); - return base::make_unique<DefaultAssemblerBuffer>(new_size); + return std::make_unique<DefaultAssemblerBuffer>(new_size); } private: @@ -121,12 +121,12 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer { std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* start, int size) { - return base::make_unique<ExternalAssemblerBufferImpl>( + return std::make_unique<ExternalAssemblerBufferImpl>( reinterpret_cast<byte*>(start), size); } std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size) { - return base::make_unique<DefaultAssemblerBuffer>(size); + return std::make_unique<DefaultAssemblerBuffer>(size); } // ----------------------------------------------------------------------------- diff --git a/chromium/v8/src/codegen/assembler.h b/chromium/v8/src/codegen/assembler.h index 98639583d81..af70c4a48fb 100644 --- a/chromium/v8/src/codegen/assembler.h +++ b/chromium/v8/src/codegen/assembler.h @@ -36,6 +36,7 @@ #define V8_CODEGEN_ASSEMBLER_H_ #include <forward_list> +#include <memory> #include <unordered_map> #include "src/base/memory.h" diff --git a/chromium/v8/src/codegen/code-stub-assembler.cc b/chromium/v8/src/codegen/code-stub-assembler.cc index 7dad8cb95e0..3051ce3662c 100644 --- a/chromium/v8/src/codegen/code-stub-assembler.cc +++ b/chromium/v8/src/codegen/code-stub-assembler.cc @@ -7,9 +7,11 @@ #include "include/v8-internal.h" #include "src/base/macros.h" #include "src/codegen/code-factory.h" +#include "src/codegen/tnode.h" #include "src/common/globals.h" #include "src/execution/frames-inl.h" #include "src/execution/frames.h" +#include "src/execution/protectors.h" #include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop. 
#include "src/logging/counters.h" #include "src/objects/api-callbacks.h" @@ -17,6 +19,7 @@ #include "src/objects/descriptor-array.h" #include "src/objects/function-kind.h" #include "src/objects/heap-number.h" +#include "src/objects/js-generator.h" #include "src/objects/oddball.h" #include "src/objects/ordered-hash-table-inl.h" #include "src/objects/property-cell.h" @@ -26,10 +29,6 @@ namespace v8 { namespace internal { using compiler::Node; -template <class T> -using TNode = compiler::TNode<T>; -template <class T> -using SloppyTNode = compiler::SloppyTNode<T>; CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state) : compiler::CodeAssembler(state), @@ -135,6 +134,148 @@ void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node, Check(branch, message, file, line, extra_nodes); } +template <> +TNode<Smi> CodeStubAssembler::IntPtrToParameter<Smi>(TNode<IntPtrT> value) { + return SmiTag(value); +} +template <> +TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>( + TNode<IntPtrT> value) { + return value; +} + +void CodeStubAssembler::CollectCallableFeedback( + TNode<Object> maybe_target, TNode<Context> context, + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) { + Label extra_checks(this, Label::kDeferred), done(this); + + // Check if we have monomorphic {target} feedback already. + TNode<MaybeObject> feedback = + LoadFeedbackVectorSlot(feedback_vector, slot_id); + Comment("check if monomorphic"); + TNode<BoolT> is_monomorphic = IsWeakReferenceToObject(feedback, maybe_target); + GotoIf(is_monomorphic, &done); + + // Check if it is a megamorphic {target}. + Comment("check if megamorphic"); + TNode<BoolT> is_megamorphic = TaggedEqual( + feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate()))); + Branch(is_megamorphic, &done, &extra_checks); + + BIND(&extra_checks); + { + Label initialize(this), mark_megamorphic(this); + + Comment("check if weak reference"); + TNode<BoolT> is_uninitialized = TaggedEqual( + feedback, + HeapConstant(FeedbackVector::UninitializedSentinel(isolate()))); + GotoIf(is_uninitialized, &initialize); + CSA_ASSERT(this, IsWeakOrCleared(feedback)); + + // If the weak reference is cleared, we have a new chance to become + // monomorphic. + Comment("check if weak reference is cleared"); + Branch(IsCleared(feedback), &initialize, &mark_megamorphic); + + BIND(&initialize); + { + Comment("check if function in same native context"); + GotoIf(TaggedIsSmi(maybe_target), &mark_megamorphic); + TNode<HeapObject> target = CAST(maybe_target); + // Check if the {target} is a JSFunction or JSBoundFunction + // in the current native context. + TVARIABLE(HeapObject, var_current, target); + Label loop(this, &var_current), done_loop(this); + Goto(&loop); + BIND(&loop); + { + Label if_boundfunction(this), if_function(this); + TNode<HeapObject> current = var_current.value(); + TNode<Uint16T> current_instance_type = LoadInstanceType(current); + GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), + &if_boundfunction); + Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE), + &if_function, &mark_megamorphic); + + BIND(&if_function); + { + // Check that the JSFunction {current} is in the current native + // context. 
+ TNode<Context> current_context = + CAST(LoadObjectField(current, JSFunction::kContextOffset)); + TNode<NativeContext> current_native_context = + LoadNativeContext(current_context); + Branch( + TaggedEqual(LoadNativeContext(context), current_native_context), + &done_loop, &mark_megamorphic); + } + BIND(&if_boundfunction); + { + // Continue with the [[BoundTargetFunction]] of {target}. + var_current = LoadObjectField<HeapObject>( + current, JSBoundFunction::kBoundTargetFunctionOffset); + Goto(&loop); + } + } + BIND(&done_loop); + StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, target); + ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize"); + Goto(&done); + } + + BIND(&mark_megamorphic); + { + // MegamorphicSentinel is an immortal immovable object so + // write-barrier is not needed. + Comment("transition to megamorphic"); + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol)); + StoreFeedbackVectorSlot( + feedback_vector, slot_id, + HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())), + SKIP_WRITE_BARRIER); + ReportFeedbackUpdate(feedback_vector, slot_id, + "Call:TransitionMegamorphic"); + Goto(&done); + } + } + + BIND(&done); +} + +void CodeStubAssembler::CollectCallFeedback( + TNode<Object> maybe_target, TNode<Context> context, + TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot_id) { + Label feedback_done(this); + // If feedback_vector is not valid, then nothing to do. + GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done); + + // Increment the call count. + TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector); + IncrementCallCount(feedback_vector, slot_id); + + // Collect the callable {target} feedback. + CollectCallableFeedback(maybe_target, context, feedback_vector, slot_id); + Goto(&feedback_done); + + BIND(&feedback_done); +} + +void CodeStubAssembler::IncrementCallCount( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) { + Comment("increment call count"); + TNode<Smi> call_count = + CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize)); + // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call + // count are used as flags. To increment the call count by 1 we hence + // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}. + TNode<Smi> new_count = SmiAdd( + call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift)); + // Count is Smi, so we don't need a write barrier. 
+ StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count, + SKIP_WRITE_BARRIER, kTaggedSize); +} + void CodeStubAssembler::FastCheck(TNode<BoolT> condition) { Label ok(this), not_ok(this, Label::kDeferred); Branch(condition, &ok, ¬_ok); @@ -221,7 +362,7 @@ TNode<Object> CodeStubAssembler::NoContextConstant() { } #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \ + TNode<std::remove_pointer<std::remove_reference<decltype( \ std::declval<Heap>().rootAccessorName())>::type>::type> \ CodeStubAssembler::name##Constant() { \ return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ @@ -232,7 +373,7 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \ + TNode<std::remove_pointer<std::remove_reference<decltype( \ std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \ CodeStubAssembler::name##Constant() { \ return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ @@ -242,14 +383,12 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR -#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ - compiler::TNode<BoolT> CodeStubAssembler::Is##name( \ - SloppyTNode<Object> value) { \ - return TaggedEqual(value, name##Constant()); \ - } \ - compiler::TNode<BoolT> CodeStubAssembler::IsNot##name( \ - SloppyTNode<Object> value) { \ - return TaggedNotEqual(value, name##Constant()); \ +#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ + TNode<BoolT> CodeStubAssembler::Is##name(SloppyTNode<Object> value) { \ + return TaggedEqual(value, name##Constant()); \ + } \ + TNode<BoolT> CodeStubAssembler::IsNot##name(SloppyTNode<Object> value) { \ + return TaggedNotEqual(value, name##Constant()); \ } HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) #undef HEAP_CONSTANT_TEST @@ -264,6 +403,21 @@ TNode<BInt> CodeStubAssembler::BIntConstant(int value) { #endif } +template <> +TNode<Smi> CodeStubAssembler::IntPtrOrSmiConstant<Smi>(int value) { + return SmiConstant(value); +} + +template <> +TNode<IntPtrT> CodeStubAssembler::IntPtrOrSmiConstant<IntPtrT>(int value) { + return IntPtrConstant(value); +} + +template <> +TNode<RawPtrT> CodeStubAssembler::IntPtrOrSmiConstant<RawPtrT>(int value) { + return ReinterpretCast<RawPtrT>(IntPtrConstant(value)); +} + Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { if (mode == SMI_PARAMETERS) { return SmiConstant(value); @@ -273,41 +427,29 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { } } -TNode<BoolT> CodeStubAssembler::IntPtrOrSmiEqual(Node* left, Node* right, - ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiEqual(CAST(left), CAST(right)); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return IntPtrEqual(UncheckedCast<IntPtrT>(left), - UncheckedCast<IntPtrT>(right)); +bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<Smi> test) { + Smi smi_test; + if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) { + return true; } + return false; } -TNode<BoolT> CodeStubAssembler::IntPtrOrSmiNotEqual(Node* left, Node* right, - ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiNotEqual(CAST(left), CAST(right)); - } else { - 
DCHECK_EQ(INTPTR_PARAMETERS, mode); - return WordNotEqual(UncheckedCast<IntPtrT>(left), - UncheckedCast<IntPtrT>(right)); +bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test) { + int32_t constant_test; + if (ToInt32Constant(test, &constant_test) && constant_test == 0) { + return true; } + return false; } bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode) { - int32_t constant_test; - Smi smi_test; if (mode == INTPTR_PARAMETERS) { - if (ToInt32Constant(test, &constant_test) && constant_test == 0) { - return true; - } + return IsIntPtrOrSmiConstantZero(UncheckedCast<IntPtrT>(test)); } else { DCHECK_EQ(mode, SMI_PARAMETERS); - if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) { - return true; - } + return IsIntPtrOrSmiConstantZero(UncheckedCast<Smi>(test)); } return false; } @@ -352,6 +494,10 @@ Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) { } TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) { + intptr_t constant; + if (ToIntPtrConstant(value, &constant)) { + return BoolConstant(base::bits::IsPowerOfTwo(constant)); + } // value && !(value & (value - 1)) return IntPtrEqual( Select<IntPtrT>( @@ -578,21 +724,44 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) { TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) { if (SmiValuesAre32Bits() && kSystemPointerSize == kInt64Size) { // Check that the Smi value is zero in the lower bits. - TNode<IntPtrT> value = BitcastTaggedSignedToWord(smi); + TNode<IntPtrT> value = BitcastTaggedToWordForTagAndSmiBits(smi); return Word32Equal(Int32Constant(0), TruncateIntPtrToInt32(value)); } return Int32TrueConstant(); } -Node* CodeStubAssembler::SmiShiftBitsConstant() { - return IntPtrConstant(kSmiShiftSize + kSmiTagSize); +TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) { + if (COMPRESS_POINTERS_BOOL) { + return WordEqual( + BitcastTaggedToWordForTagAndSmiBits(smi), + BitcastTaggedToWordForTagAndSmiBits(NormalizeSmiIndex(smi))); + } + return Int32TrueConstant(); +} + +TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) { + if (COMPRESS_POINTERS_BOOL) { + TNode<Int32T> raw = + TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(smi_index)); + smi_index = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw)); + } + return smi_index; } TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) { - TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value); - TNode<Smi> smi = - BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant())); - return smi; + if (COMPRESS_POINTERS_BOOL) { + static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1), + "Use shifting instead of add"); + return BitcastWordToTaggedSigned( + ChangeUint32ToWord(Int32Add(value, value))); + } + return SmiTag(ChangeInt32ToIntPtr(value)); +} + +TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) { + CSA_ASSERT(this, IntPtrLessThan(ChangeUint32ToWord(value), + IntPtrConstant(Smi::kMaxValue))); + return SmiFromInt32(Signed(value)); } TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) { @@ -612,6 +781,9 @@ TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) { if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) { return SmiConstant(constant_value); } + if (COMPRESS_POINTERS_BOOL) { + return SmiFromInt32(TruncateIntPtrToInt32(value)); + } TNode<Smi> smi = BitcastWordToTaggedSigned(WordShl(value, 
SmiShiftBitsConstant())); return smi; @@ -622,11 +794,19 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) { if (ToIntPtrConstant(value, &constant_value)) { return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize)); } - return Signed( - WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant())); + if (COMPRESS_POINTERS_BOOL) { + return ChangeInt32ToIntPtr(SmiToInt32(value)); + } + return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value), + SmiShiftBitsConstant())); } TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) { + if (COMPRESS_POINTERS_BOOL) { + return Signed(Word32Sar( + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)), + SmiShiftBitsConstant32())); + } TNode<IntPtrT> result = SmiUntag(value); return TruncateIntPtrToInt32(result); } @@ -673,13 +853,13 @@ TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { return BitcastWordToTaggedSigned( - TryIntPtrAdd(BitcastTaggedSignedToWord(lhs), - BitcastTaggedSignedToWord(rhs), if_overflow)); + TryIntPtrAdd(BitcastTaggedToWordForTagAndSmiBits(lhs), + BitcastTaggedToWordForTagAndSmiBits(rhs), if_overflow)); } else { DCHECK(SmiValuesAre31Bits()); TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow( - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); TNode<BoolT> overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode<Int32T> result = Projection<0>(pair); @@ -690,8 +870,9 @@ TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs, TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { - TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow( - BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs)); + TNode<PairT<IntPtrT, BoolT>> pair = + IntPtrSubWithOverflow(BitcastTaggedToWordForTagAndSmiBits(lhs), + BitcastTaggedToWordForTagAndSmiBits(rhs)); TNode<BoolT> overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode<IntPtrT> result = Projection<0>(pair); @@ -699,8 +880,8 @@ TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs, } else { DCHECK(SmiValuesAre31Bits()); TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow( - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); TNode<BoolT> overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode<Int32T> result = Projection<0>(pair); @@ -878,7 +1059,7 @@ TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) { } BIND(&answer_zero); { - TNode<Word32T> or_result = Word32Or(lhs32, rhs32); + TNode<Int32T> or_result = Word32Or(lhs32, rhs32); Label if_should_be_negative_zero(this), if_should_be_zero(this); Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero, &if_should_be_zero); @@ -982,41 +1163,27 @@ TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32( return ReinterpretCast<Int32T>(value); } -TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) { - STATIC_ASSERT(kSmiTagMask < kMaxUInt32); - return Word32Equal(Word32And(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), - 
Int32Constant(kSmiTagMask)), - Int32Constant(0)); -} - TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) { STATIC_ASSERT(kSmiTagMask < kMaxUInt32); return Word32Equal( - Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(a)), + Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), Int32Constant(kSmiTagMask)), Int32Constant(0)); } -TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) { - // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we - // can nonetheless use it to inspect the Smi tag. The assumption here is that - // the GC will not exchange Smis for HeapObjects or vice-versa. - TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a)); - STATIC_ASSERT(kSmiTagMask < kMaxUInt32); - return Word32NotEqual( - Word32And(TruncateIntPtrToInt32(a_bitcast), Int32Constant(kSmiTagMask)), - Int32Constant(0)); +TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) { + return Word32BinaryNot(TaggedIsSmi(a)); } TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) { #if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) return Word32Equal( Word32And( - TruncateIntPtrToInt32(BitcastTaggedToWord(a)), - Uint32Constant(kSmiTagMask | static_cast<int32_t>(kSmiSignMask))), + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), + Uint32Constant(static_cast<uint32_t>(kSmiTagMask | kSmiSignMask))), Int32Constant(0)); #else - return WordEqual(WordAnd(BitcastTaggedToWord(a), + return WordEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(a), IntPtrConstant(kSmiTagMask | kSmiSignMask)), IntPtrConstant(0)); #endif @@ -1052,55 +1219,6 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( INTPTR_PARAMETERS, if_hole); } -void CodeStubAssembler::BranchIfPrototypesHaveNoElements( - Node* receiver_map, Label* definitely_no_elements, - Label* possibly_elements) { - CSA_SLOW_ASSERT(this, IsMap(receiver_map)); - VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map); - Label loop_body(this, &var_map); - TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant(); - TNode<NumberDictionary> empty_slow_element_dictionary = - EmptySlowElementDictionaryConstant(); - Goto(&loop_body); - - BIND(&loop_body); - { - Node* map = var_map.value(); - TNode<HeapObject> prototype = LoadMapPrototype(map); - GotoIf(IsNull(prototype), definitely_no_elements); - TNode<Map> prototype_map = LoadMap(prototype); - TNode<Uint16T> prototype_instance_type = LoadMapInstanceType(prototype_map); - - // Pessimistically assume elements if a Proxy, Special API Object, - // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this - // instance type check, it's not necessary to check for interceptors or - // access checks. - Label if_custom(this, Label::kDeferred), if_notcustom(this); - Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type), - &if_custom, &if_notcustom); - - BIND(&if_custom); - { - // For string JSPrimitiveWrapper wrappers we still support the checks as - // long as they wrap the empty string. 
- GotoIfNot( - InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), - possibly_elements); - Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype); - Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements); - } - - BIND(&if_notcustom); - { - TNode<FixedArrayBase> prototype_elements = LoadElements(CAST(prototype)); - var_map.Bind(prototype_map); - GotoIf(TaggedEqual(prototype_elements, empty_fixed_array), &loop_body); - Branch(TaggedEqual(prototype_elements, empty_slow_element_dictionary), - &loop_body, possibly_elements); - } - } -} - void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode<Object> object, Label* if_true, Label* if_false) { GotoIf(TaggedIsSmi(object), if_false); @@ -1118,19 +1236,6 @@ void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) { #endif } -void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects( - Label* if_true) { - STATIC_ASSERT(sizeof(DebugInfo::ExecutionMode) >= sizeof(int32_t)); - - TNode<ExternalReference> execution_mode_address = ExternalConstant( - ExternalReference::debug_execution_mode_address(isolate())); - TNode<Int32T> execution_mode = - UncheckedCast<Int32T>(Load(MachineType::Int32(), execution_mode_address)); - - GotoIf(Word32Equal(execution_mode, Int32Constant(DebugInfo::kSideEffects)), - if_true); -} - TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes, AllocationFlags flags, TNode<RawPtrT> top_address, @@ -1557,7 +1662,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map, } TNode<HeapObject> CodeStubAssembler::LoadFastProperties( - SloppyTNode<JSObject> object) { + SloppyTNode<JSReceiver> object) { CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object)))); TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); return Select<HeapObject>( @@ -1566,7 +1671,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties( } TNode<HeapObject> CodeStubAssembler::LoadSlowProperties( - SloppyTNode<JSObject> object) { + SloppyTNode<JSReceiver> object) { CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object))); TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); return Select<HeapObject>( @@ -1862,18 +1967,8 @@ TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32( return LoadObjectField<Uint32T>(string, String::kLengthOffset); } -Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) { - CSA_ASSERT(this, IsString(seq_string)); - CSA_ASSERT(this, - IsSequentialStringInstanceType(LoadInstanceType(seq_string))); - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); - return IntPtrAdd( - BitcastTaggedToWord(seq_string), - IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); -} - -Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) { - CSA_ASSERT(this, IsJSPrimitiveWrapper(object)); +TNode<Object> CodeStubAssembler::LoadJSPrimitiveWrapperValue( + TNode<JSPrimitiveWrapper> object) { return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset); } @@ -1887,15 +1982,9 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object, GotoIf(IsCleared(maybe_object), if_cleared); - GotoIf(Word32Equal(Word32And(TruncateIntPtrToInt32( - BitcastMaybeObjectToWord(maybe_object)), - Int32Constant(kHeapObjectTagMask)), - Int32Constant(kHeapObjectTag)), - &inner_if_strong); + GotoIf(IsStrong(maybe_object), &inner_if_strong); - *extracted = - BitcastWordToTagged(WordAnd(BitcastMaybeObjectToWord(maybe_object), - IntPtrConstant(~kWeakHeapObjectMask))); + 
*extracted = GetHeapObjectAssumeWeak(maybe_object); Goto(if_weak); BIND(&inner_if_smi); @@ -1908,10 +1997,10 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object, } TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) { - return Word32Equal( - Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), - Int32Constant(kHeapObjectTagMask)), - Int32Constant(kHeapObjectTag)); + return Word32Equal(Word32And(TruncateIntPtrToInt32( + BitcastTaggedToWordForTagAndSmiBits(value)), + Int32Constant(kHeapObjectTagMask)), + Int32Constant(kHeapObjectTag)); } TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong( @@ -1921,10 +2010,10 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong( } TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) { - return Word32Equal( - Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), - Int32Constant(kHeapObjectTagMask)), - Int32Constant(kWeakHeapObjectTag)); + return Word32Equal(Word32And(TruncateIntPtrToInt32( + BitcastTaggedToWordForTagAndSmiBits(value)), + Int32Constant(kHeapObjectTagMask)), + Int32Constant(kWeakHeapObjectTag)); } TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) { @@ -1932,11 +2021,6 @@ TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) { Int32Constant(kClearedWeakHeapObjectLower32)); } -TNode<BoolT> CodeStubAssembler::IsNotCleared(TNode<MaybeObject> value) { - return Word32NotEqual(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), - Int32Constant(kClearedWeakHeapObjectLower32)); -} - TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( TNode<MaybeObject> value) { CSA_ASSERT(this, IsWeakOrCleared(value)); @@ -1951,43 +2035,41 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( return GetHeapObjectAssumeWeak(value); } -TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object, - TNode<Object> value) { -#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS) - STATIC_ASSERT(kTaggedSize == kInt32Size); - return Word32Equal( - Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)), - Uint32Constant( - static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))), - TruncateWordToInt32(BitcastTaggedToWord(value))); -#else - return WordEqual(WordAnd(BitcastMaybeObjectToWord(object), - IntPtrConstant(~kWeakHeapObjectMask)), - BitcastTaggedToWord(value)); - -#endif -} - -TNode<BoolT> CodeStubAssembler::IsStrongReferenceTo(TNode<MaybeObject> object, - TNode<Object> value) { - return TaggedEqual(BitcastWordToTagged(BitcastMaybeObjectToWord(object)), - value); -} - -TNode<BoolT> CodeStubAssembler::IsNotWeakReferenceTo(TNode<MaybeObject> object, - TNode<Object> value) { -#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS) - return Word32NotEqual( - Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)), - Uint32Constant( - static_cast<uint32_t>(~kWeakHeapObjectMask & kMaxUInt32))), - TruncateWordToInt32(BitcastTaggedToWord(value))); -#else - return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object), - IntPtrConstant(~kWeakHeapObjectMask)), - BitcastTaggedToWord(value)); - -#endif +// This version generates +// (maybe_object & ~mask) == value +// It works for non-Smi |maybe_object| and for both Smi and HeapObject values +// but requires a big constant for ~mask. 
+TNode<BoolT> CodeStubAssembler::IsWeakReferenceToObject(
+    TNode<MaybeObject> maybe_object, TNode<Object> value) {
+  CSA_ASSERT(this, TaggedIsNotSmi(maybe_object));
+  if (COMPRESS_POINTERS_BOOL) {
+    return Word32Equal(
+        Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
+                  Uint32Constant(~static_cast<uint32_t>(kWeakHeapObjectMask))),
+        TruncateWordToInt32(BitcastTaggedToWord(value)));
+  } else {
+    return WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object),
+                             IntPtrConstant(~kWeakHeapObjectMask)),
+                     BitcastTaggedToWord(value));
+  }
+}
+
+// This version generates
+// maybe_object == (heap_object | mask)
+// It works for any |maybe_object| value and generates better code because it
+// uses a small constant for the mask.
+TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(
+    TNode<MaybeObject> maybe_object, TNode<HeapObject> heap_object) {
+  if (COMPRESS_POINTERS_BOOL) {
+    return Word32Equal(
+        TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
+        Word32Or(TruncateWordToInt32(BitcastTaggedToWord(heap_object)),
+                 Int32Constant(kWeakHeapObjectMask)));
+  } else {
+    return WordEqual(BitcastMaybeObjectToWord(maybe_object),
+                     WordOr(BitcastTaggedToWord(heap_object),
+                            IntPtrConstant(kWeakHeapObjectMask)));
+  }
 }
 
 TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
@@ -2123,16 +2205,27 @@ TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
   return Signed(DecodeWord<PropertyArray::LengthField>(value));
 }
 
-TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore(
+TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
     TNode<JSTypedArray> typed_array) {
-  // Backing store = external_pointer + base_pointer.
-  Node* external_pointer =
-      LoadObjectField(typed_array, JSTypedArray::kExternalPointerOffset,
-                      MachineType::Pointer());
-  TNode<Object> base_pointer =
-      LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset);
-  return UncheckedCast<RawPtrT>(
-      IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
+  // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer).
+  TNode<RawPtrT> external_pointer = LoadObjectField<RawPtrT>(
+      typed_array, JSTypedArray::kExternalPointerOffset);
+
+  TNode<IntPtrT> base_pointer;
+  if (COMPRESS_POINTERS_BOOL) {
+    TNode<Int32T> compressed_base =
+        LoadObjectField<Int32T>(typed_array, JSTypedArray::kBasePointerOffset);
+    // Zero-extend TaggedT to WordT according to the current compression scheme
+    // so that the addition with |external_pointer| (which already contains the
+    // compensated offset value) below will decompress the tagged value.
+    // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
+    // details.
+ base_pointer = Signed(ChangeUint32ToWord(compressed_base)); + } else { + base_pointer = + LoadObjectField<IntPtrT>(typed_array, JSTypedArray::kBasePointerOffset); + } + return RawPtrAdd(external_pointer, base_pointer); } TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( @@ -2267,8 +2360,7 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) { return var_result.value(); } -compiler::TNode<BigInt> -CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( +TNode<BigInt> CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) { Label if_zero(this), done(this); if (Is64()) { @@ -2416,59 +2508,30 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( return var_result.value(); } -void CodeStubAssembler::StoreJSTypedArrayElementFromTagged( - TNode<Context> context, TNode<JSTypedArray> typed_array, - TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) { - TNode<RawPtrT> data_pointer = LoadJSTypedArrayBackingStore(typed_array); - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - case UINT16_ELEMENTS: - case INT16_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - SmiToInt32(CAST(value)), SMI_PARAMETERS); - break; - case UINT32_ELEMENTS: - case INT32_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - TruncateTaggedToWord32(context, value), SMI_PARAMETERS); - break; - case FLOAT32_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))), - SMI_PARAMETERS); - break; - case FLOAT64_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS); - break; - case BIGUINT64_ELEMENTS: - case BIGINT64_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - UncheckedCast<BigInt>(value), SMI_PARAMETERS); - break; - default: - UNREACHABLE(); - } -} - +template <typename TIndex> TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( - Node* object, Node* slot_index_node, int additional_offset, - ParameterMode parameter_mode) { - CSA_SLOW_ASSERT(this, IsFeedbackVector(object)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode)); + TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot, + int additional_offset) { int32_t header_size = FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag; - TNode<IntPtrT> offset = ElementOffsetFromIndex( - slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size); + TNode<IntPtrT> offset = + ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size); CSA_SLOW_ASSERT( - this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)), + this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector), FeedbackVector::kHeaderSize)); - return UncheckedCast<MaybeObject>( - Load(MachineType::AnyTagged(), object, offset)); + return Load<MaybeObject>(feedback_vector, offset); } +template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( + TNode<FeedbackVector> feedback_vector, TNode<Smi> slot, + int additional_offset); +template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( + TNode<FeedbackVector> feedback_vector, TNode<IntPtrT> slot, + int additional_offset); +template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + int additional_offset); 
+ template <typename Array> TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement( TNode<Array> object, int array_header_size, Node* index_node, @@ -2617,6 +2680,13 @@ TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( return UncheckedCast<Float64T>(Load(machine_type, base, offset)); } +TNode<BoolT> CodeStubAssembler::LoadContextHasExtensionField( + SloppyTNode<Context> context) { + TNode<IntPtrT> value = + LoadAndUntagObjectField(context, Context::kLengthOffset); + return IsSetWord<Context::HasExtensionField>(value); +} + TNode<Object> CodeStubAssembler::LoadContextElement( SloppyTNode<Context> context, int slot_index) { int offset = Context::SlotOffset(slot_index); @@ -2626,15 +2696,15 @@ TNode<Object> CodeStubAssembler::LoadContextElement( TNode<Object> CodeStubAssembler::LoadContextElement( SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) { - TNode<IntPtrT> offset = ElementOffsetFromIndex( - slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::SlotOffset(0)); + TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, + Context::SlotOffset(0)); return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset)); } TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context, TNode<Smi> slot_index) { - TNode<IntPtrT> offset = ElementOffsetFromIndex( - slot_index, PACKED_ELEMENTS, SMI_PARAMETERS, Context::SlotOffset(0)); + TNode<IntPtrT> offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, + Context::SlotOffset(0)); return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset)); } @@ -2949,33 +3019,30 @@ void CodeStubAssembler::StoreFixedDoubleArrayElement( StoreNoWriteBarrier(rep, object, offset, value_silenced); } -void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object, - Node* slot_index_node, - Node* value, - WriteBarrierMode barrier_mode, - int additional_offset, - ParameterMode parameter_mode) { - CSA_SLOW_ASSERT(this, IsFeedbackVector(object)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode)); +void CodeStubAssembler::StoreFeedbackVectorSlot( + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<AnyTaggedT> value, WriteBarrierMode barrier_mode, + int additional_offset) { DCHECK(IsAligned(additional_offset, kTaggedSize)); DCHECK(barrier_mode == SKIP_WRITE_BARRIER || barrier_mode == UNSAFE_SKIP_WRITE_BARRIER || barrier_mode == UPDATE_WRITE_BARRIER); int header_size = FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag; - TNode<IntPtrT> offset = ElementOffsetFromIndex( - slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size); - // Check that slot_index_node <= object.length. + TNode<IntPtrT> offset = + ElementOffsetFromIndex(Signed(slot), HOLEY_ELEMENTS, header_size); + // Check that slot <= feedback_vector.length. 
CSA_ASSERT(this, - IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)), + IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector), FeedbackVector::kHeaderSize)); if (barrier_mode == SKIP_WRITE_BARRIER) { - StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value); + StoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, offset, + value); } else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) { - UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, - value); + UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, + offset, value); } else { - Store(object, offset, value); + Store(feedback_vector, offset, value); } } @@ -3045,33 +3112,29 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Label success(this); TVARIABLE(Smi, var_tagged_length); ParameterMode mode = OptimalParameterMode(); - VARIABLE(var_length, OptimalParameterRepresentation(), - TaggedToParameter(LoadFastJSArrayLength(array), mode)); - VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array)); + TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array))); + TVARIABLE(FixedArrayBase, var_elements, LoadElements(array)); // Resize the capacity of the fixed array if it doesn't fit. TNode<IntPtrT> first = arg_index->value(); - Node* growth = IntPtrToParameter( - IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)), - first), - mode); + TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first)); PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(), &var_elements, growth, &pre_bailout); // Push each argument onto the end of the array now that there is enough // capacity. CodeStubAssembler::VariableList push_vars({&var_length}, zone()); - Node* elements = var_elements.value(); + TNode<FixedArrayBase> elements = var_elements.value(); args->ForEach( push_vars, - [this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) { + [&](TNode<Object> arg) { TryStoreArrayElement(kind, mode, &pre_bailout, elements, var_length.value(), arg); - Increment(&var_length, 1, mode); + Increment(&var_length); }, - first, nullptr); + first); { - TNode<Smi> length = ParameterToTagged(var_length.value(), mode); + TNode<Smi> length = BIntToSmi(var_length.value()); var_tagged_length = length; StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); Goto(&success); @@ -3111,8 +3174,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array, CSA_SLOW_ASSERT(this, IsJSArray(array)); Comment("BuildAppendJSArray: ", ElementsKindToString(kind)); ParameterMode mode = OptimalParameterMode(); - VARIABLE(var_length, OptimalParameterRepresentation(), - TaggedToParameter(LoadFastJSArrayLength(array), mode)); + TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array))); VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array)); // Resize the capacity of the fixed array if it doesn't fit. @@ -3124,9 +3186,9 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array, // capacity. 
TryStoreArrayElement(kind, mode, bailout, var_elements.value(), var_length.value(), value); - Increment(&var_length, 1, mode); + Increment(&var_length); - TNode<Smi> length = ParameterToTagged(var_length.value(), mode); + TNode<Smi> length = BIntToSmi(var_length.value()); StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); } @@ -3138,7 +3200,7 @@ Node* CodeStubAssembler::AllocateCellWithValue(Node* value, return result; } -Node* CodeStubAssembler::LoadCellValue(Node* cell) { +TNode<Object> CodeStubAssembler::LoadCellValue(Node* cell) { CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE)); return LoadObjectField(cell, Cell::kValueOffset); } @@ -3278,7 +3340,8 @@ TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length, TNode<IntPtrT> raw_size = GetArrayAllocationSize(Signed(length), UINT8_ELEMENTS, INTPTR_PARAMETERS, ByteArray::kHeaderSize + kObjectAlignmentMask); - TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); + TNode<IntPtrT> size = + WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), &if_sizeissmall, &if_notsizeissmall); @@ -3352,7 +3415,8 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString( TNode<IntPtrT> raw_size = GetArrayAllocationSize( Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize + kObjectAlignmentMask); - TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); + TNode<IntPtrT> size = + WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), &if_sizeissmall, &if_notsizeissmall); @@ -3423,7 +3487,8 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString( TNode<IntPtrT> raw_size = GetArrayAllocationSize( Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize + kObjectAlignmentMask); - TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); + TNode<IntPtrT> size = + WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), &if_sizeissmall, &if_notsizeissmall); @@ -3496,35 +3561,6 @@ TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString( offset); } -TNode<String> CodeStubAssembler::AllocateConsString(TNode<Uint32T> length, - TNode<String> left, - TNode<String> right) { - // Added string can be a cons string. - Comment("Allocating ConsString"); - TNode<Int32T> left_instance_type = LoadInstanceType(left); - TNode<Int32T> right_instance_type = LoadInstanceType(right); - - // Determine the resulting ConsString map to use depending on whether - // any of {left} or {right} has two byte encoding. 
- STATIC_ASSERT(kOneByteStringTag != 0); - STATIC_ASSERT(kTwoByteStringTag == 0); - TNode<Int32T> combined_instance_type = - Word32And(left_instance_type, right_instance_type); - TNode<Map> result_map = CAST(Select<Object>( - IsSetWord32(combined_instance_type, kStringEncodingMask), - [=] { return ConsOneByteStringMapConstant(); }, - [=] { return ConsStringMapConstant(); })); - TNode<HeapObject> result = AllocateInNewSpace(ConsString::kSize); - StoreMapNoWriteBarrier(result, result_map); - StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length, - MachineRepresentation::kWord32); - StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset, - Int32Constant(String::kEmptyHashField), - MachineRepresentation::kWord32); - StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left); - StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right); - return CAST(result); -} TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary( int at_least_space_for) { @@ -3762,106 +3798,26 @@ template V8_EXPORT_PRIVATE TNode<SmallOrderedHashSet> CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>( TNode<IntPtrT> capacity); -template <typename CollectionType> -void CodeStubAssembler::FindOrderedHashTableEntry( - Node* table, Node* hash, - const std::function<void(TNode<Object>, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found) { - // Get the index of the bucket. - TNode<IntPtrT> const number_of_buckets = - SmiUntag(CAST(UnsafeLoadFixedArrayElement( - CAST(table), CollectionType::NumberOfBucketsIndex()))); - TNode<WordT> const bucket = - WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); - TNode<IntPtrT> const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( - CAST(table), bucket, - CollectionType::HashTableStartIndex() * kTaggedSize))); - - // Walk the bucket chain. - TNode<IntPtrT> entry_start; - Label if_key_found(this); - { - TVARIABLE(IntPtrT, var_entry, first_entry); - Label loop(this, {&var_entry, entry_start_position}), - continue_next_entry(this); - Goto(&loop); - BIND(&loop); - - // If the entry index is the not-found sentinel, we are done. - GotoIf(IntPtrEqual(var_entry.value(), - IntPtrConstant(CollectionType::kNotFound)), - not_found); - - // Make sure the entry index is within range. - CSA_ASSERT( - this, - UintPtrLessThan( - var_entry.value(), - SmiUntag(SmiAdd( - CAST(UnsafeLoadFixedArrayElement( - CAST(table), CollectionType::NumberOfElementsIndex())), - CAST(UnsafeLoadFixedArrayElement( - CAST(table), - CollectionType::NumberOfDeletedElementsIndex())))))); - - // Compute the index of the entry relative to kHashTableStartIndex. - entry_start = - IntPtrAdd(IntPtrMul(var_entry.value(), - IntPtrConstant(CollectionType::kEntrySize)), - number_of_buckets); - - // Load the key from the entry. - TNode<Object> const candidate_key = UnsafeLoadFixedArrayElement( - CAST(table), entry_start, - CollectionType::HashTableStartIndex() * kTaggedSize); - - key_compare(candidate_key, &if_key_found, &continue_next_entry); - - BIND(&continue_next_entry); - // Load the index of the next entry in the bucket chain. 
- var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( - CAST(table), entry_start, - (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) * - kTaggedSize))); - - Goto(&loop); - } - - BIND(&if_key_found); - entry_start_position->Bind(entry_start); - Goto(entry_found); -} - -template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashMap>( - Node* table, Node* hash, - const std::function<void(TNode<Object>, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found); -template void CodeStubAssembler::FindOrderedHashTableEntry<OrderedHashSet>( - Node* table, Node* hash, - const std::function<void(TNode<Object>, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found); - Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) { Comment("AllocateStruct"); CSA_ASSERT(this, IsMap(map)); TNode<IntPtrT> size = TimesTaggedSize(LoadMapInstanceSizeInWords(map)); TNode<HeapObject> object = Allocate(size, flags); StoreMapNoWriteBarrier(object, map); - InitializeStructBody(object, map, size, Struct::kHeaderSize); + InitializeStructBody(object, size, Struct::kHeaderSize); return object; } -void CodeStubAssembler::InitializeStructBody(Node* object, Node* map, - Node* size, int start_offset) { - CSA_SLOW_ASSERT(this, IsMap(map)); +void CodeStubAssembler::InitializeStructBody(TNode<HeapObject> object, + TNode<IntPtrT> size, + int start_offset) { Comment("InitializeStructBody"); TNode<Oddball> filler = UndefinedConstant(); // Calculate the untagged field addresses. - object = BitcastTaggedToWord(object); - TNode<WordT> start_address = - IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag)); - TNode<WordT> end_address = - IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag)); + TNode<IntPtrT> start_address = + IntPtrAdd(BitcastTaggedToWord(object), + IntPtrConstant(start_offset - kHeapObjectTag)); + TNode<IntPtrT> end_address = IntPtrAdd(start_address, size); StoreFieldsNoWriteBarrier(start_address, end_address, filler); } @@ -3883,8 +3839,9 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap( } void CodeStubAssembler::InitializeJSObjectFromMap( - Node* object, Node* map, Node* instance_size, Node* properties, - Node* elements, SlackTrackingMode slack_tracking_mode) { + SloppyTNode<HeapObject> object, SloppyTNode<Map> map, + SloppyTNode<IntPtrT> instance_size, Node* properties, Node* elements, + SlackTrackingMode slack_tracking_mode) { CSA_SLOW_ASSERT(this, IsMap(map)); // This helper assumes that the object is in new-space, as guarded by the // check in AllocatedJSObjectFromMap. 
@@ -3915,7 +3872,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap( } void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking( - Node* object, Node* map, Node* instance_size, int start_offset) { + SloppyTNode<HeapObject> object, SloppyTNode<Map> map, + SloppyTNode<IntPtrT> instance_size, int start_offset) { STATIC_ASSERT(Map::kNoSlackTracking == 0); CSA_ASSERT( this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map))); @@ -3924,8 +3882,8 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking( } void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( - Node* object, Node* map, Node* instance_size) { - CSA_SLOW_ASSERT(this, IsMap(map)); + SloppyTNode<HeapObject> object, SloppyTNode<Map> map, + SloppyTNode<IntPtrT> instance_size) { Comment("InitializeJSObjectBodyNoSlackTracking"); // Perform in-object slack tracking if requested. @@ -3953,9 +3911,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( // The object still has in-object slack therefore the |unsed_or_unused| // field contain the "used" value. - TNode<UintPtrT> used_size = TimesTaggedSize(ChangeUint32ToWord( + TNode<IntPtrT> used_size = Signed(TimesTaggedSize(ChangeUint32ToWord( LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset, - MachineType::Uint8()))); + MachineType::Uint8())))); Comment("iInitialize filler fields"); InitializeFieldsWithRoot(object, used_size, instance_size, @@ -3984,19 +3942,19 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( BIND(&end); } -void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address, - Node* end_address, - Node* value) { +void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address, + TNode<IntPtrT> end_address, + TNode<Object> value) { Comment("StoreFieldsNoWriteBarrier"); CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize)); CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize)); - BuildFastLoop( + BuildFastLoop<IntPtrT>( start_address, end_address, - [this, value](Node* current) { + [=](TNode<IntPtrT> current) { UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current, value); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); } TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity( @@ -4008,12 +3966,12 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity( TNode<JSArray> CodeStubAssembler::AllocateJSArray( TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length, - Node* allocation_site, int array_header_size) { + TNode<AllocationSite> allocation_site, int array_header_size) { Comment("begin allocation of JSArray passing in elements"); CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); int base_size = array_header_size; - if (allocation_site != nullptr) { + if (!allocation_site.is_null()) { base_size += AllocationMemento::kSize; } @@ -4027,8 +3985,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray( std::pair<TNode<JSArray>, TNode<FixedArrayBase>> CodeStubAssembler::AllocateUninitializedJSArrayWithElements( ElementsKind kind, TNode<Map> array_map, TNode<Smi> length, - Node* allocation_site, Node* capacity, ParameterMode capacity_mode, - AllocationFlags allocation_flags, int array_header_size) { + TNode<AllocationSite> allocation_site, Node* capacity, + ParameterMode capacity_mode, AllocationFlags allocation_flags, + int array_header_size) { Comment("begin allocation of JSArray with elements"); CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0); 
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); @@ -4065,7 +4024,9 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements( BIND(&nonempty); { int base_size = array_header_size; - if (allocation_site != nullptr) base_size += AllocationMemento::kSize; + if (!allocation_site.is_null()) { + base_size += AllocationMemento::kSize; + } const int elements_offset = base_size; @@ -4138,8 +4099,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements( } TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray( - TNode<Map> array_map, TNode<Smi> length, Node* allocation_site, - TNode<IntPtrT> size_in_bytes) { + TNode<Map> array_map, TNode<Smi> length, + TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes) { CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); // Allocate space for the JSArray and the elements FixedArray in one go. @@ -4150,7 +4111,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray( StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset, RootIndex::kEmptyFixedArray); - if (allocation_site != nullptr) { + if (!allocation_site.is_null()) { InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize), allocation_site); } @@ -4160,7 +4121,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray( TNode<JSArray> CodeStubAssembler::AllocateJSArray( ElementsKind kind, TNode<Map> array_map, Node* capacity, TNode<Smi> length, - Node* allocation_site, ParameterMode capacity_mode, + TNode<AllocationSite> allocation_site, ParameterMode capacity_mode, AllocationFlags allocation_flags) { CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, capacity_mode)); @@ -4189,10 +4150,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray( return array; } -Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array, - Node* begin, Node* count, - ParameterMode mode, Node* capacity, - Node* allocation_site) { +Node* CodeStubAssembler::ExtractFastJSArray( + TNode<Context> context, TNode<JSArray> array, Node* begin, Node* count, + ParameterMode mode, Node* capacity, TNode<AllocationSite> allocation_site) { TNode<Map> original_array_map = LoadMap(array); TNode<Int32T> elements_kind = LoadMapElementsKind(original_array_map); @@ -4209,18 +4169,16 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array, return result; } -Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, - ParameterMode mode, - Node* allocation_site, - HoleConversionMode convert_holes) { +TNode<JSArray> CodeStubAssembler::CloneFastJSArray( + TNode<Context> context, TNode<JSArray> array, ParameterMode mode, + TNode<AllocationSite> allocation_site, HoleConversionMode convert_holes) { // TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this // function is also used to copy boilerplates even when the no-elements // protector is invalid. This function should be renamed to reflect its uses. 
- CSA_ASSERT(this, IsJSArray(array)); TNode<Number> length = LoadJSArrayLength(array); - Node* new_elements = nullptr; - VARIABLE(var_new_elements, MachineRepresentation::kTagged); + TNode<FixedArrayBase> new_elements; + TVARIABLE(FixedArrayBase, var_new_elements); TVARIABLE(Int32T, var_elements_kind, LoadMapElementsKind(LoadMap(array))); Label allocate_jsarray(this), holey_extract(this), @@ -4240,7 +4198,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, TaggedToParameter(CAST(length), mode), nullptr, ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode, nullptr, var_elements_kind.value()); - var_new_elements.Bind(new_elements); + var_new_elements = new_elements; Goto(&allocate_jsarray); if (need_conversion) { @@ -4257,7 +4215,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, LoadElements(array), IntPtrOrSmiConstant(0, mode), TaggedToParameter(CAST(length), mode), nullptr, ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted); - var_new_elements.Bind(new_elements); + var_new_elements = new_elements; // If the array type didn't change, use the original elements kind. GotoIfNot(var_holes_converted.value(), &allocate_jsarray); // Otherwise use PACKED_ELEMENTS for the target's elements kind. @@ -4283,8 +4241,8 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, TNode<Map> array_map = LoadJSArrayElementsMap(var_elements_kind.value(), native_context); - TNode<JSArray> result = AllocateJSArray( - array_map, CAST(var_new_elements.value()), CAST(length), allocation_site); + TNode<JSArray> result = AllocateJSArray(array_map, var_new_elements.value(), + CAST(length), allocation_site); return result; } @@ -4555,14 +4513,14 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles( const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; TNode<IntPtrT> first_from_element_offset = ElementOffsetFromIndex(first, kind, mode, 0); - TNode<WordT> limit_offset = IntPtrAdd(first_from_element_offset, - IntPtrConstant(first_element_offset)); + TNode<IntPtrT> limit_offset = IntPtrAdd(first_from_element_offset, + IntPtrConstant(first_element_offset)); TVARIABLE(IntPtrT, var_from_offset, ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind, mode, first_element_offset)); Label decrement(this, {&var_from_offset}), done(this); - TNode<WordT> to_array_adjusted = + TNode<IntPtrT> to_array_adjusted = IntPtrSub(BitcastTaggedToWord(to_elements), first_from_element_offset); Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement); @@ -4908,12 +4866,10 @@ void CodeStubAssembler::MoveElements(ElementsKind kind, TNode<IntPtrT> elements_intptr = BitcastTaggedToWord(elements); TNode<IntPtrT> target_data_ptr = IntPtrAdd(elements_intptr, - ElementOffsetFromIndex(dst_index, kind, INTPTR_PARAMETERS, - fa_base_data_offset)); + ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset)); TNode<IntPtrT> source_data_ptr = IntPtrAdd(elements_intptr, - ElementOffsetFromIndex(src_index, kind, INTPTR_PARAMETERS, - fa_base_data_offset)); + ElementOffsetFromIndex(src_index, kind, fa_base_data_offset)); TNode<ExternalReference> memmove = ExternalConstant(ExternalReference::libc_memmove_function()); CallCFunction(memmove, MachineType::Pointer(), @@ -4997,10 +4953,10 @@ void CodeStubAssembler::CopyElements(ElementsKind kind, IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind))); static const int32_t fa_base_data_offset = FixedArrayBase::kHeaderSize - kHeapObjectTag; - 
TNode<IntPtrT> src_offset_start = ElementOffsetFromIndex( - src_index, kind, INTPTR_PARAMETERS, fa_base_data_offset); - TNode<IntPtrT> dst_offset_start = ElementOffsetFromIndex( - dst_index, kind, INTPTR_PARAMETERS, fa_base_data_offset); + TNode<IntPtrT> src_offset_start = + ElementOffsetFromIndex(src_index, kind, fa_base_data_offset); + TNode<IntPtrT> dst_offset_start = + ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset); TNode<IntPtrT> src_elements_intptr = BitcastTaggedToWord(src_elements); TNode<IntPtrT> source_data_ptr = IntPtrAdd(src_elements_intptr, src_offset_start); @@ -5283,65 +5239,6 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array, Comment("] CopyPropertyArrayValues"); } -void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string, - TNode<IntPtrT> from_index, - TNode<IntPtrT> to_index, - TNode<IntPtrT> character_count, - String::Encoding from_encoding, - String::Encoding to_encoding) { - // Cannot assert IsString(from_string) and IsString(to_string) here because - // CSA::SubString can pass in faked sequential strings when handling external - // subject strings. - bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING; - bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING; - DCHECK_IMPLIES(to_one_byte, from_one_byte); - Comment("CopyStringCharacters ", - from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ", - to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING"); - - ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; - ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); - int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag; - TNode<IntPtrT> from_offset = ElementOffsetFromIndex( - from_index, from_kind, INTPTR_PARAMETERS, header_size); - TNode<IntPtrT> to_offset = - ElementOffsetFromIndex(to_index, to_kind, INTPTR_PARAMETERS, header_size); - TNode<IntPtrT> byte_count = - ElementOffsetFromIndex(character_count, from_kind, INTPTR_PARAMETERS); - TNode<WordT> limit_offset = IntPtrAdd(from_offset, byte_count); - - // Prepare the fast loop - MachineType type = - from_one_byte ? MachineType::Uint8() : MachineType::Uint16(); - MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8 - : MachineRepresentation::kWord16; - int from_increment = 1 << ElementsKindToShiftSize(from_kind); - int to_increment = 1 << ElementsKindToShiftSize(to_kind); - - VARIABLE(current_to_offset, MachineType::PointerRepresentation(), to_offset); - VariableList vars({&current_to_offset}, zone()); - int to_index_constant = 0, from_index_constant = 0; - bool index_same = (from_encoding == to_encoding) && - (from_index == to_index || - (ToInt32Constant(from_index, &from_index_constant) && - ToInt32Constant(to_index, &to_index_constant) && - from_index_constant == to_index_constant)); - BuildFastLoop( - vars, from_offset, limit_offset, - [this, from_string, to_string, &current_to_offset, to_increment, type, - rep, index_same](Node* offset) { - Node* value = Load(type, from_string, offset); - StoreNoWriteBarrier(rep, to_string, - index_same ?
offset : current_to_offset.value(), - value); - if (!index_same) { - Increment(&current_to_offset, to_increment); - } - }, - from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); -} - Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array, Node* offset, ElementsKind from_kind, @@ -5381,9 +5278,9 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity, return IntPtrOrSmiAdd(new_capacity, padding, mode); } -Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, - ElementsKind kind, Node* key, - Label* bailout) { +TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity( + Node* object, Node* elements, ElementsKind kind, Node* key, + Label* bailout) { CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind)); CSA_SLOW_ASSERT(this, TaggedIsSmi(key)); @@ -5395,11 +5292,9 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, TaggedToParameter(capacity, mode), mode, bailout); } -Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, - ElementsKind kind, Node* key, - Node* capacity, - ParameterMode mode, - Label* bailout) { +TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity( + Node* object, Node* elements, ElementsKind kind, Node* key, Node* capacity, + ParameterMode mode, Label* bailout) { Comment("TryGrowElementsCapacity"); CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind)); @@ -5418,7 +5313,7 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, new_capacity, mode, bailout); } -Node* CodeStubAssembler::GrowElementsCapacity( +TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity( Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind, Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) { Comment("[ GrowElementsCapacity"); @@ -5471,45 +5366,22 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base, Comment("]"); } -Node* CodeStubAssembler::TryTaggedToFloat64(Node* value, - Label* if_valueisnotnumber) { - Label out(this); - VARIABLE(var_result, MachineRepresentation::kFloat64); - - // Check if the {value} is a Smi or a HeapObject. - Label if_valueissmi(this), if_valueisnotsmi(this); - Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi); - - BIND(&if_valueissmi); - { - // Convert the Smi {value}. - var_result.Bind(SmiToFloat64(value)); - Goto(&out); - } - - BIND(&if_valueisnotsmi); - { - // Check if {value} is a HeapNumber. - Label if_valueisheapnumber(this); - Branch(IsHeapNumber(value), &if_valueisheapnumber, if_valueisnotnumber); - - BIND(&if_valueisheapnumber); - { - // Load the floating point value. - var_result.Bind(LoadHeapNumberValue(value)); - Goto(&out); - } - } - BIND(&out); - return var_result.value(); +TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64( + TNode<Object> value, Label* if_valueisnotnumber) { + return Select<Float64T>( + TaggedIsSmi(value), [&]() { return SmiToFloat64(CAST(value)); }, + [&]() { + GotoIfNot(IsHeapNumber(CAST(value)), if_valueisnotnumber); + return LoadHeapNumberValue(CAST(value)); + }); } -Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) { +TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64( + SloppyTNode<Context> context, SloppyTNode<Object> value) { // We might need to loop once due to ToNumber conversion.
- VARIABLE(var_value, MachineRepresentation::kTagged); - VARIABLE(var_result, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_value, value); + TVARIABLE(Float64T, var_result); Label loop(this, &var_value), done_loop(this, &var_result); - var_value.Bind(value); Goto(&loop); BIND(&loop); { @@ -5520,14 +5392,13 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) { // Convert {value} to Float64 if it is a number and convert it to a number // otherwise. - Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber); - var_result.Bind(result); + var_result = TryTaggedToFloat64(value, &if_valueisnotnumber); Goto(&done_loop); BIND(&if_valueisnotnumber); { // Convert the {value} to a Number first. - var_value.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, value)); + var_value = CallBuiltin(Builtins::kNonNumberToNumber, context, value); Goto(&loop); } } @@ -5535,8 +5406,9 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) { return var_result.value(); } -Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) { - VARIABLE(var_result, MachineRepresentation::kWord32); +TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32( + SloppyTNode<Context> context, SloppyTNode<Object> value) { + TVARIABLE(Word32T, var_result); Label done(this); TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value, &done, &var_result); @@ -5546,38 +5418,33 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) { // Truncate {value} to word32 and jump to {if_number} if it is a Number, // or find that it is a BigInt and jump to {if_bigint}. -void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value, - Label* if_number, - Variable* var_word32, - Label* if_bigint, - Variable* var_bigint) { +void CodeStubAssembler::TaggedToWord32OrBigInt( + TNode<Context> context, TNode<Object> value, Label* if_number, + TVariable<Word32T>* var_word32, Label* if_bigint, + TVariable<Object>* var_maybe_bigint) { TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>( - context, value, if_number, var_word32, if_bigint, var_bigint); + context, value, if_number, var_word32, if_bigint, var_maybe_bigint); } // Truncate {value} to word32 and jump to {if_number} if it is a Number, // or find that it is a BigInt and jump to {if_bigint}. In either case, // store the type feedback in {var_feedback}. 
void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback( - Node* context, Node* value, Label* if_number, Variable* var_word32, - Label* if_bigint, Variable* var_bigint, Variable* var_feedback) { + TNode<Context> context, TNode<Object> value, Label* if_number, + TVariable<Word32T>* var_word32, Label* if_bigint, + TVariable<Object>* var_maybe_bigint, TVariable<Smi>* var_feedback) { TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>( - context, value, if_number, var_word32, if_bigint, var_bigint, + context, value, if_number, var_word32, if_bigint, var_maybe_bigint, var_feedback); } template <Object::Conversion conversion> void CodeStubAssembler::TaggedToWord32OrBigIntImpl( - Node* context, Node* value, Label* if_number, Variable* var_word32, - Label* if_bigint, Variable* var_bigint, Variable* var_feedback) { - DCHECK(var_word32->rep() == MachineRepresentation::kWord32); - DCHECK(var_bigint == nullptr || - var_bigint->rep() == MachineRepresentation::kTagged); - DCHECK(var_feedback == nullptr || - var_feedback->rep() == MachineRepresentation::kTaggedSigned); - + TNode<Context> context, TNode<Object> value, Label* if_number, + TVariable<Word32T>* var_word32, Label* if_bigint, + TVariable<Object>* var_maybe_bigint, TVariable<Smi>* var_feedback) { // We might need to loop after conversion. - VARIABLE(var_value, MachineRepresentation::kTagged, value); + TVARIABLE(Object, var_value, value); OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone); Variable* loop_vars[] = {&var_value, var_feedback}; int num_vars = @@ -5592,12 +5459,13 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( GotoIf(TaggedIsNotSmi(value), ¬_smi); // {value} is a Smi. - var_word32->Bind(SmiToInt32(value)); + *var_word32 = SmiToInt32(CAST(value)); CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall); Goto(if_number); BIND(¬_smi); - TNode<Map> map = LoadMap(value); + TNode<HeapObject> value_heap_object = CAST(value); + TNode<Map> map = LoadMap(value_heap_object); GotoIf(IsHeapNumberMap(map), &is_heap_number); TNode<Uint16T> instance_type = LoadMapInstanceType(map); if (conversion == Object::Conversion::kToNumeric) { @@ -5610,7 +5478,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( // We do not require an Or with earlier feedback here because once we // convert the value to a Numeric, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. - CSA_ASSERT(this, SmiEqual(CAST(var_feedback->value()), + CSA_ASSERT(this, SmiEqual(var_feedback->value(), SmiConstant(BinaryOperationFeedback::kNone))); } GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball); @@ -5618,25 +5486,25 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( auto builtin = conversion == Object::Conversion::kToNumeric ? 
Builtins::kNonNumberToNumeric : Builtins::kNonNumberToNumber; - var_value.Bind(CallBuiltin(builtin, context, value)); + var_value = CallBuiltin(builtin, context, value); OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny); Goto(&loop); BIND(&is_oddball); - var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset)); + var_value = LoadObjectField(value_heap_object, Oddball::kToNumberOffset); OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumberOrOddball); Goto(&loop); } BIND(&is_heap_number); - var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value))); + *var_word32 = TruncateHeapNumberValueToWord32(CAST(value)); CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber); Goto(if_number); if (conversion == Object::Conversion::kToNumeric) { BIND(&is_bigint); - var_bigint->Bind(value); + *var_maybe_bigint = value; CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt); Goto(if_bigint); } @@ -5650,14 +5518,14 @@ TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32( } void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number, - TVariable<Smi>& var_result_smi, + TVariable<Smi>* var_result_smi, Label* if_smi) { TNode<Float64T> value = LoadHeapNumberValue(number); TryFloat64ToSmi(value, var_result_smi, if_smi); } void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value, - TVariable<Smi>& var_result_smi, + TVariable<Smi>* var_result_smi, Label* if_smi) { TNode<Int32T> value32 = RoundFloat64ToInt32(value); TNode<Float64T> value64 = ChangeInt32ToFloat64(value32); @@ -5674,13 +5542,13 @@ void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value, BIND(&if_int32); { if (SmiValuesAre32Bits()) { - var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32)); + *var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32)); } else { DCHECK(SmiValuesAre31Bits()); TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value32, value32); TNode<BoolT> overflow = Projection<1>(pair); GotoIf(overflow, &if_heap_number); - var_result_smi = + *var_result_smi = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair))); } Goto(if_smi); @@ -5693,7 +5561,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged( Label if_smi(this), done(this); TVARIABLE(Smi, var_smi_result); TVARIABLE(Number, var_result); - TryFloat64ToSmi(value, var_smi_result, &if_smi); + TryFloat64ToSmi(value, &var_smi_result, &if_smi); var_result = AllocateHeapNumberWithValue(value); Goto(&done); @@ -6144,42 +6012,42 @@ TNode<BoolT> CodeStubAssembler::IsUndetectableMap(SloppyTNode<Map> map) { } TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() { - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); TNode<PropertyCell> cell = NoElementsProtectorConstant(); TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode<BoolT> CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() { - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); TNode<PropertyCell> cell = ArrayIteratorProtectorConstant(); TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode<BoolT> CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() { - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); - TNode<Cell> cell = PromiseResolveProtectorConstant(); - TNode<Object> cell_value = 
LoadObjectField(cell, Cell::kValueOffset); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); + TNode<PropertyCell> cell = PromiseResolveProtectorConstant(); + TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode<BoolT> CodeStubAssembler::IsPromiseThenProtectorCellInvalid() { - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); TNode<PropertyCell> cell = PromiseThenProtectorConstant(); TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode<BoolT> CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() { - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); TNode<PropertyCell> cell = ArraySpeciesProtectorConstant(); TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() { - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); TNode<PropertyCell> cell = TypedArraySpeciesProtectorConstant(); TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); @@ -6190,12 +6058,12 @@ TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid( TNode<PropertyCell> cell = CAST(LoadContextElement( native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX)); TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); return TaggedEqual(cell_value, invalid); } TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() { - TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid); TNode<PropertyCell> cell = PromiseSpeciesProtectorConstant(); TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); @@ -6394,6 +6262,10 @@ TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy( return IsJSGlobalProxyMap(LoadMap(object)); } +TNode<BoolT> CodeStubAssembler::IsJSGeneratorMap(TNode<Map> map) { + return InstanceTypeEqual(LoadMapInstanceType(map), JS_GENERATOR_OBJECT_TYPE); +} + TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType( SloppyTNode<Int32T> instance_type) { STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); @@ -6428,6 +6300,11 @@ TNode<BoolT> CodeStubAssembler::IsJSStringIterator( return HasInstanceType(object, JS_STRING_ITERATOR_TYPE); } +TNode<BoolT> CodeStubAssembler::IsJSRegExpStringIterator( + SloppyTNode<HeapObject> object) { + return HasInstanceType(object, JS_REG_EXP_STRING_ITERATOR_TYPE); +} + TNode<BoolT> CodeStubAssembler::IsMap(SloppyTNode<HeapObject> map) { return IsMetaMap(LoadMap(map)); } @@ -6656,7 +6533,7 @@ TNode<BoolT> CodeStubAssembler::IsBigInt(SloppyTNode<HeapObject> object) { TNode<BoolT> CodeStubAssembler::IsPrimitiveInstanceType( SloppyTNode<Int32T> instance_type) { return Int32LessThanOrEqual(instance_type, - Int32Constant(LAST_PRIMITIVE_TYPE)); + Int32Constant(LAST_PRIMITIVE_HEAP_OBJECT_TYPE)); } TNode<BoolT> CodeStubAssembler::IsPrivateSymbol( @@ -6716,8 +6593,7 @@ TNode<BoolT> 
CodeStubAssembler::IsNumberDictionary( return HasInstanceType(object, NUMBER_DICTIONARY_TYPE); } -TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject( - SloppyTNode<HeapObject> object) { +TNode<BoolT> CodeStubAssembler::IsJSGeneratorObject(TNode<HeapObject> object) { return HasInstanceType(object, JS_GENERATOR_OBJECT_TYPE); } @@ -6762,7 +6638,7 @@ TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) { } TNode<BoolT> CodeStubAssembler::IsJSRegExp(SloppyTNode<HeapObject> object) { - return HasInstanceType(object, JS_REGEXP_TYPE); + return HasInstanceType(object, JS_REG_EXP_TYPE); } TNode<BoolT> CodeStubAssembler::IsNumber(SloppyTNode<Object> object) { @@ -7011,189 +6887,6 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) { return CAST(var_result.value()); } -// A wrapper around CopyStringCharacters which determines the correct string -// encoding, allocates a corresponding sequential string, and then copies the -// given character range using CopyStringCharacters. -// |from_string| must be a sequential string. -// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length. -TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters( - Node* from, Node* from_instance_type, TNode<IntPtrT> from_index, - TNode<IntPtrT> character_count) { - Label end(this), one_byte_sequential(this), two_byte_sequential(this); - TVARIABLE(String, var_result); - - Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential, - &two_byte_sequential); - - // The subject string is a sequential one-byte string. - BIND(&one_byte_sequential); - { - TNode<String> result = AllocateSeqOneByteString( - Unsigned(TruncateIntPtrToInt32(character_count))); - CopyStringCharacters(from, result, from_index, IntPtrConstant(0), - character_count, String::ONE_BYTE_ENCODING, - String::ONE_BYTE_ENCODING); - var_result = result; - Goto(&end); - } - - // The subject string is a sequential two-byte string. - BIND(&two_byte_sequential); - { - TNode<String> result = AllocateSeqTwoByteString( - Unsigned(TruncateIntPtrToInt32(character_count))); - CopyStringCharacters(from, result, from_index, IntPtrConstant(0), - character_count, String::TWO_BYTE_ENCODING, - String::TWO_BYTE_ENCODING); - var_result = result; - Goto(&end); - } - - BIND(&end); - return var_result.value(); -} - -TNode<String> CodeStubAssembler::SubString(TNode<String> string, - TNode<IntPtrT> from, - TNode<IntPtrT> to) { - TVARIABLE(String, var_result); - ToDirectStringAssembler to_direct(state(), string); - Label end(this), runtime(this); - - TNode<IntPtrT> const substr_length = IntPtrSub(to, from); - TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string); - - // Begin dispatching based on substring length. - - Label original_string_or_invalid_length(this); - GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length), - &original_string_or_invalid_length); - - // A real substring (substr_length < string_length). - Label empty(this); - GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty); - - Label single_char(this); - GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char); - - // Deal with different string types: update the index if necessary - // and extract the underlying string. 
- - TNode<String> direct_string = to_direct.TryToDirect(&runtime); - TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset()); - TNode<Int32T> const instance_type = to_direct.instance_type(); - - // The subject string can only be external or sequential string of either - // encoding at this point. - Label external_string(this); - { - if (FLAG_string_slices) { - Label next(this); - - // Short slice. Copy instead of slicing. - GotoIf(IntPtrLessThan(substr_length, - IntPtrConstant(SlicedString::kMinLength)), - &next); - - // Allocate new sliced string. - - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - Label one_byte_slice(this), two_byte_slice(this); - Branch(IsOneByteStringInstanceType(to_direct.instance_type()), - &one_byte_slice, &two_byte_slice); - - BIND(&one_byte_slice); - { - var_result = AllocateSlicedOneByteString( - Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, - SmiTag(offset)); - Goto(&end); - } - - BIND(&two_byte_slice); - { - var_result = AllocateSlicedTwoByteString( - Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, - SmiTag(offset)); - Goto(&end); - } - - BIND(&next); - } - - // The subject string can only be external or sequential string of either - // encoding at this point. - GotoIf(to_direct.is_external(), &external_string); - - var_result = AllocAndCopyStringCharacters(direct_string, instance_type, - offset, substr_length); - - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - Goto(&end); - } - - // Handle external string. - BIND(&external_string); - { - TNode<RawPtrT> const fake_sequential_string = - to_direct.PointerToString(&runtime); - - var_result = AllocAndCopyStringCharacters( - fake_sequential_string, instance_type, offset, substr_length); - - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - Goto(&end); - } - - BIND(&empty); - { - var_result = EmptyStringConstant(); - Goto(&end); - } - - // Substrings of length 1 are generated through CharCodeAt and FromCharCode. - BIND(&single_char); - { - TNode<Int32T> char_code = StringCharCodeAt(string, from); - var_result = StringFromSingleCharCode(char_code); - Goto(&end); - } - - BIND(&original_string_or_invalid_length); - { - CSA_ASSERT(this, IntPtrEqual(substr_length, string_length)); - - // Equal length - check if {from, to} == {0, str.length}. - GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime); - - // Return the original string (substr_length == string_length). - - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - var_result = string; - Goto(&end); - } - - // Fall back to a runtime call. 
- BIND(&runtime); - { - var_result = - CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string, - SmiTag(from), SmiTag(to))); - Goto(&end); - } - - BIND(&end); - return var_result.value(); -} - ToDirectStringAssembler::ToDirectStringAssembler( compiler::CodeAssemblerState* state, TNode<String> string, Flags flags) : CodeStubAssembler(state), @@ -7204,8 +6897,7 @@ ToDirectStringAssembler::ToDirectStringAssembler( flags_(flags) {} TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) { - VariableList vars({&var_string_, &var_offset_, &var_instance_type_}, zone()); - Label dispatch(this, vars); + Label dispatch(this, {&var_string_, &var_offset_, &var_instance_type_}); Label if_iscons(this); Label if_isexternal(this); Label if_issliced(this); @@ -7333,232 +7025,6 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential( return var_result.value(); } -void CodeStubAssembler::BranchIfCanDerefIndirectString( - TNode<String> string, TNode<Int32T> instance_type, Label* can_deref, - Label* cannot_deref) { - TNode<Int32T> representation = - Word32And(instance_type, Int32Constant(kStringRepresentationMask)); - GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref); - GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)), - cannot_deref); - // Cons string. - TNode<String> rhs = - LoadObjectField<String>(string, ConsString::kSecondOffset); - GotoIf(IsEmptyString(rhs), can_deref); - Goto(cannot_deref); -} - -TNode<String> CodeStubAssembler::DerefIndirectString( - TNode<String> string, TNode<Int32T> instance_type, Label* cannot_deref) { - Label deref(this); - BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref); - BIND(&deref); - STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) == - static_cast<int>(ConsString::kFirstOffset)); - return LoadObjectField<String>(string, ThinString::kActualOffset); -} - -void CodeStubAssembler::DerefIndirectString(TVariable<String>* var_string, - TNode<Int32T> instance_type) { -#ifdef DEBUG - Label can_deref(this), cannot_deref(this); - BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref, - &cannot_deref); - BIND(&cannot_deref); - DebugBreak(); // Should be able to dereference string. 
- Goto(&can_deref); - BIND(&can_deref); -#endif // DEBUG - - STATIC_ASSERT(static_cast<int>(ThinString::kActualOffset) == - static_cast<int>(ConsString::kFirstOffset)); - *var_string = - LoadObjectField<String>(var_string->value(), ThinString::kActualOffset); -} - -void CodeStubAssembler::MaybeDerefIndirectString(TVariable<String>* var_string, - TNode<Int32T> instance_type, - Label* did_deref, - Label* cannot_deref) { - Label deref(this); - BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref, - cannot_deref); - - BIND(&deref); - { - DerefIndirectString(var_string, instance_type); - Goto(did_deref); - } -} - -void CodeStubAssembler::MaybeDerefIndirectStrings( - TVariable<String>* var_left, TNode<Int32T> left_instance_type, - TVariable<String>* var_right, TNode<Int32T> right_instance_type, - Label* did_something) { - Label did_nothing_left(this), did_something_left(this), - didnt_do_anything(this); - MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left, - &did_nothing_left); - - BIND(&did_something_left); - { - MaybeDerefIndirectString(var_right, right_instance_type, did_something, - did_something); - } - - BIND(&did_nothing_left); - { - MaybeDerefIndirectString(var_right, right_instance_type, did_something, - &didnt_do_anything); - } - - BIND(&didnt_do_anything); - // Fall through if neither string was an indirect string. -} - -TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left, - TNode<String> right) { - TVARIABLE(String, result); - Label check_right(this), runtime(this, Label::kDeferred), cons(this), - done(this, &result), done_native(this, &result); - Counters* counters = isolate()->counters(); - - TNode<Uint32T> left_length = LoadStringLengthAsWord32(left); - GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right); - result = right; - Goto(&done_native); - - BIND(&check_right); - TNode<Uint32T> right_length = LoadStringLengthAsWord32(right); - GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons); - result = left; - Goto(&done_native); - - BIND(&cons); - { - TNode<Uint32T> new_length = Uint32Add(left_length, right_length); - - // If new length is greater than String::kMaxLength, goto runtime to - // throw. Note: we also need to invalidate the string length protector, so - // can't just throw here directly. - GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)), - &runtime); - - TVARIABLE(String, var_left, left); - TVARIABLE(String, var_right, right); - Variable* input_vars[2] = {&var_left, &var_right}; - Label non_cons(this, 2, input_vars); - Label slow(this, Label::kDeferred); - GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)), - &non_cons); - - result = - AllocateConsString(new_length, var_left.value(), var_right.value()); - Goto(&done_native); - - BIND(&non_cons); - - Comment("Full string concatenate"); - TNode<Int32T> left_instance_type = LoadInstanceType(var_left.value()); - TNode<Int32T> right_instance_type = LoadInstanceType(var_right.value()); - // Compute intersection and difference of instance types. - - TNode<Int32T> ored_instance_types = - Word32Or(left_instance_type, right_instance_type); - TNode<Word32T> xored_instance_types = - Word32Xor(left_instance_type, right_instance_type); - - // Check if both strings have the same encoding and both are sequential. 
- GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime); - GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow); - - TNode<IntPtrT> word_left_length = Signed(ChangeUint32ToWord(left_length)); - TNode<IntPtrT> word_right_length = Signed(ChangeUint32ToWord(right_length)); - - Label two_byte(this); - GotoIf(Word32Equal(Word32And(ored_instance_types, - Int32Constant(kStringEncodingMask)), - Int32Constant(kTwoByteStringTag)), - &two_byte); - // One-byte sequential string case - result = AllocateSeqOneByteString(new_length); - CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), - IntPtrConstant(0), word_left_length, - String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); - CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), - word_left_length, word_right_length, - String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); - Goto(&done_native); - - BIND(&two_byte); - { - // Two-byte sequential string case - result = AllocateSeqTwoByteString(new_length); - CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), - IntPtrConstant(0), word_left_length, - String::TWO_BYTE_ENCODING, - String::TWO_BYTE_ENCODING); - CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), - word_left_length, word_right_length, - String::TWO_BYTE_ENCODING, - String::TWO_BYTE_ENCODING); - Goto(&done_native); - } - - BIND(&slow); - { - // Try to unwrap indirect strings, restart the above attempt on success. - MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right, - right_instance_type, &non_cons); - Goto(&runtime); - } - } - BIND(&runtime); - { - result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right)); - Goto(&done); - } - - BIND(&done_native); - { - IncrementCounter(counters->string_add_native(), 1); - Goto(&done); - } - - BIND(&done); - return result.value(); -} - -TNode<String> CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint( - TNode<Int32T> codepoint) { - VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant()); - - Label if_isword16(this), if_isword32(this), return_result(this); - - Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16, - &if_isword32); - - BIND(&if_isword16); - { - var_result.Bind(StringFromSingleCharCode(codepoint)); - Goto(&return_result); - } - - BIND(&if_isword32); - { - TNode<String> value = AllocateSeqTwoByteString(2); - StoreNoWriteBarrier( - MachineRepresentation::kWord32, value, - IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), - codepoint); - var_result.Bind(value); - Goto(&return_result); - } - - BIND(&return_result); - return CAST(var_result.value()); -} - TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) { Label runtime(this, Label::kDeferred); Label end(this); @@ -7585,22 +7051,22 @@ TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) { return var_result.value(); } -TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) { +TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input, + Label* bailout) { TVARIABLE(String, result); TVARIABLE(Smi, smi_input); - Label runtime(this, Label::kDeferred), if_smi(this), if_heap_number(this), - done(this, &result); + Label if_smi(this), if_heap_number(this), done(this, &result); // Load the number string cache. TNode<FixedArray> number_string_cache = NumberStringCacheConstant(); // Make the hash mask from the length of the number string cache. 
It // contains two elements (number and string) for each cache entry. - // TODO(ishell): cleanup mask handling. - TNode<IntPtrT> mask = - BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache)); - TNode<IntPtrT> one = IntPtrConstant(1); - mask = IntPtrSub(mask, one); + TNode<IntPtrT> number_string_cache_length = + LoadAndUntagFixedArrayBaseLength(number_string_cache); + TNode<Int32T> one = Int32Constant(1); + TNode<Word32T> mask = Int32Sub( + Word32Shr(TruncateWordToInt32(number_string_cache_length), one), one); GotoIfNot(TaggedIsSmi(input), &if_heap_number); smi_input = CAST(input); @@ -7611,36 +7077,35 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) { Comment("NumberToString - HeapNumber"); TNode<HeapNumber> heap_number_input = CAST(input); // Try normalizing the HeapNumber. - TryHeapNumberToSmi(heap_number_input, smi_input, &if_smi); + TryHeapNumberToSmi(heap_number_input, &smi_input, &if_smi); // Make a hash from the two 32-bit values of the double. TNode<Int32T> low = LoadObjectField<Int32T>(heap_number_input, HeapNumber::kValueOffset); TNode<Int32T> high = LoadObjectField<Int32T>( heap_number_input, HeapNumber::kValueOffset + kIntSize); - TNode<Word32T> hash = Word32Xor(low, high); - TNode<IntPtrT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one); - TNode<WordT> index = - WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant())); + TNode<Word32T> hash = Word32And(Word32Xor(low, high), mask); + TNode<IntPtrT> entry_index = + Signed(ChangeUint32ToWord(Int32Add(hash, hash))); // Cache entry's key must be a heap number TNode<Object> number_key = - UnsafeLoadFixedArrayElement(number_string_cache, index); - GotoIf(TaggedIsSmi(number_key), &runtime); + UnsafeLoadFixedArrayElement(number_string_cache, entry_index); + GotoIf(TaggedIsSmi(number_key), bailout); TNode<HeapObject> number_key_heap_object = CAST(number_key); - GotoIfNot(IsHeapNumber(number_key_heap_object), &runtime); + GotoIfNot(IsHeapNumber(number_key_heap_object), bailout); // Cache entry's key must match the heap number value we're looking for. TNode<Int32T> low_compare = LoadObjectField<Int32T>( number_key_heap_object, HeapNumber::kValueOffset); TNode<Int32T> high_compare = LoadObjectField<Int32T>( number_key_heap_object, HeapNumber::kValueOffset + kIntSize); - GotoIfNot(Word32Equal(low, low_compare), &runtime); - GotoIfNot(Word32Equal(high, high_compare), &runtime); + GotoIfNot(Word32Equal(low, low_compare), bailout); + GotoIfNot(Word32Equal(high, high_compare), bailout); // Heap number match, return value from cache entry. - result = CAST( - UnsafeLoadFixedArrayElement(number_string_cache, index, kTaggedSize)); + result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index, + kTaggedSize)); Goto(&done); } @@ -7648,17 +7113,28 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) { { Comment("NumberToString - Smi"); // Load the smi key, make sure it matches the smi we're looking for. 
- TNode<Object> smi_index = BitcastWordToTagged(WordAnd( - WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask)); + TNode<Word32T> hash = Word32And(SmiToInt32(smi_input.value()), mask); + TNode<IntPtrT> entry_index = + Signed(ChangeUint32ToWord(Int32Add(hash, hash))); TNode<Object> smi_key = UnsafeLoadFixedArrayElement( - number_string_cache, smi_index, 0, SMI_PARAMETERS); - GotoIf(TaggedNotEqual(smi_key, smi_input.value()), &runtime); + number_string_cache, entry_index, 0, INTPTR_PARAMETERS); + GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout); // Smi match, return value from cache entry. - result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, smi_index, - kTaggedSize, SMI_PARAMETERS)); + result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index, + kTaggedSize, INTPTR_PARAMETERS)); Goto(&done); } + BIND(&done); + return result.value(); +} + +TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) { + TVARIABLE(String, result); + Label runtime(this, Label::kDeferred), done(this, &result); + + result = NumberToString(input, &runtime); + Goto(&done); BIND(&runtime); { @@ -8290,102 +7766,129 @@ void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) { } } -void CodeStubAssembler::Increment(Variable* variable, int value, - ParameterMode mode) { - DCHECK_IMPLIES(mode == INTPTR_PARAMETERS, - variable->rep() == MachineType::PointerRepresentation()); - DCHECK_IMPLIES(mode == SMI_PARAMETERS, CanBeTaggedSigned(variable->rep())); - variable->Bind(IntPtrOrSmiAdd(variable->value(), - IntPtrOrSmiConstant(value, mode), mode)); +template <typename TIndex> +void CodeStubAssembler::Increment(TVariable<TIndex>* variable, int value) { + *variable = + IntPtrOrSmiAdd(variable->value(), IntPtrOrSmiConstant<TIndex>(value)); } +// Instantiate Increment for Smi and IntPtrT. +// TODO(v8:9708): Consider renaming to [Smi|IntPtrT|RawPtrT]Increment. +template void CodeStubAssembler::Increment<Smi>(TVariable<Smi>* variable, + int value); +template void CodeStubAssembler::Increment<IntPtrT>( + TVariable<IntPtrT>* variable, int value); +template void CodeStubAssembler::Increment<RawPtrT>( + TVariable<RawPtrT>* variable, int value); + void CodeStubAssembler::Use(Label* label) { GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label); } -void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex, - Variable* var_index, Label* if_keyisunique, - Variable* var_unique, Label* if_bailout, +void CodeStubAssembler::TryToName(SloppyTNode<Object> key, Label* if_keyisindex, + TVariable<IntPtrT>* var_index, + Label* if_keyisunique, + TVariable<Name>* var_unique, + Label* if_bailout, Label* if_notinternalized) { - DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep()); - DCHECK_EQ(MachineRepresentation::kTagged, var_unique->rep()); Comment("TryToName"); - Label if_hascachedindex(this), if_keyisnotindex(this), if_thinstring(this), - if_keyisother(this, Label::kDeferred); + Label if_keyisnotindex(this); // Handle Smi and HeapNumber keys. - var_index->Bind(TryToIntptr(key, &if_keyisnotindex)); + *var_index = TryToIntptr(key, &if_keyisnotindex); Goto(if_keyisindex); BIND(&if_keyisnotindex); - TNode<Map> key_map = LoadMap(key); - var_unique->Bind(key); - // Symbols are unique. - GotoIf(IsSymbolMap(key_map), if_keyisunique); - TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map); - // Miss if |key| is not a String. 
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); - GotoIfNot(IsStringInstanceType(key_instance_type), &if_keyisother); - - // |key| is a String. Check if it has a cached array index. - TNode<Uint32T> hash = LoadNameHashField(key); - GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask), - &if_hascachedindex); - // No cached array index. If the string knows that it contains an index, - // then it must be an uncacheable index. Handle this case in the runtime. - GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout); - // Check if we have a ThinString. - GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE), - &if_thinstring); - GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE), - &if_thinstring); - // Finally, check if |key| is internalized. - STATIC_ASSERT(kNotInternalizedTag != 0); - GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask), - if_notinternalized != nullptr ? if_notinternalized : if_bailout); - Goto(if_keyisunique); + { + Label if_symbol(this), if_string(this), + if_keyisother(this, Label::kDeferred); + TNode<HeapObject> key_heap_object = CAST(key); + TNode<Map> key_map = LoadMap(key_heap_object); - BIND(&if_thinstring); - var_unique->Bind( - LoadObjectField<String>(CAST(key), ThinString::kActualOffset)); - Goto(if_keyisunique); + GotoIf(IsSymbolMap(key_map), &if_symbol); - BIND(&if_hascachedindex); - var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash)); - Goto(if_keyisindex); + // Miss if |key| is not a String. + STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); + TNode<Uint16T> key_instance_type = LoadMapInstanceType(key_map); + Branch(IsStringInstanceType(key_instance_type), &if_string, &if_keyisother); + + // Symbols are unique. + BIND(&if_symbol); + { + *var_unique = CAST(key); + Goto(if_keyisunique); + } - BIND(&if_keyisother); - GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout); - var_unique->Bind(LoadObjectField(key, Oddball::kToStringOffset)); - Goto(if_keyisunique); + BIND(&if_string); + { + Label if_hascachedindex(this), if_thinstring(this); + + // |key| is a String. Check if it has a cached array index. + TNode<String> key_string = CAST(key); + TNode<Uint32T> hash = LoadNameHashField(key_string); + GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask), + &if_hascachedindex); + // No cached array index. If the string knows that it contains an index, + // then it must be an uncacheable index. Handle this case in the runtime. + GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout); + // Check if we have a ThinString. + GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE), + &if_thinstring); + GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE), + &if_thinstring); + // Finally, check if |key| is internalized. + STATIC_ASSERT(kNotInternalizedTag != 0); + GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask), + if_notinternalized != nullptr ? 
if_notinternalized : if_bailout); + + *var_unique = key_string; + Goto(if_keyisunique); + + BIND(&if_thinstring); + *var_unique = + LoadObjectField<String>(key_string, ThinString::kActualOffset); + Goto(if_keyisunique); + + BIND(&if_hascachedindex); + *var_index = + Signed(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash)); + Goto(if_keyisindex); + } + + BIND(&if_keyisother); + { + GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout); + *var_unique = + LoadObjectField<String>(key_heap_object, Oddball::kToStringOffset); + Goto(if_keyisunique); + } + } } void CodeStubAssembler::TryInternalizeString( - Node* string, Label* if_index, Variable* var_index, Label* if_internalized, - Variable* var_internalized, Label* if_not_internalized, Label* if_bailout) { - DCHECK(var_index->rep() == MachineType::PointerRepresentation()); - DCHECK_EQ(var_internalized->rep(), MachineRepresentation::kTagged); - CSA_SLOW_ASSERT(this, IsString(string)); + SloppyTNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index, + Label* if_internalized, TVariable<Name>* var_internalized, + Label* if_not_internalized, Label* if_bailout) { TNode<ExternalReference> function = ExternalConstant(ExternalReference::try_internalize_string_function()); TNode<ExternalReference> const isolate_ptr = ExternalConstant(ExternalReference::isolate_address(isolate())); - Node* result = - CallCFunction(function, MachineType::AnyTagged(), - std::make_pair(MachineType::Pointer(), isolate_ptr), - std::make_pair(MachineType::AnyTagged(), string)); + TNode<Object> result = + CAST(CallCFunction(function, MachineType::AnyTagged(), + std::make_pair(MachineType::Pointer(), isolate_ptr), + std::make_pair(MachineType::AnyTagged(), string))); Label internalized(this); GotoIf(TaggedIsNotSmi(result), &internalized); - TNode<IntPtrT> word_result = SmiUntag(result); + TNode<IntPtrT> word_result = SmiUntag(CAST(result)); GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kNotFound)), if_not_internalized); GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kUnsupported)), if_bailout); - var_index->Bind(word_result); + *var_index = word_result; Goto(if_index); BIND(&internalized); - var_internalized->Bind(result); + *var_internalized = CAST(result); Goto(if_internalized); } @@ -8712,31 +8215,6 @@ TNode<Object> CodeStubAssembler::BasicLoadNumberDictionaryElement( return LoadValueByKeyIndex<NumberDictionary>(dictionary, index); } -void CodeStubAssembler::BasicStoreNumberDictionaryElement( - TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index, - TNode<Object> value, Label* not_data, Label* if_hole, Label* read_only) { - TVARIABLE(IntPtrT, var_entry); - Label if_found(this); - NumberDictionaryLookup(dictionary, intptr_index, &if_found, &var_entry, - if_hole); - BIND(&if_found); - - // Check that the value is a data property. - TNode<IntPtrT> index = EntryToIndex<NumberDictionary>(var_entry.value()); - TNode<Uint32T> details = - LoadDetailsByKeyIndex<NumberDictionary>(dictionary, index); - TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details); - // TODO(jkummerow): Support accessors without missing? - GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data); - - // Check that the property is writeable. - GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), - read_only); - - // Finally, store the value. 
- StoreValueByKeyIndex<NumberDictionary>(dictionary, index, value); -} - template <class Dictionary> void CodeStubAssembler::FindInsertionEntry(TNode<Dictionary> dictionary, TNode<Name> key, @@ -8858,16 +8336,16 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name, first_inclusive, IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor)); - BuildFastLoop( + BuildFastLoop<IntPtrT>( last_exclusive, first_inclusive, - [=](SloppyTNode<IntPtrT> name_index) { + [=](TNode<IntPtrT> name_index) { TNode<MaybeObject> element = LoadArrayElement(array, Array::kHeaderSize, name_index); TNode<Name> candidate_name = CAST(element); *var_name_index = name_index; GotoIf(TaggedEqual(candidate_name, unique_name), if_found); }, - -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre); + -Array::kEntrySize, IndexAdvanceMode::kPre); Goto(if_not_found); } @@ -9029,7 +8507,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( TNode<Uint16T> type = LoadMapInstanceType(map); TNode<Uint32T> bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout); - TNode<DescriptorArray> descriptors = LoadMapDescriptors(map); + TVARIABLE(DescriptorArray, var_descriptors, LoadMapDescriptors(map)); TNode<Uint32T> nof_descriptors = DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3); @@ -9044,25 +8522,23 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( // Note: var_end_key_index is exclusive for the loop TVARIABLE(IntPtrT, var_end_key_index, ToKeyIndex<DescriptorArray>(nof_descriptors)); - VariableList list( - {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop, - &var_start_key_index, &var_end_key_index}, - zone()); + VariableList list({&var_descriptors, &var_stable, &var_has_symbol, + &var_is_symbol_processing_loop, &var_start_key_index, + &var_end_key_index}, + zone()); Label descriptor_array_loop( - this, {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop, - &var_start_key_index, &var_end_key_index}); + this, {&var_descriptors, &var_stable, &var_has_symbol, + &var_is_symbol_processing_loop, &var_start_key_index, + &var_end_key_index}); Goto(&descriptor_array_loop); BIND(&descriptor_array_loop); - BuildFastLoop( + BuildFastLoop<IntPtrT>( list, var_start_key_index.value(), var_end_key_index.value(), - [=, &var_stable, &var_has_symbol, &var_is_symbol_processing_loop, - &var_start_key_index, &var_end_key_index](Node* index) { - TNode<IntPtrT> descriptor_key_index = - TNode<IntPtrT>::UncheckedCast(index); + [&](TNode<IntPtrT> descriptor_key_index) { TNode<Name> next_key = - LoadKeyByKeyIndex(descriptors, descriptor_key_index); + LoadKeyByKeyIndex(var_descriptors.value(), descriptor_key_index); TVARIABLE(Object, var_value, SmiConstant(0)); Label callback(this), next_iteration(this); @@ -9117,7 +8593,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( // Directly decode from the descriptor array if |object| did not // change shape. var_map = map; - var_meta_storage = descriptors; + var_meta_storage = var_descriptors.value(); var_entry = Signed(descriptor_key_index); Goto(&if_found_fast); } @@ -9183,19 +8659,21 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( BIND(&callback); body(next_key, var_value.value()); - // Check if |object| is still stable, i.e. we can proceed using - // property details from preloaded |descriptors|. - var_stable = Select<BoolT>( - var_stable.value(), - [=] { return TaggedEqual(LoadMap(object), map); }, - [=] { return Int32FalseConstant(); }); + // Check if |object| is still stable, i.e. 
the descriptors in the + // preloaded |descriptors| are still the same modulo in-place + // representation changes. + GotoIfNot(var_stable.value(), &next_iteration); + var_stable = TaggedEqual(LoadMap(object), map); + // Reload the descriptors just in case the actual array changed, and + // any of the field representations changed in-place. + var_descriptors = LoadMapDescriptors(map); Goto(&next_iteration); } } BIND(&next_iteration); }, - DescriptorArray::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + DescriptorArray::kEntrySize, IndexAdvanceMode::kPost); if (mode == kEnumerationOrder) { Label done(this); @@ -9205,14 +8683,73 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( var_is_symbol_processing_loop = Int32TrueConstant(); // Add DescriptorArray::kEntrySize to make the var_end_key_index exclusive // as BuildFastLoop() expects. - Increment(&var_end_key_index, DescriptorArray::kEntrySize, - INTPTR_PARAMETERS); + Increment(&var_end_key_index, DescriptorArray::kEntrySize); Goto(&descriptor_array_loop); BIND(&done); } } +TNode<Object> CodeStubAssembler::GetConstructor(TNode<Map> map) { + TVARIABLE(HeapObject, var_maybe_constructor); + var_maybe_constructor = map; + Label loop(this, &var_maybe_constructor), done(this); + GotoIfNot(IsMap(var_maybe_constructor.value()), &done); + Goto(&loop); + + BIND(&loop); + { + var_maybe_constructor = CAST(LoadObjectField( + var_maybe_constructor.value(), Map::kConstructorOrBackPointerOffset)); + GotoIf(IsMap(var_maybe_constructor.value()), &loop); + Goto(&done); + } + + BIND(&done); + return var_maybe_constructor.value(); +} + +TNode<NativeContext> CodeStubAssembler::GetCreationContext( + TNode<JSReceiver> receiver, Label* if_bailout) { + TNode<Map> receiver_map = LoadMap(receiver); + TNode<Object> constructor = GetConstructor(receiver_map); + + TVARIABLE(JSFunction, var_function); + + Label done(this), if_jsfunction(this), if_jsgenerator(this); + GotoIf(TaggedIsSmi(constructor), if_bailout); + + TNode<Map> function_map = LoadMap(CAST(constructor)); + GotoIf(IsJSFunctionMap(function_map), &if_jsfunction); + GotoIf(IsJSGeneratorMap(function_map), &if_jsgenerator); + // Remote objects don't have a creation context. 
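// Illustrative sketch, not part of the diff: GetConstructor above follows the
// map's "constructor or back pointer" field until it no longer holds a map;
// whatever is left is the constructor. Standalone analog with a hypothetical
// MapLike type:
struct MapLike {
  const MapLike* back_pointer = nullptr;  // non-null while the field holds a map
  const void* constructor = nullptr;      // meaningful once back_pointer is null
};

const void* GetConstructorOf(const MapLike* map) {
  // Keep following back pointers; the first non-map value on the chain is the
  // constructor (the real field can also hold other sentinels).
  while (map->back_pointer != nullptr) map = map->back_pointer;
  return map->constructor;
}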
+ GotoIf(IsFunctionTemplateInfoMap(function_map), if_bailout); + + CSA_ASSERT(this, IsJSFunctionMap(receiver_map)); + var_function = CAST(receiver); + Goto(&done); + + BIND(&if_jsfunction); + { + var_function = CAST(constructor); + Goto(&done); + } + + BIND(&if_jsgenerator); + { + var_function = LoadJSGeneratorObjectFunction(CAST(receiver)); + Goto(&done); + } + + BIND(&done); + TNode<Context> context = LoadJSFunctionContext(var_function.value()); + + GotoIfNot(IsContext(context), if_bailout); + + TNode<NativeContext> native_context = LoadNativeContext(context); + return native_context; +} + void CodeStubAssembler::DescriptorLookup( SloppyTNode<Name> unique_name, SloppyTNode<DescriptorArray> descriptors, SloppyTNode<Uint32T> bitfield3, Label* if_found, @@ -9302,7 +8839,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject( } void CodeStubAssembler::TryLookupProperty( - SloppyTNode<JSObject> object, SloppyTNode<Map> map, + SloppyTNode<JSReceiver> object, SloppyTNode<Map> map, SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name, Label* if_found_fast, Label* if_found_dict, Label* if_found_global, TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index, @@ -9310,7 +8847,7 @@ void CodeStubAssembler::TryLookupProperty( Label if_objectisspecial(this); GotoIf(IsSpecialReceiverInstanceType(instance_type), &if_objectisspecial); - TryLookupPropertyInSimpleObject(object, map, unique_name, if_found_fast, + TryLookupPropertyInSimpleObject(CAST(object), map, unique_name, if_found_fast, if_found_dict, var_meta_storage, var_name_index, if_not_found); @@ -9547,25 +9084,44 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor( // AccessorPair case. { if (mode == kCallJSGetter) { + Label if_callable(this), if_function_template_info(this); Node* accessor_pair = value; TNode<HeapObject> getter = CAST(LoadObjectField(accessor_pair, AccessorPair::kGetterOffset)); TNode<Map> getter_map = LoadMap(getter); - TNode<Uint16T> instance_type = LoadMapInstanceType(getter_map); - // FunctionTemplateInfo getters are not supported yet. - GotoIf(InstanceTypeEqual(instance_type, FUNCTION_TEMPLATE_INFO_TYPE), - if_bailout); + + GotoIf(IsCallableMap(getter_map), &if_callable); + GotoIf(IsFunctionTemplateInfoMap(getter_map), &if_function_template_info); // Return undefined if the {getter} is not callable. var_value.Bind(UndefinedConstant()); - GotoIfNot(IsCallableMap(getter_map), &done); + Goto(&done); + + BIND(&if_callable); + { + // Call the accessor. + Callable callable = CodeFactory::Call(isolate()); + Node* result = CallJS(callable, context, getter, receiver); + var_value.Bind(result); + Goto(&done); + } - // Call the accessor. - Callable callable = CodeFactory::Call(isolate()); - Node* result = CallJS(callable, context, getter, receiver); - var_value.Bind(result); + BIND(&if_function_template_info); + { + TNode<HeapObject> cached_property_name = LoadObjectField<HeapObject>( + getter, FunctionTemplateInfo::kCachedPropertyNameOffset); + GotoIfNot(IsTheHole(cached_property_name), if_bailout); + + TNode<NativeContext> creation_context = + GetCreationContext(CAST(receiver), if_bailout); + var_value.Bind(CallBuiltin( + Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver, + creation_context, getter, IntPtrConstant(0), receiver)); + Goto(&done); + } + } else { + Goto(&done); } - Goto(&done); } // AccessorInfo case. 
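// Illustrative sketch, not part of the diff: the AccessorPair branch above now
// distinguishes three getter shapes instead of bailing out on every
// FunctionTemplateInfo. A hypothetical C++ dispatcher with the same three
// outcomes (Getter and its fields are assumptions, not V8 types):
#include <functional>
#include <optional>
#include <string>

enum class GetterKind { kCallable, kFunctionTemplate, kOther };

struct Getter {
  GetterKind kind = GetterKind::kOther;
  std::function<std::string()> call;      // invoked for the two callable kinds
  bool has_cached_property_name = false;  // FunctionTemplateInfo detail (assumed)
};

// Returns the property value, or nullopt when the caller must fall back to
// the generic runtime path.
std::optional<std::string> CallGetterIfAccessorSketch(const Getter& getter) {
  switch (getter.kind) {
    case GetterKind::kCallable:
      return getter.call();               // plain JS getter: just call it
    case GetterKind::kFunctionTemplate:
      // Templates with a cached property name still go to the runtime; all
      // other templates are now called instead of bailing out.
      if (getter.has_cached_property_name) return std::nullopt;
      return getter.call();
    case GetterKind::kOther:
      return std::string();               // stands in for "undefined"
  }
  return std::nullopt;
}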
@@ -9617,10 +9173,11 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor( GotoIfNot(IsLengthString( LoadObjectField(accessor_info, AccessorInfo::kNameOffset)), if_bailout); - Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); + TNode<Object> receiver_value = + LoadJSPrimitiveWrapperValue(CAST(receiver)); GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout); - GotoIfNot(IsString(receiver_value), if_bailout); - var_value.Bind(LoadStringLengthAsSmi(receiver_value)); + GotoIfNot(IsString(CAST(receiver_value)), if_bailout); + var_value.Bind(LoadStringLengthAsSmi(CAST(receiver_value))); Goto(&done); } } @@ -9808,18 +9365,14 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_isfaststringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); - Node* string = LoadJSPrimitiveWrapperValue(object); - CSA_ASSERT(this, IsString(string)); + TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(CAST(object))); TNode<IntPtrT> length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); Goto(&if_isobjectorsmi); } BIND(&if_isslowstringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); - Node* string = LoadJSPrimitiveWrapperValue(object); - CSA_ASSERT(this, IsString(string)); + TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(CAST(object))); TNode<IntPtrT> length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); Goto(&if_isdictionary); @@ -9892,8 +9445,8 @@ void CodeStubAssembler::TryPrototypeChainLookup( GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy); } - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_keyisindex(this), if_iskeyunique(this); TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_unique, @@ -9905,9 +9458,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( TVARIABLE(Map, var_holder_map, map); TVARIABLE(Int32T, var_holder_instance_type, instance_type); - VariableList merged_variables( - {&var_holder, &var_holder_map, &var_holder_instance_type}, zone()); - Label loop(this, merged_variables); + Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type}); Goto(&loop); BIND(&loop); { @@ -9950,9 +9501,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( TVARIABLE(Map, var_holder_map, map); TVARIABLE(Int32T, var_holder_instance_type, instance_type); - VariableList merged_variables( - {&var_holder, &var_holder_map, &var_holder_instance_type}, zone()); - Label loop(this, merged_variables); + Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type}); Goto(&loop); BIND(&loop); { @@ -9978,22 +9527,22 @@ void CodeStubAssembler::TryPrototypeChainLookup( } } -Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object, - SloppyTNode<Object> prototype) { - CSA_ASSERT(this, TaggedIsNotSmi(object)); - VARIABLE(var_result, MachineRepresentation::kTagged); +TNode<Oddball> CodeStubAssembler::HasInPrototypeChain(TNode<Context> context, + TNode<HeapObject> object, + TNode<Object> prototype) { + TVARIABLE(Oddball, var_result); Label return_false(this), return_true(this), return_runtime(this, Label::kDeferred), return_result(this); // Loop through the prototype chain looking for the {prototype}. 
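// Illustrative sketch, not part of the diff: the loop that follows is the CSA
// form of a plain prototype-chain walk. Minimal C++ analog with a hypothetical
// ObjectLike node type (the real code also defers special receivers such as
// proxies to the runtime):
struct ObjectLike {
  const ObjectLike* prototype = nullptr;  // null terminates the chain
};

bool HasInPrototypeChainSketch(const ObjectLike* object,
                               const ObjectLike* prototype) {
  for (const ObjectLike* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == prototype) return true;      // found the candidate on the chain
  }
  return false;                           // hit null: not on the chain
}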
- VARIABLE(var_object_map, MachineRepresentation::kTagged, LoadMap(object)); + TVARIABLE(Map, var_object_map, LoadMap(object)); Label loop(this, &var_object_map); Goto(&loop); BIND(&loop); { // Check if we can determine the prototype directly from the {object_map}. Label if_objectisdirect(this), if_objectisspecial(this, Label::kDeferred); - Node* object_map = var_object_map.value(); + TNode<Map> object_map = var_object_map.value(); TNode<Uint16T> object_instance_type = LoadMapInstanceType(object_map); Branch(IsSpecialReceiverInstanceType(object_instance_type), &if_objectisspecial, &if_objectisdirect); @@ -10018,22 +9567,22 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object, // Continue with the prototype. CSA_ASSERT(this, TaggedIsNotSmi(object_prototype)); - var_object_map.Bind(LoadMap(object_prototype)); + var_object_map = LoadMap(object_prototype); Goto(&loop); } BIND(&return_true); - var_result.Bind(TrueConstant()); + var_result = TrueConstant(); Goto(&return_result); BIND(&return_false); - var_result.Bind(FalseConstant()); + var_result = FalseConstant(); Goto(&return_result); BIND(&return_runtime); { // Fallback to the runtime implementation. - var_result.Bind( + var_result = CAST( CallRuntime(Runtime::kHasInPrototypeChain, context, object, prototype)); } Goto(&return_result); @@ -10042,63 +9591,67 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object, return var_result.value(); } -Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable, - Node* object) { - VARIABLE(var_result, MachineRepresentation::kTagged); +TNode<Oddball> CodeStubAssembler::OrdinaryHasInstance( + TNode<Context> context, TNode<Object> callable_maybe_smi, + TNode<Object> object_maybe_smi) { + TVARIABLE(Oddball, var_result); Label return_runtime(this, Label::kDeferred), return_result(this); GotoIfForceSlowPath(&return_runtime); // Goto runtime if {object} is a Smi. - GotoIf(TaggedIsSmi(object), &return_runtime); + GotoIf(TaggedIsSmi(object_maybe_smi), &return_runtime); // Goto runtime if {callable} is a Smi. - GotoIf(TaggedIsSmi(callable), &return_runtime); - - // Load map of {callable}. - TNode<Map> callable_map = LoadMap(callable); - - // Goto runtime if {callable} is not a JSFunction. - TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map); - GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE), - &return_runtime); + GotoIf(TaggedIsSmi(callable_maybe_smi), &return_runtime); - GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map, - &return_runtime); - - // Get the "prototype" (or initial map) of the {callable}. - TNode<HeapObject> callable_prototype = LoadObjectField<HeapObject>( - CAST(callable), JSFunction::kPrototypeOrInitialMapOffset); { - Label no_initial_map(this), walk_prototype_chain(this); - TVARIABLE(HeapObject, var_callable_prototype, callable_prototype); + // Load map of {callable}. + TNode<HeapObject> object = CAST(object_maybe_smi); + TNode<HeapObject> callable = CAST(callable_maybe_smi); + TNode<Map> callable_map = LoadMap(callable); - // Resolve the "prototype" if the {callable} has an initial map. - GotoIfNot(IsMap(callable_prototype), &no_initial_map); - var_callable_prototype = - LoadObjectField<HeapObject>(callable_prototype, Map::kPrototypeOffset); - Goto(&walk_prototype_chain); + // Goto runtime if {callable} is not a JSFunction. 
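// Illustrative sketch, not part of the diff: OrdinaryHasInstance, as
// restructured above, resolves the callable's "prototype" (indirecting through
// the initial map when one is installed) and then reuses the prototype-chain
// walk. Hypothetical C++ shape, reusing ObjectLike and
// HasInPrototypeChainSketch from the previous sketch:
struct FunctionLike {
  const ObjectLike* prototype_or_initial_map = nullptr;
  bool holds_initial_map = false;                     // IsMap() stand-in
  const ObjectLike* initial_map_prototype = nullptr;  // Map::kPrototypeOffset analog
};

bool OrdinaryHasInstanceSketch(const FunctionLike& callable,
                               const ObjectLike* object) {
  const ObjectLike* proto = callable.holds_initial_map
                                ? callable.initial_map_prototype
                                : callable.prototype_or_initial_map;
  // The real code defers to the runtime when "prototype" was never requested
  // (the field still holds the hole); this sketch just reports false.
  if (proto == nullptr) return false;
  return HasInPrototypeChainSketch(object, proto);
}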
+ TNode<Uint16T> callable_instance_type = LoadMapInstanceType(callable_map); + GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE), + &return_runtime); - BIND(&no_initial_map); - // {callable_prototype} is the hole if the "prototype" property hasn't been - // requested so far. - Branch(TaggedEqual(callable_prototype, TheHoleConstant()), &return_runtime, - &walk_prototype_chain); + GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map, + &return_runtime); - BIND(&walk_prototype_chain); - callable_prototype = var_callable_prototype.value(); - } + // Get the "prototype" (or initial map) of the {callable}. + TNode<HeapObject> callable_prototype = LoadObjectField<HeapObject>( + callable, JSFunction::kPrototypeOrInitialMapOffset); + { + Label no_initial_map(this), walk_prototype_chain(this); + TVARIABLE(HeapObject, var_callable_prototype, callable_prototype); + + // Resolve the "prototype" if the {callable} has an initial map. + GotoIfNot(IsMap(callable_prototype), &no_initial_map); + var_callable_prototype = LoadObjectField<HeapObject>( + callable_prototype, Map::kPrototypeOffset); + Goto(&walk_prototype_chain); + + BIND(&no_initial_map); + // {callable_prototype} is the hole if the "prototype" property hasn't + // been requested so far. + Branch(TaggedEqual(callable_prototype, TheHoleConstant()), + &return_runtime, &walk_prototype_chain); + + BIND(&walk_prototype_chain); + callable_prototype = var_callable_prototype.value(); + } - // Loop through the prototype chain looking for the {callable} prototype. - CSA_ASSERT(this, IsJSReceiver(callable_prototype)); - var_result.Bind(HasInPrototypeChain(context, object, callable_prototype)); - Goto(&return_result); + // Loop through the prototype chain looking for the {callable} prototype. + var_result = HasInPrototypeChain(context, object, callable_prototype); + Goto(&return_result); + } BIND(&return_runtime); { // Fallback to the runtime implementation. - var_result.Bind( - CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object)); + var_result = CAST(CallRuntime(Runtime::kOrdinaryHasInstance, context, + callable_maybe_smi, object_maybe_smi)); } Goto(&return_result); @@ -10111,34 +9664,72 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node, ParameterMode mode, int base_size) { CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, mode)); + if (mode == SMI_PARAMETERS) { + return ElementOffsetFromIndex(ReinterpretCast<Smi>(index_node), kind, + base_size); + } else { + DCHECK(mode == INTPTR_PARAMETERS); + return ElementOffsetFromIndex(ReinterpretCast<IntPtrT>(index_node), kind, + base_size); + } +} + +template <typename TIndex> +TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex( + TNode<TIndex> index_node, ElementsKind kind, int base_size) { + // TODO(v8:9708): Remove IntPtrT variant in favor of UintPtrT. 
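// Illustrative sketch, not part of the diff: ElementOffsetFromIndex computes a
// byte offset as base + (index << element_size_shift). For Smi-tagged indices
// the shift is first reduced by the Smi shift bits, so the tag is folded away
// instead of being stripped separately; a negative net shift then becomes an
// arithmetic shift right. Standalone analog:
#include <cstdint>

intptr_t ElementOffsetFromIndexSketch(intptr_t index, int element_size_shift,
                                      int base_size) {
  if (element_size_shift >= 0) {
    return base_size + (index << element_size_shift);
  }
  return base_size + (index >> -element_size_shift);  // e.g. Smi-tagged index
}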
+ static_assert(std::is_same<TIndex, Smi>::value || + std::is_same<TIndex, IntPtrT>::value || + std::is_same<TIndex, UintPtrT>::value, + "Only Smi, UintPtrT or IntPtrT index nodes are allowed"); int element_size_shift = ElementsKindToShiftSize(kind); int element_size = 1 << element_size_shift; int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize; intptr_t index = 0; + TNode<IntPtrT> intptr_index_node; bool constant_index = false; - if (mode == SMI_PARAMETERS) { + if (std::is_same<TIndex, Smi>::value) { + TNode<Smi> smi_index_node = ReinterpretCast<Smi>(index_node); element_size_shift -= kSmiShiftBits; Smi smi_index; - constant_index = ToSmiConstant(index_node, &smi_index); - if (constant_index) index = smi_index.value(); - index_node = BitcastTaggedSignedToWord(index_node); + constant_index = ToSmiConstant(smi_index_node, &smi_index); + if (constant_index) { + index = smi_index.value(); + } else { + if (COMPRESS_POINTERS_BOOL) { + smi_index_node = NormalizeSmiIndex(smi_index_node); + } + } + intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node); } else { - DCHECK(mode == INTPTR_PARAMETERS); - constant_index = ToIntPtrConstant(index_node, &index); + intptr_index_node = ReinterpretCast<IntPtrT>(index_node); + constant_index = ToIntPtrConstant(intptr_index_node, &index); } if (constant_index) { return IntPtrConstant(base_size + element_size * index); } - TNode<WordT> shifted_index = + TNode<IntPtrT> shifted_index = (element_size_shift == 0) - ? UncheckedCast<WordT>(index_node) + ? intptr_index_node : ((element_size_shift > 0) - ? WordShl(index_node, IntPtrConstant(element_size_shift)) - : WordSar(index_node, IntPtrConstant(-element_size_shift))); + ? WordShl(intptr_index_node, + IntPtrConstant(element_size_shift)) + : WordSar(intptr_index_node, + IntPtrConstant(-element_size_shift))); return IntPtrAdd(IntPtrConstant(base_size), Signed(shifted_index)); } +// Instantiate ElementOffsetFromIndex for Smi and IntPtrT. +template V8_EXPORT_PRIVATE TNode<IntPtrT> +CodeStubAssembler::ElementOffsetFromIndex<Smi>(TNode<Smi> index_node, + ElementsKind kind, + int base_size); +template V8_EXPORT_PRIVATE TNode<IntPtrT> +CodeStubAssembler::ElementOffsetFromIndex<IntPtrT>(TNode<IntPtrT> index_node, + ElementsKind kind, + int base_size); + TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset, SloppyTNode<IntPtrT> length, int header_size, @@ -10146,8 +9737,7 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset, // Make sure we point to the last field. int element_size = 1 << ElementsKindToShiftSize(kind); int correction = header_size - kHeapObjectTag - element_size; - TNode<IntPtrT> last_offset = - ElementOffsetFromIndex(length, kind, INTPTR_PARAMETERS, correction); + TNode<IntPtrT> last_offset = ElementOffsetFromIndex(length, kind, correction); return IntPtrLessThanOrEqual(offset, last_offset); } @@ -10203,8 +9793,9 @@ TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVectorForStub() { return CAST(LoadFeedbackVector(function)); } -void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector, - Node* slot_id) { +void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback, + TNode<HeapObject> maybe_vector, + TNode<UintPtrT> slot_id) { Label end(this); // If feedback_vector is not valid, then nothing to do. 
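// Illustrative sketch, not part of the diff: UpdateFeedback combines the new
// feedback bits into the slot with a bitwise OR and skips the store (and the
// "feedback updated" bookkeeping) when nothing actually changed. Standalone
// analog with a plain integer slot:
#include <cstdint>

// Returns true when the slot was written, i.e. when new information arrived.
bool UpdateFeedbackSketch(uint32_t new_feedback, uint32_t* slot) {
  uint32_t combined = *slot | new_feedback;
  if (combined == *slot) return false;  // already recorded, nothing to do
  *slot = combined;
  return true;
}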
GotoIf(IsUndefined(maybe_vector), &end); @@ -10216,7 +9807,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector, TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(feedback_vector, slot_id); TNode<Smi> previous_feedback = CAST(feedback_element); - TNode<Smi> combined_feedback = SmiOr(previous_feedback, CAST(feedback)); + TNode<Smi> combined_feedback = SmiOr(previous_feedback, feedback); GotoIf(SmiEqual(previous_feedback, combined_feedback), &end); { @@ -10230,7 +9821,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector, } void CodeStubAssembler::ReportFeedbackUpdate( - SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot_id, + TNode<FeedbackVector> feedback_vector, SloppyTNode<UintPtrT> slot_id, const char* reason) { // Reset profiler ticks. StoreObjectFieldNoWriteBarrier( @@ -10241,7 +9832,7 @@ void CodeStubAssembler::ReportFeedbackUpdate( // Trace the update. CallRuntime(Runtime::kInterpreterTraceUpdateFeedback, NoContextConstant(), LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset), - SmiTag(slot_id), StringConstant(reason)); + SmiTag(Signed(slot_id)), StringConstant(reason)); #endif // V8_TRACE_FEEDBACK_UPDATES } @@ -10285,14 +9876,16 @@ TNode<Map> CodeStubAssembler::LoadReceiverMap(SloppyTNode<Object> receiver) { [=] { return LoadMap(UncheckedCast<HeapObject>(receiver)); }); } -TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) { +TNode<IntPtrT> CodeStubAssembler::TryToIntptr(SloppyTNode<Object> key, + Label* miss) { TVARIABLE(IntPtrT, var_intptr_key); Label done(this, &var_intptr_key), key_is_smi(this); GotoIf(TaggedIsSmi(key), &key_is_smi); + // Try to convert a heap number to a Smi. - GotoIfNot(IsHeapNumber(key), miss); + GotoIfNot(IsHeapNumber(CAST(key)), miss); { - TNode<Float64T> value = LoadHeapNumberValue(key); + TNode<Float64T> value = LoadHeapNumberValue(CAST(key)); TNode<Int32T> int_value = RoundFloat64ToInt32(value); GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss); var_intptr_key = ChangeInt32ToIntPtr(int_value); @@ -10301,7 +9894,7 @@ TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) { BIND(&key_is_smi); { - var_intptr_key = SmiUntag(key); + var_intptr_key = SmiUntag(CAST(key)); Goto(&done); } @@ -10354,7 +9947,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments( } Label if_mapped(this), if_unmapped(this), end(this, &var_result); TNode<IntPtrT> intptr_two = IntPtrConstant(2); - TNode<WordT> adjusted_length = IntPtrSub(elements_length, intptr_two); + TNode<IntPtrT> adjusted_length = IntPtrSub(elements_length, intptr_two); GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped); @@ -10510,33 +10103,35 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind, } } -Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) { +TNode<Uint8T> CodeStubAssembler::Int32ToUint8Clamped( + TNode<Int32T> int32_value) { Label done(this); TNode<Int32T> int32_zero = Int32Constant(0); TNode<Int32T> int32_255 = Int32Constant(255); - VARIABLE(var_value, MachineRepresentation::kWord32, int32_value); + TVARIABLE(Word32T, var_value, int32_value); GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done); - var_value.Bind(int32_zero); + var_value = int32_zero; GotoIf(Int32LessThan(int32_value, int32_zero), &done); - var_value.Bind(int32_255); + var_value = int32_255; Goto(&done); BIND(&done); - return var_value.value(); + return UncheckedCast<Uint8T>(var_value.value()); } -Node* 
CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) { +TNode<Uint8T> CodeStubAssembler::Float64ToUint8Clamped( + TNode<Float64T> float64_value) { Label done(this); - VARIABLE(var_value, MachineRepresentation::kWord32, Int32Constant(0)); + TVARIABLE(Word32T, var_value, Int32Constant(0)); GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done); - var_value.Bind(Int32Constant(255)); + var_value = Int32Constant(255); GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done); { TNode<Float64T> rounded_value = Float64RoundToEven(float64_value); - var_value.Bind(TruncateFloat64ToWord32(rounded_value)); + var_value = TruncateFloat64ToWord32(rounded_value); Goto(&done); } BIND(&done); - return var_value.value(); + return UncheckedCast<Uint8T>(var_value.value()); } Node* CodeStubAssembler::PrepareValueForWriteToTypedArray( @@ -10716,8 +10311,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout); } - TNode<RawPtrT> backing_store = LoadJSTypedArrayBackingStore(CAST(object)); - StoreElement(backing_store, elements_kind, intptr_key, converted_value, + TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(object)); + StoreElement(data_ptr, elements_kind, intptr_key, converted_value, parameter_mode); Goto(&done); @@ -10807,7 +10402,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, if (IsSmiElementsKind(elements_kind)) { GotoIfNot(TaggedIsSmi(value), bailout); } else if (IsDoubleElementsKind(elements_kind)) { - value = TryTaggedToFloat64(value, bailout); + value = TryTaggedToFloat64(CAST(value), bailout); } if (IsGrowStoreMode(store_mode) && @@ -11047,7 +10642,7 @@ TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) { } TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector( - SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot) { + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot) { TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext); TNode<HeapObject> site = Allocate(size, CodeStubAssembler::kPretenured); StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap); @@ -11090,19 +10685,16 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector( StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site); StoreFullTaggedNoWriteBarrier(site_list, site); - StoreFeedbackVectorSlot(feedback_vector, slot, site, UPDATE_WRITE_BARRIER, 0, - SMI_PARAMETERS); + StoreFeedbackVectorSlot(feedback_vector, slot, site); return CAST(site); } TNode<MaybeObject> CodeStubAssembler::StoreWeakReferenceInFeedbackVector( - SloppyTNode<FeedbackVector> feedback_vector, Node* slot, - SloppyTNode<HeapObject> value, int additional_offset, - ParameterMode parameter_mode) { + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<HeapObject> value, int additional_offset) { TNode<MaybeObject> weak_value = MakeWeak(value); StoreFeedbackVectorSlot(feedback_vector, slot, weak_value, - UPDATE_WRITE_BARRIER, additional_offset, - parameter_mode); + UPDATE_WRITE_BARRIER, additional_offset); return weak_value; } @@ -11135,14 +10727,14 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind( return elements_kind; } -Node* CodeStubAssembler::BuildFastLoop( - const CodeStubAssembler::VariableList& vars, Node* start_index, - Node* end_index, const FastLoopBody& body, int increment, - ParameterMode parameter_mode, 
IndexAdvanceMode advance_mode) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode)); - MachineRepresentation index_rep = ParameterRepresentation(parameter_mode); - VARIABLE(var, index_rep, start_index); +template <typename TIndex> +TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars, + TNode<TIndex> start_index, + TNode<TIndex> end_index, + const FastLoopBody<TIndex>& body, + int increment, + IndexAdvanceMode advance_mode) { + TVARIABLE(TIndex, var, start_index); VariableList vars_copy(vars.begin(), vars.end(), zone()); vars_copy.push_back(&var); Label loop(this, vars_copy); @@ -11154,8 +10746,7 @@ Node* CodeStubAssembler::BuildFastLoop( // to force the loop header check at the end of the loop and branch forward to // it from the pre-header). The extra branch is slower in the case that the // loop actually iterates. - TNode<BoolT> first_check = - IntPtrOrSmiEqual(var.value(), end_index, parameter_mode); + TNode<BoolT> first_check = IntPtrOrSmiEqual(var.value(), end_index); int32_t first_check_val; if (ToInt32Constant(first_check, &first_check_val)) { if (first_check_val) return var.value(); @@ -11167,19 +10758,28 @@ Node* CodeStubAssembler::BuildFastLoop( BIND(&loop); { if (advance_mode == IndexAdvanceMode::kPre) { - Increment(&var, increment, parameter_mode); + Increment(&var, increment); } body(var.value()); if (advance_mode == IndexAdvanceMode::kPost) { - Increment(&var, increment, parameter_mode); + Increment(&var, increment); } - Branch(IntPtrOrSmiNotEqual(var.value(), end_index, parameter_mode), &loop, - &after_loop); + Branch(IntPtrOrSmiNotEqual(var.value(), end_index), &loop, &after_loop); } BIND(&after_loop); return var.value(); } +// Instantiate BuildFastLoop for Smi and IntPtrT. +template TNode<Smi> CodeStubAssembler::BuildFastLoop<Smi>( + const VariableList& vars, TNode<Smi> start_index, TNode<Smi> end_index, + const FastLoopBody<Smi>& body, int increment, + IndexAdvanceMode advance_mode); +template TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<IntPtrT>( + const VariableList& vars, TNode<IntPtrT> start_index, + TNode<IntPtrT> end_index, const FastLoopBody<IntPtrT>& body, int increment, + IndexAdvanceMode advance_mode); + void CodeStubAssembler::BuildFastFixedArrayForEach( const CodeStubAssembler::VariableList& vars, Node* fixed_array, ElementsKind kind, Node* first_element_inclusive, @@ -11201,17 +10801,15 @@ void CodeStubAssembler::BuildFastFixedArrayForEach( if (direction == ForEachDirection::kForward) { for (int i = first_val; i < last_val; ++i) { TNode<IntPtrT> index = IntPtrConstant(i); - TNode<IntPtrT> offset = - ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS, - FixedArray::kHeaderSize - kHeapObjectTag); + TNode<IntPtrT> offset = ElementOffsetFromIndex( + index, kind, FixedArray::kHeaderSize - kHeapObjectTag); body(fixed_array, offset); } } else { for (int i = last_val - 1; i >= first_val; --i) { TNode<IntPtrT> index = IntPtrConstant(i); - TNode<IntPtrT> offset = - ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS, - FixedArray::kHeaderSize - kHeapObjectTag); + TNode<IntPtrT> offset = ElementOffsetFromIndex( + index, kind, FixedArray::kHeaderSize - kHeapObjectTag); body(fixed_array, offset); } } @@ -11228,11 +10826,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach( if (direction == ForEachDirection::kReverse) std::swap(start, limit); int increment = IsDoubleElementsKind(kind) ? 
kDoubleSize : kTaggedSize; - BuildFastLoop( + BuildFastLoop<IntPtrT>( vars, start, limit, - [fixed_array, &body](Node* offset) { body(fixed_array, offset); }, + [&](TNode<IntPtrT> offset) { body(fixed_array, offset); }, direction == ForEachDirection::kReverse ? -increment : increment, - INTPTR_PARAMETERS, direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre : IndexAdvanceMode::kPost); } @@ -11243,22 +10840,21 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace( doesnt_fit); } -void CodeStubAssembler::InitializeFieldsWithRoot(Node* object, - Node* start_offset, - Node* end_offset, +void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object, + TNode<IntPtrT> start_offset, + TNode<IntPtrT> end_offset, RootIndex root_index) { CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag)); end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag)); TNode<Object> root_value = LoadRoot(root_index); - BuildFastLoop( + BuildFastLoop<IntPtrT>( end_offset, start_offset, - [this, object, root_value](Node* current) { + [=](TNode<IntPtrT> current) { StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current, root_value); }, - -kTaggedSize, INTPTR_PARAMETERS, - CodeStubAssembler::IndexAdvanceMode::kPre); + -kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre); } void CodeStubAssembler::BranchIfNumberRelationalComparison( @@ -11384,11 +10980,9 @@ Operation Reverse(Operation op) { } } // anonymous namespace -Node* CodeStubAssembler::RelationalComparison(Operation op, - SloppyTNode<Object> left, - SloppyTNode<Object> right, - SloppyTNode<Context> context, - Variable* var_type_feedback) { +TNode<Oddball> CodeStubAssembler::RelationalComparison( + Operation op, TNode<Object> left, TNode<Object> right, + TNode<Context> context, TVariable<Smi>* var_type_feedback) { Label return_true(this), return_false(this), do_float_comparison(this), end(this); TVARIABLE(Oddball, var_result); // Actually only "true" or "false". @@ -11403,7 +10997,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, if (var_type_feedback != nullptr) { // Initialize the type feedback to None. The current feedback is combined // with the previous feedback. - var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kNone); loop_variable_list.push_back(var_type_feedback); } Label loop(this, loop_variable_list); @@ -11914,17 +11508,17 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value, } // ES6 section 7.2.12 Abstract Equality Comparison -Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, - SloppyTNode<Object> right, - SloppyTNode<Context> context, - Variable* var_type_feedback) { +TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left, + SloppyTNode<Object> right, + SloppyTNode<Context> context, + TVariable<Smi>* var_type_feedback) { // This is a slightly optimized version of Object::Equals. Whenever you // change something functionality wise in here, remember to update the // Object::Equals method as well. 
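// Illustrative sketch, not part of the diff: InitializeFieldsWithRoot above
// walks from end_offset down to start_offset with a pre-decrementing
// BuildFastLoop, storing the same root value into every tagged slot.
// Standalone analog; kTaggedSize is assumed to be 8 (no pointer compression):
#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr size_t kTaggedSizeSketch = 8;

// Fills every tagged slot in [start_offset, end_offset) of |object|, iterating
// from the end down to the start the way IndexAdvanceMode::kPre with a
// -kTaggedSize step does.
void InitializeFieldsWithValue(char* object, size_t start_offset,
                               size_t end_offset, uintptr_t value) {
  for (size_t current = end_offset; current != start_offset;) {
    current -= kTaggedSizeSketch;                    // advance before the body
    std::memcpy(object + current, &value, sizeof(value));
  }
}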
Label if_equal(this), if_notequal(this), do_float_comparison(this), do_right_stringtonumber(this, Label::kDeferred), end(this); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(Oddball, result); TVARIABLE(Float64T, var_left_float); TVARIABLE(Float64T, var_right_float); @@ -11984,7 +11578,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber); // {left} is Smi and {right} is not HeapNumber or Smi. if (var_type_feedback != nullptr) { - var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } GotoIf(IsBooleanMap(right_map), &if_right_boolean); TNode<Uint16T> right_type = LoadMapInstanceType(right_map); @@ -12009,8 +11603,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, BIND(&if_right_bigint); { - result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber, - NoContextConstant(), right, left)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber, + NoContextConstant(), right, left)); Goto(&end); } @@ -12046,7 +11640,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, BIND(&if_left_string); { GotoIfNot(IsStringInstanceType(right_type), &use_symmetry); - result.Bind(CallBuiltin(Builtins::kStringEqual, context, left, right)); + result = + CAST(CallBuiltin(Builtins::kStringEqual, context, left, right)); CombineFeedback(var_type_feedback, SmiOr(CollectFeedbackForString(left_type), CollectFeedbackForString(right_type))); @@ -12067,8 +11662,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, { Label if_right_boolean(this); if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber); GotoIf(IsBooleanMap(right_map), &if_right_boolean); @@ -12098,38 +11692,35 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, BIND(&if_right_heapnumber); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } - result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber, - NoContextConstant(), left, right)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber, + NoContextConstant(), left, right)); Goto(&end); } BIND(&if_right_bigint); { CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); - result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt, - NoContextConstant(), left, right)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt, + NoContextConstant(), left, right)); Goto(&end); } BIND(&if_right_string); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } - result.Bind(CallRuntime(Runtime::kBigIntEqualToString, - NoContextConstant(), left, right)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToString, + NoContextConstant(), left, right)); Goto(&end); } BIND(&if_right_boolean); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset); Goto(&loop); @@ -12154,8 +11745,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, if (var_type_feedback != nullptr) { 
// If {right} is undetectable, it must be either also // Null or Undefined, or a Receiver (aka document.all). - var_type_feedback->Bind(SmiConstant( - CompareOperationFeedback::kReceiverOrNullOrUndefined)); + *var_type_feedback = SmiConstant( + CompareOperationFeedback::kReceiverOrNullOrUndefined); } Goto(&if_equal); } @@ -12164,12 +11755,11 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, { if (var_type_feedback != nullptr) { // Track whether {right} is Null, Undefined or Receiver. - var_type_feedback->Bind(SmiConstant( - CompareOperationFeedback::kReceiverOrNullOrUndefined)); + *var_type_feedback = SmiConstant( + CompareOperationFeedback::kReceiverOrNullOrUndefined); GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal); GotoIfNot(IsBooleanMap(right_map), &if_notequal); - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } Goto(&if_notequal); } @@ -12178,8 +11768,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, BIND(&if_left_boolean); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } // If {right} is a Boolean too, it must be a different Boolean. @@ -12200,7 +11789,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, if (var_type_feedback != nullptr) { Label if_right_symbol(this); GotoIf(IsSymbolInstanceType(right_type), &if_right_symbol); - var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); Goto(&if_notequal); BIND(&if_right_symbol); @@ -12218,8 +11807,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, // {left} is a Primitive and {right} is a JSReceiver, so swapping // the order is not observable. if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } Goto(&use_symmetry); } @@ -12254,8 +11842,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, // When we get here, {right} must be either Null or Undefined. CSA_ASSERT(this, IsNullOrUndefined(right)); if (var_type_feedback != nullptr) { - var_type_feedback->Bind(SmiConstant( - CompareOperationFeedback::kReceiverOrNullOrUndefined)); + *var_type_feedback = SmiConstant( + CompareOperationFeedback::kReceiverOrNullOrUndefined); } Branch(IsUndetectableMap(left_map), &if_equal, &if_notequal); } @@ -12265,8 +11853,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, // {right} is a Primitive, and neither Null or Undefined; // convert {left} to Primitive too. 
if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); var_left = CallStub(callable, context, left); @@ -12298,13 +11885,13 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, BIND(&if_equal); { - result.Bind(TrueConstant()); + result = TrueConstant(); Goto(&end); } BIND(&if_notequal); { - result.Bind(FalseConstant()); + result = FalseConstant(); Goto(&end); } @@ -12312,9 +11899,9 @@ Node* CodeStubAssembler::Equal(SloppyTNode<Object> left, return result.value(); } -TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs, - SloppyTNode<Object> rhs, - Variable* var_type_feedback) { +TNode<Oddball> CodeStubAssembler::StrictEqual( + SloppyTNode<Object> lhs, SloppyTNode<Object> rhs, + TVariable<Smi>* var_type_feedback) { // Pseudo-code for the algorithm below: // // if (lhs == rhs) { @@ -12482,7 +12069,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs, CollectFeedbackForString(lhs_instance_type); TNode<Smi> rhs_feedback = CollectFeedbackForString(rhs_instance_type); - var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback)); + *var_type_feedback = SmiOr(lhs_feedback, rhs_feedback); } result = CAST(CallBuiltin(Builtins::kStringEqual, NoContextConstant(), lhs, rhs)); @@ -12556,7 +12143,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs, BIND(&if_lhsisoddball); { - STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE); + STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE); GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types); GotoIf(Int32LessThan(rhs_instance_type, Int32Constant(ODDBALL_TYPE)), @@ -12855,8 +12442,8 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context, return result.value(); } -Node* CodeStubAssembler::Typeof(Node* value) { - VARIABLE(result_var, MachineRepresentation::kTagged); +TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) { + TVARIABLE(String, result_var); Label return_number(this, Label::kDeferred), if_oddball(this), return_function(this), return_undefined(this), return_object(this), @@ -12864,7 +12451,8 @@ Node* CodeStubAssembler::Typeof(Node* value) { GotoIf(TaggedIsSmi(value), &return_number); - TNode<Map> map = LoadMap(value); + TNode<HeapObject> value_heap_object = CAST(value); + TNode<Map> map = LoadMap(value_heap_object); GotoIf(IsHeapNumberMap(map), &return_number); @@ -12890,49 +12478,50 @@ Node* CodeStubAssembler::Typeof(Node* value) { GotoIf(IsBigIntInstanceType(instance_type), &return_bigint); CSA_ASSERT(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE)); - result_var.Bind(HeapConstant(isolate()->factory()->symbol_string())); + result_var = HeapConstant(isolate()->factory()->symbol_string()); Goto(&return_result); BIND(&return_number); { - result_var.Bind(HeapConstant(isolate()->factory()->number_string())); + result_var = HeapConstant(isolate()->factory()->number_string()); Goto(&return_result); } BIND(&if_oddball); { - TNode<Object> type = LoadObjectField(value, Oddball::kTypeOfOffset); - result_var.Bind(type); + TNode<String> type = + CAST(LoadObjectField(value_heap_object, Oddball::kTypeOfOffset)); + result_var = type; Goto(&return_result); } BIND(&return_function); { - result_var.Bind(HeapConstant(isolate()->factory()->function_string())); + result_var = HeapConstant(isolate()->factory()->function_string()); Goto(&return_result); } 
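// Illustrative sketch, not part of the diff: the Typeof rewrite above only
// changes the plumbing to typed TNode<String> results; the dispatch itself is
// unchanged. A hypothetical C++ rendering of that dispatch:
#include <string>

enum class ValueKind { kSmi, kHeapNumber, kOddball, kFunction, kUndetectable,
                       kReceiver, kString, kSymbol, kBigInt };

std::string TypeofSketch(ValueKind kind, const std::string& oddball_typeof) {
  switch (kind) {
    case ValueKind::kSmi:
    case ValueKind::kHeapNumber:   return "number";
    case ValueKind::kOddball:      return oddball_typeof;  // "undefined", "boolean", ...
    case ValueKind::kFunction:     return "function";
    case ValueKind::kUndetectable: return "undefined";     // e.g. document.all
    case ValueKind::kReceiver:     return "object";
    case ValueKind::kString:       return "string";
    case ValueKind::kSymbol:       return "symbol";
    case ValueKind::kBigInt:       return "bigint";
  }
  return "object";
}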
BIND(&return_undefined); { - result_var.Bind(HeapConstant(isolate()->factory()->undefined_string())); + result_var = HeapConstant(isolate()->factory()->undefined_string()); Goto(&return_result); } BIND(&return_object); { - result_var.Bind(HeapConstant(isolate()->factory()->object_string())); + result_var = HeapConstant(isolate()->factory()->object_string()); Goto(&return_result); } BIND(&return_string); { - result_var.Bind(HeapConstant(isolate()->factory()->string_string())); + result_var = HeapConstant(isolate()->factory()->string_string()); Goto(&return_result); } BIND(&return_bigint); { - result_var.Bind(HeapConstant(isolate()->factory()->bigint_string())); + result_var = HeapConstant(isolate()->factory()->bigint_string()); Goto(&return_result); } @@ -12941,7 +12530,7 @@ Node* CodeStubAssembler::Typeof(Node* value) { } TNode<Object> CodeStubAssembler::GetSuperConstructor( - SloppyTNode<Context> context, SloppyTNode<JSFunction> active_function) { + TNode<Context> context, TNode<JSFunction> active_function) { Label is_not_constructor(this, Label::kDeferred), out(this); TVARIABLE(Object, result); @@ -13004,9 +12593,10 @@ TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor( return var_result.value(); } -Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, - Node* context) { - VARIABLE(var_result, MachineRepresentation::kTagged); +TNode<Oddball> CodeStubAssembler::InstanceOf(TNode<Object> object, + TNode<Object> callable, + TNode<Context> context) { + TVARIABLE(Oddball, var_result); Label if_notcallable(this, Label::kDeferred), if_notreceiver(this, Label::kDeferred), if_otherhandler(this), if_nohandler(this, Label::kDeferred), return_true(this), @@ -13014,7 +12604,7 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, // Ensure that the {callable} is actually a JSReceiver. GotoIf(TaggedIsSmi(callable), &if_notreceiver); - GotoIfNot(IsJSReceiver(callable), &if_notreceiver); + GotoIfNot(IsJSReceiver(CAST(callable)), &if_notreceiver); // Load the @@hasInstance property from {callable}. TNode<Object> inst_of_handler = @@ -13032,8 +12622,8 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, // Call to Function.prototype[@@hasInstance] directly. Callable builtin(BUILTIN_CODE(isolate(), FunctionPrototypeHasInstance), CallTrampolineDescriptor{}); - Node* result = CallJS(builtin, context, inst_of_handler, callable, object); - var_result.Bind(result); + var_result = + CAST(CallJS(builtin, context, inst_of_handler, callable, object)); Goto(&return_result); } @@ -13055,12 +12645,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, BIND(&if_nohandler); { // Ensure that the {callable} is actually Callable. - GotoIfNot(IsCallable(callable), &if_notcallable); + GotoIfNot(IsCallable(CAST(callable)), &if_notcallable); // Use the OrdinaryHasInstance algorithm. 
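// Illustrative sketch, not part of the diff: InstanceOf above keeps the
// spec-level ordering: a @@hasInstance handler on the callable wins, and only
// in its absence does the code require a callable object and fall back to
// OrdinaryHasInstance. Hypothetical C++ shape (the error messages and the
// fallback callback are assumptions, and the real code additionally special
// cases the default Function.prototype[@@hasInstance]):
#include <functional>
#include <stdexcept>

struct CallableLike {
  bool is_receiver = false;
  bool is_callable = false;
  // Empty when the callable has no Symbol.hasInstance handler.
  std::function<bool(const void*)> has_instance_handler;
};

bool InstanceOfSketch(const void* object, const CallableLike& callable,
                      const std::function<bool(const void*)>& ordinary_has_instance) {
  if (!callable.is_receiver)
    throw std::runtime_error("right-hand side of instanceof is not an object");
  if (callable.has_instance_handler)          // handler takes priority
    return callable.has_instance_handler(object);
  if (!callable.is_callable)
    throw std::runtime_error("right-hand side of instanceof is not callable");
  return ordinary_has_instance(object);       // e.g. the walk sketched earlier
}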
- TNode<Object> result = - CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object); - var_result.Bind(result); + var_result = CAST( + CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object)); Goto(&return_result); } @@ -13071,11 +12660,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, { ThrowTypeError(context, MessageTemplate::kNonObjectInInstanceOfCheck); } BIND(&return_true); - var_result.Bind(TrueConstant()); + var_result = TrueConstant(); Goto(&return_result); BIND(&return_false); - var_result.Bind(FalseConstant()); + var_result = FalseConstant(); Goto(&return_result); BIND(&return_result); @@ -13294,9 +12883,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult( return CAST(result); } -Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, - Node* key, - Node* value) { +TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry( + TNode<Context> context, TNode<Object> key, SloppyTNode<Object> value) { TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Smi> length = SmiConstant(2); int const elements_size = FixedArray::SizeFor(2); @@ -13326,7 +12914,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, array); StoreObjectFieldRoot(result, JSIteratorResult::kDoneOffset, RootIndex::kFalseValue); - return result; + return CAST(result); } TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context, @@ -13393,21 +12981,19 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength( return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset); } -CodeStubArguments::CodeStubArguments( - CodeStubAssembler* assembler, Node* argc, Node* fp, - CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode) +CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, + TNode<IntPtrT> argc, TNode<RawPtrT> fp, + ReceiverMode receiver_mode) : assembler_(assembler), - argc_mode_(param_mode), receiver_mode_(receiver_mode), argc_(argc), base_(), fp_(fp != nullptr ? 
fp : assembler_->LoadFramePointer()) { TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex( - argc_, SYSTEM_POINTER_ELEMENTS, param_mode, + argc_, SYSTEM_POINTER_ELEMENTS, (StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kSystemPointerSize); - base_ = - assembler_->UncheckedCast<RawPtrT>(assembler_->IntPtrAdd(fp_, offset)); + base_ = assembler_->RawPtrAdd(fp_, offset); } TNode<Object> CodeStubArguments::GetReceiver() const { @@ -13422,24 +13008,18 @@ void CodeStubArguments::SetReceiver(TNode<Object> object) const { base_, assembler_->IntPtrConstant(kSystemPointerSize), object); } -TNode<WordT> CodeStubArguments::AtIndexPtr( - Node* index, CodeStubAssembler::ParameterMode mode) const { - using Node = compiler::Node; - Node* negated_index = assembler_->IntPtrOrSmiSub( - assembler_->IntPtrOrSmiConstant(0, mode), index, mode); +TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const { + TNode<IntPtrT> negated_index = + assembler_->IntPtrOrSmiSub(assembler_->IntPtrConstant(0), index); TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex( - negated_index, SYSTEM_POINTER_ELEMENTS, mode, 0); - return assembler_->IntPtrAdd(assembler_->UncheckedCast<IntPtrT>(base_), - offset); + negated_index, SYSTEM_POINTER_ELEMENTS, 0); + return assembler_->RawPtrAdd(base_, offset); } -TNode<Object> CodeStubArguments::AtIndex( - Node* index, CodeStubAssembler::ParameterMode mode) const { - DCHECK_EQ(argc_mode_, mode); - CSA_ASSERT(assembler_, - assembler_->UintPtrOrSmiLessThan(index, GetLength(mode), mode)); +TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const { + CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength())); return assembler_->UncheckedCast<Object>( - assembler_->LoadFullTagged(AtIndexPtr(index, mode))); + assembler_->LoadFullTagged(AtIndexPtr(index))); } TNode<Object> CodeStubArguments::AtIndex(int index) const { @@ -13452,9 +13032,8 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue( CodeStubAssembler::Label argument_missing(assembler_), argument_done(assembler_, &result); - assembler_->GotoIf(assembler_->UintPtrOrSmiGreaterThanOrEqual( - assembler_->IntPtrOrSmiConstant(index, argc_mode_), - argc_, argc_mode_), + assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual( + assembler_->IntPtrConstant(index), argc_), &argument_missing); result = AtIndex(index); assembler_->Goto(&argument_done); @@ -13473,10 +13052,8 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue( CodeStubAssembler::Label argument_missing(assembler_), argument_done(assembler_, &result); - assembler_->GotoIf( - assembler_->UintPtrOrSmiGreaterThanOrEqual( - assembler_->IntPtrToParameter(index, argc_mode_), argc_, argc_mode_), - &argument_missing); + assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(index, argc_), + &argument_missing); result = AtIndex(index); assembler_->Goto(&argument_done); @@ -13490,43 +13067,38 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue( void CodeStubArguments::ForEach( const CodeStubAssembler::VariableList& vars, - const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last, - CodeStubAssembler::ParameterMode mode) { + const CodeStubArguments::ForEachBodyFunction& body, TNode<IntPtrT> first, + TNode<IntPtrT> last) const { assembler_->Comment("CodeStubArguments::ForEach"); if (first == nullptr) { - first = assembler_->IntPtrOrSmiConstant(0, mode); + first = assembler_->IntPtrConstant(0); } if (last == nullptr) { - DCHECK_EQ(mode, argc_mode_); last = argc_; } - TNode<IntPtrT> start = 
assembler_->IntPtrSub( - assembler_->UncheckedCast<IntPtrT>(base_), - assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS, mode)); - TNode<IntPtrT> end = assembler_->IntPtrSub( - assembler_->UncheckedCast<IntPtrT>(base_), - assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode)); - assembler_->BuildFastLoop( + TNode<RawPtrT> start = assembler_->RawPtrSub( + base_, + assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS)); + TNode<RawPtrT> end = assembler_->RawPtrSub( + base_, assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS)); + assembler_->BuildFastLoop<RawPtrT>( vars, start, end, - [this, &body](Node* current) { - Node* arg = assembler_->Load(MachineType::AnyTagged(), current); + [&](TNode<RawPtrT> current) { + TNode<Object> arg = assembler_->Load<Object>(current); body(arg); }, - -kSystemPointerSize, CodeStubAssembler::INTPTR_PARAMETERS, - CodeStubAssembler::IndexAdvanceMode::kPost); + -kSystemPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost); } void CodeStubArguments::PopAndReturn(Node* value) { - Node* pop_count; + TNode<IntPtrT> pop_count; if (receiver_mode_ == ReceiverMode::kHasReceiver) { - pop_count = assembler_->IntPtrOrSmiAdd( - argc_, assembler_->IntPtrOrSmiConstant(1, argc_mode_), argc_mode_); + pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1)); } else { pop_count = argc_; } - assembler_->PopAndReturn(assembler_->ParameterToIntPtr(pop_count, argc_mode_), - value); + assembler_->PopAndReturn(pop_count, value); } TNode<BoolT> CodeStubAssembler::IsFastElementsKind( @@ -13642,21 +13214,15 @@ Node* CodeStubAssembler:: } TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) { - CSA_ASSERT(this, SmiGreaterThanOrEqual(builtin_id, SmiConstant(0))); - CSA_ASSERT(this, - SmiLessThan(builtin_id, SmiConstant(Builtins::builtin_count))); + CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::builtin_count))); - int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize; - int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits; - TNode<WordT> table_index = - index_shift >= 0 - ? 
WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift) - : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift); - - return CAST( - Load(MachineType::TaggedPointer(), + TNode<IntPtrT> offset = + ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS); + + return CAST(BitcastWordToTagged( + Load(MachineType::Pointer(), ExternalConstant(ExternalReference::builtins_address(isolate())), - table_index)); + offset))); } TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode( @@ -13765,11 +13331,9 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode( return sfi_code.value(); } -Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map, - Node* shared_info, - Node* context) { - CSA_SLOW_ASSERT(this, IsMap(map)); - +TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext( + TNode<Map> map, TNode<SharedFunctionInfo> shared_info, + TNode<Context> context) { TNode<Code> const code = GetSharedFunctionInfoCode(shared_info); // TODO(ishell): All the callers of this function pass map loaded from @@ -13790,7 +13354,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map, shared_info); StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context); StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code); - return fun; + return CAST(fun); } void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver, @@ -13839,8 +13403,9 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver, } } -Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty, - Label* if_runtime) { +TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<HeapObject> receiver, + Label* if_empty, + Label* if_runtime) { Label if_fast(this), if_cache(this), if_no_cache(this, Label::kDeferred); TNode<Map> receiver_map = LoadMap(receiver); @@ -13855,7 +13420,7 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty, { // Avoid runtime-call for empty dictionary receivers. GotoIfNot(IsDictionaryMap(receiver_map), if_runtime); - TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver)); + TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver))); TNode<Smi> length = GetNumberOfElements(properties); GotoIfNot(TaggedEqual(length, SmiConstant(0)), if_runtime); // Check that there are no elements on the {receiver} and its prototype @@ -13881,8 +13446,7 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args, TorqueStructArguments CodeStubAssembler::GetFrameArguments( TNode<RawPtrT> frame, TNode<IntPtrT> argc) { - return CodeStubArguments(this, argc, frame, INTPTR_PARAMETERS) - .GetTorqueArguments(); + return CodeStubArguments(this, argc, frame).GetTorqueArguments(); } void CodeStubAssembler::Print(const char* s) { @@ -13976,9 +13540,8 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context, // TODO(delphick): Consider using // AllocateUninitializedJSArrayWithElements to avoid initializing an // array and then writing over it. 
- array = - AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length, SmiConstant(0), - nullptr, ParameterMode::SMI_PARAMETERS); + array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length, + SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS); Goto(&done); BIND(&done); diff --git a/chromium/v8/src/codegen/code-stub-assembler.h b/chromium/v8/src/codegen/code-stub-assembler.h index 9884d04e66e..eee3e7a376a 100644 --- a/chromium/v8/src/codegen/code-stub-assembler.h +++ b/chromium/v8/src/codegen/code-stub-assembler.h @@ -97,6 +97,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(iterator_symbol, iterator_symbol, IteratorSymbol) \ V(length_string, length_string, LengthString) \ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \ + V(match_symbol, match_symbol, MatchSymbol) \ V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \ V(MetaMap, meta_map, MetaMap) \ V(MinusZeroValue, minus_zero_value, MinusZero) \ @@ -114,7 +115,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(object_to_string, object_to_string, ObjectToString) \ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \ V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \ - V(premonomorphic_symbol, premonomorphic_symbol, PremonomorphicSymbol) \ V(PreparseDataMap, preparse_data_map, PreparseDataMap) \ V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \ V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \ @@ -157,11 +157,11 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) #ifdef DEBUG -#define CSA_CHECK(csa, x) \ - (csa)->Check( \ - [&]() -> compiler::Node* { \ - return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \ - }, \ +#define CSA_CHECK(csa, x) \ + (csa)->Check( \ + [&]() -> compiler::Node* { \ + return implicit_cast<SloppyTNode<Word32T>>(x); \ + }, \ #x, __FILE__, __LINE__) #else #define CSA_CHECK(csa, x) (csa)->FastCheck(x) @@ -255,10 +255,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler public TorqueGeneratedExportedMacrosAssembler { public: using Node = compiler::Node; - template <class T> - using TNode = compiler::TNode<T>; - template <class T> - using SloppyTNode = compiler::SloppyTNode<T>; template <typename T> using LazyNode = std::function<TNode<T>()>; @@ -303,11 +299,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return ParameterRepresentation(OptimalParameterMode()); } + TNode<IntPtrT> ParameterToIntPtr(TNode<Smi> value) { return SmiUntag(value); } + TNode<IntPtrT> ParameterToIntPtr(TNode<IntPtrT> value) { return value; } + // TODO(v8:9708): remove once all uses are ported. TNode<IntPtrT> ParameterToIntPtr(Node* value, ParameterMode mode) { if (mode == SMI_PARAMETERS) value = SmiUntag(value); return UncheckedCast<IntPtrT>(value); } + template <typename TIndex> + TNode<TIndex> IntPtrToParameter(TNode<IntPtrT> value); + Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) { if (mode == SMI_PARAMETERS) return SmiTag(value); return value; @@ -364,6 +366,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler #error Unknown architecture. #endif + // Pointer compression specific. Returns true if the upper 32 bits of a Smi + // contain the sign of a lower 32 bits (i.e. not corrupted) so that the Smi + // can be directly used as an index in element offset computation. + TNode<BoolT> IsValidSmiIndex(TNode<Smi> smi); + + // Pointer compression specific. 
Ensures that the upper 32 bits of a Smi + // contain the sign of a lower 32 bits so that the Smi can be directly used + // as an index in element offset computation. + TNode<Smi> NormalizeSmiIndex(TNode<Smi> smi_index); + TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) { GotoIf(TaggedIsNotSmi(value), fail); return UncheckedCast<Smi>(value); @@ -443,18 +455,52 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* MatchesParameterMode(Node* value, ParameterMode mode); -#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ - Node* OpName(Node* a, Node* b, ParameterMode mode) { \ - if (mode == SMI_PARAMETERS) { \ - return SmiOpName(CAST(a), CAST(b)); \ - } else { \ - DCHECK_EQ(INTPTR_PARAMETERS, mode); \ - return IntPtrOpName(a, b); \ - } \ - } +#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ + /* TODO(v8:9708): remove once all uses are ported. */ \ + Node* OpName(Node* a, Node* b, ParameterMode mode) { \ + if (mode == SMI_PARAMETERS) { \ + return SmiOpName(CAST(a), CAST(b)); \ + } else { \ + DCHECK_EQ(INTPTR_PARAMETERS, mode); \ + return IntPtrOpName(UncheckedCast<IntPtrT>(a), \ + UncheckedCast<IntPtrT>(b)); \ + } \ + } \ + TNode<Smi> OpName(TNode<Smi> a, TNode<Smi> b) { return SmiOpName(a, b); } \ + TNode<IntPtrT> OpName(TNode<IntPtrT> a, TNode<IntPtrT> b) { \ + return IntPtrOpName(a, b); \ + } \ + TNode<RawPtrT> OpName(TNode<RawPtrT> a, TNode<RawPtrT> b) { \ + return ReinterpretCast<RawPtrT>(IntPtrOpName( \ + ReinterpretCast<IntPtrT>(a), ReinterpretCast<IntPtrT>(b))); \ + } + // TODO(v8:9708): Define BInt operations once all uses are ported. PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin) PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd) PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub) +#undef PARAMETER_BINOP + +#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ + /* TODO(v8:9708): remove once all uses are ported. */ \ + TNode<BoolT> OpName(Node* a, Node* b, ParameterMode mode) { \ + if (mode == SMI_PARAMETERS) { \ + return SmiOpName(CAST(a), CAST(b)); \ + } else { \ + DCHECK_EQ(INTPTR_PARAMETERS, mode); \ + return IntPtrOpName(UncheckedCast<IntPtrT>(a), \ + UncheckedCast<IntPtrT>(b)); \ + } \ + } \ + TNode<BoolT> OpName(TNode<Smi> a, TNode<Smi> b) { return SmiOpName(a, b); } \ + TNode<BoolT> OpName(TNode<IntPtrT> a, TNode<IntPtrT> b) { \ + return IntPtrOpName(a, b); \ + } \ + TNode<BoolT> OpName(TNode<RawPtrT> a, TNode<RawPtrT> b) { \ + return IntPtrOpName(a, b); \ + } + // TODO(v8:9708): Define BInt operations once all uses are ported. + PARAMETER_BINOP(IntPtrOrSmiEqual, WordEqual, SmiEqual) + PARAMETER_BINOP(IntPtrOrSmiNotEqual, WordNotEqual, SmiNotEqual) PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan) PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual, SmiLessThanOrEqual) @@ -473,31 +519,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler intptr_t ConstexprWordNot(intptr_t a) { return ~a; } uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; } - TNode<BoolT> TaggedEqual(TNode<UnionT<Object, MaybeObject>> a, - TNode<UnionT<Object, MaybeObject>> b) { - // In pointer-compressed architectures, the instruction selector will narrow - // this comparison to a 32-bit one. 
+ TNode<BoolT> TaggedEqual(TNode<AnyTaggedT> a, TNode<AnyTaggedT> b) { +#ifdef V8_COMPRESS_POINTERS + return Word32Equal(ChangeTaggedToCompressed(a), + ChangeTaggedToCompressed(b)); +#else return WordEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b)); +#endif } - TNode<BoolT> TaggedNotEqual(TNode<UnionT<Object, MaybeObject>> a, - TNode<UnionT<Object, MaybeObject>> b) { - // In pointer-compressed architectures, the instruction selector will narrow - // this comparison to a 32-bit one. - return WordNotEqual(ReinterpretCast<WordT>(a), ReinterpretCast<WordT>(b)); + TNode<BoolT> TaggedNotEqual(TNode<AnyTaggedT> a, TNode<AnyTaggedT> b) { + return Word32BinaryNot(TaggedEqual(a, b)); } TNode<Object> NoContextConstant(); #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \ + TNode<std::remove_pointer<std::remove_reference<decltype( \ std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \ name##Constant(); HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \ + TNode<std::remove_pointer<std::remove_reference<decltype( \ std::declval<Heap>().rootAccessorName())>::type>::type> \ name##Constant(); HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) @@ -511,11 +556,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<BInt> BIntConstant(int value); + template <typename TIndex> + TNode<TIndex> IntPtrOrSmiConstant(int value); + // TODO(v8:9708): remove once all uses are ported. Node* IntPtrOrSmiConstant(int value, ParameterMode mode); - TNode<BoolT> IntPtrOrSmiEqual(Node* left, Node* right, ParameterMode mode); - TNode<BoolT> IntPtrOrSmiNotEqual(Node* left, Node* right, ParameterMode mode); + bool IsIntPtrOrSmiConstantZero(TNode<Smi> test); + bool IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test); + // TODO(v8:9708): remove once all uses are ported. bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode); + bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value, ParameterMode mode); @@ -557,25 +607,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value); TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); } TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value); + TNode<Smi> SmiFromUint32(TNode<Uint32T> value); TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); } TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value); // Smi operations. 
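For orientation on the Smi operations rewritten just below: a Smi keeps its payload shifted left by kSmiShiftSize + kSmiTagSize with a zero tag bit, so with 32-bit payloads a plain word addition of two tagged Smis is itself a correctly tagged Smi, while with 31-bit payloads (pointer compression) the macro first truncates to 32 bits so that stale upper bits cannot leak into the result. The following plain C++ sketch of that arithmetic is illustrative only and not part of the patch; it assumes the conventional constants (kSmiTagSize == 1, total shift of 32 for 32-bit payloads and 1 for 31-bit payloads) and an arithmetic right shift.

// Sketch of the arithmetic the SMI_ARITHMETIC_BINOP macro relies on; not part of the patch.
#include <cassert>
#include <cstdint>

constexpr int kShift32BitPayload = 32;  // kSmiShiftSize(31) + kSmiTagSize(1)
constexpr int kShift31BitPayload = 1;   // kSmiShiftSize(0)  + kSmiTagSize(1)

constexpr int64_t SmiTag(int64_t value, int shift) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << shift);
}
constexpr int64_t SmiUntag(int64_t tagged, int shift) { return tagged >> shift; }

int main() {
  // 32-bit payloads: the low 32 bits of a tagged Smi are zero, so adding the
  // raw words (IntPtrAdd on the bitcast values) yields the tagged sum directly.
  int64_t a = SmiTag(5, kShift32BitPayload), b = SmiTag(7, kShift32BitPayload);
  assert(SmiUntag(a + b, kShift32BitPayload) == 12);

  // 31-bit payloads: the whole tagged Smi lives in the low 32 bits, so the
  // macro truncates, adds as int32, and sign-extends back (Int32Add followed
  // by ChangeInt32ToIntPtr), keeping garbage in the upper half out of the sum.
  int64_t c = SmiTag(-3, kShift31BitPayload), d = SmiTag(9, kShift31BitPayload);
  int32_t sum32 = static_cast<int32_t>(c) + static_cast<int32_t>(d);
  assert(SmiUntag(static_cast<int64_t>(sum32), kShift31BitPayload) == 6);
  return 0;
}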
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \ - if (SmiValuesAre32Bits()) { \ - return BitcastWordToTaggedSigned(IntPtrOpName( \ - BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \ - } else { \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \ - Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \ - } \ +#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \ + if (SmiValuesAre32Bits()) { \ + return BitcastWordToTaggedSigned( \ + IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \ + BitcastTaggedToWordForTagAndSmiBits(b))); \ + } else { \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Int32OpName( \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))))); \ + } \ } SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add) SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub) @@ -595,38 +647,40 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Smi> SmiShl(TNode<Smi> a, int shift) { return BitcastWordToTaggedSigned( - WordShl(BitcastTaggedSignedToWord(a), shift)); + WordShl(BitcastTaggedToWordForTagAndSmiBits(a), shift)); } TNode<Smi> SmiShr(TNode<Smi> a, int shift) { if (kTaggedSize == kInt64Size) { return BitcastWordToTaggedSigned( - WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + WordAnd(WordShr(BitcastTaggedToWordForTagAndSmiBits(a), shift), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } else { // For pointer compressed Smis, we want to make sure that we truncate to // int32 before shifting, to avoid the values of the top 32-bits from // leaking into the sign bit of the smi. return BitcastWordToTaggedSigned(WordAnd( ChangeInt32ToIntPtr(Word32Shr( - TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), + shift)), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } } TNode<Smi> SmiSar(TNode<Smi> a, int shift) { if (kTaggedSize == kInt64Size) { return BitcastWordToTaggedSigned( - WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + WordAnd(WordSar(BitcastTaggedToWordForTagAndSmiBits(a), shift), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } else { // For pointer compressed Smis, we want to make sure that we truncate to // int32 before shifting, to avoid the values of the top 32-bits from // changing the sign bit of the smi. 
return BitcastWordToTaggedSigned(WordAnd( ChangeInt32ToIntPtr(Word32Sar( - TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), + shift)), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } } @@ -648,21 +702,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } } -#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \ - if (kTaggedSize == kInt64Size) { \ - return IntPtrOpName(BitcastTaggedSignedToWord(a), \ - BitcastTaggedSignedToWord(b)); \ - } else { \ - DCHECK_EQ(kTaggedSize, kInt32Size); \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \ - } \ +#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \ + if (kTaggedSize == kInt64Size) { \ + return IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \ + BitcastTaggedToWordForTagAndSmiBits(b)); \ + } else { \ + DCHECK_EQ(kTaggedSize, kInt32Size); \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return Int32OpName( \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))); \ + } \ } SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal) SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual) @@ -856,9 +911,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value); // Check a value for smi-ness - TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a); TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a); - TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a); + TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a) { + return TaggedIsSmi(UncheckedCast<MaybeObject>(a)); + } + TNode<BoolT> TaggedIsNotSmi(TNode<MaybeObject> a); + TNode<BoolT> TaggedIsNotSmi(SloppyTNode<Object> a) { + return TaggedIsNotSmi(UncheckedCast<MaybeObject>(a)); + } // Check that the value is a non-negative smi. TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a); // Check that a word has a word-aligned address. @@ -918,9 +978,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise. void GotoIfForceSlowPath(Label* if_true); - // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect. - void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true); - // Load value from current parent frame by given offset in bytes. Node* LoadFromParentFrame(int offset, MachineType type = MachineType::AnyTagged()); @@ -1060,9 +1117,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map); void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow); - // Load the properties backing store of a JSObject. - TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object); - TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object); + // Load the properties backing store of a JSReceiver. 
+ TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSReceiver> object); + TNode<HeapObject> LoadFastProperties(SloppyTNode<JSReceiver> object); // Load the elements backing store of a JSObject. TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object) { return LoadJSObjectElements(object); @@ -1148,10 +1205,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string); // Load length field of a String object as uint32_t value. TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string); - // Loads a pointer to the sequential String char array. - Node* PointerToSeqStringData(Node* seq_string); // Load value field of a JSPrimitiveWrapper object. - Node* LoadJSPrimitiveWrapperValue(Node* object); + TNode<Object> LoadJSPrimitiveWrapperValue(TNode<JSPrimitiveWrapper> object); // Figures out whether the value of maybe_object is: // - a SMI (jump to "if_smi", "extracted" will be the SMI value) @@ -1175,7 +1230,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<BoolT> IsWeakOrCleared(TNode<MaybeObject> value); TNode<BoolT> IsCleared(TNode<MaybeObject> value); - TNode<BoolT> IsNotCleared(TNode<MaybeObject> value); + TNode<BoolT> IsNotCleared(TNode<MaybeObject> value) { + return Word32BinaryNot(IsCleared(value)); + } // Removes the weak bit + asserts it was set. TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value); @@ -1183,12 +1240,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<HeapObject> GetHeapObjectAssumeWeak(TNode<MaybeObject> value, Label* if_cleared); - TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> object, - TNode<Object> value); - TNode<BoolT> IsNotWeakReferenceTo(TNode<MaybeObject> object, - TNode<Object> value); - TNode<BoolT> IsStrongReferenceTo(TNode<MaybeObject> object, - TNode<Object> value); + // Checks if |maybe_object| is a weak reference to given |heap_object|. + // Works for both any tagged |maybe_object| values. + TNode<BoolT> IsWeakReferenceTo(TNode<MaybeObject> maybe_object, + TNode<HeapObject> heap_object); + // Returns true if the |object| is a HeapObject and |maybe_object| is a weak + // reference to |object|. + // The |maybe_object| must not be a Smi. + TNode<BoolT> IsWeakReferenceToObject(TNode<MaybeObject> maybe_object, + TNode<Object> object); TNode<MaybeObject> MakeWeak(TNode<HeapObject> value); @@ -1341,9 +1401,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole); // Load a feedback slot from a FeedbackVector. 
+ template <typename TIndex> TNode<MaybeObject> LoadFeedbackVectorSlot( - Node* object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot, + int additional_offset = 0); TNode<IntPtrT> LoadFeedbackVectorLength(TNode<FeedbackVector>); TNode<Float64T> LoadDoubleWithHoleCheck(TNode<FixedDoubleArray> array, @@ -1383,13 +1444,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<BigInt> BigIntFromInt32Pair(TNode<IntPtrT> low, TNode<IntPtrT> high); TNode<BigInt> BigIntFromUint32Pair(TNode<UintPtrT> low, TNode<UintPtrT> high); - void StoreJSTypedArrayElementFromTagged(TNode<Context> context, - TNode<JSTypedArray> typed_array, - TNode<Smi> index_node, - TNode<Object> value, - ElementsKind elements_kind); - // Context manipulation + TNode<BoolT> LoadContextHasExtensionField(SloppyTNode<Context> context); TNode<Object> LoadContextElement(SloppyTNode<Context> context, int slot_index); TNode<Object> LoadContextElement(SloppyTNode<Context> context, @@ -1608,10 +1664,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } void StoreFeedbackVectorSlot( - Node* object, Node* index, Node* value, + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<AnyTaggedT> value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + int additional_offset = 0); void EnsureArrayLengthWritable(TNode<Map> map, Label* bailout); @@ -1633,8 +1689,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value, Label* bailout); - void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address, - Node* value); + void StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address, + TNode<IntPtrT> end_address, + TNode<Object> value); Node* AllocateCellWithValue(Node* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); @@ -1642,7 +1699,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER); } - Node* LoadCellValue(Node* cell); + TNode<Object> LoadCellValue(Node* cell); void StoreCellValue(Node* cell, Node* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); @@ -1698,11 +1755,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<String> parent, TNode<Smi> offset); - // Allocate an appropriate one- or two-byte ConsString with the first and - // second parts specified by |left| and |right|. - TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left, - TNode<String> right); - TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for); TNode<NameDictionary> AllocateNameDictionary( TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone); @@ -1714,26 +1766,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler template <typename CollectionType> Node* AllocateOrderedHashTable(); - // Builds code that finds OrderedHashTable entry for a key with hash code - // {hash} with using the comparison code generated by {key_compare}. The code - // jumps to {entry_found} if the key is found, or to {not_found} if the key - // was not found. In the {entry_found} branch, the variable - // entry_start_position will be bound to the index of the entry (relative to - // OrderedHashTable::kHashTableStartIndex). - // - // The {CollectionType} template parameter stands for the particular instance - // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. 
- template <typename CollectionType> - void FindOrderedHashTableEntry( - Node* table, Node* hash, - const std::function<void(TNode<Object>, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found); - template <typename CollectionType> TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity); Node* AllocateStruct(Node* map, AllocationFlags flags = kNone); - void InitializeStructBody(Node* object, Node* map, Node* size, + void InitializeStructBody(TNode<HeapObject> object, TNode<IntPtrT> size, int start_offset = Struct::kHeaderSize); TNode<JSObject> AllocateJSObjectFromMap( @@ -1742,14 +1779,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SlackTrackingMode slack_tracking_mode = kNoSlackTracking); void InitializeJSObjectFromMap( - Node* object, Node* map, Node* instance_size, Node* properties = nullptr, + SloppyTNode<HeapObject> object, SloppyTNode<Map> map, + SloppyTNode<IntPtrT> instance_size, Node* properties = nullptr, Node* elements = nullptr, SlackTrackingMode slack_tracking_mode = kNoSlackTracking); - void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map, - Node* instance_size); + void InitializeJSObjectBodyWithSlackTracking( + SloppyTNode<HeapObject> object, SloppyTNode<Map> map, + SloppyTNode<IntPtrT> instance_size); void InitializeJSObjectBodyNoSlackTracking( - Node* object, Node* map, Node* instance_size, + SloppyTNode<HeapObject> object, SloppyTNode<Map> map, + SloppyTNode<IntPtrT> instance_size, int start_offset = JSObject::kHeaderSize); TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity, @@ -1762,7 +1802,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler std::pair<TNode<JSArray>, TNode<FixedArrayBase>> AllocateUninitializedJSArrayWithElements( ElementsKind kind, TNode<Map> array_map, TNode<Smi> length, - Node* allocation_site, Node* capacity, + TNode<AllocationSite> allocation_site, Node* capacity, ParameterMode capacity_mode = INTPTR_PARAMETERS, AllocationFlags allocation_flags = kNone, int array_header_size = JSArray::kSize); @@ -1771,20 +1811,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // The ParameterMode argument is only used for the capacity parameter. 
TNode<JSArray> AllocateJSArray( ElementsKind kind, TNode<Map> array_map, Node* capacity, - TNode<Smi> length, Node* allocation_site = nullptr, + TNode<Smi> length, TNode<AllocationSite> allocation_site = {}, ParameterMode capacity_mode = INTPTR_PARAMETERS, AllocationFlags allocation_flags = kNone); TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity, TNode<Smi> length) { - return AllocateJSArray(kind, array_map, capacity, length, nullptr, + return AllocateJSArray(kind, array_map, capacity, length, {}, SMI_PARAMETERS); } TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity, TNode<Smi> length, AllocationFlags allocation_flags = kNone) { - return AllocateJSArray(kind, array_map, capacity, length, nullptr, + return AllocateJSArray(kind, array_map, capacity, length, {}, INTPTR_PARAMETERS, allocation_flags); } @@ -1792,7 +1832,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<JSArray> AllocateJSArray(TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length, - Node* allocation_site = nullptr, + TNode<AllocationSite> allocation_site = {}, int array_header_size = JSArray::kSize); enum class HoleConversionMode { kDontConvert, kConvertToUndefined }; @@ -1806,15 +1846,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // If |convert_holes| is set kDontConvert, holes are also copied to the // resulting array, who will have the same elements kind as |array|. The // function generates significantly less code in this case. - Node* CloneFastJSArray( - Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS, - Node* allocation_site = nullptr, + TNode<JSArray> CloneFastJSArray( + TNode<Context> context, TNode<JSArray> array, + ParameterMode mode = INTPTR_PARAMETERS, + TNode<AllocationSite> allocation_site = {}, HoleConversionMode convert_holes = HoleConversionMode::kDontConvert); - Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count, + Node* ExtractFastJSArray(TNode<Context> context, TNode<JSArray> array, + Node* begin, Node* count, ParameterMode mode = INTPTR_PARAMETERS, Node* capacity = nullptr, - Node* allocation_site = nullptr); + TNode<AllocationSite> allocation_site = {}); TNode<FixedArrayBase> AllocateFixedArray( ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS, @@ -1828,6 +1870,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler fixed_array_map); } + TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver, + Label* if_bailout); + TNode<Object> GetConstructor(TNode<Map> map); + TNode<Map> GetStructMap(InstanceType instance_type); TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) { @@ -1879,10 +1925,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Object> object, IterationKind mode); + // TODO(v8:9722): Return type should be JSIteratorResult TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context, SloppyTNode<Object> value, SloppyTNode<Oddball> done); - Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value); + + // TODO(v8:9722): Return type should be JSIteratorResult + TNode<JSObject> AllocateJSIteratorResultForEntry(TNode<Context> context, + TNode<Object> key, + SloppyTNode<Object> value); TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context, TNode<Object> originalArray, @@ -1904,6 +1955,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler enum class DestroySource { kNo, kYes }; + // Collect the callable |maybe_target| feedback for either a CALL_IC or + // an INSTANCEOF_IC 
in the |feedback_vector| at |slot_id|. + void CollectCallableFeedback(TNode<Object> maybe_target, + TNode<Context> context, + TNode<FeedbackVector> feedback_vector, + TNode<UintPtrT> slot_id); + + // Collect CALL_IC feedback for |maybe_target| function in the + // |feedback_vector| at |slot_id|, and the call counts in + // the |feedback_vector| at |slot_id+1|. + void CollectCallFeedback(TNode<Object> maybe_target, TNode<Context> context, + TNode<HeapObject> maybe_feedback_vector, + TNode<UintPtrT> slot_id); + + // Increment the call count for a CALL_IC or construct call. + // The call count is located at feedback_vector[slot_id + 1]. + void IncrementCallCount(TNode<FeedbackVector> feedback_vector, + TNode<UintPtrT> slot_id); + // Specify DestroySource::kYes if {from_array} is being supplanted by // {to_array}. This offers a slight performance benefit by simply copying the // array word by word. The source may be destroyed at the end of this macro. @@ -2152,27 +2222,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // kAllFixedArrays, the generated code is more compact and efficient if the // caller can specify whether only FixedArrays or FixedDoubleArrays will be // passed as the |source| parameter. - Node* CloneFixedArray(Node* source, - ExtractFixedArrayFlags flags = - ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) { + TNode<FixedArrayBase> CloneFixedArray( + TNode<FixedArrayBase> source, + ExtractFixedArrayFlags flags = + ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) { ParameterMode mode = OptimalParameterMode(); return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr, nullptr, flags, mode); } - // Copies |character_count| elements from |from_string| to |to_string| - // starting at the |from_index|'th character. |from_string| and |to_string| - // can either be one-byte strings or two-byte strings, although if - // |from_string| is two-byte, then |to_string| must be two-byte. - // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <= - // |from_index| <= |from_index| + |character_count| <= from_string.length and - // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length. - void CopyStringCharacters(Node* from_string, Node* to_string, - TNode<IntPtrT> from_index, TNode<IntPtrT> to_index, - TNode<IntPtrT> character_count, - String::Encoding from_encoding, - String::Encoding to_encoding); - // Loads an element from |array| of |from_kind| elements by given |offset| // (NOTE: not index!), does a hole check if |if_hole| is provided and // converts the value so that it becomes ready for storing to array of @@ -2194,21 +2252,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Tries to grow the |elements| array of given |object| to store the |key| // or bails out if the growing gap is too big. Returns new elements. - Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, - Node* key, Label* bailout); + TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements, + ElementsKind kind, Node* key, + Label* bailout); // Tries to grow the |capacity|-length |elements| array of given |object| // to store the |key| or bails out if the growing gap is too big. Returns // new elements. 
- Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, - Node* key, Node* capacity, ParameterMode mode, - Label* bailout); + TNode<FixedArrayBase> TryGrowElementsCapacity(Node* object, Node* elements, + ElementsKind kind, Node* key, + Node* capacity, + ParameterMode mode, + Label* bailout); // Grows elements capacity of given object. Returns new elements. - Node* GrowElementsCapacity(Node* object, Node* elements, - ElementsKind from_kind, ElementsKind to_kind, - Node* capacity, Node* new_capacity, - ParameterMode mode, Label* bailout); + TNode<FixedArrayBase> GrowElementsCapacity(Node* object, Node* elements, + ElementsKind from_kind, + ElementsKind to_kind, + Node* capacity, Node* new_capacity, + ParameterMode mode, + Label* bailout); // Given a need to grow by |growth|, allocate an appropriate new capacity // if necessary, and return a new elements FixedArray object. Label |bailout| @@ -2223,25 +2286,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* base_allocation_size, Node* allocation_site); - Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber); - Node* TruncateTaggedToFloat64(Node* context, Node* value); - Node* TruncateTaggedToWord32(Node* context, Node* value); - void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number, - Variable* var_word32, Label* if_bigint, - Variable* var_bigint); - void TaggedToWord32OrBigIntWithFeedback( - Node* context, Node* value, Label* if_number, Variable* var_word32, - Label* if_bigint, Variable* var_bigint, Variable* var_feedback); + TNode<Float64T> TryTaggedToFloat64(TNode<Object> value, + Label* if_valueisnotnumber); + TNode<Float64T> TruncateTaggedToFloat64(SloppyTNode<Context> context, + SloppyTNode<Object> value); + TNode<Word32T> TruncateTaggedToWord32(SloppyTNode<Context> context, + SloppyTNode<Object> value); + void TaggedToWord32OrBigInt(TNode<Context> context, TNode<Object> value, + Label* if_number, TVariable<Word32T>* var_word32, + Label* if_bigint, + TVariable<Object>* var_maybe_bigint); + void TaggedToWord32OrBigIntWithFeedback(TNode<Context> context, + TNode<Object> value, Label* if_number, + TVariable<Word32T>* var_word32, + Label* if_bigint, + TVariable<Object>* var_maybe_bigint, + TVariable<Smi>* var_feedback); // Truncate the floating point value of a HeapNumber to an Int32. TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object); // Conversions. 
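The conversion helpers declared just below (TryHeapNumberToSmi and TryFloat64ToSmi now write into a TVariable<Smi>* out-parameter instead of a reference) hinge on one question: can a float64 be represented losslessly as a Smi? That requires an integral value in Smi range that is not -0, since -0 must stay a HeapNumber. A minimal stand-alone sketch of that predicate follows; the name and the 31-bit payload bounds are assumptions for illustration, not part of the patch.

// Sketch of a TryFloat64ToSmi-style check; illustrative only.
#include <cmath>
#include <cstdint>
#include <optional>

std::optional<int32_t> TryFloat64ToSmi31(double value) {
  constexpr int32_t kSmiMin = -(1 << 30);       // 31-bit payload assumed
  constexpr int32_t kSmiMax = (1 << 30) - 1;
  if (!(value >= kSmiMin && value <= kSmiMax)) return std::nullopt;  // NaN lands here too
  const int32_t as_int = static_cast<int32_t>(value);
  if (static_cast<double>(as_int) != value) return std::nullopt;     // not integral
  if (as_int == 0 && std::signbit(value)) return std::nullopt;       // -0 stays a HeapNumber
  return as_int;
}

Judging from the signatures below, the CSA versions store the tagged result through the out-parameter and jump to if_smi on success, leaving the fall-through as the HeapNumber path.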
- void TryHeapNumberToSmi(TNode<HeapNumber> number, - TVariable<Smi>& output, // NOLINT(runtime/references) + void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>* output, Label* if_smi); - void TryFloat64ToSmi(TNode<Float64T> number, - TVariable<Smi>& output, // NOLINT(runtime/references) + void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>* output, Label* if_smi); TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value); TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value); @@ -2377,7 +2445,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type); TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map); TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object); - TNode<BoolT> IsJSGeneratorObject(SloppyTNode<HeapObject> object); + TNode<BoolT> IsJSGeneratorObject(TNode<HeapObject> object); TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type); TNode<BoolT> IsJSGlobalProxyMap(SloppyTNode<Map> map); TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object); @@ -2388,6 +2456,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object); TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object); TNode<BoolT> IsJSStringIterator(SloppyTNode<HeapObject> object); + TNode<BoolT> IsJSRegExpStringIterator(SloppyTNode<HeapObject> object); TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type); TNode<BoolT> IsJSReceiverMap(SloppyTNode<Map> map); TNode<BoolT> IsJSReceiver(SloppyTNode<HeapObject> object); @@ -2395,6 +2464,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type); TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map); TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object); + TNode<BoolT> IsJSGeneratorMap(TNode<Map> map); TNode<BoolT> IsJSPrimitiveWrapperInstanceType( SloppyTNode<Int32T> instance_type); TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map); @@ -2537,47 +2607,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Return the single character string with only {code}. TNode<String> StringFromSingleCharCode(TNode<Int32T> code); - // Return a new string object which holds a substring containing the range - // [from,to[ of string. - TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from, - TNode<IntPtrT> to); - - // Return a new string object produced by concatenating |first| with |second|. - TNode<String> StringAdd(Node* context, TNode<String> first, - TNode<String> second); - - // Check if |string| is an indirect (thin or flat cons) string type that can - // be dereferenced by DerefIndirectString. - void BranchIfCanDerefIndirectString(TNode<String> string, - TNode<Int32T> instance_type, - Label* can_deref, Label* cannot_deref); - // Unpack an indirect (thin or flat cons) string type. - void DerefIndirectString(TVariable<String>* var_string, - TNode<Int32T> instance_type); - // Check if |var_string| has an indirect (thin or flat cons) string type, - // and unpack it if so. - void MaybeDerefIndirectString(TVariable<String>* var_string, - TNode<Int32T> instance_type, Label* did_deref, - Label* cannot_deref); - // Check if |var_left| or |var_right| has an indirect (thin or flat cons) - // string type, and unpack it/them if so. Fall through if nothing was done. 
- void MaybeDerefIndirectStrings(TVariable<String>* var_left, - TNode<Int32T> left_instance_type, - TVariable<String>* var_right, - TNode<Int32T> right_instance_type, - Label* did_something); - TNode<String> DerefIndirectString(TNode<String> string, - TNode<Int32T> instance_type, - Label* cannot_deref); - - TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint); - // Type conversion helpers. enum class BigIntHandling { kConvertToNumber, kThrow }; // Convert a String to a Number. TNode<Number> StringToNumber(TNode<String> input); // Convert a Number to a String. TNode<String> NumberToString(TNode<Number> input); + TNode<String> NumberToString(TNode<Number> input, Label* bailout); + // Convert a Non-Number object to a Number. TNode<Number> NonNumberToNumber( SloppyTNode<Context> context, SloppyTNode<HeapObject> input, @@ -2715,6 +2752,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return Word32Equal(Word32And(word32, const_mask), const_mask); } + // Returns true if the bit field |BitField| in |word32| is equal to a given. + // constant |value|. Avoids a shift compared to using DecodeWord32. + template <typename BitField> + TNode<BoolT> IsEqualInWord32(TNode<Word32T> word32, + typename BitField::FieldType value) { + TNode<Word32T> masked_word32 = + Word32And(word32, Int32Constant(BitField::kMask)); + return Word32Equal(masked_word32, Int32Constant(BitField::encode(value))); + } + // Returns true if any of the |T|'s bits in given |word| are set. template <typename T> TNode<BoolT> IsSetWord(SloppyTNode<WordT> word) { @@ -2730,9 +2777,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Smi-encoding of the mask is performed implicitly! TNode<BoolT> IsSetSmi(SloppyTNode<Smi> smi, int untagged_mask) { intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask)); - return WordNotEqual( - WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)), - IntPtrConstant(0)); + return WordNotEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(smi), + IntPtrConstant(mask_word)), + IntPtrConstant(0)); } // Returns true if all of the |T|'s bits in given |word32| are clear. @@ -2762,11 +2809,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void IncrementCounter(StatsCounter* counter, int delta); void DecrementCounter(StatsCounter* counter, int delta); - void Increment(Variable* variable, int value = 1, - ParameterMode mode = INTPTR_PARAMETERS); - void Decrement(Variable* variable, int value = 1, - ParameterMode mode = INTPTR_PARAMETERS) { - Increment(variable, -value, mode); + template <typename TIndex> + void Increment(TVariable<TIndex>* variable, int value = 1); + + template <typename TIndex> + void Decrement(TVariable<TIndex>* variable, int value = 1) { + Increment(variable, -value); } // Generates "if (false) goto label" code. Useful for marking a label as @@ -2780,8 +2828,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Note: If |key| does not yet have a hash, |if_notinternalized| will be taken // even if |key| is an array index. |if_keyisunique| will never // be taken for array indices. 
- void TryToName(Node* key, Label* if_keyisindex, Variable* var_index, - Label* if_keyisunique, Variable* var_unique, Label* if_bailout, + void TryToName(SloppyTNode<Object> key, Label* if_keyisindex, + TVariable<IntPtrT>* var_index, Label* if_keyisunique, + TVariable<Name>* var_unique, Label* if_bailout, Label* if_notinternalized = nullptr); // Performs a hash computation and string table lookup for the given string, @@ -2793,8 +2842,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // - |if_not_internalized| if the string is not in the string table (but // does not add it). // - |if_bailout| for unsupported cases (e.g. uncachable array index). - void TryInternalizeString(Node* string, Label* if_index, Variable* var_index, - Label* if_internalized, Variable* var_internalized, + void TryInternalizeString(SloppyTNode<String> string, Label* if_index, + TVariable<IntPtrT>* var_index, + Label* if_internalized, + TVariable<Name>* var_internalized, Label* if_not_internalized, Label* if_bailout); // Calculates array index for given dictionary entry and entry field. @@ -2938,10 +2989,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Object> BasicLoadNumberDictionaryElement( TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index, Label* not_data, Label* if_hole); - void BasicStoreNumberDictionaryElement(TNode<NumberDictionary> dictionary, - TNode<IntPtrT> intptr_index, - TNode<Object> value, Label* not_data, - Label* if_hole, Label* read_only); template <class Dictionary> void FindInsertionEntry(TNode<Dictionary> dictionary, TNode<Name> key, @@ -3053,7 +3100,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // // Note: this code does not check if the global dictionary points to deleted // entry! This has to be done by the caller. - void TryLookupProperty(SloppyTNode<JSObject> object, SloppyTNode<Map> map, + void TryLookupProperty(SloppyTNode<JSReceiver> object, SloppyTNode<Map> map, SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name, Label* if_found_fast, Label* if_found_dict, Label* if_found_global, @@ -3113,10 +3160,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Returns true if {object} has {prototype} somewhere in it's prototype // chain, otherwise false is returned. Might cause arbitrary side effects // due to [[GetPrototypeOf]] invocations. - Node* HasInPrototypeChain(Node* context, Node* object, - SloppyTNode<Object> prototype); + TNode<Oddball> HasInPrototypeChain(TNode<Context> context, + TNode<HeapObject> object, + TNode<Object> prototype); // ES6 section 7.3.19 OrdinaryHasInstance (C, O) - Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object); + TNode<Oddball> OrdinaryHasInstance(TNode<Context> context, + TNode<Object> callable, + TNode<Object> object); // Load type feedback vector from the stub caller's frame. TNode<FeedbackVector> LoadFeedbackVectorForStub(); @@ -3137,12 +3187,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SloppyTNode<JSFunction> closure); // Update the type feedback vector. - void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id); + void UpdateFeedback(TNode<Smi> feedback, + TNode<HeapObject> maybe_feedback_vector, + TNode<UintPtrT> slot_id); // Report that there was a feedback update, performing any tasks that should // be done after a feedback update. 
- void ReportFeedbackUpdate(SloppyTNode<FeedbackVector> feedback_vector, - SloppyTNode<IntPtrT> slot_id, const char* reason); + void ReportFeedbackUpdate(TNode<FeedbackVector> feedback_vector, + SloppyTNode<UintPtrT> slot_id, const char* reason); // Combine the new feedback with the existing_feedback. Do nothing if // existing_feedback is nullptr. @@ -3185,8 +3237,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode<Context> LoadScriptContext(TNode<Context> context, TNode<IntPtrT> context_index); - Node* Int32ToUint8Clamped(Node* int32_value); - Node* Float64ToUint8Clamped(Node* float64_value); + TNode<Uint8T> Int32ToUint8Clamped(TNode<Int32T> int32_value); + TNode<Uint8T> Float64ToUint8Clamped(TNode<Float64T> float64_value); Node* PrepareValueForWriteToTypedArray(TNode<Object> input, ElementsKind elements_kind, @@ -3229,13 +3281,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Store a weak in-place reference into the FeedbackVector. TNode<MaybeObject> StoreWeakReferenceInFeedbackVector( - SloppyTNode<FeedbackVector> feedback_vector, Node* slot, - SloppyTNode<HeapObject> value, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, + TNode<HeapObject> value, int additional_offset = 0); // Create a new AllocationSite and install it into a feedback vector. TNode<AllocationSite> CreateAllocationSiteInFeedbackVector( - SloppyTNode<FeedbackVector> feedback_vector, TNode<Smi> slot); + TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot); // TODO(ishell, cbruni): Change to HasBoilerplate. TNode<BoolT> NotHasBoilerplate(TNode<Object> maybe_literal_site); @@ -3245,19 +3296,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler enum class IndexAdvanceMode { kPre, kPost }; - using FastLoopBody = std::function<void(Node* index)>; + template <typename TIndex> + using FastLoopBody = std::function<void(TNode<TIndex> index)>; - Node* BuildFastLoop(const VariableList& var_list, Node* start_index, - Node* end_index, const FastLoopBody& body, int increment, - ParameterMode parameter_mode, - IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre); + template <typename TIndex> + TNode<TIndex> BuildFastLoop( + const VariableList& var_list, TNode<TIndex> start_index, + TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment, + IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre); - Node* BuildFastLoop(Node* start_index, Node* end_index, - const FastLoopBody& body, int increment, - ParameterMode parameter_mode, - IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) { + template <typename TIndex> + TNode<TIndex> BuildFastLoop( + TNode<TIndex> start_index, TNode<TIndex> end_index, + const FastLoopBody<TIndex>& body, int increment, + IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) { return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body, - increment, parameter_mode, advance_mode); + increment, advance_mode); } enum class ForEachDirection { kForward, kReverse }; @@ -3304,13 +3358,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Label* doesnt_fit, int base_size, ParameterMode mode); - void InitializeFieldsWithRoot(Node* object, Node* start_offset, - Node* end_offset, RootIndex root); + void InitializeFieldsWithRoot(TNode<HeapObject> object, + TNode<IntPtrT> start_offset, + TNode<IntPtrT> end_offset, RootIndex root); - Node* RelationalComparison(Operation op, SloppyTNode<Object> left, - SloppyTNode<Object> right, - SloppyTNode<Context> context, - Variable* var_type_feedback = 
nullptr); + TNode<Oddball> RelationalComparison( + Operation op, TNode<Object> left, TNode<Object> right, + TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr); void BranchIfNumberRelationalComparison(Operation op, SloppyTNode<Number> left, @@ -3360,12 +3414,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false); - Node* Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs, - SloppyTNode<Context> context, - Variable* var_type_feedback = nullptr); + TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs, + SloppyTNode<Context> context, + TVariable<Smi>* var_type_feedback = nullptr); TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs, - Variable* var_type_feedback = nullptr); + TVariable<Smi>* var_type_feedback = nullptr); // ECMA#sec-samevalue // Similar to StrictEqual except that NaNs are treated as equal and minus zero @@ -3395,16 +3449,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler HasPropertyLookupMode::kHasProperty); } - Node* Typeof(Node* value); + TNode<String> Typeof(SloppyTNode<Object> value); - TNode<Object> GetSuperConstructor(SloppyTNode<Context> context, - SloppyTNode<JSFunction> active_function); + TNode<Object> GetSuperConstructor(TNode<Context> context, + TNode<JSFunction> active_function); TNode<JSReceiver> SpeciesConstructor( SloppyTNode<Context> context, SloppyTNode<Object> object, SloppyTNode<JSReceiver> default_constructor); - Node* InstanceOf(Node* object, Node* callable, Node* context); + TNode<Oddball> InstanceOf(TNode<Object> object, TNode<Object> callable, + TNode<Context> context); // Debug helpers Node* IsDebugActive(); @@ -3431,8 +3486,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // JSTypedArray helpers TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array); - TNode<RawPtrT> LoadJSTypedArrayBackingStore(TNode<JSTypedArray> typed_array); + TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array); + template <typename TIndex> + TNode<IntPtrT> ElementOffsetFromIndex(TNode<TIndex> index, ElementsKind kind, + int base_size = 0); + // TODO(v8:9708): remove once all uses are ported. 
TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind, ParameterMode mode, int base_size = 0); @@ -3451,8 +3510,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SloppyTNode<SharedFunctionInfo> shared_info, Label* if_compile_lazy = nullptr); - Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info, - Node* context); + TNode<JSFunction> AllocateFunctionWithMapAndContext( + TNode<Map> map, TNode<SharedFunctionInfo> shared_info, + TNode<Context> context); // Promise helpers Node* IsPromiseHookEnabled(); @@ -3463,7 +3523,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // for..in helpers void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, Label* if_fast, Label* if_slow); - Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime); + TNode<Map> CheckEnumCache(TNode<HeapObject> receiver, Label* if_empty, + Label* if_runtime); TNode<Object> GetArgumentValue(TorqueStructArguments args, TNode<IntPtrT> index); @@ -3620,11 +3681,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* receiver, Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetter); - TNode<IntPtrT> TryToIntptr(Node* key, Label* miss); - - void BranchIfPrototypesHaveNoElements(Node* receiver_map, - Label* definitely_no_elements, - Label* possibly_elements); + TNode<IntPtrT> TryToIntptr(SloppyTNode<Object> key, Label* miss); void InitializeFunctionContext(Node* native_context, Node* context, int slots); @@ -3655,13 +3712,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Allocate and return a JSArray of given total size in bytes with header // fields initialized. - TNode<JSArray> AllocateUninitializedJSArray(TNode<Map> array_map, - TNode<Smi> length, - Node* allocation_site, - TNode<IntPtrT> size_in_bytes); + TNode<JSArray> AllocateUninitializedJSArray( + TNode<Map> array_map, TNode<Smi> length, + TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes); TNode<BoolT> IsValidSmi(TNode<Smi> smi); - Node* SmiShiftBitsConstant(); + + TNode<IntPtrT> SmiShiftBitsConstant() { + return IntPtrConstant(kSmiShiftSize + kSmiTagSize); + } + TNode<Int32T> SmiShiftBitsConstant32() { + return Int32Constant(kSmiShiftSize + kSmiTagSize); + } // Emits keyed sloppy arguments load if the |value| is nullptr or store // otherwise. Returns either the loaded value or |value|. @@ -3689,10 +3751,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void GenerateEqual_Same(SloppyTNode<Object> value, Label* if_equal, Label* if_notequal, Variable* var_type_feedback = nullptr); - TNode<String> AllocAndCopyStringCharacters(Node* from, - Node* from_instance_type, - TNode<IntPtrT> from_index, - TNode<IntPtrT> character_count); static const int kElementLoopUnrollThreshold = 8; @@ -3705,11 +3763,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Variable* var_numeric, Variable* var_feedback); template <Object::Conversion conversion> - void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number, - Variable* var_word32, + void TaggedToWord32OrBigIntImpl(TNode<Context> context, TNode<Object> value, + Label* if_number, + TVariable<Word32T>* var_word32, Label* if_bigint = nullptr, - Variable* var_bigint = nullptr, - Variable* var_feedback = nullptr); + TVariable<Object>* var_maybe_bigint = nullptr, + TVariable<Smi>* var_feedback = nullptr); private: // Low-level accessors for Descriptor arrays. 
@@ -3727,36 +3786,48 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } }; +// template <typename TIndex> class V8_EXPORT_PRIVATE CodeStubArguments { public: using Node = compiler::Node; - template <class T> - using TNode = compiler::TNode<T>; - template <class T> - using SloppyTNode = compiler::SloppyTNode<T>; enum ReceiverMode { kHasReceiver, kNoReceiver }; - // |argc| is an intptr value which specifies the number of arguments passed - // to the builtin excluding the receiver. The arguments will include a - // receiver iff |receiver_mode| is kHasReceiver. - CodeStubArguments(CodeStubAssembler* assembler, Node* argc, + // |argc| specifies the number of arguments passed to the builtin excluding + // the receiver. The arguments will include a receiver iff |receiver_mode| + // is kHasReceiver. + CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc, ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) - : CodeStubArguments(assembler, argc, nullptr, - CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) { - } + : CodeStubArguments(assembler, argc, TNode<RawPtrT>(), receiver_mode) {} + + CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) + : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc), + TNode<RawPtrT>(), receiver_mode) {} - // |argc| is either a smi or intptr depending on |param_mode|. The arguments - // include a receiver iff |receiver_mode| is kHasReceiver. - CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp, - CodeStubAssembler::ParameterMode param_mode, + // TODO(v8:9708): Consider removing this variant + CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) + : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), + TNode<RawPtrT>(), receiver_mode) {} + + // |argc| specifies the number of arguments passed to the builtin excluding + // the receiver. The arguments will include a receiver iff |receiver_mode| + // is kHasReceiver. + CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc, + TNode<RawPtrT> fp, ReceiverMode receiver_mode = ReceiverMode::kHasReceiver); + CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc, + TNode<RawPtrT> fp, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) + : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp, + receiver_mode) {} + // Used by Torque to construct arguments based on a Torque-defined // struct of values. CodeStubArguments(CodeStubAssembler* assembler, TorqueStructArguments torque_arguments) : assembler_(assembler), - argc_mode_(CodeStubAssembler::INTPTR_PARAMETERS), receiver_mode_(ReceiverMode::kHasReceiver), argc_(torque_arguments.length), base_(torque_arguments.base), @@ -3769,14 +3840,17 @@ class V8_EXPORT_PRIVATE CodeStubArguments { void SetReceiver(TNode<Object> object) const; // Computes address of the index'th argument. 
- TNode<WordT> AtIndexPtr(Node* index, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) const; + TNode<RawPtrT> AtIndexPtr(TNode<IntPtrT> index) const; + TNode<RawPtrT> AtIndexPtr(TNode<Smi> index) const { + return AtIndexPtr(assembler_->ParameterToIntPtr(index)); + } // |index| is zero-based and does not include the receiver - TNode<Object> AtIndex(Node* index, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) const; + TNode<Object> AtIndex(TNode<IntPtrT> index) const; + // TODO(v8:9708): Consider removing this variant + TNode<Object> AtIndex(TNode<Smi> index) const { + return AtIndex(assembler_->ParameterToIntPtr(index)); + } TNode<Object> AtIndex(int index) const; @@ -3786,15 +3860,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments { TNode<Object> GetOptionalArgumentValue(int index, TNode<Object> default_value); - Node* GetLength(CodeStubAssembler::ParameterMode mode) const { - DCHECK_EQ(mode, argc_mode_); - return argc_; - } + TNode<IntPtrT> GetLength() const { return argc_; } TorqueStructArguments GetTorqueArguments() const { - DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); - return TorqueStructArguments{assembler_->UncheckedCast<RawPtrT>(fp_), base_, - assembler_->UncheckedCast<IntPtrT>(argc_)}; + return TorqueStructArguments{fp_, base_, argc_}; } TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) { @@ -3802,28 +3871,32 @@ class V8_EXPORT_PRIVATE CodeStubArguments { } TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index, TNode<Object> default_value); - TNode<IntPtrT> GetLength() const { - DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); - return assembler_->UncheckedCast<IntPtrT>(argc_); - } - using ForEachBodyFunction = std::function<void(Node* arg)>; + using ForEachBodyFunction = std::function<void(TNode<Object> arg)>; // Iteration doesn't include the receiver. |first| and |last| are zero-based. - void ForEach(const ForEachBodyFunction& body, Node* first = nullptr, - Node* last = nullptr, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) { + template <typename TIndex> + void ForEach(const ForEachBodyFunction& body, TNode<TIndex> first = {}, + TNode<TIndex> last = {}) const { CodeStubAssembler::VariableList list(0, assembler_->zone()); ForEach(list, body, first, last); } // Iteration doesn't include the receiver. |first| and |last| are zero-based. 
void ForEach(const CodeStubAssembler::VariableList& vars, - const ForEachBodyFunction& body, Node* first = nullptr, - Node* last = nullptr, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS); + const ForEachBodyFunction& body, TNode<IntPtrT> first = {}, + TNode<IntPtrT> last = {}) const; + + void ForEach(const CodeStubAssembler::VariableList& vars, + const ForEachBodyFunction& body, TNode<Smi> first, + TNode<Smi> last = {}) const { + TNode<IntPtrT> first_intptr = assembler_->ParameterToIntPtr(first); + TNode<IntPtrT> last_intptr; + if (last != nullptr) { + last_intptr = assembler_->ParameterToIntPtr(last); + } + return ForEach(vars, body, first_intptr, last_intptr); + } void PopAndReturn(Node* value); @@ -3831,11 +3904,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments { Node* GetArguments(); CodeStubAssembler* assembler_; - CodeStubAssembler::ParameterMode argc_mode_; ReceiverMode receiver_mode_; - Node* argc_; + TNode<IntPtrT> argc_; TNode<RawPtrT> base_; - Node* fp_; + TNode<RawPtrT> fp_; }; class ToDirectStringAssembler : public CodeStubAssembler { diff --git a/chromium/v8/src/codegen/compilation-cache.cc b/chromium/v8/src/codegen/compilation-cache.cc index 6e9613005e7..ef3d83a06eb 100644 --- a/chromium/v8/src/codegen/compilation-cache.cc +++ b/chromium/v8/src/codegen/compilation-cache.cc @@ -28,7 +28,7 @@ CompilationCache::CompilationCache(Isolate* isolate) eval_global_(isolate), eval_contextual_(isolate), reg_exp_(isolate, kRegExpGenerations), - enabled_(true) { + enabled_script_and_eval_(true) { CompilationSubCache* subcaches[kSubCacheCount] = { &script_, &eval_global_, &eval_contextual_, ®_exp_}; for (int i = 0; i < kSubCacheCount; ++i) { @@ -254,7 +254,7 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags, } void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) { - if (!IsEnabled()) return; + if (!IsEnabledScriptAndEval()) return; eval_global_.Remove(function_info); eval_contextual_.Remove(function_info); @@ -265,7 +265,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript( Handle<String> source, MaybeHandle<Object> name, int line_offset, int column_offset, ScriptOriginOptions resource_options, Handle<Context> native_context, LanguageMode language_mode) { - if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>(); + if (!IsEnabledScriptAndEval()) return MaybeHandle<SharedFunctionInfo>(); return script_.Lookup(source, name, line_offset, column_offset, resource_options, native_context, language_mode); @@ -277,7 +277,7 @@ InfoCellPair CompilationCache::LookupEval(Handle<String> source, LanguageMode language_mode, int position) { InfoCellPair result; - if (!IsEnabled()) return result; + if (!IsEnabledScriptAndEval()) return result; const char* cache_type; @@ -303,8 +303,6 @@ InfoCellPair CompilationCache::LookupEval(Handle<String> source, MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source, JSRegExp::Flags flags) { - if (!IsEnabled()) return MaybeHandle<FixedArray>(); - return reg_exp_.Lookup(source, flags); } @@ -312,7 +310,7 @@ void CompilationCache::PutScript(Handle<String> source, Handle<Context> native_context, LanguageMode language_mode, Handle<SharedFunctionInfo> function_info) { - if (!IsEnabled()) return; + if (!IsEnabledScriptAndEval()) return; LOG(isolate(), CompilationCacheEvent("put", "script", *function_info)); script_.Put(source, native_context, language_mode, function_info); @@ -324,7 +322,7 @@ void CompilationCache::PutEval(Handle<String> source, 
Handle<SharedFunctionInfo> function_info, Handle<FeedbackCell> feedback_cell, int position) { - if (!IsEnabled()) return; + if (!IsEnabledScriptAndEval()) return; const char* cache_type; HandleScope scope(isolate()); @@ -344,8 +342,6 @@ void CompilationCache::PutEval(Handle<String> source, void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags, Handle<FixedArray> data) { - if (!IsEnabled()) return; - reg_exp_.Put(source, flags, data); } @@ -367,10 +363,12 @@ void CompilationCache::MarkCompactPrologue() { } } -void CompilationCache::Enable() { enabled_ = true; } +void CompilationCache::EnableScriptAndEval() { + enabled_script_and_eval_ = true; +} -void CompilationCache::Disable() { - enabled_ = false; +void CompilationCache::DisableScriptAndEval() { + enabled_script_and_eval_ = false; Clear(); } diff --git a/chromium/v8/src/codegen/compilation-cache.h b/chromium/v8/src/codegen/compilation-cache.h index 35595b19858..04bea44a82b 100644 --- a/chromium/v8/src/codegen/compilation-cache.h +++ b/chromium/v8/src/codegen/compilation-cache.h @@ -202,9 +202,14 @@ class V8_EXPORT_PRIVATE CompilationCache { void MarkCompactPrologue(); // Enable/disable compilation cache. Used by debugger to disable compilation - // cache during debugging to make sure new scripts are always compiled. - void Enable(); - void Disable(); + // cache during debugging so that eval and new scripts are always compiled. + // TODO(bmeurer, chromium:992277): The RegExp cache cannot be enabled and/or + // disabled, since it doesn't affect debugging. However ideally the other + // caches should also be always on, even in the presence of the debugger, + // but at this point there are too many unclear invariants, and so I decided + // to just fix the pressing performance problem for RegExp individually first. + void EnableScriptAndEval(); + void DisableScriptAndEval(); private: explicit CompilationCache(Isolate* isolate); @@ -215,7 +220,9 @@ class V8_EXPORT_PRIVATE CompilationCache { // The number of sub caches covering the different types to cache. static const int kSubCacheCount = 4; - bool IsEnabled() const { return FLAG_compilation_cache && enabled_; } + bool IsEnabledScriptAndEval() const { + return FLAG_compilation_cache && enabled_script_and_eval_; + } Isolate* isolate() const { return isolate_; } @@ -227,8 +234,8 @@ class V8_EXPORT_PRIVATE CompilationCache { CompilationCacheRegExp reg_exp_; CompilationSubCache* subcaches_[kSubCacheCount]; - // Current enable state of the compilation cache. - bool enabled_; + // Current enable state of the compilation cache for scripts and eval. 
+ bool enabled_script_and_eval_; friend class Isolate; diff --git a/chromium/v8/src/codegen/compiler.cc b/chromium/v8/src/codegen/compiler.cc index fbd181f5c8e..d73be13a30a 100644 --- a/chromium/v8/src/codegen/compiler.cc +++ b/chromium/v8/src/codegen/compiler.cc @@ -666,21 +666,25 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache( function->GetIsolate(), RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap); Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate()); + Isolate* isolate = function->GetIsolate(); DisallowHeapAllocation no_gc; - if (osr_offset.IsNone()) { - if (function->has_feedback_vector()) { - FeedbackVector feedback_vector = function->feedback_vector(); - feedback_vector.EvictOptimizedCodeMarkedForDeoptimization( - function->shared(), "GetCodeFromOptimizedCodeCache"); - Code code = feedback_vector.optimized_code(); - - if (!code.is_null()) { - // Caching of optimized code enabled and optimized code found. - DCHECK(!code.marked_for_deoptimization()); - DCHECK(function->shared().is_compiled()); - return Handle<Code>(code, feedback_vector.GetIsolate()); - } - } + Code code; + if (osr_offset.IsNone() && function->has_feedback_vector()) { + FeedbackVector feedback_vector = function->feedback_vector(); + feedback_vector.EvictOptimizedCodeMarkedForDeoptimization( + function->shared(), "GetCodeFromOptimizedCodeCache"); + code = feedback_vector.optimized_code(); + } else if (!osr_offset.IsNone()) { + code = function->context() + .native_context() + .GetOSROptimizedCodeCache() + .GetOptimizedCode(shared, osr_offset, isolate); + } + if (!code.is_null()) { + // Caching of optimized code enabled and optimized code found. + DCHECK(!code.marked_for_deoptimization()); + DCHECK(function->shared().is_compiled()); + return Handle<Code>(code, isolate); } return MaybeHandle<Code>(); } @@ -711,12 +715,15 @@ void InsertCodeIntoOptimizedCodeCache( // Cache optimized context-specific code. Handle<JSFunction> function = compilation_info->closure(); Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate()); - Handle<Context> native_context(function->context().native_context(), - function->GetIsolate()); + Handle<NativeContext> native_context(function->context().native_context(), + function->GetIsolate()); if (compilation_info->osr_offset().IsNone()) { Handle<FeedbackVector> vector = handle(function->feedback_vector(), function->GetIsolate()); FeedbackVector::SetOptimizedCode(vector, code); + } else { + OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code, + compilation_info->osr_offset()); } } @@ -1904,6 +1911,12 @@ struct ScriptCompileTimerScope { case CacheBehaviour::kConsumeCodeCache: return isolate_->counters()->compile_script_with_consume_cache(); + // Note that this only counts the finalization part of streaming, the + // actual streaming compile is counted by BackgroundCompileTask into + // "compile_script_on_background". + case CacheBehaviour::kNoCacheBecauseStreamingSource: + return isolate_->counters()->compile_script_streaming_finalization(); + case CacheBehaviour::kNoCacheBecauseInlineScript: return isolate_->counters() ->compile_script_no_cache_because_inline_script(); @@ -1923,9 +1936,6 @@ struct ScriptCompileTimerScope { // TODO(leszeks): Consider counting separately once modules are more // common. case CacheBehaviour::kNoCacheBecauseModule: - // TODO(leszeks): Count separately or remove entirely once we have - // background compilation. 
- case CacheBehaviour::kNoCacheBecauseStreamingSource: case CacheBehaviour::kNoCacheBecauseV8Extension: case CacheBehaviour::kNoCacheBecauseExtensionModule: case CacheBehaviour::kNoCacheBecausePacScript: diff --git a/chromium/v8/src/codegen/constant-pool.cc b/chromium/v8/src/codegen/constant-pool.cc index 6816c5b7ad5..42b2fa6e9a0 100644 --- a/chromium/v8/src/codegen/constant-pool.cc +++ b/chromium/v8/src/codegen/constant-pool.cc @@ -49,22 +49,22 @@ ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess( } ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry( - ConstantPoolEntry& entry, ConstantPoolEntry::Type type) { + ConstantPoolEntry* entry, ConstantPoolEntry::Type type) { DCHECK(!emitted_label_.is_bound()); PerTypeEntryInfo& info = info_[type]; const int entry_size = ConstantPoolEntry::size(type); bool merged = false; - if (entry.sharing_ok()) { + if (entry->sharing_ok()) { // Try to merge entries std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin(); int end = static_cast<int>(info.shared_entries.size()); for (int i = 0; i < end; i++, it++) { if ((entry_size == kSystemPointerSize) - ? entry.value() == it->value() - : entry.value64() == it->value64()) { + ? entry->value() == it->value() + : entry->value64() == it->value64()) { // Merge with found entry. - entry.set_merged_index(i); + entry->set_merged_index(i); merged = true; break; } @@ -72,16 +72,16 @@ ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry( } // By definition, merged entries have regular access. - DCHECK(!merged || entry.merged_index() < info.regular_count); + DCHECK(!merged || entry->merged_index() < info.regular_count); ConstantPoolEntry::Access access = (merged ? ConstantPoolEntry::REGULAR : NextAccess(type)); // Enforce an upper bound on search time by limiting the search to // unique sharable entries which fit in the regular section. 
- if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) { - info.shared_entries.push_back(entry); + if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) { + info.shared_entries.push_back(*entry); } else { - info.entries.push_back(entry); + info.entries.push_back(*entry); } // We're done if we found a match or have already triggered the diff --git a/chromium/v8/src/codegen/constant-pool.h b/chromium/v8/src/codegen/constant-pool.h index d07452336b4..d2ab5641aea 100644 --- a/chromium/v8/src/codegen/constant-pool.h +++ b/chromium/v8/src/codegen/constant-pool.h @@ -102,13 +102,13 @@ class ConstantPoolBuilder { ConstantPoolEntry::Access AddEntry(int position, intptr_t value, bool sharing_ok) { ConstantPoolEntry entry(position, value, sharing_ok); - return AddEntry(entry, ConstantPoolEntry::INTPTR); + return AddEntry(&entry, ConstantPoolEntry::INTPTR); } // Add double constant to the embedded constant pool ConstantPoolEntry::Access AddEntry(int position, Double value) { ConstantPoolEntry entry(position, value); - return AddEntry(entry, ConstantPoolEntry::DOUBLE); + return AddEntry(&entry, ConstantPoolEntry::DOUBLE); } // Add double constant to the embedded constant pool @@ -138,9 +138,8 @@ class ConstantPoolBuilder { inline Label* EmittedPosition() { return &emitted_label_; } private: - ConstantPoolEntry::Access AddEntry( - ConstantPoolEntry& entry, // NOLINT(runtime/references) - ConstantPoolEntry::Type type); + ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry, + ConstantPoolEntry::Type type); void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type); void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access, ConstantPoolEntry::Type type); diff --git a/chromium/v8/src/codegen/cpu-features.h b/chromium/v8/src/codegen/cpu-features.h index dae9992c57f..6b3d3934d0c 100644 --- a/chromium/v8/src/codegen/cpu-features.h +++ b/chromium/v8/src/codegen/cpu-features.h @@ -13,7 +13,7 @@ namespace internal { // CPU feature flags. enum CpuFeature { - // x86 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 SSE4_2, SSE4_1, SSSE3, @@ -26,39 +26,46 @@ enum CpuFeature { LZCNT, POPCNT, ATOM, - // ARM + +#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 // - Standard configurations. The baseline is ARMv6+VFPv2. ARMv7, // ARMv7-A + VFPv3-D32 + NEON ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV ARMv8, // ARMv8-A (+ all of the above) - // MIPS, MIPS64 + + // ARM feature aliases (based on the standard configurations above). + VFPv3 = ARMv7, + NEON = ARMv7, + VFP32DREGS = ARMv7, + SUDIV = ARMv7_SUDIV, + +#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 FPU, FP64FPU, MIPSr1, MIPSr2, MIPSr6, MIPS_SIMD, // MSA instructions - // PPC + +#elif V8_TARGET_ARCH_PPC + FPU, FPR_GPR_MOV, LWSYNC, ISELECT, VSX, MODULO, - // S390 + +#elif V8_TARGET_ARCH_S390X + FPU, DISTINCT_OPS, GENERAL_INSTR_EXT, FLOATING_POINT_EXT, VECTOR_FACILITY, VECTOR_ENHANCE_FACILITY_1, MISC_INSTR_EXT2, +#endif - NUMBER_OF_CPU_FEATURES, - - // ARM feature aliases (based on the standard configurations above). - VFPv3 = ARMv7, - NEON = ARMv7, - VFP32DREGS = ARMv7, - SUDIV = ARMv7_SUDIV + NUMBER_OF_CPU_FEATURES }; // CpuFeatures keeps track of which features are supported by the target CPU. 
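[editor's note, not part of the diff] The cpu-features.h hunk above narrows the CpuFeature enum to the target architecture and folds the ARM aliases (VFPv3, NEON, VFP32DREGS, SUDIV) into the ARM branch. A minimal sketch, assuming the CpuFeatures::IsSupported / CpuFeatureScope pattern that the ia32 macro-assembler hunks later in this same change use, of how an emitter typically consumes one of these feature flags; the helper name EmitShiftRightQword is hypothetical and the include path is taken from the files touched in this diff:

// Sketch only (inside namespace v8::internal): guard an AVX encoding behind a
// runtime CPU-feature check, falling back to the baseline SSE2 encoding.
#include "src/codegen/ia32/assembler-ia32.h"  // assumed include, per this diff

void EmitShiftRightQword(Assembler* assm, XMMRegister dst, uint8_t shift) {
  if (CpuFeatures::IsSupported(AVX)) {
    // The scope makes the AVX encoding legal for the enclosed instructions.
    CpuFeatureScope avx_scope(assm, AVX);
    assm->vpsrlq(dst, dst, shift);  // AVX form, added by this change
  } else {
    assm->psrlq(dst, shift);        // SSE2 baseline form
  }
}

diff --git a/chromium/v8/src/codegen/external-reference.cc b/chromium/v8/src/codegen/external-reference.cc (continued below)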
diff --git a/chromium/v8/src/codegen/external-reference.cc b/chromium/v8/src/codegen/external-reference.cc index 44503e532d1..e1f873cb38d 100644 --- a/chromium/v8/src/codegen/external-reference.cc +++ b/chromium/v8/src/codegen/external-reference.cc @@ -217,10 +217,8 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> { FUNCTION_REFERENCE(incremental_marking_record_write_function, IncrementalMarking::RecordWriteFromCode) -ExternalReference ExternalReference::store_buffer_overflow_function() { - return ExternalReference( - Redirect(Heap::store_buffer_overflow_function_address())); -} +FUNCTION_REFERENCE(insert_remembered_set_function, + Heap::InsertIntoRememberedSetFromCode) FUNCTION_REFERENCE(delete_handle_scope_extensions, HandleScope::DeleteExtensions) @@ -342,10 +340,6 @@ ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) { return ExternalReference(address); } -ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) { - return ExternalReference(isolate->heap()->store_buffer_top_address()); -} - ExternalReference ExternalReference::heap_is_marking_flag_address( Isolate* isolate) { return ExternalReference(isolate->heap()->IsMarkingFlagAddress()); @@ -529,19 +523,19 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address( FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_asin_function, base::ieee754::asin, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_asinh_function, base::ieee754::asinh, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan_function, base::ieee754::atan, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_atanh_function, base::ieee754::atanh, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan2_function, base::ieee754::atan2, BUILTIN_FP_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function, base::ieee754::cbrt, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh, @@ -549,7 +543,7 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh, FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function, base::ieee754::exp, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_expm1_function, base::ieee754::expm1, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_log_function, base::ieee754::log, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_log1p_function, base::ieee754::log1p, diff --git a/chromium/v8/src/codegen/external-reference.h b/chromium/v8/src/codegen/external-reference.h index 45c26bdfb09..7cc0241fc4a 100644 --- a/chromium/v8/src/codegen/external-reference.h +++ b/chromium/v8/src/codegen/external-reference.h @@ -38,7 +38,6 @@ class StatsCounter; V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \ V(address_of_jslimit, "StackGuard::address_of_jslimit()") \ V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()") \ - V(store_buffer_top, "store_buffer_top") \ V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \ V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()") \ V(new_space_allocation_limit_address, \ @@ -143,6 +142,7 @@ class StatsCounter; 
V(ieee754_tanh_function, "base::ieee754::tanh") \ V(incremental_marking_record_write_function, \ "IncrementalMarking::RecordWrite") \ + V(insert_remembered_set_function, "Heap::InsertIntoRememberedSetFromCode") \ V(invalidate_prototype_chains_function, \ "JSObject::InvalidatePrototypeChains()") \ V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \ @@ -170,7 +170,6 @@ class StatsCounter; V(search_string_raw_two_one, "search_string_raw_two_one") \ V(search_string_raw_two_two, "search_string_raw_two_two") \ V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \ - V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \ V(try_internalize_string_function, "try_internalize_string_function") \ V(wasm_call_trap_callback_for_testing, \ "wasm::call_trap_callback_for_testing") \ diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h b/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h index e274b41fa33..174a4838683 100644 --- a/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h +++ b/chromium/v8/src/codegen/ia32/assembler-ia32-inl.h @@ -39,6 +39,7 @@ #include "src/codegen/ia32/assembler-ia32.h" +#include "src/base/memory.h" #include "src/codegen/assembler.h" #include "src/debug/debug.h" #include "src/objects/objects-inl.h" @@ -58,12 +59,12 @@ void RelocInfo::apply(intptr_t delta) { RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY))); if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) || IsOffHeapTarget(rmode_)) { - int32_t* p = reinterpret_cast<int32_t*>(pc_); - *p -= delta; // Relocate entry. + base::WriteUnalignedValue(pc_, + base::ReadUnalignedValue<int32_t>(pc_) - delta); } else if (IsInternalReference(rmode_)) { - // absolute code pointer inside code object moves with the code object. - int32_t* p = reinterpret_cast<int32_t*>(pc_); - *p += delta; // Relocate entry. + // Absolute code pointer inside code object moves with the code object. 
+ base::WriteUnalignedValue(pc_, + base::ReadUnalignedValue<int32_t>(pc_) + delta); } } @@ -103,7 +104,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, if (icache_flush_mode != SKIP_ICACHE_FLUSH) { FlushInstructionCache(pc_, sizeof(Address)); } - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.cc b/chromium/v8/src/codegen/ia32/assembler-ia32.cc index aefcab7299c..405e4b7c553 100644 --- a/chromium/v8/src/codegen/ia32/assembler-ia32.cc +++ b/chromium/v8/src/codegen/ia32/assembler-ia32.cc @@ -272,8 +272,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle<HeapObject> object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: { const StringConstantBase* str = request.string(); @@ -2163,70 +2163,6 @@ void Assembler::divsd(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } -void Assembler::xorpd(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x66); - EMIT(0x0F); - EMIT(0x57); - emit_sse_operand(dst, src); -} - -void Assembler::andps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x54); - emit_sse_operand(dst, src); -} - -void Assembler::andnps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x55); - emit_sse_operand(dst, src); -} - -void Assembler::orps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x56); - emit_sse_operand(dst, src); -} - -void Assembler::xorps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x57); - emit_sse_operand(dst, src); -} - -void Assembler::addps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x58); - emit_sse_operand(dst, src); -} - -void Assembler::subps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x5C); - emit_sse_operand(dst, src); -} - -void Assembler::mulps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x59); - emit_sse_operand(dst, src); -} - -void Assembler::divps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x5E); - emit_sse_operand(dst, src); -} - void Assembler::rcpps(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x0F); @@ -2234,29 +2170,31 @@ void Assembler::rcpps(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } -void Assembler::rsqrtps(XMMRegister dst, Operand src) { +void Assembler::sqrtps(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x0F); - EMIT(0x52); + EMIT(0x51); emit_sse_operand(dst, src); } -void Assembler::minps(XMMRegister dst, Operand src) { +void Assembler::rsqrtps(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x0F); - EMIT(0x5D); + EMIT(0x52); emit_sse_operand(dst, src); } -void Assembler::maxps(XMMRegister dst, Operand src) { +void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) { EnsureSpace ensure_space(this); EMIT(0x0F); - EMIT(0x5F); + EMIT(0xC2); 
emit_sse_operand(dst, src); + EMIT(cmp); } -void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) { +void Assembler::cmppd(XMMRegister dst, Operand src, uint8_t cmp) { EnsureSpace ensure_space(this); + EMIT(0x66); EMIT(0x0F); EMIT(0xC2); emit_sse_operand(dst, src); @@ -2280,22 +2218,6 @@ void Assembler::haddps(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } -void Assembler::andpd(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x66); - EMIT(0x0F); - EMIT(0x54); - emit_sse_operand(dst, src); -} - -void Assembler::orpd(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x66); - EMIT(0x0F); - EMIT(0x56); - emit_sse_operand(dst, src); -} - void Assembler::ucomisd(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x66); @@ -2398,6 +2320,16 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) { EMIT(imm8); } +void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) { + DCHECK(is_uint8(imm8)); + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x0F); + EMIT(0xC6); + emit_sse_operand(dst, src); + EMIT(imm8); +} + void Assembler::movdqa(Operand dst, XMMRegister src) { EnsureSpace ensure_space(this); EMIT(0x66); @@ -2776,6 +2708,23 @@ void Assembler::minss(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } +// Packed single-precision floating-point SSE instructions. +void Assembler::ps(byte opcode, XMMRegister dst, Operand src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(opcode); + emit_sse_operand(dst, src); +} + +// Packed double-precision floating-point SSE instructions. +void Assembler::pd(byte opcode, XMMRegister dst, Operand src) { + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x0F); + EMIT(opcode); + emit_sse_operand(dst, src); +} + // AVX instructions void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) { @@ -2811,12 +2760,25 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) { vinstr(op, dst, src1, src2, k66, k0F, kWIG); } +void Assembler::vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, + byte imm8) { + DCHECK(is_uint8(imm8)); + vpd(0xC6, dst, src1, src2); + EMIT(imm8); +} + void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp) { vps(0xC2, dst, src1, src2); EMIT(cmp); } +void Assembler::vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, + uint8_t cmp) { + vpd(0xC2, dst, src1, src2); + EMIT(cmp); +} + void Assembler::vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8) { DCHECK(is_uint8(imm8)); @@ -2848,6 +2810,12 @@ void Assembler::vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) { EMIT(imm8); } +void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8) { + XMMRegister iop = XMMRegister::from_code(2); + vinstr(0x73, iop, dst, Operand(src), k66, k0F, kWIG); + EMIT(imm8); +} + void Assembler::vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) { XMMRegister iop = XMMRegister::from_code(4); vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG); @@ -3158,11 +3126,10 @@ void Assembler::emit_operand(int code, Operand adr) { DCHECK_GT(length, 0); // Emit updated ModRM byte containing the given register. - pc_[0] = (adr.buf_[0] & ~0x38) | (code << 3); + EMIT((adr.buf_[0] & ~0x38) | (code << 3)); // Emit the rest of the encoded operand. 
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i]; - pc_ += length; + for (unsigned i = 1; i < length; i++) EMIT(adr.buf_[i]); // Emit relocation information if necessary. if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) { diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.h b/chromium/v8/src/codegen/ia32/assembler-ia32.h index 52256212763..8161ff83223 100644 --- a/chromium/v8/src/codegen/ia32/assembler-ia32.h +++ b/chromium/v8/src/codegen/ia32/assembler-ia32.h @@ -38,6 +38,7 @@ #define V8_CODEGEN_IA32_ASSEMBLER_IA32_H_ #include <deque> +#include <memory> #include "src/codegen/assembler.h" #include "src/codegen/ia32/constants-ia32.h" @@ -292,7 +293,7 @@ class V8_EXPORT_PRIVATE Operand { // Only valid if len_ > 4. RelocInfo::Mode rmode_ = RelocInfo::NONE; - // TODO(clemensh): Get rid of this friendship, or make Operand immutable. + // TODO(clemensb): Get rid of this friendship, or make Operand immutable. friend class Assembler; }; ASSERT_TRIVIALLY_COPYABLE(Operand); @@ -371,7 +372,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // own buffer. Otherwise it takes ownership of the provided buffer. explicit Assembler(const AssemblerOptions&, std::unique_ptr<AssemblerBuffer> = {}); - virtual ~Assembler() {} // GetCode emits any pending (non-emitted) code and fills the descriptor desc. static constexpr int kNoHandlerTable = 0; @@ -512,6 +512,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movzx_w(Register dst, Operand src); void movq(XMMRegister dst, Operand src); + // Conditional moves void cmov(Condition cc, Register dst, Register src) { cmov(cc, dst, Operand(src)); @@ -849,56 +850,54 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movups(XMMRegister dst, Operand src); void movups(Operand dst, XMMRegister src); void shufps(XMMRegister dst, XMMRegister src, byte imm8); + void shufpd(XMMRegister dst, XMMRegister src, byte imm8); void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); } void maxss(XMMRegister dst, Operand src); void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); } void minss(XMMRegister dst, Operand src); - void andps(XMMRegister dst, Operand src); - void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); } - void andnps(XMMRegister dst, Operand src); - void andnps(XMMRegister dst, XMMRegister src) { andnps(dst, Operand(src)); } - void xorps(XMMRegister dst, Operand src); - void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); } - void orps(XMMRegister dst, Operand src); - void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); } - - void addps(XMMRegister dst, Operand src); - void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); } - void subps(XMMRegister dst, Operand src); - void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); } - void mulps(XMMRegister dst, Operand src); - void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); } - void divps(XMMRegister dst, Operand src); - void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); } void rcpps(XMMRegister dst, Operand src); void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); } + void sqrtps(XMMRegister dst, Operand src); + void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); } void rsqrtps(XMMRegister dst, Operand src); void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); } void haddps(XMMRegister dst, Operand src); 
void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); } - - void minps(XMMRegister dst, Operand src); - void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); } - void maxps(XMMRegister dst, Operand src); - void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); } + void sqrtpd(XMMRegister dst, Operand src) { + sse2_instr(dst, src, 0x66, 0x0F, 0x51); + } + void sqrtpd(XMMRegister dst, XMMRegister src) { sqrtpd(dst, Operand(src)); } void cmpps(XMMRegister dst, Operand src, uint8_t cmp); void cmpps(XMMRegister dst, XMMRegister src, uint8_t cmp) { cmpps(dst, Operand(src), cmp); } -#define SSE_CMP_P(instr, imm8) \ - void instr##ps(XMMRegister dst, XMMRegister src) { \ - cmpps(dst, Operand(src), imm8); \ - } \ - void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } + void cmppd(XMMRegister dst, Operand src, uint8_t cmp); + void cmppd(XMMRegister dst, XMMRegister src, uint8_t cmp) { + cmppd(dst, Operand(src), cmp); + } + +// Packed floating-point comparison operations. +#define PACKED_CMP_LIST(V) \ + V(cmpeq, 0x0) \ + V(cmplt, 0x1) \ + V(cmple, 0x2) \ + V(cmpunord, 0x3) \ + V(cmpneq, 0x4) - SSE_CMP_P(cmpeq, 0x0) - SSE_CMP_P(cmplt, 0x1) - SSE_CMP_P(cmple, 0x2) - SSE_CMP_P(cmpneq, 0x4) +#define SSE_CMP_P(instr, imm8) \ + void instr##ps(XMMRegister dst, XMMRegister src) { \ + cmpps(dst, Operand(src), imm8); \ + } \ + void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \ + void instr##pd(XMMRegister dst, XMMRegister src) { \ + cmppd(dst, Operand(src), imm8); \ + } \ + void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); } + PACKED_CMP_LIST(SSE_CMP_P) #undef SSE_CMP_P // SSE2 instructions @@ -941,22 +940,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void mulsd(XMMRegister dst, Operand src); void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); } void divsd(XMMRegister dst, Operand src); - void xorpd(XMMRegister dst, XMMRegister src) { xorpd(dst, Operand(src)); } - void xorpd(XMMRegister dst, Operand src); void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); } void sqrtsd(XMMRegister dst, Operand src); - void andpd(XMMRegister dst, XMMRegister src) { andpd(dst, Operand(src)); } - void andpd(XMMRegister dst, Operand src); - void orpd(XMMRegister dst, XMMRegister src) { orpd(dst, Operand(src)); } - void orpd(XMMRegister dst, Operand src); - void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); } void ucomisd(XMMRegister dst, Operand src); void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode); void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode); + void movapd(XMMRegister dst, XMMRegister src) { movapd(dst, Operand(src)); } + void movapd(XMMRegister dst, Operand src) { + sse2_instr(dst, src, 0x66, 0x0F, 0x28); + } + void movmskpd(Register dst, XMMRegister src); void movmskps(Register dst, XMMRegister src); @@ -1298,6 +1295,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vrcpps(XMMRegister dst, Operand src) { vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG); } + void vsqrtps(XMMRegister dst, XMMRegister src) { vsqrtps(dst, Operand(src)); } + void vsqrtps(XMMRegister dst, Operand src) { + vinstr(0x51, dst, xmm0, src, kNone, k0F, kWIG); + } void vrsqrtps(XMMRegister dst, XMMRegister src) { vrsqrtps(dst, Operand(src)); } @@ -1310,14 +1311,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vhaddps(XMMRegister dst, XMMRegister src1, Operand src2) { 
vinstr(0x7C, dst, src1, src2, kF2, k0F, kWIG); } + void vsqrtpd(XMMRegister dst, XMMRegister src) { vsqrtpd(dst, Operand(src)); } + void vsqrtpd(XMMRegister dst, Operand src) { + vinstr(0x51, dst, xmm0, src, k66, k0F, kWIG); + } void vmovaps(XMMRegister dst, XMMRegister src) { vmovaps(dst, Operand(src)); } void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); } + void vmovapd(XMMRegister dst, XMMRegister src) { vmovapd(dst, Operand(src)); } + void vmovapd(XMMRegister dst, Operand src) { vpd(0x28, dst, xmm0, src); } void vmovups(XMMRegister dst, XMMRegister src) { vmovups(dst, Operand(src)); } void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); } void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) { vshufps(dst, src1, Operand(src2), imm8); } void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8); + void vshufpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) { + vshufpd(dst, src1, Operand(src2), imm8); + } + void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8); void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8); @@ -1325,6 +1336,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8); + void vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) { vpshufhw(dst, Operand(src), shuffle); @@ -1489,6 +1501,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } void rorx(Register dst, Operand src, byte imm8); + // Implementation of packed single-precision floating-point SSE instructions. + void ps(byte op, XMMRegister dst, Operand src); + // Implementation of packed double-precision floating-point SSE instructions. 
+ void pd(byte op, XMMRegister dst, Operand src); + #define PACKED_OP_LIST(V) \ V(and, 0x54) \ V(andn, 0x55) \ @@ -1501,6 +1518,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { V(div, 0x5e) \ V(max, 0x5f) +#define SSE_PACKED_OP_DECLARE(name, opcode) \ + void name##ps(XMMRegister dst, XMMRegister src) { \ + ps(opcode, dst, Operand(src)); \ + } \ + void name##ps(XMMRegister dst, Operand src) { ps(opcode, dst, src); } \ + void name##pd(XMMRegister dst, XMMRegister src) { \ + pd(opcode, dst, Operand(src)); \ + } \ + void name##pd(XMMRegister dst, Operand src) { pd(opcode, dst, src); } + + PACKED_OP_LIST(SSE_PACKED_OP_DECLARE) +#undef SSE_PACKED_OP_DECLARE + #define AVX_PACKED_OP_DECLARE(name, opcode) \ void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ vps(opcode, dst, src1, Operand(src2)); \ @@ -1516,24 +1546,32 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } PACKED_OP_LIST(AVX_PACKED_OP_DECLARE) +#undef AVX_PACKED_OP_DECLARE +#undef PACKED_OP_LIST + void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2); void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2); void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp); -#define AVX_CMP_P(instr, imm8) \ - void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ - vcmpps(dst, src1, Operand(src2), imm8); \ - } \ - void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \ - vcmpps(dst, src1, src2, imm8); \ - } - - AVX_CMP_P(vcmpeq, 0x0) - AVX_CMP_P(vcmplt, 0x1) - AVX_CMP_P(vcmple, 0x2) - AVX_CMP_P(vcmpneq, 0x4) - + void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp); + +#define AVX_CMP_P(instr, imm8) \ + void v##instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ + vcmpps(dst, src1, Operand(src2), imm8); \ + } \ + void v##instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \ + vcmpps(dst, src1, src2, imm8); \ + } \ + void v##instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ + vcmppd(dst, src1, Operand(src2), imm8); \ + } \ + void v##instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \ + vcmppd(dst, src1, src2, imm8); \ + } + + PACKED_CMP_LIST(AVX_CMP_P) #undef AVX_CMP_P +#undef PACKED_CMP_LIST // Other SSE and AVX instructions #define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \ diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc index 070f3159776..dd11bc496ed 100644 --- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -1168,57 +1168,44 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - - ExternalReference debug_hook_active = - ExternalReference::debug_hook_on_function_call_address(isolate()); - push(eax); - cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0)); - pop(eax); - j(equal, &skip_hook); - - { - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - if (expected.is_reg()) { - SmiTag(expected.reg()); - Push(expected.reg()); - } - if (actual.is_reg()) { - SmiTag(actual.reg()); - Push(actual.reg()); - SmiUntag(actual.reg()); - } - if (new_target.is_valid()) { - Push(new_target); - } - Push(fun); - Push(fun); - Operand receiver_op = - actual.is_reg() - ? 
Operand(ebp, actual.reg(), times_system_pointer_size, - kSystemPointerSize * 2) - : Operand(ebp, actual.immediate() * times_system_pointer_size + - kSystemPointerSize * 2); - Push(receiver_op); - CallRuntime(Runtime::kDebugOnFunctionCall); - Pop(fun); - if (new_target.is_valid()) { - Pop(new_target); - } - if (actual.is_reg()) { - Pop(actual.reg()); - SmiUntag(actual.reg()); - } - if (expected.is_reg()) { - Pop(expected.reg()); - SmiUntag(expected.reg()); - } +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); + if (expected.is_reg()) { + SmiTag(expected.reg()); + Push(expected.reg()); + } + if (actual.is_reg()) { + SmiTag(actual.reg()); + Push(actual.reg()); + SmiUntag(actual.reg()); + } + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Operand receiver_op = + actual.is_reg() + ? Operand(ebp, actual.reg(), times_system_pointer_size, + kSystemPointerSize * 2) + : Operand(ebp, actual.immediate() * times_system_pointer_size + + kSystemPointerSize * 2); + Push(receiver_op); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + if (actual.is_reg()) { + Pop(actual.reg()); + SmiUntag(actual.reg()); + } + if (expected.is_reg()) { + Pop(expected.reg()); + SmiUntag(expected.reg()); } - bind(&skip_hook); } void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, @@ -1233,7 +1220,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax); // On function call, call into the debugger if necessary. - CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + ExternalReference debug_hook_active = + ExternalReference::debug_hook_on_function_call_address(isolate()); + push(eax); + cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0)); + pop(eax); + j(not_equal, &debug_hook, Label::kNear); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -1256,8 +1252,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK(flag == JUMP_FUNCTION); JumpCodeObject(ecx); } - bind(&done); } + jmp(&done, Label::kNear); + + // Deferred debug hook. 
+ bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + jmp(&continue_after_hook, Label::kNear); + + bind(&done); } void MacroAssembler::InvokeFunction(Register fun, Register new_target, @@ -1479,6 +1482,15 @@ void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) { } } +void TurboAssembler::Psrlq(XMMRegister dst, uint8_t shift) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsrlq(dst, dst, shift); + } else { + psrlq(dst, shift); + } +} + void TurboAssembler::Psignb(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h index c65871cfad3..9e7774c55d5 100644 --- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h +++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h @@ -237,6 +237,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle); void Psraw(XMMRegister dst, uint8_t shift); void Psrlw(XMMRegister dst, uint8_t shift); + void Psrlq(XMMRegister dst, uint8_t shift); // SSE/SSE2 instructions with AVX version. #define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \ @@ -258,6 +259,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister) AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister) AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand) + AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&) + AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister) + AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&) #undef AVX_OP2_WITH_TYPE @@ -278,6 +282,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP3_XO(Packsswb, packsswb) AVX_OP3_XO(Packuswb, packuswb) + AVX_OP3_XO(Paddusb, paddusb) AVX_OP3_XO(Pcmpeqb, pcmpeqb) AVX_OP3_XO(Pcmpeqw, pcmpeqw) AVX_OP3_XO(Pcmpeqd, pcmpeqd) @@ -294,10 +299,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP3_XO(Xorpd, xorpd) AVX_OP3_XO(Sqrtss, sqrtss) AVX_OP3_XO(Sqrtsd, sqrtsd) + AVX_OP3_XO(Orpd, orpd) + AVX_OP3_XO(Andnpd, andnpd) #undef AVX_OP3_XO #undef AVX_OP3_WITH_TYPE +// Only use this macro when dst and src1 is the same in SSE case. +#define AVX_PACKED_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \ + void macro_name(dst_type dst, dst_type src1, src_type src2) { \ + if (CpuFeatures::IsSupported(AVX)) { \ + CpuFeatureScope scope(this, AVX); \ + v##name(dst, src1, src2); \ + } else { \ + DCHECK_EQ(dst, src1); \ + name(dst, src2); \ + } \ + } +#define AVX_PACKED_OP3(macro_name, name) \ + AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \ + AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand) + + AVX_PACKED_OP3(Addpd, addpd) + AVX_PACKED_OP3(Subpd, subpd) + AVX_PACKED_OP3(Mulpd, mulpd) + AVX_PACKED_OP3(Divpd, divpd) + AVX_PACKED_OP3(Cmpeqpd, cmpeqpd) + AVX_PACKED_OP3(Cmpneqpd, cmpneqpd) + AVX_PACKED_OP3(Cmpltpd, cmpltpd) + AVX_PACKED_OP3(Cmplepd, cmplepd) + AVX_PACKED_OP3(Minpd, minpd) + AVX_PACKED_OP3(Maxpd, maxpd) + AVX_PACKED_OP3(Cmpunordpd, cmpunordpd) +#undef AVX_PACKED_OP3 +#undef AVX_PACKED_OP3_WITH_TYPE + // Non-SSE2 instructions. 
#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \ sse_scope) \ @@ -529,11 +565,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); - // On function call, call into the debugger if necessary. + // On function call, call into the debugger. // This may clobber ecx. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. diff --git a/chromium/v8/src/codegen/interface-descriptors.cc b/chromium/v8/src/codegen/interface-descriptors.cc index f537ebc8994..1525f814cd9 100644 --- a/chromium/v8/src/codegen/interface-descriptors.cc +++ b/chromium/v8/src/codegen/interface-descriptors.cc @@ -278,6 +278,11 @@ void AsyncFunctionStackParameterDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(0, nullptr); } +void GetIteratorStackParameterDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + data->InitializePlatformSpecific(0, nullptr); +} + void LoadWithVectorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(), diff --git a/chromium/v8/src/codegen/interface-descriptors.h b/chromium/v8/src/codegen/interface-descriptors.h index 544d62fd9f0..e305d666a3e 100644 --- a/chromium/v8/src/codegen/interface-descriptors.h +++ b/chromium/v8/src/codegen/interface-descriptors.h @@ -9,12 +9,17 @@ #include "src/codegen/machine-type.h" #include "src/codegen/register-arch.h" +#include "src/codegen/tnode.h" #include "src/common/globals.h" #include "src/execution/isolate.h" namespace v8 { namespace internal { +#define TORQUE_BUILTIN_LIST_TFC(V) \ + BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \ + IGNORE_BUILTIN, IGNORE_BUILTIN) + #define INTERFACE_DESCRIPTOR_LIST(V) \ V(Abort) \ V(Allocate) \ @@ -52,6 +57,7 @@ namespace internal { V(FastNewFunctionContext) \ V(FastNewObject) \ V(FrameDropperTrampoline) \ + V(GetIteratorStackParameter) \ V(GetProperty) \ V(GrowArrayElements) \ V(InterpreterCEntry1) \ @@ -89,7 +95,8 @@ namespace internal { V(WasmTableGet) \ V(WasmTableSet) \ V(WasmThrow) \ - BUILTIN_LIST_TFS(V) + BUILTIN_LIST_TFS(V) \ + TORQUE_BUILTIN_LIST_TFC(V) class V8_EXPORT_PRIVATE CallInterfaceDescriptorData { public: @@ -486,6 +493,46 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor { DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor) }; +// This class is subclassed by Torque-generated call interface descriptors. 
+template <int parameter_count> +class TorqueInterfaceDescriptor : public CallInterfaceDescriptor { + public: + static constexpr int kDescriptorFlags = CallInterfaceDescriptorData::kNoFlags; + static constexpr int kParameterCount = parameter_count; + enum ParameterIndices { kContext = kParameterCount }; + template <int i> + static ParameterIndices ParameterIndex() { + STATIC_ASSERT(0 <= i && i < kParameterCount); + return static_cast<ParameterIndices>(i); + } + static constexpr int kReturnCount = 1; + + using CallInterfaceDescriptor::CallInterfaceDescriptor; + + protected: + static const int kRegisterParams = + kParameterCount > kMaxTFSBuiltinRegisterParams + ? kMaxTFSBuiltinRegisterParams + : kParameterCount; + static const int kStackParams = kParameterCount - kRegisterParams; + virtual MachineType ReturnType() = 0; + virtual std::array<MachineType, kParameterCount> ParameterTypes() = 0; + void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override { + DefaultInitializePlatformSpecific(data, kRegisterParams); + } + void InitializePlatformIndependent( + CallInterfaceDescriptorData* data) override { + std::vector<MachineType> machine_types = {ReturnType()}; + auto parameter_types = ParameterTypes(); + machine_types.insert(machine_types.end(), parameter_types.begin(), + parameter_types.end()); + DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size()); + data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, + kParameterCount, machine_types.data(), + static_cast<int>(machine_types.size())); + } +}; + // Dummy descriptor used to mark builtins that don't yet have their proper // descriptor associated. using DummyDescriptor = VoidDescriptor; @@ -706,7 +753,7 @@ class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor { public: DEFINE_PARAMETERS(kScopeInfo, kSlots) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kScopeInfo - MachineType::Int32()) // kSlots + MachineType::Uint32()) // kSlots DECLARE_DESCRIPTOR(FastNewFunctionContextDescriptor, CallInterfaceDescriptor) static const Register ScopeInfoRegister(); @@ -771,6 +818,16 @@ class AsyncFunctionStackParameterDescriptor final CallInterfaceDescriptor) }; +class GetIteratorStackParameterDescriptor final + : public CallInterfaceDescriptor { + public: + DEFINE_PARAMETERS(kReceiver, kCallSlot, kFeedback, kResult) + DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::AnyTagged(), + MachineType::AnyTagged(), MachineType::AnyTagged()) + DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor, + CallInterfaceDescriptor) +}; + class GetPropertyDescriptor final : public CallInterfaceDescriptor { public: DEFINE_PARAMETERS(kObject, kKey) @@ -1298,6 +1355,11 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor { BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR) #undef DEFINE_TFS_BUILTIN_DESCRIPTOR +// This file contains interface descriptor class definitions for builtins +// defined in Torque. It is included here because the class definitions need to +// precede the definition of name##Descriptor::key() below. 
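A hypothetical descriptor written against the TorqueInterfaceDescriptor template above might look as follows (the class name and types are invented for illustration; the real subclasses are emitted by the Torque compiler into the generated include mentioned below):

    class ExampleTorqueDescriptor : public TorqueInterfaceDescriptor<2> {
     public:
      using TorqueInterfaceDescriptor<2>::TorqueInterfaceDescriptor;

     protected:
      // One tagged return value and two tagged parameters (plus the implicit context).
      MachineType ReturnType() override { return MachineType::AnyTagged(); }
      std::array<MachineType, 2> ParameterTypes() override {
        return {MachineType::AnyTagged(), MachineType::AnyTagged()};
      }
    };

With kParameterCount == 2, the template keeps up to kMaxTFSBuiltinRegisterParams parameters in registers and pushes the rest on the stack, and InitializePlatformIndependent prepends the return type to the parameter machine types, which is what the DCHECK on the total count verifies.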
+#include "torque-generated/interface-descriptors-tq.inc" + #undef DECLARE_DEFAULT_DESCRIPTOR #undef DECLARE_DESCRIPTOR_WITH_BASE #undef DECLARE_DESCRIPTOR diff --git a/chromium/v8/src/codegen/machine-type.h b/chromium/v8/src/codegen/machine-type.h index 15e3df65c5a..a0bef4e07d6 100644 --- a/chromium/v8/src/codegen/machine-type.h +++ b/chromium/v8/src/codegen/machine-type.h @@ -9,6 +9,7 @@ #include "src/base/bits.h" #include "src/common/globals.h" +#include "src/flags/flags.h" namespace v8 { namespace internal { @@ -114,6 +115,10 @@ class MachineType { constexpr bool IsCompressedPointer() const { return representation() == MachineRepresentation::kCompressedPointer; } + constexpr static MachineRepresentation TaggedRepresentation() { + return (kTaggedSize == 4) ? MachineRepresentation::kWord32 + : MachineRepresentation::kWord64; + } constexpr static MachineRepresentation PointerRepresentation() { return (kSystemPointerSize == 4) ? MachineRepresentation::kWord32 : MachineRepresentation::kWord64; @@ -239,71 +244,79 @@ class MachineType { // pointer flag is enabled. Otherwise, they returned the corresponding tagged // one. constexpr static MachineRepresentation RepCompressedTagged() { -#ifdef V8_COMPRESS_POINTERS - return MachineRepresentation::kCompressed; -#else - return MachineRepresentation::kTagged; -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineRepresentation::kCompressed; + } else { + return MachineRepresentation::kTagged; + } } constexpr static MachineRepresentation RepCompressedTaggedSigned() { -#ifdef V8_COMPRESS_POINTERS - return MachineRepresentation::kCompressedSigned; -#else - return MachineRepresentation::kTaggedSigned; -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineRepresentation::kCompressedSigned; + } else { + return MachineRepresentation::kTaggedSigned; + } } constexpr static MachineRepresentation RepCompressedTaggedPointer() { -#ifdef V8_COMPRESS_POINTERS - return MachineRepresentation::kCompressedPointer; -#else - return MachineRepresentation::kTaggedPointer; -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineRepresentation::kCompressedPointer; + } else { + return MachineRepresentation::kTaggedPointer; + } + } + + constexpr static MachineType TypeRawTagged() { + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::Int32(); + } else { + return MachineType::Pointer(); + } } constexpr static MachineType TypeCompressedTagged() { -#ifdef V8_COMPRESS_POINTERS - return MachineType::AnyCompressed(); -#else - return MachineType::AnyTagged(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::AnyCompressed(); + } else { + return MachineType::AnyTagged(); + } } constexpr static MachineType TypeCompressedTaggedSigned() { -#ifdef V8_COMPRESS_POINTERS - return MachineType::CompressedSigned(); -#else - return MachineType::TaggedSigned(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::CompressedSigned(); + } else { + return MachineType::TaggedSigned(); + } } constexpr static MachineType TypeCompressedTaggedPointer() { -#ifdef V8_COMPRESS_POINTERS - return MachineType::CompressedPointer(); -#else - return MachineType::TaggedPointer(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::CompressedPointer(); + } else { + return MachineType::TaggedPointer(); + } 
} constexpr bool IsCompressedTagged() const { -#ifdef V8_COMPRESS_POINTERS - return IsCompressed(); -#else - return IsTagged(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsCompressed(); + } else { + return IsTagged(); + } } constexpr bool IsCompressedTaggedSigned() const { -#ifdef V8_COMPRESS_POINTERS - return IsCompressedSigned(); -#else - return IsTaggedSigned(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsCompressedSigned(); + } else { + return IsTaggedSigned(); + } } constexpr bool IsCompressedTaggedPointer() const { -#ifdef V8_COMPRESS_POINTERS - return IsCompressedPointer(); -#else - return IsTaggedPointer(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsCompressedPointer(); + } else { + return IsTaggedPointer(); + } } static MachineType TypeForRepresentation(const MachineRepresentation& rep, @@ -405,11 +418,11 @@ inline bool IsAnyCompressed(MachineRepresentation rep) { } inline bool IsAnyCompressedTagged(MachineRepresentation rep) { -#ifdef V8_COMPRESS_POINTERS - return IsAnyCompressed(rep); -#else - return IsAnyTagged(rep); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsAnyCompressed(rep); + } else { + return IsAnyTagged(rep); + } } // Gets the log2 of the element size in bytes of the machine type. @@ -431,7 +444,6 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) { case MachineRepresentation::kTaggedSigned: case MachineRepresentation::kTaggedPointer: case MachineRepresentation::kTagged: - return kSystemPointerSizeLog2; case MachineRepresentation::kCompressedSigned: case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressed: diff --git a/chromium/v8/src/codegen/mips/assembler-mips-inl.h b/chromium/v8/src/codegen/mips/assembler-mips-inl.h index d8181ad8f5b..53e6f93411b 100644 --- a/chromium/v8/src/codegen/mips/assembler-mips-inl.h +++ b/chromium/v8/src/codegen/mips/assembler-mips-inl.h @@ -133,7 +133,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc, if (Assembler::IsJicOrJialc(instr2)) { // Encoded internal references are lui/jic load of 32-bit absolute address. 
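Stepping back to the machine-type.h change above: the representation helpers now decide at run time rather than at preprocessing time. A minimal sketch of the selection they all share (assuming COMPRESS_POINTERS_BOOL mirrors the V8_COMPRESS_POINTERS build setting, as its name suggests):

    // Illustration of the common pattern; not a function from the patch.
    MachineRepresentation PickTaggedRep() {
      if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) {
        return MachineRepresentation::kCompressed;  // keep values compressed
      }
      return MachineRepresentation::kTagged;        // full tagged pointers
    }

The include of src/flags/flags.h added at the top of the file is what makes FLAG_turbo_decompression_elimination visible to these helpers.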
uint32_t lui_offset_u, jic_offset_u; - Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u); + Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u); Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u); Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u); @@ -183,7 +183,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/mips/assembler-mips.cc b/chromium/v8/src/codegen/mips/assembler-mips.cc index 423da2fb65f..768b16b86c4 100644 --- a/chromium/v8/src/codegen/mips/assembler-mips.cc +++ b/chromium/v8/src/codegen/mips/assembler-mips.cc @@ -231,8 +231,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle<HeapObject> object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: const StringConstantBase* str = request.string(); @@ -742,27 +742,27 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) { // before that addition, difference between upper part of the target address and // upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted // in jic register with lui instruction. 
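The comment above describes how a 32-bit absolute target is split across a lui immediate and a sign-extended jic/jialc offset; a small worked example may help (the address is chosen for illustration, and the helper doing the work is the UnpackTargetAddressUnsigned shown next):

    // Splitting 0x1234ABCD:
    //   low 16 bits  = 0xABCD  -> as int16_t this is negative, so jic will
    //                             sign-extend it to 0xFFFFABCD at run time.
    //   high 16 bits = 0x1234  -> compensated by +1 (the code writes this as
    //                             "-= kImm16Mask", which is +1 modulo 2^16),
    //                             giving a lui immediate of 0x1235.
    // At run time: lui materializes 0x12350000, jic adds the sign-extended
    // 0xFFFFABCD, and 0x12350000 + 0xFFFFABCD wraps to 0x1234ABCD as desired.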
-void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset, - int16_t& jic_offset) { - lui_offset = (address & kHiMask) >> kLuiShift; - jic_offset = address & kLoMask; +void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset, + int16_t* jic_offset) { + *lui_offset = (address & kHiMask) >> kLuiShift; + *jic_offset = address & kLoMask; - if (jic_offset < 0) { - lui_offset -= kImm16Mask; + if (*jic_offset < 0) { + *lui_offset -= kImm16Mask; } } void Assembler::UnpackTargetAddressUnsigned(uint32_t address, - uint32_t& lui_offset, - uint32_t& jic_offset) { + uint32_t* lui_offset, + uint32_t* jic_offset) { int16_t lui_offset16 = (address & kHiMask) >> kLuiShift; int16_t jic_offset16 = address & kLoMask; if (jic_offset16 < 0) { lui_offset16 -= kImm16Mask; } - lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask; - jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask; + *lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask; + *jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask; } void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui, @@ -977,7 +977,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos, if (IsJicOrJialc(instr2)) { uint32_t lui_offset_u, jic_offset_u; - UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u); + UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u); instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u); instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u); } else { @@ -1928,7 +1928,7 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) { // ------------Memory-instructions------------- -void Assembler::AdjustBaseAndOffset(MemOperand& src, +void Assembler::AdjustBaseAndOffset(MemOperand* src, OffsetAccessType access_type, int second_access_add_to_offset) { // This method is used to adjust the base register and offset pair @@ -1941,26 +1941,26 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // pointer register). // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8. - bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0; + bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned; DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. // is_int16 must be passed a signed value, hence the static cast below. - if (is_int16(src.offset()) && + if (is_int16(src->offset()) && (!two_accesses || is_int16(static_cast<int32_t>( - src.offset() + second_access_add_to_offset)))) { + src->offset() + second_access_add_to_offset)))) { // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified // value) fits into int16_t. return; } UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - DCHECK(src.rm() != scratch); // Must not overwrite the register 'base' - // while loading 'offset'. + DCHECK(src->rm() != scratch); // Must not overwrite the register 'base' + // while loading 'offset'. #ifdef DEBUG // Remember the "(mis)alignment" of 'offset', it will be checked at the end. - uint32_t misalignment = src.offset() & (kDoubleSize - 1); + uint32_t misalignment = src->offset() & (kDoubleSize - 1); #endif // Do not load the whole 32-bit 'offset' if it can be represented as @@ -1972,13 +1972,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, 0x7FF8; // Max int16_t that's a multiple of 8. 
constexpr int32_t kMaxOffsetForSimpleAdjustment = 2 * kMinOffsetForSimpleAdjustment; - if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) { - addiu(at, src.rm(), kMinOffsetForSimpleAdjustment); - src.offset_ -= kMinOffsetForSimpleAdjustment; - } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() && - src.offset() < 0) { - addiu(at, src.rm(), -kMinOffsetForSimpleAdjustment); - src.offset_ += kMinOffsetForSimpleAdjustment; + if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { + addiu(at, src->rm(), kMinOffsetForSimpleAdjustment); + src->offset_ -= kMinOffsetForSimpleAdjustment; + } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && + src->offset() < 0) { + addiu(at, src->rm(), -kMinOffsetForSimpleAdjustment); + src->offset_ += kMinOffsetForSimpleAdjustment; } else if (IsMipsArchVariant(kMips32r6)) { // On r6 take advantage of the aui instruction, e.g.: // aui at, base, offset_high @@ -1989,12 +1989,12 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // addiu at, at, 8 // lw reg_lo, (offset_low-8)(at) // lw reg_hi, (offset_low-4)(at) - int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16); - int16_t offset_low = static_cast<uint16_t>(src.offset()); + int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16); + int16_t offset_low = static_cast<uint16_t>(src->offset()); offset_high += (offset_low < 0) ? 1 : 0; // Account for offset sign extension in load/store. - aui(scratch, src.rm(), static_cast<uint16_t>(offset_high)); + aui(scratch, src->rm(), static_cast<uint16_t>(offset_high)); if (two_accesses && !is_int16(static_cast<int32_t>( offset_low + second_access_add_to_offset))) { // Avoid overflow in the 16-bit offset of the load/store instruction when @@ -2002,7 +2002,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, addiu(scratch, scratch, kDoubleSize); offset_low -= kDoubleSize; } - src.offset_ = offset_low; + src->offset_ = offset_low; } else { // Do not load the whole 32-bit 'offset' if it can be represented as // a sum of three 16-bit signed offsets. This can save an instruction. @@ -2013,62 +2013,62 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, 2 * kMinOffsetForSimpleAdjustment; constexpr int32_t kMaxOffsetForMediumAdjustment = 3 * kMinOffsetForSimpleAdjustment; - if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) { - addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2); + if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { + addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2); addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2); - src.offset_ -= kMinOffsetForMediumAdjustment; - } else if (-kMaxOffsetForMediumAdjustment <= src.offset() && - src.offset() < 0) { - addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2); + src->offset_ -= kMinOffsetForMediumAdjustment; + } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && + src->offset() < 0) { + addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2); addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2); - src.offset_ += kMinOffsetForMediumAdjustment; + src->offset_ += kMinOffsetForMediumAdjustment; } else { // Now that all shorter options have been exhausted, load the full 32-bit // offset. - int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize); + int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask); ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset. 
- addu(scratch, scratch, src.rm()); - src.offset_ -= loaded_offset; + addu(scratch, scratch, src->rm()); + src->offset_ -= loaded_offset; } } - src.rm_ = scratch; + src->rm_ = scratch; - DCHECK(is_int16(src.offset())); + DCHECK(is_int16(src->offset())); if (two_accesses) { DCHECK(is_int16( - static_cast<int32_t>(src.offset() + second_access_add_to_offset))); + static_cast<int32_t>(src->offset() + second_access_add_to_offset))); } - DCHECK(misalignment == (src.offset() & (kDoubleSize - 1))); + DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); } void Assembler::lb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LB, source.rm(), rd, source.offset()); } void Assembler::lbu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LBU, source.rm(), rd, source.offset()); } void Assembler::lh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LH, source.rm(), rd, source.offset()); } void Assembler::lhu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LHU, source.rm(), rd, source.offset()); } void Assembler::lw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LW, source.rm(), rd, source.offset()); } @@ -2088,19 +2088,19 @@ void Assembler::lwr(Register rd, const MemOperand& rs) { void Assembler::sb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(SB, source.rm(), rd, source.offset()); } void Assembler::sh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(SH, source.rm(), rd, source.offset()); } void Assembler::sw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(SW, source.rm(), rd, source.offset()); } @@ -2385,13 +2385,13 @@ void Assembler::seb(Register rd, Register rt) { // Load, store, move. 
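AdjustBaseAndOffset, shown above, rewrites a MemOperand whose offset does not fit in a signed 16-bit immediate. A hedged example of its simplest path (the 0x9000 offset and the register names are chosen for illustration; the real routine also handles the r6 aui path and paired accesses):

    // Suppose a load wants offset 0x9000, which exceeds the int16_t range.
    //   MemOperand src(base, 0x9000);
    //   AdjustBaseAndOffset(&src);
    // emits roughly:  addiu scratch, base, 0x7FF8
    // and leaves:     src.rm() == scratch, src.offset() == 0x1008
    // 0x7FF8 is the largest int16_t that is a multiple of 8, so the doubleword
    // alignment of the original offset is preserved (as the comment in the
    // routine notes), and the remaining 0x1008 fits in the 16-bit field.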
void Assembler::lwc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset()); } void Assembler::swc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset()); } @@ -2969,7 +2969,7 @@ MSA_BRANCH_LIST(MSA_BRANCH) #define MSA_LD_ST(name, opcode) \ void Assembler::name(MSARegister wd, const MemOperand& rs) { \ MemOperand source = rs; \ - AdjustBaseAndOffset(source); \ + AdjustBaseAndOffset(&source); \ if (is_int10(source.offset())) { \ GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \ } else { \ @@ -3473,7 +3473,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc, if (IsJicOrJialc(instr2)) { uint32_t lui_offset_u, jic_offset_u; - Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u); + Assembler::UnpackTargetAddressUnsigned(imm, + &lui_offset_u, &jic_offset_u); instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u); instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u); } else { @@ -3717,7 +3718,7 @@ void Assembler::set_target_value_at(Address pc, uint32_t target, if (IsJicOrJialc(instr2)) { // Must use 2 instructions to insure patchable code => use lui and jic uint32_t lui_offset, jic_offset; - Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset); + Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset); instr1 &= ~kImm16Mask; instr2 &= ~kImm16Mask; diff --git a/chromium/v8/src/codegen/mips/assembler-mips.h b/chromium/v8/src/codegen/mips/assembler-mips.h index 0359be2c94a..d8cb8ec3f2a 100644 --- a/chromium/v8/src/codegen/mips/assembler-mips.h +++ b/chromium/v8/src/codegen/mips/assembler-mips.h @@ -36,6 +36,7 @@ #define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_ #include <stdio.h> +#include <memory> #include <set> @@ -1478,13 +1479,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static bool IsAddImmediate(Instr instr); static Instr SetAddImmediateOffset(Instr instr, int16_t offset); static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic); - static void UnpackTargetAddress( - uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references) - int16_t& jic_offset); // NOLINT(runtime/references) - static void UnpackTargetAddressUnsigned( - uint32_t address, - uint32_t& lui_offset, // NOLINT(runtime/references) - uint32_t& jic_offset); // NOLINT(runtime/references) + static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset, + int16_t* jic_offset); + static void UnpackTargetAddressUnsigned(uint32_t address, + uint32_t* lui_offset, + uint32_t* jic_offset); static bool IsAndImmediate(Instr instr); static bool IsEmittedConstant(Instr instr); @@ -1515,7 +1514,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. 
void AdjustBaseAndOffset( - MemOperand& src, // NOLINT(runtime/references) + MemOperand* src, OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc index 2e4698a9e71..760d33d7c91 100644 --- a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc +++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc @@ -1063,7 +1063,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); if (rd != source.rm()) { lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset)); lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset)); @@ -1089,7 +1089,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) { DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset)); swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset)); } @@ -1105,7 +1105,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { IsMipsArchVariant(kLoongson)); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1140,7 +1140,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { IsMipsArchVariant(kLoongson)); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1177,7 +1177,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { IsMipsArchVariant(kLoongson)); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); if (scratch != rd) { mov(scratch, rd); @@ -1256,7 +1256,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); MemOperand tmp = src; - AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES); + AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES); lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); if (IsFp32Mode()) { // fp32 mode. 
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); @@ -1284,7 +1284,7 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); MemOperand tmp = src; - AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES); + AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES); swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); if (IsFp32Mode()) { // fp32 mode. FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); @@ -1305,13 +1305,13 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) { void TurboAssembler::Lw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lw(rd, source); } void TurboAssembler::Sw(Register rd, const MemOperand& rs) { MemOperand dest = rs; - AdjustBaseAndOffset(dest); + AdjustBaseAndOffset(&dest); sw(rd, dest); } @@ -2926,18 +2926,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; - offset = GetOffset(offset, L, bits); + *offset = GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt) { +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; - scratch = GetRtAsRegisterHelper(rt, scratch); - offset = GetOffset(offset, L, bits); + *scratch = GetRtAsRegisterHelper(rt, *scratch); + *offset = GetOffset(*offset, L, bits); return true; } @@ -2955,23 +2955,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); break; case eq: if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 beq is used here to make the code patchable. Otherwise bc // should be used which has no condition field so is not patchable. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beq(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beqc(rs, scratch, offset); } @@ -2980,16 +2980,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 bne is used here to make the code patchable. Otherwise we // should not generate any instruction. 
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bne(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bnec(rs, scratch, offset); } @@ -3001,14 +3001,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(scratch, rs, offset); @@ -3017,17 +3017,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(rs, scratch, offset); @@ -3038,14 +3038,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(rs, scratch, offset); @@ -3054,17 +3054,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(scratch, rs, offset); @@ -3077,14 +3077,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(scratch, rs, offset); @@ -3093,17 +3093,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Ugreater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; beqzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(rs, scratch, offset); @@ -3114,13 +3114,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { break; // No code needs to be emitted. } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(rs, scratch, offset); @@ -3129,17 +3129,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Uless_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt)) return false; bc(offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(scratch, rs, offset); @@ -3418,7 +3418,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset)); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); break; case eq: @@ -3440,11 +3440,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3456,14 +3456,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3477,11 +3477,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3493,14 +3493,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= r2 if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3751,8 +3751,8 @@ void TurboAssembler::Jump(Register target, const Operand& offset, if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && !is_int16(offset.immediate())) { uint32_t aui_offset, jic_offset; - Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset, - jic_offset); + Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset, + &jic_offset); RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate()); aui(target, target, aui_offset); if (cond == cc_always) { @@ -3790,7 +3790,7 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, // This is not an issue, t9 is expected to be clobbered anyway. 
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { uint32_t lui_offset, jic_offset; - UnpackTargetAddressUnsigned(target, lui_offset, jic_offset); + UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset); if (MustUseReg(rmode)) { RecordRelocInfo(rmode, target); } @@ -3853,10 +3853,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, } void TurboAssembler::Jump(const ExternalReference& reference) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - li(scratch, reference); - Jump(scratch); + li(t9, reference); + Jump(t9); } void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, @@ -3940,7 +3938,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, int32_t target_int = static_cast<int32_t>(target); if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) { uint32_t lui_offset, jialc_offset; - UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset); + UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset); if (MustUseReg(rmode)) { RecordRelocInfo(rmode, target_int); } @@ -3990,7 +3988,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, } } DCHECK(RelocInfo::IsCodeTarget(rmode)); - AllowDeferredHandleDereference embedding_raw_address; Call(code.address(), rmode, cond, rs, rt, bd); } diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.h b/chromium/v8/src/codegen/mips/macro-assembler-mips.h index d9c372f8687..e82c88f0b5e 100644 --- a/chromium/v8/src/codegen/mips/macro-assembler-mips.h +++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.h @@ -849,12 +849,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd = PROTECT); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits, - Register& scratch, // NOLINT(runtime/references) - const Operand& rt); + // TODO(mips) Reorder parameters so out parameters come last. 
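The recurring theme across these MIPS changes is one mechanical cleanup: non-const reference out-parameters, which needed the NOLINT(runtime/references) suppressions deleted above, become pointer out-parameters (the replacement declarations follow immediately below). A generic sketch of the pattern, with invented names:

    // Before: mutation is invisible at the call site and cpplint complains.
    void Split(uint32_t value, uint16_t& hi, uint16_t& lo);  // NOLINT(runtime/references)
    // After: the caller writes Split(value, &hi, &lo), making the outputs obvious.
    void Split(uint32_t value, uint16_t* hi, uint16_t* lo);

This is also why the function bodies throughout these files change src.offset() to src->offset() and pass &offset, &scratch, and similar at the call sites.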
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); void BranchShortHelperR6(int32_t offset, Label* L); void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h b/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h index 7b9946d16eb..cacdbd8f8bb 100644 --- a/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h +++ b/chromium/v8/src/codegen/mips64/assembler-mips64-inl.h @@ -159,7 +159,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64.cc b/chromium/v8/src/codegen/mips64/assembler-mips64.cc index 801faf6306d..37a05585c4b 100644 --- a/chromium/v8/src/codegen/mips64/assembler-mips64.cc +++ b/chromium/v8/src/codegen/mips64/assembler-mips64.cc @@ -207,8 +207,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle<HeapObject> object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: const StringConstantBase* str = request.string(); @@ -1996,7 +1996,7 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) { // ------------Memory-instructions------------- -void Assembler::AdjustBaseAndOffset(MemOperand& src, +void Assembler::AdjustBaseAndOffset(MemOperand* src, OffsetAccessType access_type, int second_access_add_to_offset) { // This method is used to adjust the base register and offset pair @@ -2009,25 +2009,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // pointer register). // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8. - bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0; + bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned; DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. // is_int16 must be passed a signed value, hence the static cast below. - if (is_int16(src.offset()) && + if (is_int16(src->offset()) && (!two_accesses || is_int16(static_cast<int32_t>( - src.offset() + second_access_add_to_offset)))) { + src->offset() + second_access_add_to_offset)))) { // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified // value) fits into int16_t. return; } - DCHECK(src.rm() != + DCHECK(src->rm() != at); // Must not overwrite the register 'base' while loading 'offset'. #ifdef DEBUG // Remember the "(mis)alignment" of 'offset', it will be checked at the end. 
- uint32_t misalignment = src.offset() & (kDoubleSize - 1); + uint32_t misalignment = src->offset() & (kDoubleSize - 1); #endif // Do not load the whole 32-bit 'offset' if it can be represented as @@ -2042,13 +2042,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) { - daddiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment); - src.offset_ -= kMinOffsetForSimpleAdjustment; - } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() && - src.offset() < 0) { - daddiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment); - src.offset_ += kMinOffsetForSimpleAdjustment; + if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { + daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment); + src->offset_ -= kMinOffsetForSimpleAdjustment; + } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && + src->offset() < 0) { + daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment); + src->offset_ += kMinOffsetForSimpleAdjustment; } else if (kArchVariant == kMips64r6) { // On r6 take advantage of the daui instruction, e.g.: // daui at, base, offset_high @@ -2060,9 +2060,9 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // daddiu at, at, 8 // lw reg_lo, (offset_low-8)(at) // lw reg_hi, (offset_low-4)(at) - int16_t offset_low = static_cast<uint16_t>(src.offset()); + int16_t offset_low = static_cast<uint16_t>(src->offset()); int32_t offset_low32 = offset_low; - int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16); + int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16); bool increment_hi16 = offset_low < 0; bool overflow_hi16 = false; @@ -2070,7 +2070,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, offset_high++; overflow_hi16 = (offset_high == -32768); } - daui(scratch, src.rm(), static_cast<uint16_t>(offset_high)); + daui(scratch, src->rm(), static_cast<uint16_t>(offset_high)); if (overflow_hi16) { dahi(scratch, 1); @@ -2084,7 +2084,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, offset_low32 -= kDoubleSize; } - src.offset_ = offset_low32; + src->offset_ = offset_low32; } else { // Do not load the whole 32-bit 'offset' if it can be represented as // a sum of three 16-bit signed offsets. This can save an instruction. 
@@ -2095,33 +2095,33 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, 2 * kMinOffsetForSimpleAdjustment; constexpr int32_t kMaxOffsetForMediumAdjustment = 3 * kMinOffsetForSimpleAdjustment; - if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) { - daddiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2); + if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { + daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2); daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2); - src.offset_ -= kMinOffsetForMediumAdjustment; - } else if (-kMaxOffsetForMediumAdjustment <= src.offset() && - src.offset() < 0) { - daddiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2); + src->offset_ -= kMinOffsetForMediumAdjustment; + } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && + src->offset() < 0) { + daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2); daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2); - src.offset_ += kMinOffsetForMediumAdjustment; + src->offset_ += kMinOffsetForMediumAdjustment; } else { // Now that all shorter options have been exhausted, load the full 32-bit // offset. - int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize); + int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask); ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset. - daddu(scratch, scratch, src.rm()); - src.offset_ -= loaded_offset; + daddu(scratch, scratch, src->rm()); + src->offset_ -= loaded_offset; } } - src.rm_ = scratch; + src->rm_ = scratch; - DCHECK(is_int16(src.offset())); + DCHECK(is_int16(src->offset())); if (two_accesses) { DCHECK(is_int16( - static_cast<int32_t>(src.offset() + second_access_add_to_offset))); + static_cast<int32_t>(src->offset() + second_access_add_to_offset))); } - DCHECK(misalignment == (src.offset() & (kDoubleSize - 1))); + DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); } void Assembler::lb(Register rd, const MemOperand& rs) { @@ -3169,7 +3169,7 @@ MSA_BRANCH_LIST(MSA_BRANCH) #define MSA_LD_ST(name, opcode) \ void Assembler::name(MSARegister wd, const MemOperand& rs) { \ MemOperand source = rs; \ - AdjustBaseAndOffset(source); \ + AdjustBaseAndOffset(&source); \ if (is_int10(source.offset())) { \ GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \ } else { \ diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64.h b/chromium/v8/src/codegen/mips64/assembler-mips64.h index 9695aa65248..48733eebea5 100644 --- a/chromium/v8/src/codegen/mips64/assembler-mips64.h +++ b/chromium/v8/src/codegen/mips64/assembler-mips64.h @@ -36,7 +36,7 @@ #define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_ #include <stdio.h> - +#include <memory> #include <set> #include "src/codegen/assembler.h" @@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. 
void AdjustBaseAndOffset( - MemOperand& src, // NOLINT(runtime/references) + MemOperand* src, OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc index b3537860643..2ea770d2240 100644 --- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc +++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc @@ -1166,7 +1166,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); if (rd != source.rm()) { lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset)); lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset)); @@ -1201,7 +1201,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) { DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset)); swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset)); } @@ -1216,7 +1216,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { DCHECK_EQ(kArchVariant, kMips64r2); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1250,7 +1250,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { DCHECK_EQ(kArchVariant, kMips64r2); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1286,7 +1286,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { DCHECK_EQ(kArchVariant, kMips64r2); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); if (scratch != rd) { mov(scratch, rd); @@ -1314,7 +1314,7 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) { DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 7 fits into int16_t. 
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7); if (rd != source.rm()) { ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset)); ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset)); @@ -1349,7 +1349,7 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) { DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 7 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7); sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset)); sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset)); } @@ -1411,91 +1411,91 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, void TurboAssembler::Lb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lb(rd, source); } void TurboAssembler::Lbu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lbu(rd, source); } void TurboAssembler::Sb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sb(rd, source); } void TurboAssembler::Lh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lh(rd, source); } void TurboAssembler::Lhu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lhu(rd, source); } void TurboAssembler::Sh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sh(rd, source); } void TurboAssembler::Lw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lw(rd, source); } void TurboAssembler::Lwu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lwu(rd, source); } void TurboAssembler::Sw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sw(rd, source); } void TurboAssembler::Ld(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); ld(rd, source); } void TurboAssembler::Sd(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sd(rd, source); } void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); lwc1(fd, tmp); } void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); swc1(fs, tmp); } void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); ldc1(fd, tmp); } void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); sdc1(fs, tmp); } @@ -3362,18 +3362,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, +bool 
TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; - offset = GetOffset(offset, L, bits); + *offset = GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt) { +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; - scratch = GetRtAsRegisterHelper(rt, scratch); - offset = GetOffset(offset, L, bits); + *scratch = GetRtAsRegisterHelper(rt, *scratch); + *offset = GetOffset(*offset, L, bits); return true; } @@ -3392,23 +3392,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); break; case eq: if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 beq is used here to make the code patchable. Otherwise bc // should be used which has no condition field so is not patchable. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beq(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beqc(rs, scratch, offset); } @@ -3417,16 +3417,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 bne is used here to make the code patchable. Otherwise we // should not generate any instruction. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bne(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bnec(rs, scratch, offset); } @@ -3438,14 +3438,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(scratch, rs, offset); @@ -3454,17 +3454,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(rs, scratch, offset); @@ -3475,14 +3475,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(rs, scratch, offset); @@ -3491,17 +3491,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(scratch, rs, offset); @@ -3514,14 +3514,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(scratch, rs, offset); @@ -3530,17 +3530,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Ugreater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; beqzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(rs, scratch, offset); @@ -3551,13 +3551,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { break; // No code needs to be emitted. 
} else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(rs, scratch, offset); @@ -3566,17 +3566,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Uless_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt)) return false; bc(offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(scratch, rs, offset); @@ -3858,7 +3858,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset)); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); break; case eq: @@ -3880,11 +3880,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3896,14 +3896,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3917,11 +3917,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3933,14 +3933,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= r2 if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -4202,10 +4202,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, } void TurboAssembler::Jump(const ExternalReference& reference) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - li(scratch, reference); - Jump(scratch); + li(t9, reference); + Jump(t9); } // Note: To call gcc-compiled C code on mips, you must call through t9. @@ -4284,7 +4282,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h index c2b701a5aff..886d64e494b 100644 --- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -850,12 +850,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits, - Register& scratch, // NOLINT(runtime/references) - const Operand& rt); + // TODO(mips) Reorder parameters so out parameters come last. 
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+                       Register* scratch, const Operand& rt);
 
   void BranchShortHelperR6(int32_t offset, Label* L);
   void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.cc b/chromium/v8/src/codegen/optimized-compilation-info.cc
index 7dc94f39cd6..de89371adbf 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.cc
+++ b/chromium/v8/src/codegen/optimized-compilation-info.cc
@@ -111,15 +111,9 @@ OptimizedCompilationInfo::~OptimizedCompilationInfo() {
 }
 
 void OptimizedCompilationInfo::set_deferred_handles(
-    std::shared_ptr<DeferredHandles> deferred_handles) {
+    std::unique_ptr<DeferredHandles> deferred_handles) {
   DCHECK_NULL(deferred_handles_);
-  deferred_handles_.swap(deferred_handles);
-}
-
-void OptimizedCompilationInfo::set_deferred_handles(
-    DeferredHandles* deferred_handles) {
-  DCHECK_NULL(deferred_handles_);
-  deferred_handles_.reset(deferred_handles);
+  deferred_handles_ = std::move(deferred_handles);
 }
 
 void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
@@ -132,6 +126,7 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
   if (!closure_.is_null()) {
     closure_ = Handle<JSFunction>(*closure_, isolate);
   }
+  DCHECK(code_.is_null());
 }
 
 void OptimizedCompilationInfo::AbortOptimization(BailoutReason reason) {
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.h b/chromium/v8/src/codegen/optimized-compilation-info.h
index 624517283e3..2f3afafc68d 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.h
+++ b/chromium/v8/src/codegen/optimized-compilation-info.h
@@ -231,11 +231,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
     osr_frame_ = osr_frame;
   }
 
-  void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
-  void set_deferred_handles(DeferredHandles* deferred_handles);
-  std::shared_ptr<DeferredHandles> deferred_handles() {
-    return deferred_handles_;
-  }
+  void set_deferred_handles(std::unique_ptr<DeferredHandles> deferred_handles);
 
   void ReopenHandlesInNewHandleScope(Isolate* isolate);
 
@@ -330,7 +326,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   // OptimizedCompilationInfo allocates.
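The set_deferred_handles change above (and the deferred_handles_ member that follows) narrows ownership from std::shared_ptr to std::unique_ptr: the compilation info becomes the sole owner, and the setter takes the pointer by value and moves it into place. A small sketch of that ownership-transfer shape, assuming a placeholder DeferredHandles type:

#include <cassert>
#include <memory>
#include <utility>

struct DeferredHandles {};  // placeholder for the real type

class CompilationInfoSketch {
 public:
  // Taking std::unique_ptr by value documents the ownership transfer in the
  // signature; callers must std::move their pointer in.
  void set_deferred_handles(std::unique_ptr<DeferredHandles> deferred_handles) {
    assert(deferred_handles_ == nullptr);  // mirrors DCHECK_NULL
    deferred_handles_ = std::move(deferred_handles);
  }

 private:
  std::unique_ptr<DeferredHandles> deferred_handles_;
};

void Example() {
  auto handles = std::make_unique<DeferredHandles>();
  CompilationInfoSketch info;
  info.set_deferred_handles(std::move(handles));  // handles is now empty
}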
Zone* zone_; - std::shared_ptr<DeferredHandles> deferred_handles_; + std::unique_ptr<DeferredHandles> deferred_handles_; BailoutReason bailout_reason_ = BailoutReason::kNoReason; diff --git a/chromium/v8/src/codegen/pending-optimization-table.cc b/chromium/v8/src/codegen/pending-optimization-table.cc index b7be9c77757..84e36fc8438 100644 --- a/chromium/v8/src/codegen/pending-optimization-table.cc +++ b/chromium/v8/src/codegen/pending-optimization-table.cc @@ -83,7 +83,7 @@ void PendingOptimizationTable::MarkedForOptimization( function->ShortPrint(); PrintF( " should be prepared for optimization with " - "%%PrepareFunctionForOptimize before " + "%%PrepareFunctionForOptimization before " "%%OptimizeFunctionOnNextCall / %%OptimizeOSR "); UNREACHABLE(); } diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h b/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h index 166b9d44231..c55a5a9c0bf 100644 --- a/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h +++ b/chromium/v8/src/codegen/ppc/assembler-ppc-inl.h @@ -144,7 +144,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.cc b/chromium/v8/src/codegen/ppc/assembler-ppc.cc index 2a638af0705..17a3aba1b2e 100644 --- a/chromium/v8/src/codegen/ppc/assembler-ppc.cc +++ b/chromium/v8/src/codegen/ppc/assembler-ppc.cc @@ -200,8 +200,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle<HeapObject> object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); break; } case HeapObjectRequest::kStringConstant: { diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.h b/chromium/v8/src/codegen/ppc/assembler-ppc.h index dee264a75c0..42eda72d4d7 100644 --- a/chromium/v8/src/codegen/ppc/assembler-ppc.h +++ b/chromium/v8/src/codegen/ppc/assembler-ppc.h @@ -41,6 +41,7 @@ #define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_ #include <stdio.h> +#include <memory> #include <vector> #include "src/codegen/assembler.h" diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc index 41162063331..9e41dec2a8f 100644 --- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -1287,12 +1287,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, { // Load receiver to pass it later to DebugOnFunctionCall hook. if (actual.is_reg()) { - mr(r7, actual.reg()); + ShiftLeftImm(r7, actual.reg(), Operand(kPointerSizeLog2)); + LoadPX(r7, MemOperand(sp, r7)); } else { - mov(r7, Operand(actual.immediate())); + LoadP(r7, MemOperand(sp, actual.immediate() << kPointerSizeLog2), r0); } - ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2)); - LoadPX(r7, MemOperand(sp, r7)); FrameScope frame(this, has_frame() ? 
StackFrame::NONE : StackFrame::INTERNAL); if (expected.is_reg()) { @@ -2409,51 +2408,51 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb, void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch, CRegister cr) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Cmpi(src1, Operand(smi), scratch, cr); +#else LoadSmiLiteral(scratch, smi); cmp(src1, scratch, cr); -#else - Cmpi(src1, Operand(smi), scratch, cr); #endif } void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch, CRegister cr) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Cmpli(src1, Operand(smi), scratch, cr); +#else LoadSmiLiteral(scratch, smi); cmpl(src1, scratch, cr); -#else - Cmpli(src1, Operand(smi), scratch, cr); #endif } void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi, Register scratch) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Add(dst, src, static_cast<intptr_t>(smi.ptr()), scratch); +#else LoadSmiLiteral(scratch, smi); add(dst, src, scratch); -#else - Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch); #endif } void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi, Register scratch) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Add(dst, src, -(static_cast<intptr_t>(smi.ptr())), scratch); +#else LoadSmiLiteral(scratch, smi); sub(dst, src, scratch); -#else - Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch); #endif } void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi, Register scratch, RCBit rc) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + And(dst, src, Operand(smi), rc); +#else LoadSmiLiteral(scratch, smi); and_(dst, src, scratch, rc); -#else - And(dst, src, Operand(smi), rc); #endif } @@ -2941,14 +2940,18 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
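On these 64-bit ports the Smi shift is either 32 (full Smis) or 1 (pointer compression / 31-bit Smis), and the hunks above and below fold the untag shift into the scaling needed to index 8-byte builtin-table entries: an arithmetic shift right by (smi shift - 3) in the first case, a shift left by (3 - smi shift) in the second. A hedged arithmetic sketch of that folding, with illustrative constants:

#include <cstdint>

// Illustrative constant; the real values come from V8's build configuration.
constexpr int kSystemPointerSizeLog2 = 3;  // 8-byte table entries

// Convert a tagged Smi index into a byte offset into a table of pointers,
// folding the untag shift into the scaling shift.
constexpr int64_t SmiIndexToByteOffset(int64_t tagged, int smi_shift) {
  return smi_shift > kSystemPointerSizeLog2
             ? tagged >> (smi_shift - kSystemPointerSizeLog2)   // 32-bit smi shift
             : tagged << (kSystemPointerSizeLog2 - smi_shift);  // 1-bit smi shift
}

// Index 5, tagged with either shift, maps to byte offset 5 * 8 = 40.
static_assert(SmiIndexToByteOffset(int64_t{5} << 32, 32) == 40, "");
static_assert(SmiIndexToByteOffset(int64_t{5} << 1, 1) == 40, "");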
+#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + ShiftLeftImm(builtin_index, builtin_index, + Operand(kSystemPointerSizeLog2 - kSmiShift)); +#else ShiftRightArithImm(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); +#endif addi(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index)); diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h index fd4cb6014bb..7ff5a6bb4b7 100644 --- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -876,12 +876,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { } void SmiToPtrArrayOffset(Register dst, Register src) { -#if V8_TARGET_ARCH_PPC64 - STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); - ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2); -#else +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2); ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift)); +#else + STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); + ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2); #endif } @@ -895,7 +895,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void AssertNotSmi(Register object); void AssertSmi(Register object); -#if V8_TARGET_ARCH_PPC64 +#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH) // Ensure it is permissible to read/write int value directly from // upper half of the smi. STATIC_ASSERT(kSmiTag == 0); diff --git a/chromium/v8/src/codegen/reglist.h b/chromium/v8/src/codegen/reglist.h index 609e6b88458..4f1d35267d0 100644 --- a/chromium/v8/src/codegen/reglist.h +++ b/chromium/v8/src/codegen/reglist.h @@ -25,20 +25,18 @@ constexpr int NumRegs(RegList list) { return base::bits::CountPopulation(list); } +namespace detail { // Combine two RegLists by building the union of the contained registers. -// Implemented as a Functor to pass it to base::fold even on gcc < 5 (see -// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892). -// TODO(clemensh): Remove this once we require gcc >= 5.0. -struct CombineRegListsFunctor { - constexpr RegList operator()(RegList list1, RegList list2) const { - return list1 | list2; - } -}; +// TODO(clemensb): Replace by constexpr lambda once we have C++17. +constexpr RegList CombineRegListsHelper(RegList list1, RegList list2) { + return list1 | list2; +} +} // namespace detail // Combine several RegLists by building the union of the contained registers. template <typename... RegLists> constexpr RegList CombineRegLists(RegLists... 
lists) { - return base::fold(CombineRegListsFunctor{}, 0, lists...); + return base::fold(detail::CombineRegListsHelper, 0, lists...); } } // namespace internal diff --git a/chromium/v8/src/codegen/reloc-info.cc b/chromium/v8/src/codegen/reloc-info.cc index a889a8b9c7b..039a6746b1b 100644 --- a/chromium/v8/src/codegen/reloc-info.cc +++ b/chromium/v8/src/codegen/reloc-info.cc @@ -366,7 +366,7 @@ void RelocInfo::set_target_address(Address target, Assembler::set_target_address_at(pc_, constant_pool_, target, icache_flush_mode); if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && - IsCodeTargetMode(rmode_)) { + IsCodeTargetMode(rmode_) && !FLAG_disable_write_barriers) { Code target_code = Code::GetCodeFromTargetAddress(target); MarkingBarrierForCode(host(), this, target_code); } diff --git a/chromium/v8/src/codegen/s390/assembler-s390-inl.h b/chromium/v8/src/codegen/s390/assembler-s390-inl.h index 5e7b193c8ac..f911bdabf6f 100644 --- a/chromium/v8/src/codegen/s390/assembler-s390-inl.h +++ b/chromium/v8/src/codegen/s390/assembler-s390-inl.h @@ -150,7 +150,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/s390/assembler-s390.cc b/chromium/v8/src/codegen/s390/assembler-s390.cc index 873c0a2ad06..9de95ed5084 100644 --- a/chromium/v8/src/codegen/s390/assembler-s390.cc +++ b/chromium/v8/src/codegen/s390/assembler-s390.cc @@ -329,8 +329,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset(); switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); set_target_address_at(pc, kNullAddress, object.address(), SKIP_ICACHE_FLUSH); break; diff --git a/chromium/v8/src/codegen/s390/assembler-s390.h b/chromium/v8/src/codegen/s390/assembler-s390.h index 0653e79b67c..f1a418d1afa 100644 --- a/chromium/v8/src/codegen/s390/assembler-s390.h +++ b/chromium/v8/src/codegen/s390/assembler-s390.h @@ -40,6 +40,7 @@ #ifndef V8_CODEGEN_S390_ASSEMBLER_S390_H_ #define V8_CODEGEN_S390_ASSEMBLER_S390_H_ #include <stdio.h> +#include <memory> #if V8_HOST_ARCH_S390 // elf.h include is required for auxv check for STFLE facility used // for hardware detection, which is sensible only on s390 hosts. 
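The reglist.h hunk above swaps the CombineRegListsFunctor for a plain constexpr helper folded over the argument pack. A self-contained sketch of the same union-of-bitmasks idea, written with direct recursion instead of the internal base::fold helper; RegList here is just an illustrative typedef:

#include <cstdint>

using RegList = uint64_t;  // one bit per register, as in reglist.h

constexpr RegList CombineRegLists() { return 0; }

// Union of all the contained registers, one list at a time.
template <typename... RegLists>
constexpr RegList CombineRegLists(RegList head, RegLists... tail) {
  return head | CombineRegLists(tail...);
}

// Registers 0, 3 and 5 combined into a single list.
static_assert(CombineRegLists(1u << 0, 1u << 3, 1u << 5) == 0b101001, "");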
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc index 355d536379a..4cab44d9e1b 100644 --- a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc +++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc @@ -51,7 +51,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, } RegList list = kJSCallerSaved & ~exclusions; - bytes += NumRegs(list) * kPointerSize; + bytes += NumRegs(list) * kSystemPointerSize; if (fp_mode == kSaveFPRegs) { bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize; @@ -76,7 +76,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, RegList list = kJSCallerSaved & ~exclusions; MultiPush(list); - bytes += NumRegs(list) * kPointerSize; + bytes += NumRegs(list) * kSystemPointerSize; if (fp_mode == kSaveFPRegs) { MultiPushDoubles(kCallerSavedDoubles); @@ -107,7 +107,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, RegList list = kJSCallerSaved & ~exclusions; MultiPop(list); - bytes += NumRegs(list) * kPointerSize; + bytes += NumRegs(list) * kSystemPointerSize; return bytes; } @@ -116,8 +116,8 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); - const uint32_t offset = - FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag; + const uint32_t offset = FixedArray::kHeaderSize + + constant_index * kSystemPointerSize - kHeapObjectTag; CHECK(is_uint19(offset)); DCHECK_NE(destination, r0); @@ -258,7 +258,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, void TurboAssembler::Drop(int count) { if (count > 0) { - int total = count * kPointerSize; + int total = count * kSystemPointerSize; if (is_uint12(total)) { la(sp, MemOperand(sp, total)); } else if (is_int20(total)) { @@ -270,7 +270,7 @@ void TurboAssembler::Drop(int count) { } void TurboAssembler::Drop(Register count, Register scratch) { - ShiftLeftP(scratch, count, Operand(kPointerSizeLog2)); + ShiftLeftP(scratch, count, Operand(kSystemPointerSizeLog2)); AddP(sp, sp, scratch); } @@ -367,12 +367,12 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc, void TurboAssembler::MultiPush(RegList regs, Register location) { int16_t num_to_push = base::bits::CountPopulation(regs); - int16_t stack_offset = num_to_push * kPointerSize; + int16_t stack_offset = num_to_push * kSystemPointerSize; SubP(location, location, Operand(stack_offset)); for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) { if ((regs & (1 << i)) != 0) { - stack_offset -= kPointerSize; + stack_offset -= kSystemPointerSize; StoreP(ToRegister(i), MemOperand(location, stack_offset)); } } @@ -384,7 +384,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) { for (int16_t i = 0; i < Register::kNumRegisters; i++) { if ((regs & (1 << i)) != 0) { LoadP(ToRegister(i), MemOperand(location, stack_offset)); - stack_offset += kPointerSize; + stack_offset += kSystemPointerSize; } } AddP(location, location, Operand(stack_offset)); @@ -439,13 +439,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } // Although the object register is tagged, the offset is relative to the start - // of the object, so so offset must be a multiple of kPointerSize. - DCHECK(IsAligned(offset, kPointerSize)); + // of the object, so so offset must be a multiple of kSystemPointerSize. 
+ DCHECK(IsAligned(offset, kSystemPointerSize)); lay(dst, MemOperand(object, offset - kHeapObjectTag)); if (emit_debug_code()) { Label ok; - AndP(r0, dst, Operand(kPointerSize - 1)); + AndP(r0, dst, Operand(kSystemPointerSize - 1)); beq(&ok, Label::kNear); stop(); bind(&ok); @@ -632,7 +632,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { Push(r14, fp); fp_delta = 0; } - la(fp, MemOperand(sp, fp_delta * kPointerSize)); + la(fp, MemOperand(sp, fp_delta * kSystemPointerSize)); } void TurboAssembler::PopCommonFrame(Register marker_reg) { @@ -653,7 +653,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) { Push(r14, fp, cp); fp_delta = 1; } - la(fp, MemOperand(sp, fp_delta * kPointerSize)); + la(fp, MemOperand(sp, fp_delta * kSystemPointerSize)); } void TurboAssembler::RestoreFrameStateForTailCall() { @@ -1082,9 +1082,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, DCHECK(frame_type == StackFrame::EXIT || frame_type == StackFrame::BUILTIN_EXIT); // Set up the frame structure on the stack. - DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); - DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); - DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); + DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement); + DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset); + DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset); DCHECK_GT(stack_space, 0); // This is an opportunity to build a frame to wrap @@ -1117,7 +1117,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, // since the sp slot and code slot were pushed after the fp. } - lay(sp, MemOperand(sp, -stack_space * kPointerSize)); + lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize)); // Allocate and align the frame preparing for calling the runtime // function. @@ -1127,11 +1127,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8 } - lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); + lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize)); StoreP(MemOperand(sp), Operand::Zero(), r0); // Set the exit frame sp value to point just before the return address // location. - lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize)); + lay(r1, MemOperand(sp, kStackFrameSPSlot * kSystemPointerSize)); StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset)); } @@ -1184,7 +1184,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, if (argument_count.is_valid()) { if (!argument_count_is_length) { - ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2)); + ShiftLeftP(argument_count, argument_count, + Operand(kSystemPointerSizeLog2)); } la(sp, MemOperand(sp, argument_count)); } @@ -1211,22 +1212,24 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, #endif // Calculate the end of destination area where we will put the arguments - // after we drop current frame. We AddP kPointerSize to count the receiver - // argument which is not included into formal parameters count. + // after we drop current frame. We AddP kSystemPointerSize to count the + // receiver argument which is not included into formal parameters count. 
Register dst_reg = scratch0; - ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2)); + ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kSystemPointerSizeLog2)); AddP(dst_reg, fp, dst_reg); AddP(dst_reg, dst_reg, - Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); + Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); Register src_reg = caller_args_count_reg; - // Calculate the end of source area. +kPointerSize is for the receiver. + // Calculate the end of source area. +kSystemPointerSize is for the receiver. if (callee_args_count.is_reg()) { - ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2)); + ShiftLeftP(src_reg, callee_args_count.reg(), + Operand(kSystemPointerSizeLog2)); AddP(src_reg, sp, src_reg); - AddP(src_reg, src_reg, Operand(kPointerSize)); + AddP(src_reg, src_reg, Operand(kSystemPointerSize)); } else { - mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize)); + mov(src_reg, + Operand((callee_args_count.immediate() + 1) * kSystemPointerSize)); AddP(src_reg, src_reg, sp); } @@ -1253,10 +1256,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, } LoadRR(r1, tmp_reg); bind(&loop); - LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize)); - StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize)); - lay(src_reg, MemOperand(src_reg, -kPointerSize)); - lay(dst_reg, MemOperand(dst_reg, -kPointerSize)); + LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize)); + StoreP(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize)); + lay(src_reg, MemOperand(src_reg, -kSystemPointerSize)); + lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize)); BranchOnCount(r1, &loop); // Leave current frame. @@ -1342,12 +1345,12 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, { // Load receiver to pass it later to DebugOnFunctionCall hook. if (actual.is_reg()) { - LoadRR(r6, actual.reg()); + ShiftLeftP(r6, actual.reg(), Operand(kSystemPointerSizeLog2)); + LoadP(r6, MemOperand(sp, r6)); } else { - mov(r6, Operand(actual.immediate())); + LoadP(r6, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2), + ip); } - ShiftLeftP(r6, r6, Operand(kPointerSizeLog2)); - LoadP(r6, MemOperand(sp, r6)); FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); if (expected.is_reg()) { @@ -1470,8 +1473,8 @@ void MacroAssembler::MaybeDropFrames() { void MacroAssembler::PushStackHandler() { // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize); // Link the current handler as the next handler. Move(r7, @@ -1486,13 +1489,13 @@ void MacroAssembler::PushStackHandler() { // Copy the old handler into the next handler slot. MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7), - Operand(kPointerSize)); + Operand(kSystemPointerSize)); // Set this new handler as the current one. StoreP(sp, MemOperand(r7)); } void MacroAssembler::PopStackHandler() { - STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); // Pop the Next Handler into r3 and store it into Handler Address reference. 
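PrepareForTailCall above computes the end of the caller's argument area (relative to fp) and of the callee's argument area (relative to sp), each scaled by kSystemPointerSize with one extra slot for the receiver, and then copies slots backwards. A sketch of the same address arithmetic on a flat memory model; the frame-layout constants here are illustrative, not the real frame-constants.h values:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative frame-model constants.
constexpr size_t kSystemPointerSizeSketch = 8;
constexpr size_t kCallerSPOffsetSketch = 2 * kSystemPointerSizeSketch;

// Copy the callee's arguments (plus receiver) over the caller's argument
// area, walking backwards as the assembler loop does.
inline void CopyArgsForTailCall(uint8_t* fp, uint8_t* sp,
                                size_t caller_args, size_t callee_args) {
  uint8_t* dst_end = fp + caller_args * kSystemPointerSizeSketch +
                     kCallerSPOffsetSketch + kSystemPointerSizeSketch;
  uint8_t* src_end = sp + (callee_args + 1) * kSystemPointerSizeSketch;
  size_t slots = callee_args + 1;  // +1 for the receiver
  for (size_t i = 0; i < slots; ++i) {
    dst_end -= kSystemPointerSizeSketch;
    src_end -= kSystemPointerSizeSketch;
    // LoadP/StoreP pair in the assembler; a pointer-sized copy here.
    std::memcpy(dst_end, src_end, kSystemPointerSizeSketch);
  }
}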
@@ -1839,18 +1842,19 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, int stack_passed_arguments = CalculateStackPassedWords(num_reg_arguments, num_double_arguments); int stack_space = kNumRequiredStackFrameSlots; - if (frame_alignment > kPointerSize) { + if (frame_alignment > kSystemPointerSize) { // Make stack end at alignment and make room for stack arguments // -- preserving original value of sp. LoadRR(scratch, sp); - lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize)); + lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize)); DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); - StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize)); + StoreP(scratch, + MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize)); } else { stack_space += stack_passed_arguments; } - lay(sp, MemOperand(sp, (-stack_space) * kPointerSize)); + lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize)); } void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, @@ -1940,11 +1944,11 @@ void TurboAssembler::CallCFunctionHelper(Register function, int stack_passed_arguments = CalculateStackPassedWords(num_reg_arguments, num_double_arguments); int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments; - if (ActivationFrameAlignment() > kPointerSize) { + if (ActivationFrameAlignment() > kSystemPointerSize) { // Load the original stack pointer (pre-alignment) from the stack - LoadP(sp, MemOperand(sp, stack_space * kPointerSize)); + LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize)); } else { - la(sp, MemOperand(sp, stack_space * kPointerSize)); + la(sp, MemOperand(sp, stack_space * kSystemPointerSize)); } } @@ -1962,20 +1966,20 @@ void TurboAssembler::CheckPageFlag( uint32_t shifted_mask = mask; // Determine the byte offset to be tested if (mask <= 0x80) { - byte_offset = kPointerSize - 1; + byte_offset = kSystemPointerSize - 1; } else if (mask < 0x8000) { - byte_offset = kPointerSize - 2; + byte_offset = kSystemPointerSize - 2; shifted_mask = mask >> 8; } else if (mask < 0x800000) { - byte_offset = kPointerSize - 3; + byte_offset = kSystemPointerSize - 3; shifted_mask = mask >> 16; } else { - byte_offset = kPointerSize - 4; + byte_offset = kSystemPointerSize - 4; shifted_mask = mask >> 24; } #if V8_TARGET_LITTLE_ENDIAN // Reverse the byte_offset if emulating on little endian platform - byte_offset = kPointerSize - byte_offset - 1; + byte_offset = kSystemPointerSize - byte_offset - 1; #endif tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset), Operand(shifted_mask)); @@ -3415,12 +3419,12 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) { void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { intptr_t value = static_cast<intptr_t>(smi.ptr()); -#if V8_TARGET_ARCH_S390X +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + llilf(dst, Operand(value)); +#else DCHECK_EQ(value & 0xFFFFFFFF, 0); // The smi value is loaded in upper 32-bits. Lower 32-bit are zeros. llihf(dst, Operand(value >> 32)); -#else - llilf(dst, Operand(value)); #endif } @@ -3456,16 +3460,16 @@ void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value, } void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { -#if V8_TARGET_ARCH_S390X +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + // CFI takes 32-bit immediate. 
+ cfi(src1, Operand(smi)); +#else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32)); } else { LoadSmiLiteral(scratch, smi); cgr(src1, scratch); } -#else - // CFI takes 32-bit immediate. - cfi(src1, Operand(smi)); #endif } @@ -4154,7 +4158,7 @@ void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) { // Clear right most # of bits void TurboAssembler::ClearRightImm(Register dst, Register src, const Operand& val) { - int numBitsToClear = val.immediate() % (kPointerSize * 8); + int numBitsToClear = val.immediate() % (kSystemPointerSize * 8); // Try to use RISBG if possible if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { @@ -4342,14 +4346,19 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + STATIC_ASSERT(kSmiShiftSize == 0); + ShiftLeftP(builtin_index, builtin_index, + Operand(kSystemPointerSizeLog2 - kSmiShift)); +#else ShiftRightArithP(builtin_index, builtin_index, Operand(kSmiShift - kSystemPointerSizeLog2)); +#endif AddP(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); LoadP(builtin_index, MemOperand(kRootRegister, builtin_index)); @@ -4427,7 +4436,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { Label return_label; larl(r14, &return_label); // Generate the return addr of call later. - StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize)); + StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize)); // zLinux ABI requires caller's frame to have sufficient space for callee // preserved regsiter save area. diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.h b/chromium/v8/src/codegen/s390/macro-assembler-s390.h index 856e4b592ec..06c26cb305f 100644 --- a/chromium/v8/src/codegen/s390/macro-assembler-s390.h +++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.h @@ -515,26 +515,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { } void push(DoubleRegister src) { - lay(sp, MemOperand(sp, -kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize)); StoreDouble(src, MemOperand(sp)); } void push(Register src) { - lay(sp, MemOperand(sp, -kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize)); StoreP(src, MemOperand(sp)); } void pop(DoubleRegister dst) { LoadDouble(dst, MemOperand(sp)); - la(sp, MemOperand(sp, kPointerSize)); + la(sp, MemOperand(sp, kSystemPointerSize)); } void pop(Register dst) { LoadP(dst, MemOperand(sp)); - la(sp, MemOperand(sp, kPointerSize)); + la(sp, MemOperand(sp, kSystemPointerSize)); } - void pop() { la(sp, MemOperand(sp, kPointerSize)); } + void pop() { la(sp, MemOperand(sp, kSystemPointerSize)); } void Push(Register src) { push(src); } @@ -544,25 +544,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Push two registers. Pushes leftmost register first (to highest address). 
void Push(Register src1, Register src2) { - lay(sp, MemOperand(sp, -kPointerSize * 2)); - StoreP(src1, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 2)); + StoreP(src1, MemOperand(sp, kSystemPointerSize)); StoreP(src2, MemOperand(sp, 0)); } // Push three registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Register src3) { - lay(sp, MemOperand(sp, -kPointerSize * 3)); - StoreP(src1, MemOperand(sp, kPointerSize * 2)); - StoreP(src2, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 3)); + StoreP(src1, MemOperand(sp, kSystemPointerSize * 2)); + StoreP(src2, MemOperand(sp, kSystemPointerSize)); StoreP(src3, MemOperand(sp, 0)); } // Push four registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Register src3, Register src4) { - lay(sp, MemOperand(sp, -kPointerSize * 4)); - StoreP(src1, MemOperand(sp, kPointerSize * 3)); - StoreP(src2, MemOperand(sp, kPointerSize * 2)); - StoreP(src3, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 4)); + StoreP(src1, MemOperand(sp, kSystemPointerSize * 3)); + StoreP(src2, MemOperand(sp, kSystemPointerSize * 2)); + StoreP(src3, MemOperand(sp, kSystemPointerSize)); StoreP(src4, MemOperand(sp, 0)); } @@ -580,11 +580,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { DCHECK(src3 != src5); DCHECK(src4 != src5); - lay(sp, MemOperand(sp, -kPointerSize * 5)); - StoreP(src1, MemOperand(sp, kPointerSize * 4)); - StoreP(src2, MemOperand(sp, kPointerSize * 3)); - StoreP(src3, MemOperand(sp, kPointerSize * 2)); - StoreP(src4, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 5)); + StoreP(src1, MemOperand(sp, kSystemPointerSize * 4)); + StoreP(src2, MemOperand(sp, kSystemPointerSize * 3)); + StoreP(src3, MemOperand(sp, kSystemPointerSize * 2)); + StoreP(src4, MemOperand(sp, kSystemPointerSize)); StoreP(src5, MemOperand(sp, 0)); } @@ -593,36 +593,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Pop two registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2) { LoadP(src2, MemOperand(sp, 0)); - LoadP(src1, MemOperand(sp, kPointerSize)); - la(sp, MemOperand(sp, 2 * kPointerSize)); + LoadP(src1, MemOperand(sp, kSystemPointerSize)); + la(sp, MemOperand(sp, 2 * kSystemPointerSize)); } // Pop three registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Register src3) { LoadP(src3, MemOperand(sp, 0)); - LoadP(src2, MemOperand(sp, kPointerSize)); - LoadP(src1, MemOperand(sp, 2 * kPointerSize)); - la(sp, MemOperand(sp, 3 * kPointerSize)); + LoadP(src2, MemOperand(sp, kSystemPointerSize)); + LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize)); + la(sp, MemOperand(sp, 3 * kSystemPointerSize)); } // Pop four registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Register src3, Register src4) { LoadP(src4, MemOperand(sp, 0)); - LoadP(src3, MemOperand(sp, kPointerSize)); - LoadP(src2, MemOperand(sp, 2 * kPointerSize)); - LoadP(src1, MemOperand(sp, 3 * kPointerSize)); - la(sp, MemOperand(sp, 4 * kPointerSize)); + LoadP(src3, MemOperand(sp, kSystemPointerSize)); + LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize)); + LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize)); + la(sp, MemOperand(sp, 4 * kSystemPointerSize)); } // Pop five registers. 
Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Register src3, Register src4, Register src5) { LoadP(src5, MemOperand(sp, 0)); - LoadP(src4, MemOperand(sp, kPointerSize)); - LoadP(src3, MemOperand(sp, 2 * kPointerSize)); - LoadP(src2, MemOperand(sp, 3 * kPointerSize)); - LoadP(src1, MemOperand(sp, 4 * kPointerSize)); - la(sp, MemOperand(sp, 5 * kPointerSize)); + LoadP(src4, MemOperand(sp, kSystemPointerSize)); + LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize)); + LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize)); + LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize)); + la(sp, MemOperand(sp, 5 * kSystemPointerSize)); } // Push a fixed frame, consisting of lr, fp, constant pool. @@ -1182,12 +1182,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { } void SmiToPtrArrayOffset(Register dst, Register src) { -#if V8_TARGET_ARCH_S390X - STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); - ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2)); +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2); + ShiftLeftP(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift)); #else - STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2); - ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift)); + STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2); + ShiftRightArithP(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2)); #endif } @@ -1201,14 +1201,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void AssertNotSmi(Register object); void AssertSmi(Register object); -#if V8_TARGET_ARCH_S390X +#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH) // Ensure it is permissible to read/write int value directly from // upper half of the smi. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); #endif #if V8_TARGET_LITTLE_ENDIAN -#define SmiWordOffset(offset) (offset + kPointerSize / 2) +#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2) #else #define SmiWordOffset(offset) offset #endif diff --git a/chromium/v8/src/codegen/source-position-table.cc b/chromium/v8/src/codegen/source-position-table.cc index 870241eac69..ba8e5981f06 100644 --- a/chromium/v8/src/codegen/source-position-table.cc +++ b/chromium/v8/src/codegen/source-position-table.cc @@ -31,24 +31,23 @@ using MoreBit = BitField8<bool, 7, 1>; using ValueBits = BitField8<unsigned, 0, 7>; // Helper: Add the offsets from 'other' to 'value'. Also set is_statement. -void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references) +void AddAndSetEntry(PositionTableEntry* value, const PositionTableEntry& other) { - value.code_offset += other.code_offset; - value.source_position += other.source_position; - value.is_statement = other.is_statement; + value->code_offset += other.code_offset; + value->source_position += other.source_position; + value->is_statement = other.is_statement; } // Helper: Subtract the offsets from 'other' from 'value'. -void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references) +void SubtractFromEntry(PositionTableEntry* value, const PositionTableEntry& other) { - value.code_offset -= other.code_offset; - value.source_position -= other.source_position; + value->code_offset -= other.code_offset; + value->source_position -= other.source_position; } // Helper: Encode an integer. 
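The integer encoder below zig-zag-maps the signed value so numbers near zero stay short, then emits it in 7-bit groups with a continuation bit (MoreBit over ValueBits). A self-contained sketch of that scheme for int32_t, without the BitField8 helpers:

#include <cstdint>
#include <vector>

// Zig-zag map: 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ... so the varint
// stays short for values near zero.
inline void EncodeZigZagVarint(std::vector<uint8_t>* bytes, int32_t value) {
  uint32_t encoded =
      (static_cast<uint32_t>(value) << 1) ^ static_cast<uint32_t>(value >> 31);
  bool more;
  do {
    more = encoded > 0x7F;                 // does another group follow?
    uint8_t current = (more ? 0x80 : 0) |  // continuation bit (MoreBit)
                      static_cast<uint8_t>(encoded & 0x7F);  // 7 value bits
    bytes->push_back(current);
    encoded >>= 7;
  } while (more);
}

// Example: -3 zig-zags to 5 and is emitted as the single byte 0x05.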
template <typename T> -void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references) - T value) { +void EncodeInt(std::vector<byte>* bytes, T value) { using unsigned_type = typename std::make_unsigned<T>::type; // Zig-zag encoding. static const int kShift = sizeof(T) * kBitsPerByte - 1; @@ -60,14 +59,13 @@ void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references) more = encoded > ValueBits::kMax; byte current = MoreBit::encode(more) | ValueBits::encode(encoded & ValueBits::kMask); - bytes.push_back(current); + bytes->push_back(current); encoded >>= ValueBits::kSize; } while (more); } // Encode a PositionTableEntry. -void EncodeEntry(std::vector<byte>& bytes, // NOLINT(runtime/references) - const PositionTableEntry& entry) { +void EncodeEntry(std::vector<byte>* bytes, const PositionTableEntry& entry) { // We only accept ascending code offsets. DCHECK_GE(entry.code_offset, 0); // Since code_offset is not negative, we use sign to encode is_statement. @@ -115,17 +113,16 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) { } #ifdef ENABLE_SLOW_DCHECKS -void CheckTableEquals( - std::vector<PositionTableEntry>& raw_entries, // NOLINT(runtime/references) - SourcePositionTableIterator& encoded) { // NOLINT(runtime/references) +void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries, + SourcePositionTableIterator* encoded) { // Brute force testing: Record all positions and decode // the entire table to verify they are identical. auto raw = raw_entries.begin(); - for (; !encoded.done(); encoded.Advance(), raw++) { + for (; !encoded->done(); encoded->Advance(), raw++) { DCHECK(raw != raw_entries.end()); - DCHECK_EQ(encoded.code_offset(), raw->code_offset); - DCHECK_EQ(encoded.source_position().raw(), raw->source_position); - DCHECK_EQ(encoded.is_statement(), raw->is_statement); + DCHECK_EQ(encoded->code_offset(), raw->code_offset); + DCHECK_EQ(encoded->source_position().raw(), raw->source_position); + DCHECK_EQ(encoded->is_statement(), raw->is_statement); } DCHECK(raw == raw_entries.end()); } @@ -148,8 +145,8 @@ void SourcePositionTableBuilder::AddPosition(size_t code_offset, void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) { PositionTableEntry tmp(entry); - SubtractFromEntry(tmp, previous_); - EncodeEntry(bytes_, tmp); + SubtractFromEntry(&tmp, previous_); + EncodeEntry(&bytes_, tmp); previous_ = entry; #ifdef ENABLE_SLOW_DCHECKS raw_entries_.push_back(entry); @@ -169,7 +166,7 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable( // Brute force testing: Record all positions and decode // the entire table to verify they are identical. SourcePositionTableIterator it(*table, SourcePositionTableIterator::kAll); - CheckTableEquals(raw_entries_, it); + CheckTableEquals(raw_entries_, &it); // No additional source positions after creating the table. mode_ = OMIT_SOURCE_POSITIONS; #endif @@ -187,7 +184,7 @@ OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() { // the entire table to verify they are identical. SourcePositionTableIterator it(table.as_vector(), SourcePositionTableIterator::kAll); - CheckTableEquals(raw_entries_, it); + CheckTableEquals(raw_entries_, &it); // No additional source positions after creating the table. 
mode_ = OMIT_SOURCE_POSITIONS; #endif @@ -232,7 +229,7 @@ void SourcePositionTableIterator::Advance() { } else { PositionTableEntry tmp; DecodeEntry(bytes, &index_, &tmp); - AddAndSetEntry(current_, tmp); + AddAndSetEntry(¤t_, tmp); SourcePosition p = source_position(); filter_satisfied = (filter_ == kAll) || (filter_ == kJavaScriptOnly && p.IsJavaScript()) || diff --git a/chromium/v8/src/codegen/tnode.h b/chromium/v8/src/codegen/tnode.h new file mode 100644 index 00000000000..1f6c627929b --- /dev/null +++ b/chromium/v8/src/codegen/tnode.h @@ -0,0 +1,374 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_TNODE_H_ +#define V8_CODEGEN_TNODE_H_ + +#include "src/codegen/machine-type.h" + +namespace v8 { +namespace internal { + +class HeapNumber; +class BigInt; +class Object; + +namespace compiler { + +class Node; + +} + +struct UntaggedT {}; + +struct IntegralT : UntaggedT {}; + +struct WordT : IntegralT { + static const MachineRepresentation kMachineRepresentation = + (kSystemPointerSize == 4) ? MachineRepresentation::kWord32 + : MachineRepresentation::kWord64; +}; + +struct RawPtrT : WordT { + static constexpr MachineType kMachineType = MachineType::Pointer(); +}; + +template <class To> +struct RawPtr : RawPtrT {}; + +struct Word32T : IntegralT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kWord32; +}; +struct Int32T : Word32T { + static constexpr MachineType kMachineType = MachineType::Int32(); +}; +struct Uint32T : Word32T { + static constexpr MachineType kMachineType = MachineType::Uint32(); +}; +struct Int16T : Int32T { + static constexpr MachineType kMachineType = MachineType::Int16(); +}; +struct Uint16T : Uint32T, Int32T { + static constexpr MachineType kMachineType = MachineType::Uint16(); +}; +struct Int8T : Int16T { + static constexpr MachineType kMachineType = MachineType::Int8(); +}; +struct Uint8T : Uint16T, Int16T { + static constexpr MachineType kMachineType = MachineType::Uint8(); +}; + +struct Word64T : IntegralT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kWord64; +}; +struct Int64T : Word64T { + static constexpr MachineType kMachineType = MachineType::Int64(); +}; +struct Uint64T : Word64T { + static constexpr MachineType kMachineType = MachineType::Uint64(); +}; + +struct IntPtrT : WordT { + static constexpr MachineType kMachineType = MachineType::IntPtr(); +}; +struct UintPtrT : WordT { + static constexpr MachineType kMachineType = MachineType::UintPtr(); +}; + +struct Float32T : UntaggedT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kFloat32; + static constexpr MachineType kMachineType = MachineType::Float32(); +}; + +struct Float64T : UntaggedT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kFloat64; + static constexpr MachineType kMachineType = MachineType::Float64(); +}; + +#ifdef V8_COMPRESS_POINTERS +using TaggedT = Int32T; +#else +using TaggedT = IntPtrT; +#endif + +// Result of a comparison operation. +struct BoolT : Word32T {}; + +// Value type of a Turbofan node with two results. +template <class T1, class T2> +struct PairT {}; + +inline constexpr MachineType CommonMachineType(MachineType type1, + MachineType type2) { + return (type1 == type2) ? type1 + : ((type1.IsTagged() && type2.IsTagged()) + ? 
MachineType::AnyTagged() + : MachineType::None()); +} + +template <class Type, class Enable = void> +struct MachineTypeOf { + static constexpr MachineType value = Type::kMachineType; +}; + +template <class Type, class Enable> +constexpr MachineType MachineTypeOf<Type, Enable>::value; + +template <> +struct MachineTypeOf<Object> { + static constexpr MachineType value = MachineType::AnyTagged(); +}; +template <> +struct MachineTypeOf<MaybeObject> { + static constexpr MachineType value = MachineType::AnyTagged(); +}; +template <> +struct MachineTypeOf<Smi> { + static constexpr MachineType value = MachineType::TaggedSigned(); +}; +template <class HeapObjectSubtype> +struct MachineTypeOf<HeapObjectSubtype, + typename std::enable_if<std::is_base_of< + HeapObject, HeapObjectSubtype>::value>::type> { + static constexpr MachineType value = MachineType::TaggedPointer(); +}; + +template <class HeapObjectSubtype> +constexpr MachineType MachineTypeOf< + HeapObjectSubtype, typename std::enable_if<std::is_base_of< + HeapObject, HeapObjectSubtype>::value>::type>::value; + +template <class Type, class Enable = void> +struct MachineRepresentationOf { + static const MachineRepresentation value = Type::kMachineRepresentation; +}; +template <class T> +struct MachineRepresentationOf< + T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> { + static const MachineRepresentation value = + MachineTypeOf<T>::value.representation(); +}; +template <class T> +struct MachineRepresentationOf< + T, typename std::enable_if<std::is_base_of<MaybeObject, T>::value>::type> { + static const MachineRepresentation value = + MachineTypeOf<T>::value.representation(); +}; +template <> +struct MachineRepresentationOf<ExternalReference> { + static const MachineRepresentation value = RawPtrT::kMachineRepresentation; +}; + +template <class T> +struct is_valid_type_tag { + static const bool value = std::is_base_of<Object, T>::value || + std::is_base_of<UntaggedT, T>::value || + std::is_base_of<MaybeObject, T>::value || + std::is_same<ExternalReference, T>::value; + static const bool is_tagged = std::is_base_of<Object, T>::value || + std::is_base_of<MaybeObject, T>::value; +}; + +template <class T1, class T2> +struct is_valid_type_tag<PairT<T1, T2>> { + static const bool value = + is_valid_type_tag<T1>::value && is_valid_type_tag<T2>::value; + static const bool is_tagged = false; +}; + +template <class T1, class T2> +struct UnionT; + +template <class T1, class T2> +struct is_valid_type_tag<UnionT<T1, T2>> { + static const bool is_tagged = + is_valid_type_tag<T1>::is_tagged && is_valid_type_tag<T2>::is_tagged; + static const bool value = is_tagged; +}; + +template <class T1, class T2> +struct UnionT { + static constexpr MachineType kMachineType = + CommonMachineType(MachineTypeOf<T1>::value, MachineTypeOf<T2>::value); + static const MachineRepresentation kMachineRepresentation = + kMachineType.representation(); + static_assert(kMachineRepresentation != MachineRepresentation::kNone, + "no common representation"); + static_assert(is_valid_type_tag<T1>::is_tagged && + is_valid_type_tag<T2>::is_tagged, + "union types are only possible for tagged values"); +}; + +using AnyTaggedT = UnionT<Object, MaybeObject>; +using Number = UnionT<Smi, HeapNumber>; +using Numeric = UnionT<Number, BigInt>; + +// A pointer to a builtin function, used by Torque's function pointers. 
+using BuiltinPtr = Smi; + +class int31_t { + public: + int31_t() : value_(0) {} + int31_t(int value) : value_(value) { // NOLINT(runtime/explicit) + DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); + } + int31_t& operator=(int value) { + DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); + value_ = value; + return *this; + } + int32_t value() const { return value_; } + operator int32_t() const { return value_; } + + private: + int32_t value_; +}; + +template <class T, class U> +struct is_subtype { + static const bool value = std::is_base_of<U, T>::value; +}; +template <class T1, class T2, class U> +struct is_subtype<UnionT<T1, T2>, U> { + static const bool value = + is_subtype<T1, U>::value && is_subtype<T2, U>::value; +}; +template <class T, class U1, class U2> +struct is_subtype<T, UnionT<U1, U2>> { + static const bool value = + is_subtype<T, U1>::value || is_subtype<T, U2>::value; +}; +template <class T1, class T2, class U1, class U2> +struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> { + static const bool value = + (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) && + (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value); +}; + +template <class T, class U> +struct types_have_common_values { + static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value; +}; +template <class U> +struct types_have_common_values<BoolT, U> { + static const bool value = types_have_common_values<Word32T, U>::value; +}; +template <class U> +struct types_have_common_values<Uint32T, U> { + static const bool value = types_have_common_values<Word32T, U>::value; +}; +template <class U> +struct types_have_common_values<Int32T, U> { + static const bool value = types_have_common_values<Word32T, U>::value; +}; +template <class U> +struct types_have_common_values<Uint64T, U> { + static const bool value = types_have_common_values<Word64T, U>::value; +}; +template <class U> +struct types_have_common_values<Int64T, U> { + static const bool value = types_have_common_values<Word64T, U>::value; +}; +template <class U> +struct types_have_common_values<IntPtrT, U> { + static const bool value = types_have_common_values<WordT, U>::value; +}; +template <class U> +struct types_have_common_values<UintPtrT, U> { + static const bool value = types_have_common_values<WordT, U>::value; +}; +template <class T1, class T2, class U> +struct types_have_common_values<UnionT<T1, T2>, U> { + static const bool value = types_have_common_values<T1, U>::value || + types_have_common_values<T2, U>::value; +}; + +template <class T, class U1, class U2> +struct types_have_common_values<T, UnionT<U1, U2>> { + static const bool value = types_have_common_values<T, U1>::value || + types_have_common_values<T, U2>::value; +}; +template <class T1, class T2, class U1, class U2> +struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> { + static const bool value = types_have_common_values<T1, U1>::value || + types_have_common_values<T1, U2>::value || + types_have_common_values<T2, U1>::value || + types_have_common_values<T2, U2>::value; +}; + +template <class T> +struct types_have_common_values<T, MaybeObject> { + static const bool value = types_have_common_values<T, Object>::value; +}; + +template <class T> +struct types_have_common_values<MaybeObject, T> { + static const bool value = types_have_common_values<Object, T>::value; +}; + +// TNode<T> is an SSA value with the static type tag T, which is one of the +// following: +// - a subclass of internal::Object represents a tagged type +// - a 
subclass of internal::UntaggedT represents an untagged type +// - ExternalReference +// - PairT<T1, T2> for an operation returning two values, with types T1 +// and T2 +// - UnionT<T1, T2> represents either a value of type T1 or of type T2. +template <class T> +class TNode { + public: + template <class U, + typename std::enable_if<is_subtype<U, T>::value, int>::type = 0> + TNode(const TNode<U>& other) : node_(other) { + LazyTemplateChecks(); + } + TNode() : TNode(nullptr) {} + + TNode operator=(TNode other) { + DCHECK_NOT_NULL(other.node_); + node_ = other.node_; + return *this; + } + + bool is_null() { return node_ == nullptr; } + + operator compiler::Node*() const { return node_; } + + static TNode UncheckedCast(compiler::Node* node) { return TNode(node); } + + protected: + explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); } + + private: + // These checks shouldn't be checked before TNode is actually used. + void LazyTemplateChecks() { + static_assert(is_valid_type_tag<T>::value, "invalid type tag"); + } + + compiler::Node* node_; +}; + +// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from +// Node*. It is intended for function arguments as long as some call sites +// still use untyped Node* arguments. +// TODO(tebbi): Delete this class once transition is finished. +template <class T> +class SloppyTNode : public TNode<T> { + public: + SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit) + : TNode<T>(node) {} + template <class U, typename std::enable_if<is_subtype<U, T>::value, + int>::type = 0> + SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit) + : TNode<T>(other) {} +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_TNODE_H_ diff --git a/chromium/v8/src/codegen/turbo-assembler.h b/chromium/v8/src/codegen/turbo-assembler.h index 3a3e65a41e7..c0f833b6527 100644 --- a/chromium/v8/src/codegen/turbo-assembler.h +++ b/chromium/v8/src/codegen/turbo-assembler.h @@ -5,6 +5,8 @@ #ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_ #define V8_CODEGEN_TURBO_ASSEMBLER_H_ +#include <memory> + #include "src/base/template-utils.h" #include "src/builtins/builtins.h" #include "src/codegen/assembler-arch.h" @@ -100,7 +102,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { static bool IsAddressableThroughRootRegister( Isolate* isolate, const ExternalReference& reference); -#if V8_OS_WIN +#if V8_TARGET_OS_WIN // Minimum page size. We must touch memory once per page when expanding the // stack, to avoid access violations. 
static constexpr int kStackPageSize = 4 * KB; diff --git a/chromium/v8/src/codegen/x64/assembler-x64-inl.h b/chromium/v8/src/codegen/x64/assembler-x64-inl.h index f5d0c0ffcf5..d8457d9d3e3 100644 --- a/chromium/v8/src/codegen/x64/assembler-x64-inl.h +++ b/chromium/v8/src/codegen/x64/assembler-x64-inl.h @@ -218,6 +218,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) { void Assembler::set_target_address_at(Address pc, Address constant_pool, Address target, ICacheFlushMode icache_flush_mode) { + DCHECK(is_int32(target - pc - 4)); WriteUnalignedValue(pc, static_cast<int32_t>(target - pc - 4)); if (icache_flush_mode != SKIP_ICACHE_FLUSH) { FlushInstructionCache(pc, sizeof(int32_t)); @@ -363,7 +364,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, if (icache_flush_mode != SKIP_ICACHE_FLUSH) { FlushInstructionCache(pc_, sizeof(Address)); } - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/chromium/v8/src/codegen/x64/assembler-x64.cc b/chromium/v8/src/codegen/x64/assembler-x64.cc index 1783da700ba..16791a64539 100644 --- a/chromium/v8/src/codegen/x64/assembler-x64.cc +++ b/chromium/v8/src/codegen/x64/assembler-x64.cc @@ -327,8 +327,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset(); switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - Handle<HeapNumber> object = isolate->factory()->NewHeapNumber( - request.heap_number(), AllocationType::kOld); + Handle<HeapNumber> object = + isolate->factory()->NewHeapNumber<AllocationType::kOld>( + request.heap_number()); WriteUnalignedValue(pc, object); break; } @@ -1777,6 +1778,13 @@ void Assembler::emit_mov(Register dst, Immediate64 value, int size) { } } +void Assembler::movq_imm64(Register dst, int64_t value) { + EnsureSpace ensure_space(this); + emit_rex(dst, kInt64Size); + emit(0xB8 | dst.low_bits()); + emitq(static_cast<uint64_t>(value)); +} + void Assembler::movq_heap_number(Register dst, double value) { EnsureSpace ensure_space(this); emit_rex(dst, kInt64Size); @@ -1963,6 +1971,13 @@ void Assembler::emit_repmovs(int size) { emit(0xA5); } +void Assembler::repstosq() { + EnsureSpace ensure_space(this); + emit(0xF3); + emit_rex_64(); + emit(0xAB); +} + void Assembler::mull(Register src) { EnsureSpace ensure_space(this); emit_optional_rex_32(src); @@ -4099,6 +4114,42 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1, emit_sse_operand(dst, src2); } +void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1, + XMMRegister src2) { + DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0); + emit(op); + emit_sse_operand(dst, src2); +} + +void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1, + Operand src2) { + DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0); + emit(op); + emit_sse_operand(dst, src2); +} + +void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1, + XMMRegister src2) { + DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1); + emit(op); + emit_sse_operand(dst, src2); +} + +void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1, + Operand src2) { + 
DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1); + emit(op); + emit_sse_operand(dst, src2); +} + void Assembler::vmovd(XMMRegister dst, Register src) { DCHECK(IsEnabled(AVX)); EnsureSpace ensure_space(this); diff --git a/chromium/v8/src/codegen/x64/assembler-x64.h b/chromium/v8/src/codegen/x64/assembler-x64.h index 7c69b4c4736..74cfd0ab850 100644 --- a/chromium/v8/src/codegen/x64/assembler-x64.h +++ b/chromium/v8/src/codegen/x64/assembler-x64.h @@ -39,6 +39,7 @@ #include <deque> #include <map> +#include <memory> #include <vector> #include "src/codegen/assembler.h" @@ -155,7 +156,9 @@ enum ScaleFactor : int8_t { times_4 = 2, times_8 = 3, times_int_size = times_4, - times_system_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4, + + times_half_system_pointer_size = times_4, + times_system_pointer_size = times_8, times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4, }; @@ -513,12 +516,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movq_string(Register dst, const StringConstantBase* str); - // Loads a 64-bit immediate into a register. + // Loads a 64-bit immediate into a register, potentially using the constant + // pool. void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); } void movq(Register dst, uint64_t value) { movq(dst, Immediate64(static_cast<int64_t>(value))); } + // Loads a 64-bit immediate into a register without using the constant pool. + void movq_imm64(Register dst, int64_t value); + void movsxbl(Register dst, Register src); void movsxbl(Register dst, Operand src); void movsxbq(Register dst, Register src); @@ -531,12 +538,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movsxlq(Register dst, Operand src); // Repeated moves. - void repmovsb(); void repmovsw(); void repmovsl() { emit_repmovs(kInt32Size); } void repmovsq() { emit_repmovs(kInt64Size); } + // Repeated store of quadwords (fill RCX quadwords at [RDI] with RAX). + void repstosq(); + // Instruction to load from an immediate 64-bit pointer into RAX. 
void load_rax(Address value, RelocInfo::Mode rmode); void load_rax(ExternalReference ext); @@ -1295,6 +1304,36 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2); + void vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmaps(0xb8, dst, src1, src2); + } + void vfmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmaps(0xb8, dst, src1, src2); + } + void vfnmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmaps(0xbc, dst, src1, src2); + } + void vfnmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmaps(0xbc, dst, src1, src2); + } + void vfmaps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); + void vfmaps(byte op, XMMRegister dst, XMMRegister src1, Operand src2); + + void vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmapd(0xb8, dst, src1, src2); + } + void vfmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmapd(0xb8, dst, src1, src2); + } + void vfnmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmapd(0xbc, dst, src1, src2); + } + void vfnmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmapd(0xbc, dst, src1, src2); + } + void vfmapd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); + void vfmapd(byte op, XMMRegister dst, XMMRegister src1, Operand src2); + void vmovd(XMMRegister dst, Register src); void vmovd(XMMRegister dst, Operand src); void vmovd(Register dst, XMMRegister src); @@ -1330,7 +1369,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { impl(opcode, dst, src1, src2); \ } - AVX_SP_3(vsqrt, 0x51) + // vsqrtpd is defined by sqrtpd in SSE2_INSTRUCTION_LIST + AVX_S_3(vsqrt, 0x51) + AVX_3(vsqrtps, 0x51, vps) AVX_S_3(vadd, 0x58) AVX_S_3(vsub, 0x5c) AVX_S_3(vmul, 0x59) diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc index 4deeb1bc02d..d02b95b38e1 100644 --- a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc @@ -218,45 +218,45 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) { void TurboAssembler::LoadTaggedPointerField(Register destination, Operand field_operand) { -#ifdef V8_COMPRESS_POINTERS - DecompressTaggedPointer(destination, field_operand); -#else - mov_tagged(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DecompressTaggedPointer(destination, field_operand); + } else { + mov_tagged(destination, field_operand); + } } void TurboAssembler::LoadAnyTaggedField(Register destination, Operand field_operand, Register scratch) { -#ifdef V8_COMPRESS_POINTERS - DecompressAnyTagged(destination, field_operand, scratch); -#else - mov_tagged(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DecompressAnyTagged(destination, field_operand, scratch); + } else { + mov_tagged(destination, field_operand); + } } void TurboAssembler::PushTaggedPointerField(Operand field_operand, Register scratch) { -#ifdef V8_COMPRESS_POINTERS - DCHECK(!field_operand.AddressUsesRegister(scratch)); - DecompressTaggedPointer(scratch, field_operand); - Push(scratch); -#else - Push(field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DCHECK(!field_operand.AddressUsesRegister(scratch)); + DecompressTaggedPointer(scratch, field_operand); + Push(scratch); + } else { + Push(field_operand); + } } void 
TurboAssembler::PushTaggedAnyField(Operand field_operand, Register scratch1, Register scratch2) { -#ifdef V8_COMPRESS_POINTERS - DCHECK(!AreAliased(scratch1, scratch2)); - DCHECK(!field_operand.AddressUsesRegister(scratch1)); - DCHECK(!field_operand.AddressUsesRegister(scratch2)); - DecompressAnyTagged(scratch1, field_operand, scratch2); - Push(scratch1); -#else - Push(field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DCHECK(!AreAliased(scratch1, scratch2)); + DCHECK(!field_operand.AddressUsesRegister(scratch1)); + DCHECK(!field_operand.AddressUsesRegister(scratch2)); + DecompressAnyTagged(scratch1, field_operand, scratch2); + Push(scratch1); + } else { + Push(field_operand); + } } void TurboAssembler::SmiUntagField(Register dst, Operand src) { @@ -265,44 +265,40 @@ void TurboAssembler::SmiUntagField(Register dst, Operand src) { void TurboAssembler::StoreTaggedField(Operand dst_field_operand, Immediate value) { -#ifdef V8_COMPRESS_POINTERS - RecordComment("[ StoreTagged"); - movl(dst_field_operand, value); - RecordComment("]"); -#else - movq(dst_field_operand, value); -#endif + if (COMPRESS_POINTERS_BOOL) { + movl(dst_field_operand, value); + } else { + movq(dst_field_operand, value); + } } void TurboAssembler::StoreTaggedField(Operand dst_field_operand, Register value) { -#ifdef V8_COMPRESS_POINTERS - RecordComment("[ StoreTagged"); - movl(dst_field_operand, value); - RecordComment("]"); -#else - movq(dst_field_operand, value); -#endif + if (COMPRESS_POINTERS_BOOL) { + movl(dst_field_operand, value); + } else { + movq(dst_field_operand, value); + } } void TurboAssembler::DecompressTaggedSigned(Register destination, Operand field_operand) { RecordComment("[ DecompressTaggedSigned"); - movsxlq(destination, field_operand); + movl(destination, field_operand); RecordComment("]"); } void TurboAssembler::DecompressTaggedSigned(Register destination, Register source) { RecordComment("[ DecompressTaggedSigned"); - movsxlq(destination, source); + movl(destination, source); RecordComment("]"); } void TurboAssembler::DecompressTaggedPointer(Register destination, Operand field_operand) { RecordComment("[ DecompressTaggedPointer"); - movsxlq(destination, field_operand); + movl(destination, field_operand); addq(destination, kRootRegister); RecordComment("]"); } @@ -310,30 +306,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination, void TurboAssembler::DecompressTaggedPointer(Register destination, Register source) { RecordComment("[ DecompressTaggedPointer"); - movsxlq(destination, source); + movl(destination, source); addq(destination, kRootRegister); RecordComment("]"); } void TurboAssembler::DecompressRegisterAnyTagged(Register destination, Register scratch) { - if (kUseBranchlessPtrDecompressionInGeneratedCode) { - // Branchlessly compute |masked_root|: - // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; - STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32)); - Register masked_root = scratch; - xorq(masked_root, masked_root); - Condition smi = CheckSmi(destination); - cmovq(NegateCondition(smi), masked_root, kRootRegister); - // Now this add operation will either leave the value unchanged if it is - // a smi or add the isolate root if it is a heap object. 
- addq(destination, masked_root); - } else { - Label done; - JumpIfSmi(destination, &done); - addq(destination, kRootRegister); - bind(&done); - } + addq(destination, kRootRegister); } void TurboAssembler::DecompressAnyTagged(Register destination, @@ -341,7 +321,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register scratch) { DCHECK(!AreAliased(destination, scratch)); RecordComment("[ DecompressAnyTagged"); - movsxlq(destination, field_operand); + movl(destination, field_operand); DecompressRegisterAnyTagged(destination, scratch); RecordComment("]"); } @@ -350,7 +330,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register source, Register scratch) { DCHECK(!AreAliased(destination, scratch)); RecordComment("[ DecompressAnyTagged"); - movsxlq(destination, source); + movl(destination, source); DecompressRegisterAnyTagged(destination, scratch); RecordComment("]"); } @@ -1109,7 +1089,11 @@ Register TurboAssembler::GetSmiConstant(Smi source) { xorl(kScratchRegister, kScratchRegister); return kScratchRegister; } - Move(kScratchRegister, source); + if (SmiValuesAre32Bits()) { + Move(kScratchRegister, source); + } else { + movl(kScratchRegister, Immediate(source)); + } return kScratchRegister; } @@ -1133,20 +1117,47 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) { movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE)); } -void MacroAssembler::SmiTag(Register dst, Register src) { +void MacroAssembler::SmiTag(Register reg) { STATIC_ASSERT(kSmiTag == 0); - if (dst != src) { + DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); + if (COMPRESS_POINTERS_BOOL) { + shll(reg, Immediate(kSmiShift)); + } else { + shlq(reg, Immediate(kSmiShift)); + } +} + +void MacroAssembler::SmiTag(Register dst, Register src) { + DCHECK(dst != src); + if (COMPRESS_POINTERS_BOOL) { + movl(dst, src); + } else { movq(dst, src); } + SmiTag(dst); +} + +void TurboAssembler::SmiUntag(Register reg) { + STATIC_ASSERT(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); - shlq(dst, Immediate(kSmiShift)); + // TODO(v8:7703): Is there a way to avoid this sign extension when pointer + // compression is enabled? + if (COMPRESS_POINTERS_BOOL) { + movsxlq(reg, reg); + } + sarq(reg, Immediate(kSmiShift)); } void TurboAssembler::SmiUntag(Register dst, Register src) { - STATIC_ASSERT(kSmiTag == 0); - if (dst != src) { + DCHECK(dst != src); + if (COMPRESS_POINTERS_BOOL) { + movsxlq(dst, src); + } else { movq(dst, src); } + // TODO(v8:7703): Call SmiUntag(reg) if we can find a way to avoid the extra + // mov when pointer compression is enabled. + STATIC_ASSERT(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); sarq(dst, Immediate(kSmiShift)); } @@ -1158,12 +1169,13 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) { movsxlq(dst, dst); } else { DCHECK(SmiValuesAre31Bits()); -#ifdef V8_COMPRESS_POINTERS - movsxlq(dst, src); -#else - movq(dst, src); -#endif - sarq(dst, Immediate(kSmiShift)); + if (COMPRESS_POINTERS_BOOL) { + movsxlq(dst, src); + sarq(dst, Immediate(kSmiShift)); + } else { + movq(dst, src); + sarq(dst, Immediate(kSmiShift)); + } } } @@ -1283,12 +1295,9 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) { return SmiIndex(dst, times_1); } else { DCHECK(SmiValuesAre31Bits()); - if (dst != src) { - mov_tagged(dst, src); - } // We have to sign extend the index register to 64-bit as the SMI might // be negative. 
- movsxlq(dst, dst); + movsxlq(dst, src); if (shift < kSmiShift) { sarq(dst, Immediate(kSmiShift - shift)); } else if (shift != kSmiShift) { @@ -1423,7 +1432,6 @@ void MacroAssembler::Negpd(XMMRegister dst) { } void MacroAssembler::Cmp(Register dst, Handle<Object> source) { - AllowDeferredHandleDereference smi_check; if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { @@ -1433,7 +1441,6 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) { } void MacroAssembler::Cmp(Operand dst, Handle<Object> source) { - AllowDeferredHandleDereference smi_check; if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { @@ -1463,6 +1470,8 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object, RelocInfo::Mode rmode) { if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code) { + // TODO(v8:9706): Fix-it! This load will always uncompress the value + // even when we are loading a compressed embedded object. IndirectLoadConstant(result, object); return; } @@ -1605,26 +1614,20 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { } Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { -#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) - STATIC_ASSERT(kSmiShiftSize == 0); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - - // The builtin_index register contains the builtin index as a Smi. - // Untagging is folded into the indexing operand below (we use times_4 instead - // of times_8 since smis are already shifted by one). - return Operand(kRootRegister, builtin_index, times_4, - IsolateData::builtin_entry_table_offset()); -#else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) - STATIC_ASSERT(kSmiShiftSize == 31); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); + if (SmiValuesAre32Bits()) { + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index); + return Operand(kRootRegister, builtin_index, times_system_pointer_size, + IsolateData::builtin_entry_table_offset()); + } else { + DCHECK(SmiValuesAre31Bits()); - // The builtin_index register contains the builtin index as a Smi. - SmiUntag(builtin_index, builtin_index); - return Operand(kRootRegister, builtin_index, times_8, - IsolateData::builtin_entry_table_offset()); -#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + // The builtin_index register contains the builtin index as a Smi. + // Untagging is folded into the indexing operand below (we use + // times_half_system_pointer_size since smis are already shifted by one). 
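(Hedged arithmetic sketch, helper name and widths assumed: with 31-bit smis the tagged index is already index << 1, so scaling the tagged value by 4, i.e. times_half_system_pointer_size, is equivalent to scaling the untagged index by the 8-byte pointer size.)
#include <cstdint>
uint64_t BuiltinEntryOffsetSketch(uint64_t table_offset, uint32_t index) {
  uint64_t tagged = static_cast<uint64_t>(index) << 1;  // smi-tagged index
  return table_offset + tagged * 4;  // same as table_offset + index * 8
}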
+ return Operand(kRootRegister, builtin_index, times_half_system_pointer_size, + IsolateData::builtin_entry_table_offset()); + } } void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { @@ -1739,7 +1742,11 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) { Movd(dst, src); return; } - if (CpuFeatures::IsSupported(SSE4_1)) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpextrd(dst, src, imm8); + return; + } else if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatureScope sse_scope(this, SSE4_1); pextrd(dst, src, imm8); return; @@ -1749,8 +1756,38 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) { shrq(dst, Immediate(32)); } +void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpextrw(dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pextrw(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpextrb(dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pextrb(dst, src, imm8); + return; + } +} + void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) { - if (CpuFeatures::IsSupported(SSE4_1)) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrd(dst, dst, src, imm8); + return; + } else if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatureScope sse_scope(this, SSE4_1); pinsrd(dst, src, imm8); return; @@ -1765,7 +1802,11 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) { } void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) { - if (CpuFeatures::IsSupported(SSE4_1)) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrd(dst, dst, src, imm8); + return; + } else if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatureScope sse_scope(this, SSE4_1); pinsrd(dst, src, imm8); return; @@ -1779,6 +1820,56 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) { } } +void TurboAssembler::Pinsrw(XMMRegister dst, Register src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrw(dst, dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrw(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrw(dst, dst, src, imm8); + return; + } else { + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrw(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pinsrb(XMMRegister dst, Register src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrb(dst, dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrb(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrb(dst, dst, src, imm8); + return; + } else { + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrb(dst, src, imm8); + return; + } +} + void 
TurboAssembler::Psllq(XMMRegister dst, byte imm8) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -1819,6 +1910,16 @@ void TurboAssembler::Psrld(XMMRegister dst, byte imm8) { } } +void TurboAssembler::Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpshufd(dst, src, shuffle); + } else { + DCHECK(!IsEnabled(AVX)); + pshufd(dst, src, shuffle); + } +} + void TurboAssembler::Lzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); @@ -2278,7 +2379,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx); // On function call, call into the debugger if necessary. - CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + ExternalReference debug_hook_active = + ExternalReference::debug_hook_on_function_call_address(isolate()); + Operand debug_hook_active_operand = + ExternalReferenceAsOperand(debug_hook_active); + cmpb(debug_hook_active_operand, Immediate(0)); + j(not_equal, &debug_hook, Label::kNear); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -2302,8 +2412,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK(flag == JUMP_FUNCTION); JumpCodeObject(rcx); } - bind(&done); } + jmp(&done, Label::kNear); + + // Deferred debug hook. + bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + jmp(&continue_after_hook, Label::kNear); + + bind(&done); } void MacroAssembler::InvokePrologue(const ParameterCount& expected, @@ -2368,50 +2485,38 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - ExternalReference debug_hook_active = - ExternalReference::debug_hook_on_function_call_address(isolate()); - Operand debug_hook_active_operand = - ExternalReferenceAsOperand(debug_hook_active); - cmpb(debug_hook_active_operand, Immediate(0)); - j(equal, &skip_hook); - - { - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - if (expected.is_reg()) { - SmiTag(expected.reg(), expected.reg()); - Push(expected.reg()); - } - if (actual.is_reg()) { - SmiTag(actual.reg(), actual.reg()); - Push(actual.reg()); - SmiUntag(actual.reg(), actual.reg()); - } - if (new_target.is_valid()) { - Push(new_target); - } - Push(fun); - Push(fun); - Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand()); - CallRuntime(Runtime::kDebugOnFunctionCall); - Pop(fun); - if (new_target.is_valid()) { - Pop(new_target); - } - if (actual.is_reg()) { - Pop(actual.reg()); - SmiUntag(actual.reg(), actual.reg()); - } - if (expected.is_reg()) { - Pop(expected.reg()); - SmiUntag(expected.reg(), expected.reg()); - } +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + FrameScope frame(this, has_frame() ? 
StackFrame::NONE : StackFrame::INTERNAL); + if (expected.is_reg()) { + SmiTag(expected.reg()); + Push(expected.reg()); + } + if (actual.is_reg()) { + SmiTag(actual.reg()); + Push(actual.reg()); + SmiUntag(actual.reg()); + } + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand()); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + if (actual.is_reg()) { + Pop(actual.reg()); + SmiUntag(actual.reg()); + } + if (expected.is_reg()) { + Pop(expected.reg()); + SmiUntag(expected.reg()); } - bind(&skip_hook); } void TurboAssembler::StubPrologue(StackFrame::Type type) { @@ -2443,7 +2548,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { popq(rbp); } -#ifdef V8_OS_WIN +#ifdef V8_TARGET_OS_WIN void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { // In windows, we cannot increment the stack size by more than one page // (minimum page size is 4KB) without accessing at least one byte on the @@ -2511,7 +2616,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax, void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles) { -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN const int kShadowSpace = 4; arg_stack_space += kShadowSpace; #endif @@ -2615,7 +2720,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() { movq(c_entry_fp_operand, Immediate(0)); } -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN static const int kRegisterPassedArguments = 4; #else static const int kRegisterPassedArguments = 6; @@ -2634,7 +2739,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers // and the caller does not reserve stack slots for them. DCHECK_GE(num_arguments, 0); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN const int kMinimumStackSlots = kRegisterPassedArguments; if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots; return num_arguments; diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.h b/chromium/v8/src/codegen/x64/macro-assembler-x64.h index 8e7766c7e19..f38da45788c 100644 --- a/chromium/v8/src/codegen/x64/macro-assembler-x64.h +++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.h @@ -152,8 +152,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP(Roundsd, roundsd) AVX_OP(Sqrtss, sqrtss) AVX_OP(Sqrtsd, sqrtsd) + AVX_OP(Sqrtpd, sqrtpd) AVX_OP(Ucomiss, ucomiss) AVX_OP(Ucomisd, ucomisd) + AVX_OP(Pshufb, pshufb) + AVX_OP(Paddusb, paddusb) + AVX_OP(Psignd, psignd) + AVX_OP(Pand, pand) + AVX_OP(Por, por) + AVX_OP(Pxor, pxor) + AVX_OP(Psubd, psubd) + AVX_OP(Pslld, pslld) + AVX_OP(Psrad, psrad) + AVX_OP(Psrld, psrld) + AVX_OP(Paddd, paddd) + AVX_OP(Pmulld, pmulld) + AVX_OP(Pminsd, pminsd) + AVX_OP(Pminud, pminud) + AVX_OP(Pmaxsd, pmaxsd) + AVX_OP(Pmaxud, pmaxud) + AVX_OP(Pcmpgtd, pcmpgtd) #undef AVX_OP @@ -314,6 +332,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT); // Convert smi to word-size sign-extended value. + void SmiUntag(Register reg); + // Requires dst != src void SmiUntag(Register dst, Register src); void SmiUntag(Register dst, Operand src); @@ -365,14 +385,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Non-SSE2 instructions. 
void Pextrd(Register dst, XMMRegister src, int8_t imm8); + void Pextrw(Register dst, XMMRegister src, int8_t imm8); + void Pextrb(Register dst, XMMRegister src, int8_t imm8); void Pinsrd(XMMRegister dst, Register src, int8_t imm8); void Pinsrd(XMMRegister dst, Operand src, int8_t imm8); + void Pinsrw(XMMRegister dst, Register src, int8_t imm8); + void Pinsrw(XMMRegister dst, Operand src, int8_t imm8); + void Pinsrb(XMMRegister dst, Register src, int8_t imm8); + void Pinsrb(XMMRegister dst, Operand src, int8_t imm8); void Psllq(XMMRegister dst, byte imm8); void Psrlq(XMMRegister dst, byte imm8); void Pslld(XMMRegister dst, byte imm8); void Psrld(XMMRegister dst, byte imm8); + void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle); + void CompareRoot(Register with, RootIndex index); void CompareRoot(Operand with, RootIndex index); @@ -414,7 +442,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // stack check, do it before calling this function because this function may // write into the newly allocated space. It may also overwrite the given // register's value, in the version that takes a register. -#ifdef V8_OS_WIN +#ifdef V8_TARGET_OS_WIN void AllocateStackSpace(Register bytes_scratch); void AllocateStackSpace(int bytes); #else @@ -647,10 +675,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); - // On function call, call into the debugger if necessary. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + // On function call, call into the debugger. + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. @@ -665,6 +693,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Conversions between tagged smi values and non-tagged integer values. // Tag an word-size value. The result must be known to be a valid smi value. + void SmiTag(Register reg); + // Requires dst != src void SmiTag(Register dst, Register src); // Simple comparison of smis. Both sides must be known smis to use these, @@ -917,7 +947,7 @@ inline Operand NativeContextOperand() { // Provides access to exit frame stack space (not GCed). inline Operand StackSpaceOperand(int index) { -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN const int kShaddowSpace = 4; return Operand(rsp, (index + kShaddowSpace) * kSystemPointerSize); #else diff --git a/chromium/v8/src/codegen/x64/register-x64.h b/chromium/v8/src/codegen/x64/register-x64.h index 199571f088d..181da9d9f3a 100644 --- a/chromium/v8/src/codegen/x64/register-x64.h +++ b/chromium/v8/src/codegen/x64/register-x64.h @@ -88,7 +88,7 @@ constexpr int kNumJSCallerSaved = 5; // Number of registers for which space is reserved in safepoints. 
constexpr int kNumSafepointRegisters = 16; -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Windows calling convention constexpr Register arg_reg_1 = rcx; constexpr Register arg_reg_2 = rdx; @@ -100,7 +100,7 @@ constexpr Register arg_reg_1 = rdi; constexpr Register arg_reg_2 = rsi; constexpr Register arg_reg_3 = rdx; constexpr Register arg_reg_4 = rcx; -#endif // _WIN64 +#endif // V8_TARGET_OS_WIN #define DOUBLE_REGISTERS(V) \ V(xmm0) \ diff --git a/chromium/v8/src/codegen/x64/sse-instr.h b/chromium/v8/src/codegen/x64/sse-instr.h index 8ba54e85b42..8af06ae92c8 100644 --- a/chromium/v8/src/codegen/x64/sse-instr.h +++ b/chromium/v8/src/codegen/x64/sse-instr.h @@ -6,6 +6,7 @@ #define V8_CODEGEN_X64_SSE_INSTR_H_ #define SSE2_INSTRUCTION_LIST(V) \ + V(sqrtpd, 66, 0F, 51) \ V(andnpd, 66, 0F, 55) \ V(addpd, 66, 0F, 58) \ V(mulpd, 66, 0F, 59) \ diff --git a/chromium/v8/src/common/assert-scope.cc b/chromium/v8/src/common/assert-scope.cc index f1fe717cc09..5138ce71221 100644 --- a/chromium/v8/src/common/assert-scope.cc +++ b/chromium/v8/src/common/assert-scope.cc @@ -126,8 +126,6 @@ template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>; template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>; template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>; template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>; -template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>; -template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>; template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>; template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>; diff --git a/chromium/v8/src/common/assert-scope.h b/chromium/v8/src/common/assert-scope.h index 73729400ac6..27f41121445 100644 --- a/chromium/v8/src/common/assert-scope.h +++ b/chromium/v8/src/common/assert-scope.h @@ -28,7 +28,6 @@ enum PerThreadAssertType { HEAP_ALLOCATION_ASSERT, HANDLE_ALLOCATION_ASSERT, HANDLE_DEREFERENCE_ASSERT, - DEFERRED_HANDLE_DEREFERENCE_ASSERT, CODE_DEPENDENCY_CHANGE_ASSERT, LAST_PER_THREAD_ASSERT_TYPE }; @@ -145,19 +144,11 @@ using DisallowHandleDereference = using AllowHandleDereference = PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>; -// Scope to document where we do not expect deferred handles to be dereferenced. -using DisallowDeferredHandleDereference = - PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>; - -// Scope to introduce an exception to DisallowDeferredHandleDereference. -using AllowDeferredHandleDereference = - PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>; - -// Scope to document where we do not expect deferred handles to be dereferenced. +// Scope to document where we do not expect code dependencies to change. using DisallowCodeDependencyChange = PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>; -// Scope to introduce an exception to DisallowDeferredHandleDereference. +// Scope to introduce an exception to DisallowCodeDependencyChange. 
using AllowCodeDependencyChange = PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>; @@ -243,10 +234,6 @@ extern template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>; extern template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>; extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>; extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>; -extern template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, - false>; -extern template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, - true>; extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>; extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>; diff --git a/chromium/v8/src/common/globals.h b/chromium/v8/src/common/globals.h index a0584b95c40..20faebfe3a1 100644 --- a/chromium/v8/src/common/globals.h +++ b/chromium/v8/src/common/globals.h @@ -166,13 +166,14 @@ constexpr int kElidedFrameSlots = 0; #endif constexpr int kDoubleSizeLog2 = 3; +constexpr size_t kMaxWasmCodeMB = 1024; +constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB; #if V8_TARGET_ARCH_ARM64 // ARM64 only supports direct calls within a 128 MB range. -constexpr size_t kMaxWasmCodeMB = 128; +constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB; #else -constexpr size_t kMaxWasmCodeMB = 1024; +constexpr size_t kMaxWasmCodeSpaceSize = kMaxWasmCodeMemory; #endif -constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB; #if V8_HOST_ARCH_64_BIT constexpr int kSystemPointerSizeLog2 = 3; @@ -230,7 +231,7 @@ constexpr int kTaggedSizeLog2 = 2; // These types define raw and atomic storage types for tagged values stored // on V8 heap. -using Tagged_t = int32_t; +using Tagged_t = uint32_t; using AtomicTagged_t = base::Atomic32; #else @@ -245,11 +246,6 @@ using AtomicTagged_t = base::AtomicWord; #endif // V8_COMPRESS_POINTERS -// Defines whether the branchless or branchful implementation of pointer -// decompression should be used. -constexpr bool kUseBranchlessPtrDecompressionInRuntime = false; -constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false; - STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2)); STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES); @@ -795,8 +791,6 @@ enum InlineCacheState { NO_FEEDBACK, // Has never been executed. UNINITIALIZED, - // Has been executed but monomorphic state has been delayed. - PREMONOMORPHIC, // Has been executed and only one receiver type has been seen. MONOMORPHIC, // Check failed due to prototype (or map deprecation). @@ -816,8 +810,6 @@ inline const char* InlineCacheState2String(InlineCacheState state) { return "NOFEEDBACK"; case UNINITIALIZED: return "UNINITIALIZED"; - case PREMONOMORPHIC: - return "PREMONOMORPHIC"; case MONOMORPHIC: return "MONOMORPHIC"; case RECOMPUTE_HANDLER: @@ -1216,6 +1208,10 @@ enum VariableLocation : uint8_t { // immediately initialized upon creation (kCreatedInitialized). enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized }; +// Static variables can only be used with the class in the closest +// class scope as receivers. 
+enum class IsStaticFlag : uint8_t { kNotStatic, kStatic }; + enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned }; enum class InterpreterPushArgsMode : unsigned { diff --git a/chromium/v8/src/common/message-template.h b/chromium/v8/src/common/message-template.h index e3307a525c8..41af7b8f18a 100644 --- a/chromium/v8/src/common/message-template.h +++ b/chromium/v8/src/common/message-template.h @@ -10,7 +10,6 @@ namespace v8 { namespace internal { -// TODO(913887): fix the use of 'neuter' in these error messages. #define MESSAGE_TEMPLATES(T) \ /* Error */ \ T(None, "") \ @@ -34,7 +33,6 @@ namespace internal { "Derived ArrayBuffer constructor created a buffer which was too small") \ T(ArrayBufferSpeciesThis, \ "ArrayBuffer subclass returned this from species constructor") \ - T(ArrayItemNotType, "array %[%] is not type %") \ T(AwaitNotInAsyncFunction, "await is only valid in async function") \ T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \ T(BadSortComparisonFunction, \ @@ -78,7 +76,7 @@ namespace internal { T(DebuggerType, "Debugger: Parameters have wrong types.") \ T(DeclarationMissingInitializer, "Missing initializer in % declaration") \ T(DefineDisallowed, "Cannot define property %, object is not extensible") \ - T(DetachedOperation, "Cannot perform % on a neutered ArrayBuffer") \ + T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \ T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \ T(ExtendsValueNotConstructor, \ "Class extends value % is not a constructor or null") \ @@ -101,6 +99,7 @@ namespace internal { T(InvalidRegExpExecResult, \ "RegExp exec method returned something other than an Object or null") \ T(InvalidUnit, "Invalid unit argument for %() '%'") \ + T(IterableYieldedNonString, "Iterable yielded % which is not a string") \ T(IteratorResultNotAnObject, "Iterator result % is not an object") \ T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \ T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \ @@ -540,6 +539,7 @@ namespace internal { T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \ T(WasmTrapFuncInvalid, "invalid index into function table") \ T(WasmTrapFuncSigMismatch, "function signature mismatch") \ + T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \ T(WasmTrapTypeError, "wasm function signature contains illegal type") \ T(WasmTrapDataSegmentDropped, "data segment has been dropped") \ T(WasmTrapElemSegmentDropped, "element segment has been dropped") \ @@ -554,7 +554,7 @@ namespace internal { T(DataCloneError, "% could not be cloned.") \ T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \ T(DataCloneErrorDetachedArrayBuffer, \ - "An ArrayBuffer is neutered and could not be cloned.") \ + "An ArrayBuffer is detached and could not be cloned.") \ T(DataCloneErrorSharedArrayBufferTransferred, \ "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \ "transferred.") \ diff --git a/chromium/v8/src/common/ptr-compr-inl.h b/chromium/v8/src/common/ptr-compr-inl.h index a8fd7f245cb..17239d15c27 100644 --- a/chromium/v8/src/common/ptr-compr-inl.h +++ b/chromium/v8/src/common/ptr-compr-inl.h @@ -29,8 +29,7 @@ V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) { // signed constant instead of 64-bit constant (the problem is that 2Gb looks // like a negative 32-bit value). It's correct because we will never use // leftmost address of V8 heap as |on_heap_addr|. 
- return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr + - kPtrComprIsolateRootBias - 1); + return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr); } template <> @@ -54,37 +53,20 @@ V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { template <typename TOnHeapAddress> V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - // Current compression scheme requires |raw_value| to be sign-extended - // from int32_t to intptr_t. - intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value)); - Address root = GetIsolateRoot(on_heap_addr); - return root + static_cast<Address>(value); + return GetIsolateRoot(on_heap_addr) + static_cast<Address>(raw_value); } // Decompresses any tagged value, preserving both weak- and smi- tags. template <typename TOnHeapAddress> V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - if (kUseBranchlessPtrDecompressionInRuntime) { - // Current compression scheme requires |raw_value| to be sign-extended - // from int32_t to intptr_t. - intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value)); - // |root_mask| is 0 if the |value| was a smi or -1 otherwise. - Address root_mask = static_cast<Address>(-(value & kSmiTagMask)); - Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr); - return root_or_zero + static_cast<Address>(value); - } else { - return HAS_SMI_TAG(raw_value) - ? DecompressTaggedSigned(raw_value) - : DecompressTaggedPointer(on_heap_addr, raw_value); - } + return DecompressTaggedPointer(on_heap_addr, raw_value); } #ifdef V8_COMPRESS_POINTERS STATIC_ASSERT(kPtrComprHeapReservationSize == Internals::kPtrComprHeapReservationSize); -STATIC_ASSERT(kPtrComprIsolateRootBias == Internals::kPtrComprIsolateRootBias); STATIC_ASSERT(kPtrComprIsolateRootAlignment == Internals::kPtrComprIsolateRootAlignment); diff --git a/chromium/v8/src/common/ptr-compr.h b/chromium/v8/src/common/ptr-compr.h index 5b4a74e7e31..105d5f1a4f6 100644 --- a/chromium/v8/src/common/ptr-compr.h +++ b/chromium/v8/src/common/ptr-compr.h @@ -14,7 +14,6 @@ namespace internal { // See v8:7703 for details about how pointer compression works. 
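(Hedged sketch with simplified stand-in types: after the ptr-compr changes above, decompression no longer sign-extends or branches on the smi tag; it just zero-extends the unsigned 32-bit offset and adds the 4GB-aligned isolate root, for smis and heap objects alike.)
#include <cstdint>
using Address = uintptr_t;  // stand-in for v8::internal::Address
using Tagged_t = uint32_t;  // now unsigned, per the globals.h hunk above
Address DecompressTaggedAnySketch(Address isolate_root, Tagged_t raw_value) {
  return isolate_root + static_cast<Address>(raw_value);
}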
constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB; -constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2; constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB; } // namespace internal diff --git a/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc index e1d47d30a61..42d64b66145 100644 --- a/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc +++ b/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc @@ -6,7 +6,6 @@ #include "src/ast/ast.h" #include "src/base/platform/time.h" -#include "src/base/template-utils.h" #include "src/codegen/compiler.h" #include "src/flags/flags.h" #include "src/handles/global-handles.h" @@ -66,7 +65,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue( if (!IsEnabled()) return base::nullopt; - std::unique_ptr<Job> job = base::make_unique<Job>(new BackgroundCompileTask( + std::unique_ptr<Job> job = std::make_unique<Job>(new BackgroundCompileTask( allocator_, outer_parse_info, function_name, function_literal, worker_thread_runtime_call_stats_, background_compile_timer_, static_cast<int>(max_stack_size_))); diff --git a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc index fbaeaa73f87..3d2342e9a22 100644 --- a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc +++ b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc @@ -5,7 +5,6 @@ #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h" #include "src/base/atomicops.h" -#include "src/base/template-utils.h" #include "src/codegen/compiler.h" #include "src/codegen/optimized-compilation-info.h" #include "src/execution/isolate.h" @@ -244,14 +243,14 @@ void OptimizingCompileDispatcher::QueueForOptimization( blocked_jobs_++; } else { V8::GetCurrentPlatform()->CallOnWorkerThread( - base::make_unique<CompileTask>(isolate_, this)); + std::make_unique<CompileTask>(isolate_, this)); } } void OptimizingCompileDispatcher::Unblock() { while (blocked_jobs_ > 0) { V8::GetCurrentPlatform()->CallOnWorkerThread( - base::make_unique<CompileTask>(isolate_, this)); + std::make_unique<CompileTask>(isolate_, this)); blocked_jobs_--; } } diff --git a/chromium/v8/src/compiler/OWNERS b/chromium/v8/src/compiler/OWNERS index 50e2af71290..204c0ba115e 100644 --- a/chromium/v8/src/compiler/OWNERS +++ b/chromium/v8/src/compiler/OWNERS @@ -8,11 +8,12 @@ tebbi@chromium.org neis@chromium.org mvstanton@chromium.org mslekova@chromium.org +jgruber@chromium.org per-file wasm-*=ahaas@chromium.org per-file wasm-*=bbudge@chromium.org per-file wasm-*=binji@chromium.org -per-file wasm-*=clemensh@chromium.org +per-file wasm-*=clemensb@chromium.org per-file wasm-*=gdeepti@chromium.org per-file int64-lowering.*=ahaas@chromium.org diff --git a/chromium/v8/src/compiler/access-builder.cc b/chromium/v8/src/compiler/access-builder.cc index 7a72be80284..e6c5568af03 100644 --- a/chromium/v8/src/compiler/access-builder.cc +++ b/chromium/v8/src/compiler/access-builder.cc @@ -23,10 +23,9 @@ namespace internal { namespace compiler { // static -FieldAccess AccessBuilder::ForExternalTaggedValue() { - FieldAccess access = {kUntaggedBase, 0, - MaybeHandle<Name>(), MaybeHandle<Map>(), - Type::Any(), MachineType::AnyTagged(), +FieldAccess AccessBuilder::ForExternalIntPtr() { + FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(), + MaybeHandle<Map>(), Type::Any(), MachineType::IntPtr(), 
kNoWriteBarrier}; return access; } @@ -109,7 +108,6 @@ FieldAccess AccessBuilder::ForJSObjectElements() { return access; } - // static FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map, int index) { @@ -185,7 +183,6 @@ FieldAccess AccessBuilder::ForJSFunctionContext() { return access; } - // static FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() { FieldAccess access = { @@ -296,7 +293,6 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() { return access; } - // static FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() { FieldAccess access = { @@ -478,7 +474,6 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) { return access; } - // static FieldAccess AccessBuilder::ForJSIteratorResultDone() { FieldAccess access = { @@ -489,7 +484,6 @@ FieldAccess AccessBuilder::ForJSIteratorResultDone() { return access; } - // static FieldAccess AccessBuilder::ForJSIteratorResultValue() { FieldAccess access = { @@ -540,7 +534,6 @@ FieldAccess AccessBuilder::ForJSRegExpSource() { return access; } - // static FieldAccess AccessBuilder::ForFixedArrayLength() { FieldAccess access = {kTaggedBase, @@ -600,7 +593,6 @@ FieldAccess AccessBuilder::ForMapBitField3() { return access; } - // static FieldAccess AccessBuilder::ForMapDescriptors() { FieldAccess access = { @@ -611,7 +603,6 @@ FieldAccess AccessBuilder::ForMapDescriptors() { return access; } - // static FieldAccess AccessBuilder::ForMapInstanceType() { FieldAccess access = { @@ -621,7 +612,6 @@ FieldAccess AccessBuilder::ForMapInstanceType() { return access; } - // static FieldAccess AccessBuilder::ForMapPrototype() { FieldAccess access = { @@ -810,7 +800,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorString() { // static FieldAccess AccessBuilder::ForJSStringIteratorIndex() { FieldAccess access = {kTaggedBase, - JSStringIterator::kNextIndexOffset, + JSStringIterator::kIndexOffset, Handle<Name>(), MaybeHandle<Map>(), TypeCache::Get()->kStringLengthType, @@ -829,7 +819,6 @@ FieldAccess AccessBuilder::ForArgumentsLength() { return access; } - // static FieldAccess AccessBuilder::ForArgumentsCallee() { FieldAccess access = { @@ -840,7 +829,6 @@ FieldAccess AccessBuilder::ForArgumentsCallee() { return access; } - // static FieldAccess AccessBuilder::ForFixedArraySlot( size_t index, WriteBarrierKind write_barrier_kind) { @@ -852,7 +840,6 @@ FieldAccess AccessBuilder::ForFixedArraySlot( return access; } - // static FieldAccess AccessBuilder::ForCellValue() { FieldAccess access = {kTaggedBase, Cell::kValueOffset, @@ -937,7 +924,7 @@ ElementAccess AccessBuilder::ForStackArgument() { ElementAccess access = { kUntaggedBase, CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize, - Type::NonInternal(), MachineType::AnyTagged(), + Type::NonInternal(), MachineType::Pointer(), WriteBarrierKind::kNoWriteBarrier}; return access; } diff --git a/chromium/v8/src/compiler/access-builder.h b/chromium/v8/src/compiler/access-builder.h index 231e75f8195..4aa69e3726e 100644 --- a/chromium/v8/src/compiler/access-builder.h +++ b/chromium/v8/src/compiler/access-builder.h @@ -24,11 +24,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final // =========================================================================== // Access to external values (based on external references). - // Provides access to a tagged field identified by an external reference. - static FieldAccess ForExternalTaggedValue(); - - // Provides access to an uint8 field identified by an external reference. 
- static FieldAccess ForExternalUint8Value(); + // Provides access to an IntPtr field identified by an external reference. + static FieldAccess ForExternalIntPtr(); // =========================================================================== // Access to heap object fields and elements (based on tagged pointer). diff --git a/chromium/v8/src/compiler/access-info.cc b/chromium/v8/src/compiler/access-info.cc index 269ef903751..dcdd1de831a 100644 --- a/chromium/v8/src/compiler/access-info.cc +++ b/chromium/v8/src/compiler/access-info.cc @@ -31,9 +31,9 @@ bool CanInlinePropertyAccess(Handle<Map> map) { // We can inline property access to prototypes of all primitives, except // the special Oddball ones that have no wrapper counterparts (i.e. Null, // Undefined and TheHole). - STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_TYPE); + STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_HEAP_OBJECT_TYPE); if (map->IsBooleanMap()) return true; - if (map->instance_type() < LAST_PRIMITIVE_TYPE) return true; + if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true; return map->IsJSObjectMap() && !map->is_dictionary_map() && !map->has_named_interceptor() && // TODO(verwaest): Whitelist contexts to which we have access. @@ -323,8 +323,8 @@ bool AccessInfoFactory::ComputeElementAccessInfos( PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder, - int descriptor, AccessMode access_mode) const { - DCHECK_NE(descriptor, DescriptorArray::kNotFound); + InternalIndex descriptor, AccessMode access_mode) const { + DCHECK(descriptor.is_found()); Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); PropertyDetails const details = descriptors->GetDetails(descriptor); int index = descriptors->GetFieldIndex(descriptor); @@ -351,6 +351,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( descriptor)); } else if (details_representation.IsDouble()) { field_type = type_cache_->kFloat64; + if (!FLAG_unbox_double_fields) { + unrecorded_dependencies.push_back( + dependencies()->FieldRepresentationDependencyOffTheRecord( + map_ref, descriptor)); + } } else if (details_representation.IsHeapObject()) { // Extract the field type from the property details (make sure its // representation is TaggedPointer to reflect the heap object case). @@ -408,9 +413,9 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo( Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map, - MaybeHandle<JSObject> holder, int descriptor, + MaybeHandle<JSObject> holder, InternalIndex descriptor, AccessMode access_mode) const { - DCHECK_NE(descriptor, DescriptorArray::kNotFound); + DCHECK(descriptor.is_found()); Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); SLOW_DCHECK(descriptor == descriptors->Search(*name, *map)); if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) { @@ -497,8 +502,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo( while (true) { // Lookup the named property on the {map}. 
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); - int const number = descriptors->Search(*name, *map); - if (number != DescriptorArray::kNotFound) { + InternalIndex const number = descriptors->Search(*name, *map); + if (number.is_found()) { PropertyDetails const details = descriptors->GetDetails(number); if (access_mode == AccessMode::kStore || access_mode == AccessMode::kStoreInLiteral) { @@ -762,7 +767,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( } Handle<Map> transition_map(transition, isolate()); - int const number = transition_map->LastAdded(); + InternalIndex const number = transition_map->LastAdded(); PropertyDetails const details = transition_map->instance_descriptors().GetDetails(number); // Don't bother optimizing stores to read-only properties. @@ -789,6 +794,12 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( transition_map_ref, number)); } else if (details_representation.IsDouble()) { field_type = type_cache_->kFloat64; + if (!FLAG_unbox_double_fields) { + transition_map_ref.SerializeOwnDescriptor(number); + unrecorded_dependencies.push_back( + dependencies()->FieldRepresentationDependencyOffTheRecord( + transition_map_ref, number)); + } } else if (details_representation.IsHeapObject()) { // Extract the field type from the property details (make sure its // representation is TaggedPointer to reflect the heap object case). diff --git a/chromium/v8/src/compiler/access-info.h b/chromium/v8/src/compiler/access-info.h index e2f6e6d453d..59101e2cc90 100644 --- a/chromium/v8/src/compiler/access-info.h +++ b/chromium/v8/src/compiler/access-info.h @@ -204,11 +204,11 @@ class AccessInfoFactory final { PropertyAccessInfo ComputeDataFieldAccessInfo(Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder, - int descriptor, + InternalIndex descriptor, AccessMode access_mode) const; PropertyAccessInfo ComputeAccessorDescriptorAccessInfo( Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map, - MaybeHandle<JSObject> holder, int descriptor, + MaybeHandle<JSObject> holder, InternalIndex descriptor, AccessMode access_mode) const; void MergePropertyAccessInfos(ZoneVector<PropertyAccessInfo> infos, diff --git a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc index 65a569d755b..3fe53610838 100644 --- a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -44,7 +44,7 @@ class ArmOperandConverter final : public InstructionOperandConverter { UNREACHABLE(); } - Operand InputImmediate(size_t index) { + Operand InputImmediate(size_t index) const { return ToImmediate(instr_->InputAt(index)); } @@ -111,7 +111,7 @@ class ArmOperandConverter final : public InstructionOperandConverter { return InputOffset(&first_index); } - Operand ToImmediate(InstructionOperand* operand) { + Operand ToImmediate(InstructionOperand* operand) const { Constant constant = ToConstant(operand); switch (constant.type()) { case Constant::kInt32: @@ -153,9 +153,6 @@ class ArmOperandConverter final : public InstructionOperandConverter { NeonMemOperand NeonInputOperand(size_t first_index) { const size_t index = first_index; switch (AddressingModeField::decode(instr_->opcode())) { - case kMode_Offset_RR: - return NeonMemOperand(InputRegister(index + 0), - InputRegister(index + 1)); case kMode_Operand2_R: return NeonMemOperand(InputRegister(index + 0)); default: @@ -309,9 +306,9 @@ Condition 
FlagsConditionToCondition(FlagsCondition condition) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, - ArmOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, + ArmOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -320,10 +317,10 @@ void EmitWordLoadPoisoningIfNeeded( } } -void ComputePoisonedAddressForLoad( - CodeGenerator* codegen, InstructionCode opcode, - ArmOperandConverter& i, // NOLINT(runtime/references) - Register address) { +void ComputePoisonedAddressForLoad(CodeGenerator* codegen, + InstructionCode opcode, + ArmOperandConverter const& i, + Register address) { DCHECK_EQ(kMemoryAccessPoisoned, static_cast<MemoryAccessMode>(MiscField::decode(opcode))); switch (AddressingModeField::decode(opcode)) { @@ -1798,6 +1795,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } + case kArmF32x4Sqrt: { + QwNeonRegister dst = i.OutputSimd128Register(); + QwNeonRegister src1 = i.InputSimd128Register(0); + DCHECK_EQ(dst, q0); + DCHECK_EQ(src1, q0); +#define S_FROM_Q(reg, lane) SwVfpRegister::from_code(reg.code() * 4 + lane) + __ vsqrt(S_FROM_Q(dst, 0), S_FROM_Q(src1, 0)); + __ vsqrt(S_FROM_Q(dst, 1), S_FROM_Q(src1, 1)); + __ vsqrt(S_FROM_Q(dst, 2), S_FROM_Q(src1, 2)); + __ vsqrt(S_FROM_Q(dst, 3), S_FROM_Q(src1, 3)); +#undef S_FROM_Q + break; + } case kArmF32x4RecipApprox: { __ vrecpe(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; @@ -1919,14 +1929,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4Shl: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon32, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 32. + __ and_(shift, i.InputRegister(1), Operand(31)); + __ vdup(Neon32, tmp, shift); __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kArmI32x4ShrS: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon32, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 32. + __ and_(shift, i.InputRegister(1), Operand(31)); + __ vdup(Neon32, tmp, shift); __ vneg(Neon32, tmp, tmp); __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -1998,7 +2014,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4ShrU: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon32, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 32. 
+ __ and_(shift, i.InputRegister(1), Operand(31)); + __ vdup(Neon32, tmp, shift); __ vneg(Neon32, tmp, tmp); __ vshl(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2029,7 +2048,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI16x8ExtractLane: { - __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16, + __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU16, i.InputInt8(1)); break; } @@ -2054,14 +2073,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI16x8Shl: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon16, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 16. + __ and_(shift, i.InputRegister(1), Operand(15)); + __ vdup(Neon16, tmp, shift); __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kArmI16x8ShrS: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon16, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 16. + __ and_(shift, i.InputRegister(1), Operand(15)); + __ vdup(Neon16, tmp, shift); __ vneg(Neon16, tmp, tmp); __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2142,7 +2167,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI16x8ShrU: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon16, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 16. + __ and_(shift, i.InputRegister(1), Operand(15)); + __ vdup(Neon16, tmp, shift); __ vneg(Neon16, tmp, tmp); __ vshl(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2186,7 +2214,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI8x16ExtractLane: { - __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8, + __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU8, i.InputInt8(1)); break; } @@ -2201,6 +2229,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16Shl: { QwNeonRegister tmp = i.TempSimd128Register(0); + Register shift = i.TempRegister(1); + // Take shift value modulo 8. + __ and_(shift, i.InputRegister(1), Operand(7)); __ vdup(Neon8, tmp, i.InputRegister(1)); __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2208,7 +2239,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16ShrS: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon8, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 8. + __ and_(shift, i.InputRegister(1), Operand(7)); + __ vdup(Neon8, tmp, shift); __ vneg(Neon8, tmp, tmp); __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2275,7 +2309,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16ShrU: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon8, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 8. 
+ __ and_(shift, i.InputRegister(1), Operand(7)); + __ vdup(Neon8, tmp, shift); __ vneg(Neon8, tmp, tmp); __ vshl(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); diff --git a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h index 3551e26aea8..d398ec0ed6e 100644 --- a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h +++ b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h @@ -135,6 +135,7 @@ namespace compiler { V(ArmF32x4UConvertI32x4) \ V(ArmF32x4Abs) \ V(ArmF32x4Neg) \ + V(ArmF32x4Sqrt) \ V(ArmF32x4RecipApprox) \ V(ArmF32x4RecipSqrtApprox) \ V(ArmF32x4Add) \ diff --git a/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc index 1d7cf61dfe7..92be55dcc3d 100644 --- a/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc +++ b/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc @@ -115,6 +115,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArmF32x4UConvertI32x4: case kArmF32x4Abs: case kArmF32x4Neg: + case kArmF32x4Sqrt: case kArmF32x4RecipApprox: case kArmF32x4RecipSqrtApprox: case kArmF32x4Add: diff --git a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc index ce74faa4a62..303648051f8 100644 --- a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/base/adapters.h" #include "src/base/bits.h" #include "src/base/enum-set.h" +#include "src/base/iterator.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -94,7 +94,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { ArmOperandGenerator g(selector); - InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()}; selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), arraysize(temps), temps); @@ -352,6 +352,26 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode, } } +// Adds the base and offset into a register, then change the addressing +// mode of opcode_return to use this register. Certain instructions, e.g. +// vld1 and vst1, when given two registers, will post-increment the offset, i.e. +// perform the operation at base, then add offset to base. What we intend is to +// access at (base+offset). 
+void EmitAddBeforeS128LoadStore(InstructionSelector* selector, + InstructionCode* opcode_return, + size_t* input_count_return, + InstructionOperand* inputs) { + DCHECK(*opcode_return == kArmVld1S128 || *opcode_return == kArmVst1S128); + ArmOperandGenerator g(selector); + InstructionOperand addr = g.TempRegister(); + InstructionCode op = kArmAdd; + op |= AddressingModeField::encode(kMode_Operand2_R); + selector->Emit(op, 1, &addr, 2, inputs); + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R); + *input_count_return -= 1; + inputs[0] = addr; +} + void EmitLoad(InstructionSelector* selector, InstructionCode opcode, InstructionOperand* output, Node* base, Node* index) { ArmOperandGenerator g(selector); @@ -368,7 +388,11 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode, input_count = 3; } else { inputs[1] = g.UseRegister(index); - opcode |= AddressingModeField::encode(kMode_Offset_RR); + if (opcode == kArmVld1S128) { + EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[0]); + } else { + opcode |= AddressingModeField::encode(kMode_Offset_RR); + } } selector->Emit(opcode, 1, output, input_count, inputs); } @@ -386,7 +410,12 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode, input_count = 4; } else { inputs[input_count++] = g.UseRegister(index); - opcode |= AddressingModeField::encode(kMode_Offset_RR); + if (opcode == kArmVst1S128) { + // Inputs are value, base, index, only care about base and index. + EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]); + } else { + opcode |= AddressingModeField::encode(kMode_Offset_RR); + } } selector->Emit(opcode, 0, nullptr, input_count, inputs); } @@ -596,8 +625,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp); return; } - case MachineRepresentation::kFloat64: - case MachineRepresentation::kSimd128: { + case MachineRepresentation::kFloat64: { // Compute the address of the least-significant byte of the FP value. // We assume that the base node is unlikely to be an encodable immediate // or the result of a shift operation, so only consider the addressing @@ -623,13 +651,10 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { if (CpuFeatures::IsSupported(NEON)) { // With NEON we can load directly from the calculated address. - InstructionCode op = load_rep == MachineRepresentation::kFloat64 - ? kArmVld1F64 - : kArmVld1S128; + InstructionCode op = kArmVld1F64; op |= AddressingModeField::encode(kMode_Operand2_R); Emit(op, g.DefineAsRegister(node), addr); } else { - DCHECK_NE(MachineRepresentation::kSimd128, load_rep); // Load both halves and move to an FP register. InstructionOperand fp_lo = g.TempRegister(); InstructionOperand fp_hi = g.TempRegister(); @@ -670,8 +695,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) { EmitStore(this, kArmStr, input_count, inputs, index); return; } - case MachineRepresentation::kFloat64: - case MachineRepresentation::kSimd128: { + case MachineRepresentation::kFloat64: { if (CpuFeatures::IsSupported(NEON)) { InstructionOperand address = g.TempRegister(); { @@ -697,13 +721,10 @@ void InstructionSelector::VisitUnalignedStore(Node* node) { inputs[input_count++] = g.UseRegister(value); inputs[input_count++] = address; - InstructionCode op = store_rep == MachineRepresentation::kFloat64 - ? 
kArmVst1F64 - : kArmVst1S128; + InstructionCode op = kArmVst1F64; op |= AddressingModeField::encode(kMode_Operand2_R); Emit(op, 0, nullptr, input_count, inputs); } else { - DCHECK_NE(MachineRepresentation::kSimd128, store_rep); // Store a 64-bit floating point value using two 32-bit integer stores. // Computing the store address here would require three live temporary // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after @@ -942,7 +963,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) { uint32_t lsb = m.right().Value(); Int32BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { - uint32_t value = (mleft.right().Value() >> lsb) << lsb; + uint32_t value = static_cast<uint32_t>(mleft.right().Value() >> lsb) + << lsb; uint32_t width = base::bits::CountPopulation(value); uint32_t msb = base::bits::CountLeadingZeros32(value); if ((width != 0) && (msb + width + lsb == 32)) { @@ -1119,6 +1141,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { VisitRR(this, kArmRev, node); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); } void InstructionSelector::VisitInt32Add(Node* node) { @@ -2513,6 +2539,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP) #undef SIMD_VISIT_BINOP #undef SIMD_BINOP_LIST +void InstructionSelector::VisitF32x4Sqrt(Node* node) { + ArmOperandGenerator g(this); + // Use fixed registers in the lower 8 Q-registers so we can directly access + // mapped registers S0-S31. + Emit(kArmF32x4Sqrt, g.DefineAsFixed(node, q0), + g.UseFixed(node->InputAt(0), q0)); +} + void InstructionSelector::VisitF32x4Div(Node* node) { ArmOperandGenerator g(this); // Use fixed registers in the lower 8 Q-registers so we can directly access diff --git a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc index 66ca7f6cf0c..6f65c905dd1 100644 --- a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -376,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - Arm64OperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + Arm64OperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -389,6 +389,36 @@ void EmitWordLoadPoisoningIfNeeded( } } +void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode, + Arm64OperandConverter* i, VRegister output_reg) { + const MemoryAccessMode access_mode = + static_cast<MemoryAccessMode>(MiscField::decode(opcode)); + AddressingMode address_mode = AddressingModeField::decode(opcode); + if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) { + UseScratchRegisterScope temps(codegen->tasm()); + Register address = temps.AcquireX(); + switch (address_mode) { + case kMode_MRI: // Fall through. 
+ case kMode_MRR: + codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1)); + break; + case kMode_Operand2_R_LSL_I: + codegen->tasm()->Add(address, i->InputRegister(0), + i->InputOperand2_64(1)); + break; + default: + // Note: we don't need poisoning for kMode_Root loads as those loads + // target a fixed offset from root register which is set once when + // initializing the vm. + UNREACHABLE(); + } + codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister)); + codegen->tasm()->Ldr(output_reg, MemOperand(address)); + } else { + codegen->tasm()->Ldr(output_reg, i->MemoryOperand()); + } +} + } // namespace #define ASSEMBLE_SHIFT(asm_instr, width) \ @@ -1198,6 +1228,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Sxtw: __ Sxtw(i.OutputRegister(), i.InputRegister32(0)); break; + case kArm64Sbfx: + __ Sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1), + i.InputInt6(2)); + break; case kArm64Sbfx32: __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1), i.InputInt5(2)); @@ -1586,6 +1620,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Str: __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); break; + case kArm64StrCompressTagged: + __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); + break; case kArm64DecompressSigned: { __ DecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0)); break; @@ -1599,13 +1636,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64LdrS: - __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand()); + EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S()); break; case kArm64StrS: __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1)); break; case kArm64LdrD: - __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand()); + EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister()); break; case kArm64StrD: __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1)); @@ -1616,9 +1653,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64StrQ: __ Str(i.InputSimd128Register(0), i.MemoryOperand(1)); break; - case kArm64StrCompressTagged: - __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); - break; case kArm64DmbIsh: __ Dmb(InnerShareable, BarrierAll); break; @@ -1794,6 +1828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D); SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D); + SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D); SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D); SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D); SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D); @@ -1818,6 +1853,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(0).V2D()); break; } + case kArm64F64x2Qfma: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmla(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(), + i.InputSimd128Register(2).V2D()); + break; + } + case kArm64F64x2Qfms: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmls(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(), + i.InputSimd128Register(2).V2D()); + break; + } case kArm64F32x4Splat: { __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0); break; @@ -1840,6 +1887,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S); 
SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S); SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S); + SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S); SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S); SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S); SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S); @@ -1867,6 +1915,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(0).V4S()); break; } + case kArm64F32x4Qfma: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmla(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(), + i.InputSimd128Register(2).V4S()); + break; + } + case kArm64F32x4Qfms: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmls(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(), + i.InputSimd128Register(2).V4S()); + break; + } case kArm64I64x2Splat: { __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0)); break; @@ -1888,14 +1948,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D); case kArm64I64x2Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V2D(), i.InputRegister64(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 64. + __ And(shift, i.InputRegister64(1), 63); + __ Dup(tmp.V2D(), shift); __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(), tmp.V2D()); break; } case kArm64I64x2ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V2D(), i.InputRegister64(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 64. + __ And(shift, i.InputRegister64(1), 63); + __ Dup(tmp.V2D(), shift); __ Neg(tmp.V2D(), tmp.V2D()); __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(), tmp.V2D()); @@ -1903,6 +1969,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D); SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D); + case kArm64I64x2Mul: { + UseScratchRegisterScope scope(tasm()); + VRegister dst = i.OutputSimd128Register(); + VRegister src1 = i.InputSimd128Register(0); + VRegister src2 = i.InputSimd128Register(1); + VRegister tmp1 = scope.AcquireSameSizeAs(dst); + VRegister tmp2 = scope.AcquireSameSizeAs(dst); + VRegister tmp3 = i.ToSimd128Register(instr->TempAt(0)); + + // This 2x64-bit multiplication is performed with several 32-bit + // multiplications. + + // 64-bit numbers x and y, can be represented as: + // x = a + 2^32(b) + // y = c + 2^32(d) + + // A 64-bit multiplication is: + // x * y = ac + 2^32(ad + bc) + 2^64(bd) + // note: `2^64(bd)` can be ignored, the value is too large to fit in + // 64-bits. + + // This sequence implements a 2x64bit multiply, where the registers + // `src1` and `src2` are split up into 32-bit components: + // src1 = |d|c|b|a| + // src2 = |h|g|f|e| + // + // src1 * src2 = |cg + 2^32(ch + dg)|ae + 2^32(af + be)| + + // Reverse the 32-bit elements in the 64-bit words. + // tmp2 = |g|h|e|f| + __ Rev64(tmp2.V4S(), src2.V4S()); + + // Calculate the high half components. + // tmp2 = |dg|ch|be|af| + __ Mul(tmp2.V4S(), tmp2.V4S(), src1.V4S()); + + // Extract the low half components of src1. + // tmp1 = |c|a| + __ Xtn(tmp1.V2S(), src1.V2D()); + + // Sum the respective high half components. + // tmp2 = |dg+ch|be+af||dg+ch|be+af| + __ Addp(tmp2.V4S(), tmp2.V4S(), tmp2.V4S()); + + // Extract the low half components of src2. + // tmp3 = |g|e| + __ Xtn(tmp3.V2S(), src2.V2D()); + + // Shift the high half components, into the high half. 
+ // dst = |dg+ch << 32|be+af << 32| + __ Shll(dst.V2D(), tmp2.V2S(), 32); + + // Multiply the low components together, and accumulate with the high + // half. + // dst = |dst[1] + cg|dst[0] + ae| + __ Umlal(dst.V2D(), tmp3.V2S(), tmp1.V2S()); + + break; + } SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D); case kArm64I64x2Ne: { VRegister dst = i.OutputSimd128Register().V2D(); @@ -1915,7 +2040,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D); case kArm64I64x2ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V2D(), i.InputRegister64(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 64. + __ And(shift, i.InputRegister64(1), 63); + __ Dup(tmp.V2D(), shift); __ Neg(tmp.V2D(), tmp.V2D()); __ Ushl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(), tmp.V2D()); @@ -1947,14 +2075,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S); case kArm64I32x4Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V4S(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 32. + __ And(shift, i.InputRegister32(1), 31); + __ Dup(tmp.V4S(), shift); __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), tmp.V4S()); break; } case kArm64I32x4ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V4S(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 32. + __ And(shift, i.InputRegister32(1), 31); + __ Dup(tmp.V4S(), shift); __ Neg(tmp.V4S(), tmp.V4S()); __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), tmp.V4S()); @@ -1981,7 +2115,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H); case kArm64I32x4ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V4S(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 32. + __ And(shift, i.InputRegister32(1), 31); + __ Dup(tmp.V4S(), shift); __ Neg(tmp.V4S(), tmp.V4S()); __ Ushl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), tmp.V4S()); @@ -1996,7 +2133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I16x8ExtractLane: { - __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(), + __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(), i.InputInt8(1)); break; } @@ -2014,14 +2151,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H); case kArm64I16x8Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V8H(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 16. + __ And(shift, i.InputRegister32(1), 15); + __ Dup(tmp.V8H(), shift); __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), tmp.V8H()); break; } case kArm64I16x8ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V8H(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 16. 
+ __ And(shift, i.InputRegister32(1), 15); + __ Dup(tmp.V8H(), shift); __ Neg(tmp.V8H(), tmp.V8H()); __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), tmp.V8H()); @@ -2070,7 +2213,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArm64I16x8ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V8H(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 16. + __ And(shift, i.InputRegister32(1), 15); + __ Dup(tmp.V8H(), shift); __ Neg(tmp.V8H(), tmp.V8H()); __ Ushl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), tmp.V8H()); @@ -2101,7 +2247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I8x16ExtractLane: { - __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(), + __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(), i.InputInt8(1)); break; } @@ -2117,14 +2263,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B); case kArm64I8x16Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V16B(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 8. + __ And(shift, i.InputRegister32(1), 7); + __ Dup(tmp.V16B(), shift); __ Sshl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), tmp.V16B()); break; } case kArm64I8x16ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V16B(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 8. + __ And(shift, i.InputRegister32(1), 7); + __ Dup(tmp.V16B(), shift); __ Neg(tmp.V16B(), tmp.V16B()); __ Sshl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), tmp.V16B()); @@ -2163,7 +2315,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B); case kArm64I8x16ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V16B(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 8. 
+ __ And(shift, i.InputRegister32(1), 7); + __ Dup(tmp.V16B(), shift); __ Neg(tmp.V16B(), tmp.V16B()); __ Ushl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), tmp.V16B()); @@ -2277,6 +2432,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(1).V16B(), i.InputInt4(2)); break; } + case kArm64S8x16Swizzle: { + __ Tbl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), + i.InputSimd128Register(1).V16B()); + break; + } case kArm64S8x16Shuffle: { Simd128Register dst = i.OutputSimd128Register().V16B(), src0 = i.InputSimd128Register(0).V16B(), diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h index 4b56e402c15..880a3fbf9e0 100644 --- a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h +++ b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h @@ -70,6 +70,7 @@ namespace compiler { V(Arm64Sxtb) \ V(Arm64Sxth) \ V(Arm64Sxtw) \ + V(Arm64Sbfx) \ V(Arm64Sbfx32) \ V(Arm64Ubfx) \ V(Arm64Ubfx32) \ @@ -175,6 +176,7 @@ namespace compiler { V(Arm64F64x2ReplaceLane) \ V(Arm64F64x2Abs) \ V(Arm64F64x2Neg) \ + V(Arm64F64x2Sqrt) \ V(Arm64F64x2Add) \ V(Arm64F64x2Sub) \ V(Arm64F64x2Mul) \ @@ -185,6 +187,8 @@ namespace compiler { V(Arm64F64x2Ne) \ V(Arm64F64x2Lt) \ V(Arm64F64x2Le) \ + V(Arm64F64x2Qfma) \ + V(Arm64F64x2Qfms) \ V(Arm64F32x4Splat) \ V(Arm64F32x4ExtractLane) \ V(Arm64F32x4ReplaceLane) \ @@ -192,6 +196,7 @@ namespace compiler { V(Arm64F32x4UConvertI32x4) \ V(Arm64F32x4Abs) \ V(Arm64F32x4Neg) \ + V(Arm64F32x4Sqrt) \ V(Arm64F32x4RecipApprox) \ V(Arm64F32x4RecipSqrtApprox) \ V(Arm64F32x4Add) \ @@ -205,6 +210,8 @@ namespace compiler { V(Arm64F32x4Ne) \ V(Arm64F32x4Lt) \ V(Arm64F32x4Le) \ + V(Arm64F32x4Qfma) \ + V(Arm64F32x4Qfms) \ V(Arm64I64x2Splat) \ V(Arm64I64x2ExtractLane) \ V(Arm64I64x2ReplaceLane) \ @@ -213,6 +220,7 @@ namespace compiler { V(Arm64I64x2ShrS) \ V(Arm64I64x2Add) \ V(Arm64I64x2Sub) \ + V(Arm64I64x2Mul) \ V(Arm64I64x2Eq) \ V(Arm64I64x2Ne) \ V(Arm64I64x2GtS) \ @@ -331,6 +339,7 @@ namespace compiler { V(Arm64S8x16TransposeLeft) \ V(Arm64S8x16TransposeRight) \ V(Arm64S8x16Concat) \ + V(Arm64S8x16Swizzle) \ V(Arm64S8x16Shuffle) \ V(Arm64S32x2Reverse) \ V(Arm64S16x4Reverse) \ diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc index 7cba2d50ea0..b0f92029684 100644 --- a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc +++ b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc @@ -71,6 +71,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64Sxth: case kArm64Sxth32: case kArm64Sxtw: + case kArm64Sbfx: case kArm64Sbfx32: case kArm64Ubfx: case kArm64Ubfx32: @@ -142,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F64x2ReplaceLane: case kArm64F64x2Abs: case kArm64F64x2Neg: + case kArm64F64x2Sqrt: case kArm64F64x2Add: case kArm64F64x2Sub: case kArm64F64x2Mul: @@ -152,6 +154,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F64x2Ne: case kArm64F64x2Lt: case kArm64F64x2Le: + case kArm64F64x2Qfma: + case kArm64F64x2Qfms: case kArm64F32x4Splat: case kArm64F32x4ExtractLane: case kArm64F32x4ReplaceLane: @@ -159,6 +163,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F32x4UConvertI32x4: case kArm64F32x4Abs: case kArm64F32x4Neg: + case kArm64F32x4Sqrt: case kArm64F32x4RecipApprox: case 
kArm64F32x4RecipSqrtApprox: case kArm64F32x4Add: @@ -172,6 +177,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F32x4Ne: case kArm64F32x4Lt: case kArm64F32x4Le: + case kArm64F32x4Qfma: + case kArm64F32x4Qfms: case kArm64I64x2Splat: case kArm64I64x2ExtractLane: case kArm64I64x2ReplaceLane: @@ -180,6 +187,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64I64x2ShrS: case kArm64I64x2Add: case kArm64I64x2Sub: + case kArm64I64x2Mul: case kArm64I64x2Eq: case kArm64I64x2Ne: case kArm64I64x2GtS: @@ -298,6 +306,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64S8x16TransposeLeft: case kArm64S8x16TransposeRight: case kArm64S8x16Concat: + case kArm64S8x16Swizzle: case kArm64S8x16Shuffle: case kArm64S32x2Reverse: case kArm64S16x4Reverse: @@ -439,6 +448,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { case kArm64Clz: case kArm64Clz32: + case kArm64Sbfx: case kArm64Sbfx32: case kArm64Sxtb32: case kArm64Sxth32: diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc index 4abbd68c49a..53a289fe6a6 100644 --- a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -153,7 +153,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { Arm64OperandGenerator g(selector); - InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()}; selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), arraysize(temps), temps); @@ -499,6 +499,7 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode, Arm64OperandGenerator g(selector); Matcher m(node); if (m.right().HasValue() && (m.right().Value() < 0) && + (m.right().Value() > std::numeric_limits<int>::min()) && g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) { selector->Emit(negate_opcode, g.DefineAsRegister(node), g.UseRegister(m.left().node()), @@ -627,9 +628,24 @@ void InstructionSelector::VisitLoad(Node* node) { #else UNREACHABLE(); #endif +#ifdef V8_COMPRESS_POINTERS + case MachineRepresentation::kTaggedSigned: + opcode = kArm64LdrDecompressTaggedSigned; + immediate_mode = kLoadStoreImm32; + break; + case MachineRepresentation::kTaggedPointer: + opcode = kArm64LdrDecompressTaggedPointer; + immediate_mode = kLoadStoreImm32; + break; + case MachineRepresentation::kTagged: + opcode = kArm64LdrDecompressAnyTagged; + immediate_mode = kLoadStoreImm32; + break; +#else case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTagged: // Fall through. +#endif case MachineRepresentation::kWord64: opcode = kArm64Ldr; immediate_mode = kLoadStoreImm64; @@ -723,7 +739,7 @@ void InstructionSelector::VisitStore(Node* node) { case MachineRepresentation::kCompressedPointer: // Fall through. case MachineRepresentation::kCompressed: #ifdef V8_COMPRESS_POINTERS - opcode = kArm64StrW; + opcode = kArm64StrCompressTagged; immediate_mode = kLoadStoreImm32; break; #else @@ -731,7 +747,11 @@ void InstructionSelector::VisitStore(Node* node) { #endif case MachineRepresentation::kTaggedSigned: // Fall through. 
case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kTagged: + opcode = kArm64StrCompressTagged; + immediate_mode = + COMPRESS_POINTERS_BOOL ? kLoadStoreImm32 : kLoadStoreImm64; + break; case MachineRepresentation::kWord64: opcode = kArm64Str; immediate_mode = kLoadStoreImm64; @@ -770,6 +790,10 @@ void InstructionSelector::VisitProtectedStore(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + // Architecture supports unaligned access, therefore VisitLoad is used instead void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); } @@ -1048,7 +1072,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) { if (mleft.right().HasValue() && mleft.right().Value() != 0) { // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is // shifted into the least-significant bits. - uint32_t mask = (mleft.right().Value() >> lsb) << lsb; + uint32_t mask = static_cast<uint32_t>(mleft.right().Value() >> lsb) + << lsb; unsigned mask_width = base::bits::CountPopulation(mask); unsigned mask_msb = base::bits::CountLeadingZeros32(mask); if ((mask_msb + mask_width + lsb) == 32) { @@ -1091,7 +1116,8 @@ void InstructionSelector::VisitWord64Shr(Node* node) { if (mleft.right().HasValue() && mleft.right().Value() != 0) { // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is // shifted into the least-significant bits. - uint64_t mask = (mleft.right().Value() >> lsb) << lsb; + uint64_t mask = static_cast<uint64_t>(mleft.right().Value() >> lsb) + << lsb; unsigned mask_width = base::bits::CountPopulation(mask); unsigned mask_msb = base::bits::CountLeadingZeros64(mask); if ((mask_msb + mask_width + lsb) == 64) { @@ -1240,7 +1266,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) { V(Float32Max, kArm64Float32Max) \ V(Float64Max, kArm64Float64Max) \ V(Float32Min, kArm64Float32Min) \ - V(Float64Min, kArm64Float64Min) + V(Float64Min, kArm64Float64Min) \ + V(S8x16Swizzle, kArm64S8x16Swizzle) #define RR_VISITOR(Name, opcode) \ void InstructionSelector::Visit##Name(Node* node) { \ @@ -1572,9 +1599,22 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { return; } EmitLoad(this, value, opcode, immediate_mode, rep, node); - } else { - VisitRR(this, kArm64Sxtw, node); + return; + } + + if (value->opcode() == IrOpcode::kWord32Sar && CanCover(node, value)) { + Int32BinopMatcher m(value); + if (m.right().HasValue()) { + Arm64OperandGenerator g(this); + // Mask the shift amount, to keep the same semantics as Word32Sar. + int right = m.right().Value() & 0x1F; + Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(right), g.TempImmediate(32 - right)); + return; + } } + + VisitRR(this, kArm64Sxtw, node); } void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { @@ -1830,31 +1870,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode, selector->EmitWithContinuation(opcode, left, right, cont); } -// Shared routine for multiple word compare operations. -void VisitWordCompare(InstructionSelector* selector, Node* node, - InstructionCode opcode, FlagsContinuation* cont, - ImmediateMode immediate_mode) { - Arm64OperandGenerator g(selector); - - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - - // If one of the two inputs is an immediate, make sure it's on the right. 
- if (!g.CanBeImmediate(right, immediate_mode) && - g.CanBeImmediate(left, immediate_mode)) { - cont->Commute(); - std::swap(left, right); - } - - if (g.CanBeImmediate(right, immediate_mode)) { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), - cont); - } else { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), - cont); - } -} - // This function checks whether we can convert: // ((a <op> b) cmp 0), b.<cond> // to: @@ -1986,9 +2001,35 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector, selector->EmitWithContinuation(opcode, value, cont); } +template <int N> +struct CbzOrTbzMatchTrait {}; + +template <> +struct CbzOrTbzMatchTrait<32> { + using IntegralType = uint32_t; + using BinopMatcher = Int32BinopMatcher; + static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord32And; + static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch32; + static constexpr ArchOpcode kCompareAndBranchOpcode = + kArm64CompareAndBranch32; + static constexpr unsigned kSignBit = kWSignBit; +}; + +template <> +struct CbzOrTbzMatchTrait<64> { + using IntegralType = uint64_t; + using BinopMatcher = Int64BinopMatcher; + static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord64And; + static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch; + static constexpr ArchOpcode kCompareAndBranchOpcode = kArm64CompareAndBranch; + static constexpr unsigned kSignBit = kXSignBit; +}; + // Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node} // against {value}, depending on the condition. -bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, +template <int N> +bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, + typename CbzOrTbzMatchTrait<N>::IntegralType value, Node* user, FlagsCondition cond, FlagsContinuation* cont) { // Branch poisoning requires flags to be set, so when it's enabled for // a particular branch, we shouldn't be applying the cbz/tbz optimization. @@ -2007,28 +2048,33 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, if (cont->IsDeoptimize()) return false; Arm64OperandGenerator g(selector); cont->Overwrite(MapForTbz(cond)); - Int32Matcher m(node); - if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) { - // SignedLessThan(Float64ExtractHighWord32(x), 0) and - // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially - // check the sign bit of a 64-bit floating point value. - InstructionOperand temp = g.TempRegister(); - selector->Emit(kArm64U64MoveFloat64, temp, - g.UseRegister(node->InputAt(0))); - selector->EmitWithContinuation(kArm64TestAndBranch, temp, - g.TempImmediate(63), cont); - return true; + + if (N == 32) { + Int32Matcher m(node); + if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) { + // SignedLessThan(Float64ExtractHighWord32(x), 0) and + // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) + // essentially check the sign bit of a 64-bit floating point value. 
+ InstructionOperand temp = g.TempRegister(); + selector->Emit(kArm64U64MoveFloat64, temp, + g.UseRegister(node->InputAt(0))); + selector->EmitWithContinuation(kArm64TestAndBranch, temp, + g.TempImmediate(kDSignBit), cont); + return true; + } } - selector->EmitWithContinuation(kArm64TestAndBranch32, g.UseRegister(node), - g.TempImmediate(31), cont); + + selector->EmitWithContinuation( + CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode, g.UseRegister(node), + g.TempImmediate(CbzOrTbzMatchTrait<N>::kSignBit), cont); return true; } case kEqual: case kNotEqual: { - if (node->opcode() == IrOpcode::kWord32And) { + if (node->opcode() == CbzOrTbzMatchTrait<N>::kAndOpcode) { // Emit a tbz/tbnz if we are comparing with a single-bit mask: - // Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false) - Int32BinopMatcher m_and(node); + // Branch(WordEqual(WordAnd(x, 1 << N), 1 << N), true, false) + typename CbzOrTbzMatchTrait<N>::BinopMatcher m_and(node); if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) && m_and.right().Is(value) && selector->CanCover(user, node)) { Arm64OperandGenerator g(selector); @@ -2036,7 +2082,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, // the opposite here so negate the condition. cont->Negate(); selector->EmitWithContinuation( - kArm64TestAndBranch32, g.UseRegister(m_and.left().node()), + CbzOrTbzMatchTrait<N>::kTestAndBranchOpcode, + g.UseRegister(m_and.left().node()), g.TempImmediate(base::bits::CountTrailingZeros(value)), cont); return true; } @@ -2048,7 +2095,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, if (value != 0) return false; Arm64OperandGenerator g(selector); cont->Overwrite(MapForCbz(cond)); - EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32, + EmitBranchOrDeoptimize(selector, + CbzOrTbzMatchTrait<N>::kCompareAndBranchOpcode, g.UseRegister(node), cont); return true; } @@ -2057,20 +2105,50 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, } } +// Shared routine for multiple word compare operations. +void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont, + ImmediateMode immediate_mode) { + Arm64OperandGenerator g(selector); + + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // If one of the two inputs is an immediate, make sure it's on the right. 
+ if (!g.CanBeImmediate(right, immediate_mode) && + g.CanBeImmediate(left, immediate_mode)) { + cont->Commute(); + std::swap(left, right); + } + + if (opcode == kArm64Cmp && !cont->IsPoisoned()) { + Int64Matcher m(right); + if (m.HasValue()) { + if (TryEmitCbzOrTbz<64>(selector, left, m.Value(), node, + cont->condition(), cont)) { + return; + } + } + } + + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseOperand(right, immediate_mode), cont); +} + void VisitWord32Compare(InstructionSelector* selector, Node* node, FlagsContinuation* cont) { Int32BinopMatcher m(node); FlagsCondition cond = cont->condition(); if (!cont->IsPoisoned()) { if (m.right().HasValue()) { - if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node, - cond, cont)) { + if (TryEmitCbzOrTbz<32>(selector, m.left().node(), m.right().Value(), + node, cond, cont)) { return; } } else if (m.left().HasValue()) { FlagsCondition commuted_cond = CommuteFlagsCondition(cond); - if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node, - commuted_cond, cont)) { + if (TryEmitCbzOrTbz<32>(selector, m.right().node(), m.left().Value(), + node, commuted_cond, cont)) { return; } } @@ -2378,13 +2456,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) { return VisitWordCompare(this, left, kArm64Tst, cont, kLogical64Imm); } - // Merge the Word64Equal(x, 0) comparison into a cbz instruction. - if ((cont->IsBranch() || cont->IsDeoptimize()) && - !cont->IsPoisoned()) { - EmitBranchOrDeoptimize(this, kArm64CompareAndBranch, - g.UseRegister(left), cont); - return; - } } return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm); } @@ -3054,10 +3125,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { #define SIMD_UNOP_LIST(V) \ V(F64x2Abs, kArm64F64x2Abs) \ V(F64x2Neg, kArm64F64x2Neg) \ + V(F64x2Sqrt, kArm64F64x2Sqrt) \ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \ V(F32x4Abs, kArm64F32x4Abs) \ V(F32x4Neg, kArm64F32x4Neg) \ + V(F32x4Sqrt, kArm64F32x4Sqrt) \ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \ V(I64x2Neg, kArm64I64x2Neg) \ @@ -3236,6 +3309,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP) #undef SIMD_VISIT_BINOP #undef SIMD_BINOP_LIST +void InstructionSelector::VisitI64x2Mul(Node* node) { + Arm64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kArm64I64x2Mul, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), + arraysize(temps), temps); +} + void InstructionSelector::VisitS128Select(Node* node) { Arm64OperandGenerator g(this); Emit(kArm64S128Select, g.DefineSameAsFirst(node), @@ -3243,6 +3324,19 @@ void InstructionSelector::VisitS128Select(Node* node) { g.UseRegister(node->InputAt(2))); } +#define VISIT_SIMD_QFMOP(op) \ + void InstructionSelector::Visit##op(Node* node) { \ + Arm64OperandGenerator g(this); \ + Emit(kArm64##op, g.DefineSameAsFirst(node), \ + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \ + g.UseRegister(node->InputAt(2))); \ + } +VISIT_SIMD_QFMOP(F64x2Qfma) +VISIT_SIMD_QFMOP(F64x2Qfms) +VISIT_SIMD_QFMOP(F32x4Qfma) +VISIT_SIMD_QFMOP(F32x4Qfms) +#undef VISIT_SIMD_QFMOP + namespace { struct ShuffleEntry { diff --git a/chromium/v8/src/compiler/backend/code-generator-impl.h b/chromium/v8/src/compiler/backend/code-generator-impl.h index 2bfb009980d..530dc0a8136 100644 --- 
a/chromium/v8/src/compiler/backend/code-generator-impl.h +++ b/chromium/v8/src/compiler/backend/code-generator-impl.h @@ -26,7 +26,7 @@ class InstructionOperandConverter { // -- Instruction operand accesses with conversions -------------------------- - Register InputRegister(size_t index) { + Register InputRegister(size_t index) const { return ToRegister(instr_->InputAt(index)); } @@ -96,7 +96,7 @@ class InstructionOperandConverter { return ToRpoNumber(instr_->InputAt(index)); } - Register OutputRegister(size_t index = 0) { + Register OutputRegister(size_t index = 0) const { return ToRegister(instr_->OutputAt(index)); } @@ -130,7 +130,7 @@ class InstructionOperandConverter { return ToConstant(op).ToRpoNumber(); } - Register ToRegister(InstructionOperand* op) { + Register ToRegister(InstructionOperand* op) const { return LocationOperand::cast(op)->GetRegister(); } @@ -146,7 +146,7 @@ class InstructionOperandConverter { return LocationOperand::cast(op)->GetSimd128Register(); } - Constant ToConstant(InstructionOperand* op) { + Constant ToConstant(InstructionOperand* op) const { if (op->IsImmediate()) { return gen_->instructions()->GetImmediate(ImmediateOperand::cast(op)); } diff --git a/chromium/v8/src/compiler/backend/code-generator.cc b/chromium/v8/src/compiler/backend/code-generator.cc index e7702bcdf62..43eb4a1f15a 100644 --- a/chromium/v8/src/compiler/backend/code-generator.cc +++ b/chromium/v8/src/compiler/backend/code-generator.cc @@ -4,7 +4,7 @@ #include "src/compiler/backend/code-generator.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/optimized-compilation-info.h" diff --git a/chromium/v8/src/compiler/backend/code-generator.h b/chromium/v8/src/compiler/backend/code-generator.h index e9ebf675905..d56b1edae0e 100644 --- a/chromium/v8/src/compiler/backend/code-generator.h +++ b/chromium/v8/src/compiler/backend/code-generator.h @@ -5,6 +5,8 @@ #ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_ #define V8_COMPILER_BACKEND_CODE_GENERATOR_H_ +#include <memory> + #include "src/base/optional.h" #include "src/codegen/macro-assembler.h" #include "src/codegen/safepoint-table.h" diff --git a/chromium/v8/src/compiler/backend/frame-elider.cc b/chromium/v8/src/compiler/backend/frame-elider.cc index 064501b0971..293fc9352c4 100644 --- a/chromium/v8/src/compiler/backend/frame-elider.cc +++ b/chromium/v8/src/compiler/backend/frame-elider.cc @@ -4,7 +4,7 @@ #include "src/compiler/backend/frame-elider.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" namespace v8 { namespace internal { diff --git a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 4542da643b4..068268a3da4 100644 --- a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -479,17 +479,18 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \ } -#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ - do { \ - Register dst = i.OutputRegister(); \ - Operand src = i.InputOperand(0); \ - Register tmp = i.TempRegister(0); \ - __ mov(tmp, Immediate(1)); \ - __ xor_(dst, dst); \ - __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \ - __ opcode(kScratchDoubleReg, src); \ - __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \ - __ cmov(zero, dst, tmp); \ +#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ + do { \ + 
Register dst = i.OutputRegister(); \ + Operand src = i.InputOperand(0); \ + Register tmp = i.TempRegister(0); \ + XMMRegister tmp_simd = i.TempSimd128Register(1); \ + __ mov(tmp, Immediate(1)); \ + __ xor_(dst, dst); \ + __ Pxor(tmp_simd, tmp_simd); \ + __ opcode(tmp_simd, src); \ + __ Ptest(tmp_simd, tmp_simd); \ + __ cmov(zero, dst, tmp); \ } while (false) void CodeGenerator::AssembleDeconstructFrame() { @@ -1266,16 +1267,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kSSEFloat32Abs: { // TODO(bmeurer): Use 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 33); - __ andps(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 33); + __ andps(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat32Neg: { // TODO(bmeurer): Use 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 31); - __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 31); + __ xorps(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat32Round: { @@ -1444,16 +1447,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEFloat64Abs: { // TODO(bmeurer): Use 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 1); - __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 1); + __ andpd(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat64Neg: { // TODO(bmeurer): Use 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 63); - __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 63); + __ xorpd(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat64Sqrt: @@ -1476,13 +1481,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cvttss2si(i.OutputRegister(), i.InputOperand(0)); break; case kSSEFloat32ToUint32: - __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg); + __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0), + i.TempSimd128Register(0)); break; case kSSEFloat64ToInt32: __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); break; case kSSEFloat64ToUint32: - __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg); + __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0), + i.TempSimd128Register(0)); break; case kSSEInt32ToFloat32: __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0)); @@ -1577,34 +1584,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kAVXFloat32Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 33); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 33); CpuFeatureScope avx_scope(tasm(), AVX); - __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kAVXFloat32Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. 
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 31); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 31); CpuFeatureScope avx_scope(tasm(), AVX); - __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kAVXFloat64Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 1); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 1); CpuFeatureScope avx_scope(tasm(), AVX); - __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kAVXFloat64Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 63); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 63); CpuFeatureScope avx_scope(tasm(), AVX); - __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kSSEFloat64SilenceNaN: @@ -1825,6 +1836,164 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kSSEF64x2Splat: { + DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + XMMRegister dst = i.OutputSimd128Register(); + __ shufpd(dst, dst, 0x0); + break; + } + case kAVXF64x2Splat: { + CpuFeatureScope avx_scope(tasm(), AVX); + XMMRegister src = i.InputDoubleRegister(0); + __ vshufpd(i.OutputSimd128Register(), src, src, 0x0); + break; + } + case kSSEF64x2ExtractLane: { + DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + XMMRegister dst = i.OutputDoubleRegister(); + int8_t lane = i.InputInt8(1); + if (lane != 0) { + DCHECK_LT(lane, 4); + __ shufpd(dst, dst, lane); + } + break; + } + case kAVXF64x2ExtractLane: { + CpuFeatureScope avx_scope(tasm(), AVX); + XMMRegister dst = i.OutputDoubleRegister(); + XMMRegister src = i.InputSimd128Register(0); + int8_t lane = i.InputInt8(1); + if (lane == 0) { + if (dst != src) __ vmovapd(dst, src); + } else { + DCHECK_LT(lane, 4); + __ vshufpd(dst, src, src, lane); + } + break; + } + case kSSEF64x2ReplaceLane: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_1); + XMMRegister dst = i.OutputSimd128Register(); + int8_t lane = i.InputInt8(1); + DoubleRegister rep = i.InputDoubleRegister(2); + + // insertps takes a mask which contains (high to low): + // - 2 bit specifying source float element to copy + // - 2 bit specifying destination float element to write to + // - 4 bits specifying which elements of the destination to zero + DCHECK_LT(lane, 2); + if (lane == 0) { + __ insertps(dst, rep, 0b00000000); + __ insertps(dst, rep, 0b01010000); + } else { + __ insertps(dst, rep, 0b00100000); + __ insertps(dst, rep, 0b01110000); + } + break; + } + case kAVXF64x2ReplaceLane: { + CpuFeatureScope avx_scope(tasm(), AVX); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(0); + int8_t lane = i.InputInt8(1); + DoubleRegister rep = i.InputDoubleRegister(2); + + DCHECK_LT(lane, 2); + if (lane == 0) { + __ vinsertps(dst, src, rep, 0b00000000); + __ vinsertps(dst, src, rep, 0b01010000); + } else { + __ vinsertps(dst, src, rep, 0b10100000); + __ vinsertps(dst, src, rep, 0b11110000); + } 
+ break; + } + case kIA32F64x2Sqrt: { + __ Sqrtpd(i.OutputSimd128Register(), i.InputOperand(0)); + break; + } + case kIA32F64x2Add: { + __ Addpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Sub: { + __ Subpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Mul: { + __ Mulpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Div: { + __ Divpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Min: { + Operand src1 = i.InputOperand(1); + XMMRegister dst = i.OutputSimd128Register(), + src = i.InputSimd128Register(0), + tmp = i.TempSimd128Register(0); + // The minpd instruction doesn't propagate NaNs and +0's in its first + // operand. Perform minpd in both orders, merge the results, and adjust. + __ Movapd(tmp, src1); + __ Minpd(tmp, tmp, src); + __ Minpd(dst, src, src1); + // Propagate -0's and NaNs, which may be non-canonical. + __ Orpd(tmp, dst); + // Canonicalize NaNs by quieting and clearing the payload. + __ Cmpunordpd(dst, dst, tmp); + __ Orpd(tmp, dst); + __ Psrlq(dst, 13); + __ Andnpd(dst, tmp); + break; + } + case kIA32F64x2Max: { + Operand src1 = i.InputOperand(1); + XMMRegister dst = i.OutputSimd128Register(), + src = i.InputSimd128Register(0), + tmp = i.TempSimd128Register(0); + // The maxpd instruction doesn't propagate NaNs and +0's in its first + // operand. Perform maxpd in both orders, merge the results, and adjust. + __ Movapd(tmp, src1); + __ Maxpd(tmp, tmp, src); + __ Maxpd(dst, src, src1); + // Find discrepancies. + __ Xorpd(dst, tmp); + // Propagate NaNs, which may be non-canonical. + __ Orpd(tmp, dst); + // Propagate sign discrepancy and (subtle) quiet NaNs. + __ Subpd(tmp, tmp, dst); + // Canonicalize NaNs by clearing the payload. Sign is non-deterministic. + __ Cmpunordpd(dst, dst, tmp); + __ Psrlq(dst, 13); + __ Andnpd(dst, tmp); + break; + } + case kIA32F64x2Eq: { + __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Ne: { + __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Lt: { + __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Le: { + __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } case kSSEF32x4Splat: { DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); XMMRegister dst = i.OutputSimd128Register(); @@ -1951,6 +2120,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputOperand(0)); break; } + case kSSEF32x4Sqrt: { + __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kAVXF32x4Sqrt: { + CpuFeatureScope avx_scope(tasm(), AVX); + __ vsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kIA32F32x4RecipApprox: { __ Rcpps(i.OutputSimd128Register(), i.InputOperand(0)); break; @@ -2212,28 +2390,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI32x4Shl: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32.
+ __ and_(shift, 31); + __ movd(tmp, shift); __ pslld(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4Shl: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI32x4ShrS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ psrad(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4ShrS: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2430,14 +2620,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI32x4ShrU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ psrld(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4ShrU: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2514,7 +2710,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kIA32I16x8ExtractLane: { Register dst = i.OutputRegister(); __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsx_w(dst, dst); break; } case kSSEI16x8ReplaceLane: { @@ -2553,28 +2748,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI16x8Shl: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ psllw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8Shl: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI16x8ShrS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ psraw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8ShrS: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. 
+ __ and_(shift, 15); + __ movd(tmp, shift); __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2745,14 +2952,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI16x8ShrU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ psrlw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8ShrU: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2875,7 +3088,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kIA32I8x16ExtractLane: { Register dst = i.OutputRegister(); __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsx_b(dst, dst); break; } case kSSEI8x16ReplaceLane: { @@ -2919,6 +3131,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register shift = i.InputRegister(1); Register tmp = i.ToRegister(instr->TempAt(0)); XMMRegister tmp_simd = i.TempSimd128Register(1); + // Take shift value modulo 8. + __ and_(shift, 7); // Mask off the unwanted bits before word-shifting. __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); __ mov(tmp, shift); @@ -2938,6 +3152,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register shift = i.InputRegister(1); Register tmp = i.ToRegister(instr->TempAt(0)); XMMRegister tmp_simd = i.TempSimd128Register(1); + // Take shift value modulo 8. + __ and_(shift, 7); // Mask off the unwanted bits before word-shifting. __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); __ mov(tmp, shift); @@ -2959,6 +3175,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpckhbw(kScratchDoubleReg, dst); __ punpcklbw(dst, dst); __ mov(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ and_(tmp, 7); __ add(tmp, Immediate(8)); __ movd(tmp_simd, tmp); __ psraw(kScratchDoubleReg, tmp_simd); @@ -3223,6 +3441,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpckhbw(kScratchDoubleReg, dst); __ punpcklbw(dst, dst); __ mov(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ and_(tmp, 7); __ add(tmp, Immediate(8)); __ movd(tmp_simd, tmp); __ psrlw(kScratchDoubleReg, tmp_simd); @@ -3365,6 +3585,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vxorps(dst, kScratchDoubleReg, i.InputSimd128Register(2)); break; } + case kIA32S8x16Swizzle: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister mask = i.TempSimd128Register(0); + + // Out-of-range indices should return 0, add 112 so that any value > 15 + // saturates to 128 (top bit set), so pshufb will zero that lane. 
+ __ Move(mask, (uint32_t)0x70707070); + __ Pshufd(mask, mask, 0x0); + __ Paddusb(mask, i.InputSimd128Register(1)); + __ Pshufb(dst, mask); + break; + } case kIA32S8x16Shuffle: { XMMRegister dst = i.OutputSimd128Register(); Operand src0 = i.InputOperand(0); diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h index 7530c716b85..a77fb8cd372 100644 --- a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h +++ b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h @@ -116,6 +116,23 @@ namespace compiler { V(IA32PushSimd128) \ V(IA32Poke) \ V(IA32Peek) \ + V(SSEF64x2Splat) \ + V(AVXF64x2Splat) \ + V(SSEF64x2ExtractLane) \ + V(AVXF64x2ExtractLane) \ + V(SSEF64x2ReplaceLane) \ + V(AVXF64x2ReplaceLane) \ + V(IA32F64x2Sqrt) \ + V(IA32F64x2Add) \ + V(IA32F64x2Sub) \ + V(IA32F64x2Mul) \ + V(IA32F64x2Div) \ + V(IA32F64x2Min) \ + V(IA32F64x2Max) \ + V(IA32F64x2Eq) \ + V(IA32F64x2Ne) \ + V(IA32F64x2Lt) \ + V(IA32F64x2Le) \ V(SSEF32x4Splat) \ V(AVXF32x4Splat) \ V(SSEF32x4ExtractLane) \ @@ -129,6 +146,8 @@ namespace compiler { V(AVXF32x4Abs) \ V(SSEF32x4Neg) \ V(AVXF32x4Neg) \ + V(SSEF32x4Sqrt) \ + V(AVXF32x4Sqrt) \ V(IA32F32x4RecipApprox) \ V(IA32F32x4RecipSqrtApprox) \ V(SSEF32x4Add) \ @@ -313,6 +332,7 @@ namespace compiler { V(AVXS128Xor) \ V(SSES128Select) \ V(AVXS128Select) \ + V(IA32S8x16Swizzle) \ V(IA32S8x16Shuffle) \ V(IA32S32x4Swizzle) \ V(IA32S32x4Shuffle) \ diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc index c2097a6691f..287eb49a480 100644 --- a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc +++ b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc @@ -97,6 +97,23 @@ int InstructionScheduler::GetTargetInstructionFlags( case kAVXFloat32Neg: case kIA32BitcastFI: case kIA32BitcastIF: + case kSSEF64x2Splat: + case kAVXF64x2Splat: + case kSSEF64x2ExtractLane: + case kAVXF64x2ExtractLane: + case kSSEF64x2ReplaceLane: + case kAVXF64x2ReplaceLane: + case kIA32F64x2Sqrt: + case kIA32F64x2Add: + case kIA32F64x2Sub: + case kIA32F64x2Mul: + case kIA32F64x2Div: + case kIA32F64x2Min: + case kIA32F64x2Max: + case kIA32F64x2Eq: + case kIA32F64x2Ne: + case kIA32F64x2Lt: + case kIA32F64x2Le: case kSSEF32x4Splat: case kAVXF32x4Splat: case kSSEF32x4ExtractLane: @@ -110,6 +127,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kAVXF32x4Abs: case kSSEF32x4Neg: case kAVXF32x4Neg: + case kSSEF32x4Sqrt: + case kAVXF32x4Sqrt: case kIA32F32x4RecipApprox: case kIA32F32x4RecipSqrtApprox: case kSSEF32x4Add: @@ -294,6 +313,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kAVXS128Xor: case kSSES128Select: case kAVXS128Select: + case kIA32S8x16Swizzle: case kIA32S8x16Shuffle: case kIA32S32x4Swizzle: case kIA32S32x4Shuffle: diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc index ebef39a93a6..a24727aba20 100644 --- a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -200,12 +200,27 @@ namespace { void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) { IA32OperandGenerator g(selector); - InstructionOperand temps[] = {g.TempRegister()}; Node* input = node->InputAt(0); // We have to use a byte register as input to movsxb. InstructionOperand input_op = opcode == kIA32Movsxbl ? g.UseFixed(input, eax) : g.Use(input); - selector->Emit(opcode, g.DefineAsRegister(node), input_op, arraysize(temps), + selector->Emit(opcode, g.DefineAsRegister(node), input_op); +} + +void VisitROWithTemp(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + InstructionOperand temps[] = {g.TempRegister()}; + selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)), + arraysize(temps), temps); +} + +void VisitROWithTempSimd(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + InstructionOperand temps[] = {g.TempSimd128Register()}; + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); } @@ -231,10 +246,13 @@ void VisitRROFloat(InstructionSelector* selector, Node* node, void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input, ArchOpcode avx_opcode, ArchOpcode sse_opcode) { IA32OperandGenerator g(selector); + InstructionOperand temps[] = {g.TempSimd128Register()}; if (selector->IsSupported(AVX)) { - selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input)); + selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input), + arraysize(temps), temps); } else { - selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input)); + selector->Emit(sse_opcode, g.DefineSameAsFirst(node), + g.UseUniqueRegister(input), arraysize(temps), temps); } } @@ -804,12 +822,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) { V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \ - V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \ V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \ - V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \ V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \ - V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \ - V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \ V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \ V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \ V(BitcastFloat32ToInt32, kIA32BitcastFI) \ @@ -819,7 +833,15 @@ void InstructionSelector::VisitWord32Ror(Node* node) { V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \ V(SignExtendWord8ToInt32, kIA32Movsxbl) \ - V(SignExtendWord16ToInt32, kIA32Movsxwl) + V(SignExtendWord16ToInt32, kIA32Movsxwl) \ + V(F64x2Sqrt, kIA32F64x2Sqrt) + +#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) + +#define RO_WITH_TEMP_SIMD_OP_LIST(V) \ + V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \ + V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \ + V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) #define RR_OP_LIST(V) \ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \ @@ -841,13 +863,23 @@ void InstructionSelector::VisitWord32Ror(Node* node) { V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \ V(Float64Mul, kAVXFloat64Mul, 
kSSEFloat64Mul) \ V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \ - V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) + V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \ + V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \ + V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \ + V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \ + V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \ + V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \ + V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \ + V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \ + V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le) #define FLOAT_UNOP_LIST(V) \ V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \ V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \ - V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) + V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \ + V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ + V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg) #define RO_VISITOR(Name, opcode) \ void InstructionSelector::Visit##Name(Node* node) { \ @@ -857,6 +889,22 @@ RO_OP_LIST(RO_VISITOR) #undef RO_VISITOR #undef RO_OP_LIST +#define RO_WITH_TEMP_VISITOR(Name, opcode) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitROWithTemp(this, node, opcode); \ + } +RO_WITH_TEMP_OP_LIST(RO_WITH_TEMP_VISITOR) +#undef RO_WITH_TEMP_VISITOR +#undef RO_WITH_TEMP_OP_LIST + +#define RO_WITH_TEMP_SIMD_VISITOR(Name, opcode) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitROWithTempSimd(this, node, opcode); \ + } +RO_WITH_TEMP_SIMD_OP_LIST(RO_WITH_TEMP_SIMD_VISITOR) +#undef RO_WITH_TEMP_SIMD_VISITOR +#undef RO_WITH_TEMP_SIMD_OP_LIST + #define RR_VISITOR(Name, opcode) \ void InstructionSelector::Visit##Name(Node* node) { \ VisitRR(this, node, opcode); \ @@ -890,6 +938,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { Emit(kIA32Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitInt32Add(Node* node) { IA32OperandGenerator g(this); @@ -1971,6 +2023,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { #define SIMD_UNOP_PREFIX_LIST(V) \ V(F32x4Abs) \ V(F32x4Neg) \ + V(F32x4Sqrt) \ V(S128Not) #define SIMD_ANYTRUE_LIST(V) \ @@ -1995,6 +2048,43 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { V(I8x16ShrS) \ V(I8x16ShrU) +void InstructionSelector::VisitF64x2Min(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0)); + InstructionOperand operand1 = g.UseUnique(node->InputAt(1)); + + if (IsSupported(AVX)) { + Emit(kIA32F64x2Min, g.DefineAsRegister(node), operand0, operand1, + arraysize(temps), temps); + } else { + Emit(kIA32F64x2Min, g.DefineSameAsFirst(node), operand0, operand1, + arraysize(temps), temps); + } +} + +void InstructionSelector::VisitF64x2Max(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0)); + InstructionOperand operand1 = g.UseUnique(node->InputAt(1)); + if (IsSupported(AVX)) { + Emit(kIA32F64x2Max, g.DefineAsRegister(node), operand0, operand1, + arraysize(temps), temps); + } else { + Emit(kIA32F64x2Max, g.DefineSameAsFirst(node), operand0, operand1, + arraysize(temps), temps); + } +} + +void InstructionSelector::VisitF64x2Splat(Node* node) { + VisitRRSimd(this, node, kAVXF64x2Splat, kSSEF64x2Splat); +} + +void 
InstructionSelector::VisitF64x2ExtractLane(Node* node) { + VisitRRISimd(this, node, kAVXF64x2ExtractLane, kSSEF64x2ExtractLane); +} + void InstructionSelector::VisitF32x4Splat(Node* node) { VisitRRSimd(this, node, kAVXF32x4Splat, kSSEF32x4Splat); } @@ -2086,6 +2176,28 @@ VISIT_SIMD_REPLACE_LANE(F32x4) #undef VISIT_SIMD_REPLACE_LANE #undef SIMD_INT_TYPES +// The difference between this and VISIT_SIMD_REPLACE_LANE is that this forces +// operand2 to be UseRegister, because the codegen relies on insertps using +// registers. +// TODO(v8:9764) Remove this UseRegister requirement +#define VISIT_SIMD_REPLACE_LANE_USE_REG(Type) \ + void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ + IA32OperandGenerator g(this); \ + InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \ + InstructionOperand operand1 = \ + g.UseImmediate(OpParameter<int32_t>(node->op())); \ + InstructionOperand operand2 = g.UseRegister(node->InputAt(1)); \ + if (IsSupported(AVX)) { \ + Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \ + operand1, operand2); \ + } else { \ + Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \ + operand1, operand2); \ + } \ + } +VISIT_SIMD_REPLACE_LANE_USE_REG(F64x2) +#undef VISIT_SIMD_REPLACE_LANE_USE_REG + #define VISIT_SIMD_SHIFT(Opcode) \ void InstructionSelector::Visit##Opcode(Node* node) { \ VisitRROSimdShift(this, node, kAVX##Opcode, kSSE##Opcode); \ @@ -2132,12 +2244,12 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE) #undef VISIT_SIMD_ANYTRUE #undef SIMD_ANYTRUE_LIST -#define VISIT_SIMD_ALLTRUE(Opcode) \ - void InstructionSelector::Visit##Opcode(Node* node) { \ - IA32OperandGenerator g(this); \ - InstructionOperand temps[] = {g.TempRegister()}; \ - Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)), \ - arraysize(temps), temps); \ +#define VISIT_SIMD_ALLTRUE(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + IA32OperandGenerator g(this); \ + InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \ + Emit(kIA32##Opcode, g.DefineAsRegister(node), \ + g.UseUnique(node->InputAt(0)), arraysize(temps), temps); \ } SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE) #undef VISIT_SIMD_ALLTRUE @@ -2489,6 +2601,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) { Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps); } +void InstructionSelector::VisitS8x16Swizzle(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kIA32S8x16Swizzle, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), + arraysize(temps), temps); +} + // static MachineOperatorBuilder::Flags InstructionSelector::SupportedMachineOperatorFlags() { diff --git a/chromium/v8/src/compiler/backend/instruction-scheduler.cc b/chromium/v8/src/compiler/backend/instruction-scheduler.cc index dc66813740b..d4920cd575a 100644 --- a/chromium/v8/src/compiler/backend/instruction-scheduler.cc +++ b/chromium/v8/src/compiler/backend/instruction-scheduler.cc @@ -4,7 +4,7 @@ #include "src/compiler/backend/instruction-scheduler.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/base/utils/random-number-generator.h" #include "src/execution/isolate.h" diff --git a/chromium/v8/src/compiler/backend/instruction-selector-impl.h b/chromium/v8/src/compiler/backend/instruction-selector-impl.h index a3f62e7ba40..13ea049eba4 100644 --- a/chromium/v8/src/compiler/backend/instruction-selector-impl.h +++ 
b/chromium/v8/src/compiler/backend/instruction-selector-impl.h @@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) { // Helper struct containing data about a table or lookup switch. class SwitchInfo { public: - SwitchInfo(ZoneVector<CaseInfo>& cases, // NOLINT(runtime/references) - int32_t min_value, int32_t max_value, BasicBlock* default_branch) + SwitchInfo(ZoneVector<CaseInfo> const& cases, int32_t min_value, + int32_t max_value, BasicBlock* default_branch) : cases_(cases), min_value_(min_value), max_value_(max_value), @@ -193,17 +193,6 @@ class OperandGenerator { reg.code(), GetVReg(node))); } - InstructionOperand UseExplicit(LinkageLocation location) { - MachineRepresentation rep = InstructionSequence::DefaultRepresentation(); - if (location.IsRegister()) { - return ExplicitOperand(LocationOperand::REGISTER, rep, - location.AsRegister()); - } else { - return ExplicitOperand(LocationOperand::STACK_SLOT, rep, - location.GetLocation()); - } - } - InstructionOperand UseImmediate(int immediate) { return sequence()->AddImmediate(Constant(immediate)); } @@ -275,6 +264,16 @@ class OperandGenerator { InstructionOperand::kInvalidVirtualRegister); } + template <typename FPRegType> + InstructionOperand TempFpRegister(FPRegType reg) { + UnallocatedOperand op = + UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER, reg.code(), + sequence()->NextVirtualRegister()); + sequence()->MarkAsRepresentation(MachineRepresentation::kSimd128, + op.virtual_register()); + return op; + } + InstructionOperand TempImmediate(int32_t imm) { return sequence()->AddImmediate(Constant(imm)); } diff --git a/chromium/v8/src/compiler/backend/instruction-selector.cc b/chromium/v8/src/compiler/backend/instruction-selector.cc index 43193ec2b11..22d81c0c550 100644 --- a/chromium/v8/src/compiler/backend/instruction-selector.cc +++ b/chromium/v8/src/compiler/backend/instruction-selector.cc @@ -6,7 +6,7 @@ #include <limits> -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/tick-counter.h" #include "src/compiler/backend/instruction-selector-impl.h" @@ -1439,6 +1439,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsWord64(node), VisitWord64ReverseBits(node); case IrOpcode::kWord64ReverseBytes: return MarkAsWord64(node), VisitWord64ReverseBytes(node); + case IrOpcode::kSimd128ReverseBytes: + return MarkAsSimd128(node), VisitSimd128ReverseBytes(node); case IrOpcode::kInt64AbsWithOverflow: return MarkAsWord64(node), VisitInt64AbsWithOverflow(node); case IrOpcode::kWord64Equal: @@ -1502,7 +1504,7 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kUint64Mod: return MarkAsWord64(node), VisitUint64Mod(node); case IrOpcode::kBitcastTaggedToWord: - case IrOpcode::kBitcastTaggedSignedToWord: + case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: return MarkAsRepresentation(MachineType::PointerRepresentation(), node), VisitBitcastTaggedToWord(node); case IrOpcode::kBitcastWordToTagged: @@ -1857,6 +1859,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF64x2Abs(node); case IrOpcode::kF64x2Neg: return MarkAsSimd128(node), VisitF64x2Neg(node); + case IrOpcode::kF64x2Sqrt: + return MarkAsSimd128(node), VisitF64x2Sqrt(node); case IrOpcode::kF64x2Add: return MarkAsSimd128(node), VisitF64x2Add(node); case IrOpcode::kF64x2Sub: @@ -1877,6 +1881,10 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF64x2Lt(node); case IrOpcode::kF64x2Le: 
return MarkAsSimd128(node), VisitF64x2Le(node); + case IrOpcode::kF64x2Qfma: + return MarkAsSimd128(node), VisitF64x2Qfma(node); + case IrOpcode::kF64x2Qfms: + return MarkAsSimd128(node), VisitF64x2Qfms(node); case IrOpcode::kF32x4Splat: return MarkAsSimd128(node), VisitF32x4Splat(node); case IrOpcode::kF32x4ExtractLane: @@ -1891,6 +1899,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF32x4Abs(node); case IrOpcode::kF32x4Neg: return MarkAsSimd128(node), VisitF32x4Neg(node); + case IrOpcode::kF32x4Sqrt: + return MarkAsSimd128(node), VisitF32x4Sqrt(node); case IrOpcode::kF32x4RecipApprox: return MarkAsSimd128(node), VisitF32x4RecipApprox(node); case IrOpcode::kF32x4RecipSqrtApprox: @@ -1917,6 +1927,10 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF32x4Lt(node); case IrOpcode::kF32x4Le: return MarkAsSimd128(node), VisitF32x4Le(node); + case IrOpcode::kF32x4Qfma: + return MarkAsSimd128(node), VisitF32x4Qfma(node); + case IrOpcode::kF32x4Qfms: + return MarkAsSimd128(node), VisitF32x4Qfms(node); case IrOpcode::kI64x2Splat: return MarkAsSimd128(node), VisitI64x2Splat(node); case IrOpcode::kI64x2ExtractLane: @@ -2137,6 +2151,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitS128Not(node); case IrOpcode::kS128Select: return MarkAsSimd128(node), VisitS128Select(node); + case IrOpcode::kS8x16Swizzle: + return MarkAsSimd128(node), VisitS8x16Swizzle(node); case IrOpcode::kS8x16Shuffle: return MarkAsSimd128(node), VisitS8x16Shuffle(node); case IrOpcode::kS1x2AnyTrue: @@ -2286,8 +2302,8 @@ void InstructionSelector::VisitFloat64Tanh(Node* node) { VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh); } -void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw, - InstructionOperand& index_operand) { +void InstructionSelector::EmitTableSwitch( + const SwitchInfo& sw, InstructionOperand const& index_operand) { OperandGenerator g(this); size_t input_count = 2 + sw.value_range(); DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2); @@ -2304,8 +2320,8 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw, Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr); } -void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw, - InstructionOperand& value_operand) { +void InstructionSelector::EmitLookupSwitch( + const SwitchInfo& sw, InstructionOperand const& value_operand) { OperandGenerator g(this); std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder(); size_t input_count = 2 + sw.case_count() * 2; @@ -2322,7 +2338,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw, } void InstructionSelector::EmitBinarySearchSwitch( - const SwitchInfo& sw, InstructionOperand& value_operand) { + const SwitchInfo& sw, InstructionOperand const& value_operand) { OperandGenerator g(this); size_t input_count = 2 + sw.case_count() * 2; DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2); @@ -2607,21 +2623,25 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { #if !V8_TARGET_ARCH_X64 #if !V8_TARGET_ARCH_ARM64 +#if !V8_TARGET_ARCH_IA32 void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Neg(Node* node) { 
UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); } +#endif // !V8_TARGET_ARCH_IA32 void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } @@ -2630,6 +2650,7 @@ void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); } @@ -2639,8 +2660,11 @@ void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); } #endif // !V8_TARGET_ARCH_ARM64 -void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); } diff --git a/chromium/v8/src/compiler/backend/instruction-selector.h b/chromium/v8/src/compiler/backend/instruction-selector.h index eb3e0984272..e951c90f953 100644 --- a/chromium/v8/src/compiler/backend/instruction-selector.h +++ b/chromium/v8/src/compiler/backend/instruction-selector.h @@ -502,15 +502,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final { FeedbackSource const& feedback, Node* frame_state); - void EmitTableSwitch( - const SwitchInfo& sw, - InstructionOperand& index_operand); // NOLINT(runtime/references) - void EmitLookupSwitch( - const SwitchInfo& sw, - InstructionOperand& value_operand); // NOLINT(runtime/references) - void EmitBinarySearchSwitch( - 
const SwitchInfo& sw, - InstructionOperand& value_operand); // NOLINT(runtime/references) + void EmitTableSwitch(const SwitchInfo& sw, + InstructionOperand const& index_operand); + void EmitLookupSwitch(const SwitchInfo& sw, + InstructionOperand const& value_operand); + void EmitBinarySearchSwitch(const SwitchInfo& sw, + InstructionOperand const& value_operand); void TryRename(InstructionOperand* op); int GetRename(int virtual_register); diff --git a/chromium/v8/src/compiler/backend/instruction.cc b/chromium/v8/src/compiler/backend/instruction.cc index 06158b0c72e..076f1b596e2 100644 --- a/chromium/v8/src/compiler/backend/instruction.cc +++ b/chromium/v8/src/compiler/backend/instruction.cc @@ -168,7 +168,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) { return os << "[immediate:" << imm.indexed_value() << "]"; } } - case InstructionOperand::EXPLICIT: case InstructionOperand::ALLOCATED: { LocationOperand allocated = LocationOperand::cast(op); if (op.IsStackSlot()) { @@ -192,9 +191,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) { os << "[" << Simd128Register::from_code(allocated.register_code()) << "|R"; } - if (allocated.IsExplicit()) { - os << "|E"; - } switch (allocated.representation()) { case MachineRepresentation::kNone: os << "|-"; @@ -294,17 +290,6 @@ void ParallelMove::PrepareInsertAfter( if (replacement != nullptr) move->set_source(replacement->source()); } -ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep, - int index) - : LocationOperand(EXPLICIT, kind, rep, index) { - DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep), - GetRegConfig()->IsAllocatableGeneralCode(index)); - DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32, - GetRegConfig()->IsAllocatableFloatCode(index)); - DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64), - GetRegConfig()->IsAllocatableDoubleCode(index)); -} - Instruction::Instruction(InstructionCode opcode) : opcode_(opcode), bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) | diff --git a/chromium/v8/src/compiler/backend/instruction.h b/chromium/v8/src/compiler/backend/instruction.h index f5f7f64c51e..321f069531f 100644 --- a/chromium/v8/src/compiler/backend/instruction.h +++ b/chromium/v8/src/compiler/backend/instruction.h @@ -43,9 +43,8 @@ class V8_EXPORT_PRIVATE InstructionOperand { CONSTANT, IMMEDIATE, // Location operand kinds. - EXPLICIT, ALLOCATED, - FIRST_LOCATION_OPERAND_KIND = EXPLICIT + FIRST_LOCATION_OPERAND_KIND = ALLOCATED // Location operand kinds must be last. }; @@ -68,11 +67,6 @@ class V8_EXPORT_PRIVATE InstructionOperand { // embedded directly in instructions, e.g. small integers and on some // platforms Objects. INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE) - // ExplicitOperands do not participate in register allocation. They are - // created by the instruction selector for direct access to registers and - // stack slots, completely bypassing the register allocator. They are never - // associated with a virtual register - INSTRUCTION_OPERAND_PREDICATE(Explicit, EXPLICIT) // AllocatedOperands are registers or stack slots that are assigned by the // register allocator and are always associated with a virtual register. 
INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED) @@ -515,19 +509,6 @@ class LocationOperand : public InstructionOperand { using IndexField = BitField64<int32_t, 35, 29>; }; -class V8_EXPORT_PRIVATE ExplicitOperand - : public NON_EXPORTED_BASE(LocationOperand) { - public: - ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index); - - static ExplicitOperand* New(Zone* zone, LocationKind kind, - MachineRepresentation rep, int index) { - return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index)); - } - - INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT) -}; - class AllocatedOperand : public LocationOperand { public: AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index) @@ -643,7 +624,7 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const { } return InstructionOperand::KindField::update( LocationOperand::RepresentationField::update(this->value_, canonical), - LocationOperand::EXPLICIT); + LocationOperand::ALLOCATED); } return this->value_; } @@ -776,11 +757,11 @@ class V8_EXPORT_PRIVATE Instruction final { public: size_t OutputCount() const { return OutputCountField::decode(bit_field_); } const InstructionOperand* OutputAt(size_t i) const { - DCHECK(i < OutputCount()); + DCHECK_LT(i, OutputCount()); return &operands_[i]; } InstructionOperand* OutputAt(size_t i) { - DCHECK(i < OutputCount()); + DCHECK_LT(i, OutputCount()); return &operands_[i]; } @@ -790,21 +771,21 @@ class V8_EXPORT_PRIVATE Instruction final { size_t InputCount() const { return InputCountField::decode(bit_field_); } const InstructionOperand* InputAt(size_t i) const { - DCHECK(i < InputCount()); + DCHECK_LT(i, InputCount()); return &operands_[OutputCount() + i]; } InstructionOperand* InputAt(size_t i) { - DCHECK(i < InputCount()); + DCHECK_LT(i, InputCount()); return &operands_[OutputCount() + i]; } size_t TempCount() const { return TempCountField::decode(bit_field_); } const InstructionOperand* TempAt(size_t i) const { - DCHECK(i < TempCount()); + DCHECK_LT(i, TempCount()); return &operands_[OutputCount() + InputCount() + i]; } InstructionOperand* TempAt(size_t i) { - DCHECK(i < TempCount()); + DCHECK_LT(i, TempCount()); return &operands_[OutputCount() + InputCount() + i]; } diff --git a/chromium/v8/src/compiler/backend/jump-threading.cc b/chromium/v8/src/compiler/backend/jump-threading.cc index dfb917a58c4..ee195bf51e1 100644 --- a/chromium/v8/src/compiler/backend/jump-threading.cc +++ b/chromium/v8/src/compiler/backend/jump-threading.cc @@ -69,11 +69,11 @@ bool IsBlockWithBranchPoisoning(InstructionSequence* code, } // namespace bool JumpThreading::ComputeForwarding(Zone* local_zone, - ZoneVector<RpoNumber>& result, + ZoneVector<RpoNumber>* result, InstructionSequence* code, bool frame_at_start) { ZoneStack<RpoNumber> stack(local_zone); - JumpThreadingState state = {false, result, stack}; + JumpThreadingState state = {false, *result, stack}; state.Clear(code->InstructionBlockCount()); // Iterate over the blocks forward, pushing the blocks onto the stack. 
@@ -135,15 +135,15 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone, } #ifdef DEBUG - for (RpoNumber num : result) { + for (RpoNumber num : *result) { DCHECK(num.IsValid()); } #endif if (FLAG_trace_turbo_jt) { - for (int i = 0; i < static_cast<int>(result.size()); i++) { + for (int i = 0; i < static_cast<int>(result->size()); i++) { TRACE("B%d ", i); - int to = result[i].ToInt(); + int to = (*result)[i].ToInt(); if (i != to) { TRACE("-> B%d\n", to); } else { @@ -156,7 +156,7 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone, } void JumpThreading::ApplyForwarding(Zone* local_zone, - ZoneVector<RpoNumber>& result, + ZoneVector<RpoNumber> const& result, InstructionSequence* code) { if (!FLAG_turbo_jt) return; diff --git a/chromium/v8/src/compiler/backend/jump-threading.h b/chromium/v8/src/compiler/backend/jump-threading.h index ce60ebcb2e3..ce9e3949249 100644 --- a/chromium/v8/src/compiler/backend/jump-threading.h +++ b/chromium/v8/src/compiler/backend/jump-threading.h @@ -17,17 +17,14 @@ class V8_EXPORT_PRIVATE JumpThreading { public: // Compute the forwarding map of basic blocks to their ultimate destination. // Returns {true} if there is at least one block that is forwarded. - static bool ComputeForwarding( - Zone* local_zone, - ZoneVector<RpoNumber>& result, // NOLINT(runtime/references) - InstructionSequence* code, bool frame_at_start); + static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>* result, + InstructionSequence* code, bool frame_at_start); // Rewrite the instructions to forward jumps and branches. // May also negate some branches. - static void ApplyForwarding( - Zone* local_zone, - ZoneVector<RpoNumber>& forwarding, // NOLINT(runtime/references) - InstructionSequence* code); + static void ApplyForwarding(Zone* local_zone, + ZoneVector<RpoNumber> const& forwarding, + InstructionSequence* code); }; } // namespace compiler diff --git a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc index 239075392af..ee23402e69b 100644 --- a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc +++ b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc @@ -265,34 +265,33 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU( - bool& predicate, // NOLINT(runtime/references) - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, + FlagsCondition condition) { switch (condition) { case kEqual: - predicate = true; + *predicate = true; return EQ; case kNotEqual: - predicate = false; + *predicate = false; return EQ; case kUnsignedLessThan: - predicate = true; + *predicate = true; return OLT; case kUnsignedGreaterThanOrEqual: - predicate = false; + *predicate = false; return OLT; case kUnsignedLessThanOrEqual: - predicate = true; + *predicate = true; return OLE; case kUnsignedGreaterThan: - predicate = false; + *predicate = false; return OLE; case kUnorderedEqual: case kUnorderedNotEqual: - predicate = true; + *predicate = true; break; default: - predicate = true; + *predicate = true; break; } UNREACHABLE(); @@ -303,9 +302,9 @@ FPUCondition FlagsConditionToConditionCmpFPU( << "\""; \ UNIMPLEMENTED(); -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - MipsOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* 
instr, + MipsOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -780,12 +779,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); - Label return_location; - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + Label start_call; + bool isWasmCapiFunction = + linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); + // from start_call to return address. + int offset = 40; +#if V8_HOST_ARCH_MIPS + if (__ emit_debug_code()) { + offset += 16; + } +#endif + if (isWasmCapiFunction) { // Put the return address in a stack slot. - __ LoadAddress(kScratchReg, &return_location); - __ sw(kScratchReg, - MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(kScratchReg, ra); + __ bind(&start_call); + __ nal(); + __ nop(); + __ Addu(ra, ra, offset - 8); // 8 = nop + nal + __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(ra, kScratchReg); } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); @@ -794,7 +806,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } - __ bind(&return_location); + if (isWasmCapiFunction) { + CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); + } + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack @@ -1179,7 +1194,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroSingleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { @@ -1239,7 +1254,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroDoubleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { __ Move(kDoubleRegZero, 0.0); @@ -2038,6 +2053,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); break; } + case kMipsF32x4Sqrt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kMipsF32x4RecipApprox: { CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -3026,7 +3046,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } else if (instr->arch_opcode() == kMipsCmpS || instr->arch_opcode() == kMipsCmpD) { bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ BranchTrueF(tlabel); } else { @@ -3116,7 +3136,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, case kMipsCmpS: case kMipsCmpD: { bool predicate; - 
FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); } else { @@ -3314,7 +3334,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, __ Move(kDoubleRegZero, 0.0); } bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (!IsMipsArchVariant(kMips32r6)) { __ li(result, Operand(1)); if (predicate) { diff --git a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h index e8020d9e895..af0774f4688 100644 --- a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h +++ b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h @@ -159,6 +159,7 @@ namespace compiler { V(MipsI32x4MinU) \ V(MipsF32x4Abs) \ V(MipsF32x4Neg) \ + V(MipsF32x4Sqrt) \ V(MipsF32x4RecipApprox) \ V(MipsF32x4RecipSqrtApprox) \ V(MipsF32x4Add) \ diff --git a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc index 4e6aef52f49..ba17ad25819 100644 --- a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc +++ b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc @@ -54,6 +54,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMipsF32x4Div: case kMipsF32x4Ne: case kMipsF32x4Neg: + case kMipsF32x4Sqrt: case kMipsF32x4RecipApprox: case kMipsF32x4RecipSqrtApprox: case kMipsF32x4ReplaceLane: diff --git a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc index bb47262c6c3..7ee5c7c2c77 100644 --- a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc +++ b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/base/adapters.h" #include "src/base/bits.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" @@ -781,6 +780,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitWord32Ctz(Node* node) { MipsOperandGenerator g(this); Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); @@ -2015,6 +2018,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \ V(F32x4Abs, kMipsF32x4Abs) \ V(F32x4Neg, kMipsF32x4Neg) \ + V(F32x4Sqrt, kMipsF32x4Sqrt) \ V(F32x4RecipApprox, kMipsF32x4RecipApprox) \ V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \ V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \ diff --git a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc index 5682bed71a4..9cec463e875 100644 --- a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -278,42 +278,41 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU( - bool& predicate, // NOLINT(runtime/references) - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, + FlagsCondition condition) { switch (condition) { case kEqual: - predicate = true; + *predicate = true; return EQ; case kNotEqual: - predicate = false; + *predicate = false; return EQ; case kUnsignedLessThan: - predicate = true; + *predicate = true; return OLT; case kUnsignedGreaterThanOrEqual: - predicate = false; + *predicate = false; return OLT; case kUnsignedLessThanOrEqual: - predicate = true; + *predicate = true; return OLE; case kUnsignedGreaterThan: - predicate = false; + *predicate = false; return OLE; case kUnorderedEqual: case kUnorderedNotEqual: - predicate = true; + *predicate = true; break; default: - predicate = true; + *predicate = true; break; } UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - MipsOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + MipsOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -758,12 +757,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); - Label return_location; - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + Label start_call; + bool isWasmCapiFunction = + linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); + // from start_call to return address. + int offset = 48; +#if V8_HOST_ARCH_MIPS64 + if (__ emit_debug_code()) { + offset += 16; + } +#endif + if (isWasmCapiFunction) { // Put the return address in a stack slot. 
- __ LoadAddress(kScratchReg, &return_location); - __ sd(kScratchReg, - MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(kScratchReg, ra); + __ bind(&start_call); + __ nal(); + __ nop(); + __ Daddu(ra, ra, offset - 8); // 8 = nop + nal + __ sd(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(ra, kScratchReg); } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); @@ -772,7 +784,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } - __ bind(&return_location); + if (isWasmCapiFunction) { + CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); + } + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack @@ -1276,7 +1291,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroSingleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { @@ -1339,7 +1354,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroDoubleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { __ Move(kDoubleRegZero, 0.0); @@ -2233,6 +2248,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } + case kMips64F32x4Sqrt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kMips64I32x4Neg: { CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); @@ -3151,7 +3171,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } else if (instr->arch_opcode() == kMips64CmpS || instr->arch_opcode() == kMips64CmpD) { bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ BranchTrueF(tlabel); } else { @@ -3261,7 +3281,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, case kMips64CmpS: case kMips64CmpD: { bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); } else { @@ -3470,7 +3490,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, __ Move(kDoubleRegZero, 0.0); } bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (kArchVariant != kMips64r6) { __ li(result, Operand(1)); if (predicate) { diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h index edc8924757d..bcf3532b572 100644 --- a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h +++ 
b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h @@ -189,6 +189,7 @@ namespace compiler { V(Mips64I32x4MinU) \ V(Mips64F32x4Abs) \ V(Mips64F32x4Neg) \ + V(Mips64F32x4Sqrt) \ V(Mips64F32x4RecipApprox) \ V(Mips64F32x4RecipSqrtApprox) \ V(Mips64F32x4Add) \ diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc index 880b424c416..fe2d33d1db5 100644 --- a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc +++ b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc @@ -82,6 +82,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMips64F32x4Div: case kMips64F32x4Ne: case kMips64F32x4Neg: + case kMips64F32x4Sqrt: case kMips64F32x4RecipApprox: case kMips64F32x4RecipSqrtApprox: case kMips64F32x4ReplaceLane: diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index 9c717ab1e91..dfc0ff5badf 100644 --- a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/base/adapters.h" #include "src/base/bits.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" @@ -823,6 +822,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitWord32Ctz(Node* node) { Mips64OperandGenerator g(this); Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); @@ -2678,6 +2681,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \ V(F32x4Abs, kMips64F32x4Abs) \ V(F32x4Neg, kMips64F32x4Neg) \ + V(F32x4Sqrt, kMips64F32x4Sqrt) \ V(F32x4RecipApprox, kMips64F32x4RecipApprox) \ V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \ diff --git a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc index 5c69bc34a12..dde1804adbb 100644 --- a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -263,9 +263,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, Instruction* instr, - PPCOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, + PPCOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode())); if (access_mode == kMemoryAccessPoisoned) { @@ -1024,7 +1023,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Label start_call; bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); +#if defined(_AIX) + // AIX/PPC64BE Linux uses a function descriptor + // and emits 2 extra Load instrcutions under CallCFunctionHelper. 
+ constexpr int offset = 11 * kInstrSize; +#else constexpr int offset = 9 * kInstrSize; +#endif if (isWasmCapiFunction) { __ mflr(r0); __ bind(&start_call); @@ -1043,9 +1048,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } // TODO(miladfar): In the above block, kScratchReg must be populated with // the strictly-correct PC, which is the return address at this spot. The - // offset is set to 36 (9 * kInstrSize) right now, which is counted from - // where we are binding to the label and ends at this spot. If failed, - // replace it with the correct offset suggested. More info on f5ab7d3. + // offset is set to 36 (9 * kInstrSize) on pLinux and 44 on AIX, which is + // counted from where we are binding to the label and ends at this spot. + // If failed, replace it with the correct offset suggested. More info on + // f5ab7d3. if (isWasmCapiFunction) CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index ef8490a7265..2ffd6495d72 100644 --- a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -926,6 +926,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + // TODO(miladfar): Implement the ppc selector for reversing SIMD bytes. + // Check if the input node is a Load and do a Load Reverse at once. 
+ UNIMPLEMENTED(); +} + void InstructionSelector::VisitInt32Add(Node* node) { VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm); } @@ -2283,6 +2289,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); } diff --git a/chromium/v8/src/compiler/backend/register-allocator-verifier.cc b/chromium/v8/src/compiler/backend/register-allocator-verifier.cc index 53349c9c2b4..17e0b8ca755 100644 --- a/chromium/v8/src/compiler/backend/register-allocator-verifier.cc +++ b/chromium/v8/src/compiler/backend/register-allocator-verifier.cc @@ -92,7 +92,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier( void RegisterAllocatorVerifier::VerifyInput( const OperandConstraint& constraint) { CHECK_NE(kSameAsFirst, constraint.type_); - if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) { + if (constraint.type_ != kImmediate) { CHECK_NE(InstructionOperand::kInvalidVirtualRegister, constraint.virtual_register_); } @@ -102,14 +102,12 @@ void RegisterAllocatorVerifier::VerifyTemp( const OperandConstraint& constraint) { CHECK_NE(kSameAsFirst, constraint.type_); CHECK_NE(kImmediate, constraint.type_); - CHECK_NE(kExplicit, constraint.type_); CHECK_NE(kConstant, constraint.type_); } void RegisterAllocatorVerifier::VerifyOutput( const OperandConstraint& constraint) { CHECK_NE(kImmediate, constraint.type_); - CHECK_NE(kExplicit, constraint.type_); CHECK_NE(InstructionOperand::kInvalidVirtualRegister, constraint.virtual_register_); } @@ -149,8 +147,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op, constraint->type_ = kConstant; constraint->value_ = ConstantOperand::cast(op)->virtual_register(); constraint->virtual_register_ = constraint->value_; - } else if (op->IsExplicit()) { - constraint->type_ = kExplicit; } else if (op->IsImmediate()) { const ImmediateOperand* imm = ImmediateOperand::cast(op); int value = imm->type() == ImmediateOperand::INLINE ? 
imm->inline_value() @@ -235,9 +231,6 @@ void RegisterAllocatorVerifier::CheckConstraint( case kFPRegister: CHECK_WITH_MSG(op->IsFPRegister(), caller_info_); return; - case kExplicit: - CHECK_WITH_MSG(op->IsExplicit(), caller_info_); - return; case kFixedRegister: case kRegisterAndSlot: CHECK_WITH_MSG(op->IsRegister(), caller_info_); @@ -503,8 +496,7 @@ void RegisterAllocatorVerifier::VerifyGapMoves() { instr_constraint.operand_constraints_; size_t count = 0; for (size_t i = 0; i < instr->InputCount(); ++i, ++count) { - if (op_constraints[count].type_ == kImmediate || - op_constraints[count].type_ == kExplicit) { + if (op_constraints[count].type_ == kImmediate) { continue; } int virtual_register = op_constraints[count].virtual_register_; diff --git a/chromium/v8/src/compiler/backend/register-allocator-verifier.h b/chromium/v8/src/compiler/backend/register-allocator-verifier.h index 68e69c0d164..7110c2eb42c 100644 --- a/chromium/v8/src/compiler/backend/register-allocator-verifier.h +++ b/chromium/v8/src/compiler/backend/register-allocator-verifier.h @@ -188,7 +188,6 @@ class RegisterAllocatorVerifier final : public ZoneObject { kRegisterOrSlot, kRegisterOrSlotFP, kRegisterOrSlotOrConstant, - kExplicit, kSameAsFirst, kRegisterAndSlot }; diff --git a/chromium/v8/src/compiler/backend/register-allocator.cc b/chromium/v8/src/compiler/backend/register-allocator.cc index 21eef0485c5..945554eb323 100644 --- a/chromium/v8/src/compiler/backend/register-allocator.cc +++ b/chromium/v8/src/compiler/backend/register-allocator.cc @@ -6,7 +6,7 @@ #include <iomanip> -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/base/small-vector.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/tick-counter.h" @@ -317,7 +317,6 @@ UsePositionHintType UsePosition::HintTypeForOperand( switch (op.kind()) { case InstructionOperand::CONSTANT: case InstructionOperand::IMMEDIATE: - case InstructionOperand::EXPLICIT: return UsePositionHintType::kNone; case InstructionOperand::UNALLOCATED: return UsePositionHintType::kUnresolved; @@ -797,12 +796,13 @@ LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const { return start_search->end(); } -LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) const { +LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) { UseInterval* start_search = FirstSearchIntervalForPosition(position); while (start_search->start() < position) { start_search = start_search->next(); } - return start_search->start(); + next_start_ = start_search->start(); + return next_start_; } LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const { @@ -1940,8 +1940,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { // Handle fixed input operands of second instruction. for (size_t i = 0; i < second->InputCount(); i++) { InstructionOperand* input = second->InputAt(i); - if (input->IsImmediate() || input->IsExplicit()) { - continue; // Ignore immediates and explicitly reserved registers. + if (input->IsImmediate()) { + continue; // Ignore immediates. } UnallocatedOperand* cur_input = UnallocatedOperand::cast(input); if (cur_input->HasFixedPolicy()) { @@ -2323,8 +2323,8 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, for (size_t i = 0; i < instr->InputCount(); i++) { InstructionOperand* input = instr->InputAt(i); - if (input->IsImmediate() || input->IsExplicit()) { - continue; // Ignore immediates and explicitly reserved registers. 
+ if (input->IsImmediate()) { + continue; // Ignore immediates. } LifetimePosition use_pos; if (input->IsUnallocated() && @@ -2504,10 +2504,10 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, predecessor_hint_preference |= kNotDeferredBlockPreference; } - // - Prefer hints from allocated (or explicit) operands. + // - Prefer hints from allocated operands. // - // Already-allocated or explicit operands are typically assigned using - // the parallel moves on the last instruction. For example: + // Already-allocated operands are typically assigned using the parallel + // moves on the last instruction. For example: // // gap (v101 = [x0|R|w32]) (v100 = v101) // ArchJmp @@ -2515,7 +2515,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, // phi: v100 = v101 v102 // // We have already found the END move, so look for a matching START move - // from an allocated (or explicit) operand. + // from an allocated operand. // // Note that we cannot simply look up data()->live_ranges()[vreg] here // because the live ranges are still being built when this function is @@ -2527,7 +2527,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, for (MoveOperands* move : *moves) { InstructionOperand& to = move->destination(); if (predecessor_hint->Equals(to)) { - if (move->source().IsAllocated() || move->source().IsExplicit()) { + if (move->source().IsAllocated()) { predecessor_hint_preference |= kMoveIsAllocatedPreference; } break; @@ -3095,11 +3095,11 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, : RegisterAllocator(data, kind), unhandled_live_ranges_(local_zone), active_live_ranges_(local_zone), - inactive_live_ranges_(local_zone), + inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone), + local_zone), next_active_ranges_change_(LifetimePosition::Invalid()), next_inactive_ranges_change_(LifetimePosition::Invalid()) { active_live_ranges().reserve(8); - inactive_live_ranges().reserve(8); } void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range, @@ -3143,15 +3143,15 @@ void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) { } } -void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live, +void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet* to_be_live, LifetimePosition position, SpillMode spill_mode) { for (auto it = active_live_ranges().begin(); it != active_live_ranges().end();) { LiveRange* active_range = *it; TopLevelLiveRange* toplevel = (*it)->TopLevel(); - auto found = to_be_live.find({toplevel, kUnassignedRegister}); - if (found == to_be_live.end()) { + auto found = to_be_live->find({toplevel, kUnassignedRegister}); + if (found == to_be_live->end()) { // Is not contained in {to_be_live}, spill it. // Fixed registers are exempt from this. They might have been // added from inactive at the block boundary but we know that @@ -3207,7 +3207,7 @@ void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live, } else { // This range is contained in {to_be_live}, so we can keep it. int expected_register = (*found).expected_register; - to_be_live.erase(found); + to_be_live->erase(found); if (expected_register == active_range->assigned_register()) { // Was life and in correct register, simply pass through. TRACE("Keeping %d:%d in %s\n", toplevel->vreg(), @@ -3238,31 +3238,22 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range, // give reloading registers pecedence. 
That way we would compute the // intersection for the entire future. LifetimePosition new_end = range->End(); - for (const auto inactive : inactive_live_ranges()) { - if (kSimpleFPAliasing || !check_fp_aliasing()) { - if (inactive->assigned_register() != reg) continue; - } else { - bool conflict = inactive->assigned_register() == reg; - if (!conflict) { - int alias_base_index = -1; - int aliases = data()->config()->GetAliases(range->representation(), reg, - inactive->representation(), - &alias_base_index); - DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); - while (aliases-- && !conflict) { - int aliased_reg = alias_base_index + aliases; - if (aliased_reg == reg) { - conflict = true; - } - } - } - if (!conflict) continue; + for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) { + if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) { + continue; } - for (auto interval = inactive->first_interval(); interval != nullptr; - interval = interval->next()) { - if (interval->start() > new_end) break; - if (interval->end() <= range->Start()) continue; - if (new_end > interval->start()) new_end = interval->start(); + for (const auto cur_inactive : inactive_live_ranges(cur_reg)) { + if (!kSimpleFPAliasing && check_fp_aliasing() && + !data()->config()->AreAliases(cur_inactive->representation(), cur_reg, + range->representation(), reg)) { + continue; + } + for (auto interval = cur_inactive->first_interval(); interval != nullptr; + interval = interval->next()) { + if (interval->start() > new_end) break; + if (interval->end() <= range->Start()) continue; + if (new_end > interval->start()) new_end = interval->start(); + } } } if (new_end != range->End()) { @@ -3275,8 +3266,8 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range, return range; } -void LinearScanAllocator::ReloadLiveRanges(RangeWithRegisterSet& to_be_live, - LifetimePosition position) { +void LinearScanAllocator::ReloadLiveRanges( + RangeWithRegisterSet const& to_be_live, LifetimePosition position) { // Assumption: All ranges in {to_be_live} are currently spilled and there are // no conflicting registers in the active ranges. // The former is ensured by SpillNotLiveRanges, the latter is by construction @@ -3558,11 +3549,17 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode, Min(updated->End(), next_active_ranges_change_); }); } - for (auto inactive : inactive_live_ranges()) { - split_conflicting(range, inactive, [this](LiveRange* updated) { - next_inactive_ranges_change_ = - Min(updated->End(), next_inactive_ranges_change_); - }); + for (int reg = 0; reg < num_registers(); ++reg) { + if ((kSimpleFPAliasing || !check_fp_aliasing()) && + reg != range->assigned_register()) { + continue; + } + for (auto inactive : inactive_live_ranges(reg)) { + split_conflicting(range, inactive, [this](LiveRange* updated) { + next_inactive_ranges_change_ = + Min(updated->End(), next_inactive_ranges_change_); + }); + } } }; if (mode() == GENERAL_REGISTERS) { @@ -3600,12 +3597,14 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode, } } else { // Remove all ranges. 
- for (auto it = inactive_live_ranges().begin(); - it != inactive_live_ranges().end();) { - if ((*it)->TopLevel()->IsDeferredFixed()) { - it = inactive_live_ranges().erase(it); - } else { - ++it; + for (int reg = 0; reg < num_registers(); ++reg) { + for (auto it = inactive_live_ranges(reg).begin(); + it != inactive_live_ranges(reg).end();) { + if ((*it)->TopLevel()->IsDeferredFixed()) { + it = inactive_live_ranges(reg).erase(it); + } else { + ++it; + } } } } @@ -3636,7 +3635,9 @@ bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) { void LinearScanAllocator::AllocateRegisters() { DCHECK(unhandled_live_ranges().empty()); DCHECK(active_live_ranges().empty()); - DCHECK(inactive_live_ranges().empty()); + for (int reg = 0; reg < num_registers(); ++reg) { + DCHECK(inactive_live_ranges(reg).empty()); + } SplitAndSpillRangesDefinedByMemoryOperand(); data()->ResetSpillState(); @@ -3853,7 +3854,7 @@ void LinearScanAllocator::AllocateRegisters() { } if (!no_change_required) { - SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode); + SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode); ReloadLiveRanges(to_be_live, next_block_boundary); } @@ -3941,9 +3942,10 @@ void LinearScanAllocator::AddToActive(LiveRange* range) { void LinearScanAllocator::AddToInactive(LiveRange* range) { TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(), range->relative_id()); - inactive_live_ranges().push_back(range); next_inactive_ranges_change_ = std::min( next_inactive_ranges_change_, range->NextStartAfter(range->Start())); + DCHECK(range->HasRegisterAssigned()); + inactive_live_ranges(range->assigned_register()).insert(range); } void LinearScanAllocator::AddToUnhandled(LiveRange* range) { @@ -3966,30 +3968,36 @@ ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled( ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive( const ZoneVector<LiveRange*>::iterator it, LifetimePosition position) { LiveRange* range = *it; - inactive_live_ranges().push_back(range); TRACE("Moving live range %d:%d from active to inactive\n", (range)->TopLevel()->vreg(), range->relative_id()); + LifetimePosition next_active = range->NextStartAfter(position); next_inactive_ranges_change_ = - std::min(next_inactive_ranges_change_, range->NextStartAfter(position)); + std::min(next_inactive_ranges_change_, next_active); + DCHECK(range->HasRegisterAssigned()); + inactive_live_ranges(range->assigned_register()).insert(range); return active_live_ranges().erase(it); } -ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToHandled( - ZoneVector<LiveRange*>::iterator it) { +LinearScanAllocator::InactiveLiveRangeQueue::iterator +LinearScanAllocator::InactiveToHandled(InactiveLiveRangeQueue::iterator it) { + LiveRange* range = *it; TRACE("Moving live range %d:%d from inactive to handled\n", - (*it)->TopLevel()->vreg(), (*it)->relative_id()); - return inactive_live_ranges().erase(it); + range->TopLevel()->vreg(), range->relative_id()); + int reg = range->assigned_register(); + return inactive_live_ranges(reg).erase(it); } -ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToActive( - ZoneVector<LiveRange*>::iterator it, LifetimePosition position) { +LinearScanAllocator::InactiveLiveRangeQueue::iterator +LinearScanAllocator::InactiveToActive(InactiveLiveRangeQueue::iterator it, + LifetimePosition position) { LiveRange* range = *it; active_live_ranges().push_back(range); TRACE("Moving live range %d:%d from inactive to active\n", 
range->TopLevel()->vreg(), range->relative_id()); next_active_ranges_change_ = std::min(next_active_ranges_change_, range->NextEndAfter(position)); - return inactive_live_ranges().erase(it); + int reg = range->assigned_register(); + return inactive_live_ranges(reg).erase(it); } void LinearScanAllocator::ForwardStateTo(LifetimePosition position) { @@ -4012,18 +4020,25 @@ void LinearScanAllocator::ForwardStateTo(LifetimePosition position) { if (position >= next_inactive_ranges_change_) { next_inactive_ranges_change_ = LifetimePosition::MaxPosition(); - for (auto it = inactive_live_ranges().begin(); - it != inactive_live_ranges().end();) { - LiveRange* cur_inactive = *it; - if (cur_inactive->End() <= position) { - it = InactiveToHandled(it); - } else if (cur_inactive->Covers(position)) { - it = InactiveToActive(it, position); - } else { - next_inactive_ranges_change_ = - std::min(next_inactive_ranges_change_, - cur_inactive->NextStartAfter(position)); - ++it; + for (int reg = 0; reg < num_registers(); ++reg) { + ZoneVector<LiveRange*> reorder(data()->allocation_zone()); + for (auto it = inactive_live_ranges(reg).begin(); + it != inactive_live_ranges(reg).end();) { + LiveRange* cur_inactive = *it; + if (cur_inactive->End() <= position) { + it = InactiveToHandled(it); + } else if (cur_inactive->Covers(position)) { + it = InactiveToActive(it, position); + } else { + next_inactive_ranges_change_ = + std::min(next_inactive_ranges_change_, + cur_inactive->NextStartAfter(position)); + it = inactive_live_ranges(reg).erase(it); + reorder.push_back(cur_inactive); + } + } + for (LiveRange* range : reorder) { + inactive_live_ranges(reg).insert(range); } } } @@ -4094,31 +4109,34 @@ void LinearScanAllocator::FindFreeRegistersForRange( } } - for (LiveRange* cur_inactive : inactive_live_ranges()) { - DCHECK(cur_inactive->End() > range->Start()); - int cur_reg = cur_inactive->assigned_register(); - // No need to carry out intersections, when this register won't be - // interesting to this range anyway. - // TODO(mtrofin): extend to aliased ranges, too. - if ((kSimpleFPAliasing || !check_fp_aliasing()) && - positions[cur_reg] < range->Start()) { - continue; - } - - LifetimePosition next_intersection = cur_inactive->FirstIntersection(range); - if (!next_intersection.IsValid()) continue; - if (kSimpleFPAliasing || !check_fp_aliasing()) { - positions[cur_reg] = Min(positions[cur_reg], next_intersection); - TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg), - Min(positions[cur_reg], next_intersection).value()); - } else { - int alias_base_index = -1; - int aliases = data()->config()->GetAliases( - cur_inactive->representation(), cur_reg, rep, &alias_base_index); - DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); - while (aliases--) { - int aliased_reg = alias_base_index + aliases; - positions[aliased_reg] = Min(positions[aliased_reg], next_intersection); + for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) { + for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) { + DCHECK_GT(cur_inactive->End(), range->Start()); + CHECK_EQ(cur_inactive->assigned_register(), cur_reg); + // No need to carry out intersections, when this register won't be + // interesting to this range anyway. + // TODO(mtrofin): extend to aliased ranges, too. 
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) && + positions[cur_reg] <= cur_inactive->NextStart()) { + break; + } + LifetimePosition next_intersection = + cur_inactive->FirstIntersection(range); + if (!next_intersection.IsValid()) continue; + if (kSimpleFPAliasing || !check_fp_aliasing()) { + positions[cur_reg] = std::min(positions[cur_reg], next_intersection); + TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg), + positions[cur_reg].value()); + } else { + int alias_base_index = -1; + int aliases = data()->config()->GetAliases( + cur_inactive->representation(), cur_reg, rep, &alias_base_index); + DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); + while (aliases--) { + int aliased_reg = alias_base_index + aliases; + positions[aliased_reg] = + std::min(positions[aliased_reg], next_intersection); + } } } } @@ -4337,46 +4355,46 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current, } } - for (LiveRange* range : inactive_live_ranges()) { - DCHECK(range->End() > current->Start()); - int cur_reg = range->assigned_register(); - bool is_fixed = range->TopLevel()->IsFixed(); - - // Don't perform costly intersections if they are guaranteed to not update - // block_pos or use_pos. - // TODO(mtrofin): extend to aliased ranges, too. - if ((kSimpleFPAliasing || !check_fp_aliasing())) { - if (is_fixed) { - if (block_pos[cur_reg] < range->Start()) continue; - } else { - if (use_pos[cur_reg] < range->Start()) continue; + for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) { + for (LiveRange* range : inactive_live_ranges(cur_reg)) { + DCHECK(range->End() > current->Start()); + DCHECK_EQ(range->assigned_register(), cur_reg); + bool is_fixed = range->TopLevel()->IsFixed(); + + // Don't perform costly intersections if they are guaranteed to not update + // block_pos or use_pos. + // TODO(mtrofin): extend to aliased ranges, too. 
+ if ((kSimpleFPAliasing || !check_fp_aliasing())) { + DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]); + if (block_pos[cur_reg] <= range->NextStart()) break; + if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue; } - } - LifetimePosition next_intersection = range->FirstIntersection(current); - if (!next_intersection.IsValid()) continue; + LifetimePosition next_intersection = range->FirstIntersection(current); + if (!next_intersection.IsValid()) continue; - if (kSimpleFPAliasing || !check_fp_aliasing()) { - if (is_fixed) { - block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); - use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); - } else { - use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection); - } - } else { - int alias_base_index = -1; - int aliases = data()->config()->GetAliases( - range->representation(), cur_reg, rep, &alias_base_index); - DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); - while (aliases--) { - int aliased_reg = alias_base_index + aliases; + if (kSimpleFPAliasing || !check_fp_aliasing()) { if (is_fixed) { - block_pos[aliased_reg] = - Min(block_pos[aliased_reg], next_intersection); - use_pos[aliased_reg] = - Min(block_pos[aliased_reg], use_pos[aliased_reg]); + block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); + use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); } else { - use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection); + use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection); + } + } else { + int alias_base_index = -1; + int aliases = data()->config()->GetAliases( + range->representation(), cur_reg, rep, &alias_base_index); + DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); + while (aliases--) { + int aliased_reg = alias_base_index + aliases; + if (is_fixed) { + block_pos[aliased_reg] = + Min(block_pos[aliased_reg], next_intersection); + use_pos[aliased_reg] = + Min(block_pos[aliased_reg], use_pos[aliased_reg]); + } else { + use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection); + } } } } @@ -4490,40 +4508,38 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current, it = ActiveToHandled(it); } - for (auto it = inactive_live_ranges().begin(); - it != inactive_live_ranges().end();) { - LiveRange* range = *it; - DCHECK(range->End() > current->Start()); - if (range->TopLevel()->IsFixed()) { - ++it; - continue; - } + for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) { if (kSimpleFPAliasing || !check_fp_aliasing()) { - if (range->assigned_register() != reg) { + if (cur_reg != reg) continue; + } + for (auto it = inactive_live_ranges(cur_reg).begin(); + it != inactive_live_ranges(cur_reg).end();) { + LiveRange* range = *it; + if (!kSimpleFPAliasing && check_fp_aliasing() && + !data()->config()->AreAliases(current->representation(), reg, + range->representation(), cur_reg)) { ++it; continue; } - } else { - if (!data()->config()->AreAliases(current->representation(), reg, - range->representation(), - range->assigned_register())) { + DCHECK(range->End() > current->Start()); + if (range->TopLevel()->IsFixed()) { ++it; continue; } - } - LifetimePosition next_intersection = range->FirstIntersection(current); - if (next_intersection.IsValid()) { - UsePosition* next_pos = range->NextRegisterPosition(current->Start()); - if (next_pos == nullptr) { - SpillAfter(range, split_pos, spill_mode); + LifetimePosition next_intersection = range->FirstIntersection(current); + if (next_intersection.IsValid()) { + UsePosition* 
next_pos = range->NextRegisterPosition(current->Start()); + if (next_pos == nullptr) { + SpillAfter(range, split_pos, spill_mode); + } else { + next_intersection = Min(next_intersection, next_pos->pos()); + SpillBetween(range, split_pos, next_intersection, spill_mode); + } + it = InactiveToHandled(it); } else { - next_intersection = Min(next_intersection, next_pos->pos()); - SpillBetween(range, split_pos, next_intersection, spill_mode); + ++it; } - it = InactiveToHandled(it); - } else { - ++it; } } } diff --git a/chromium/v8/src/compiler/backend/register-allocator.h b/chromium/v8/src/compiler/backend/register-allocator.h index 2396384e2b3..17d664e5077 100644 --- a/chromium/v8/src/compiler/backend/register-allocator.h +++ b/chromium/v8/src/compiler/backend/register-allocator.h @@ -630,9 +630,10 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) { bool ShouldBeAllocatedBefore(const LiveRange* other) const; bool CanCover(LifetimePosition position) const; bool Covers(LifetimePosition position) const; - LifetimePosition NextStartAfter(LifetimePosition position) const; + LifetimePosition NextStartAfter(LifetimePosition position); LifetimePosition NextEndAfter(LifetimePosition position) const; LifetimePosition FirstIntersection(LiveRange* other) const; + LifetimePosition NextStart() const { return next_start_; } void VerifyChildStructure() const { VerifyIntervals(); @@ -693,6 +694,8 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) { // Cache the last position splintering stopped at. mutable UsePosition* splitting_pointer_; LiveRangeBundle* bundle_ = nullptr; + // Next interval start, relative to the current linear scan position. + LifetimePosition next_start_; DISALLOW_COPY_AND_ASSIGN(LiveRange); }; @@ -1302,29 +1305,39 @@ class LinearScanAllocator final : public RegisterAllocator { LifetimePosition begin_pos, LiveRange* end_range); void MaybeUndoPreviousSplit(LiveRange* range); - void SpillNotLiveRanges( - RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references) - LifetimePosition position, SpillMode spill_mode); + void SpillNotLiveRanges(RangeWithRegisterSet* to_be_live, + LifetimePosition position, SpillMode spill_mode); LiveRange* AssignRegisterOnReload(LiveRange* range, int reg); - void ReloadLiveRanges( - RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references) - LifetimePosition position); + void ReloadLiveRanges(RangeWithRegisterSet const& to_be_live, + LifetimePosition position); void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block); bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred( const InstructionBlock* block); bool HasNonDeferredPredecessor(InstructionBlock* block); - struct LiveRangeOrdering { + struct UnhandledLiveRangeOrdering { bool operator()(const LiveRange* a, const LiveRange* b) const { return a->ShouldBeAllocatedBefore(b); } }; - using LiveRangeQueue = ZoneMultiset<LiveRange*, LiveRangeOrdering>; - LiveRangeQueue& unhandled_live_ranges() { return unhandled_live_ranges_; } + + struct InactiveLiveRangeOrdering { + bool operator()(const LiveRange* a, const LiveRange* b) const { + return a->NextStart() < b->NextStart(); + } + }; + + using UnhandledLiveRangeQueue = + ZoneMultiset<LiveRange*, UnhandledLiveRangeOrdering>; + using InactiveLiveRangeQueue = + ZoneMultiset<LiveRange*, InactiveLiveRangeOrdering>; + UnhandledLiveRangeQueue& unhandled_live_ranges() { + return unhandled_live_ranges_; + } ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; } - 
ZoneVector<LiveRange*>& inactive_live_ranges() { - return inactive_live_ranges_; + InactiveLiveRangeQueue& inactive_live_ranges(int reg) { + return inactive_live_ranges_[reg]; } void SetLiveRangeAssignedRegister(LiveRange* range, int reg); @@ -1337,10 +1350,10 @@ class LinearScanAllocator final : public RegisterAllocator { ZoneVector<LiveRange*>::iterator it); ZoneVector<LiveRange*>::iterator ActiveToInactive( ZoneVector<LiveRange*>::iterator it, LifetimePosition position); - ZoneVector<LiveRange*>::iterator InactiveToHandled( - ZoneVector<LiveRange*>::iterator it); - ZoneVector<LiveRange*>::iterator InactiveToActive( - ZoneVector<LiveRange*>::iterator it, LifetimePosition position); + InactiveLiveRangeQueue::iterator InactiveToHandled( + InactiveLiveRangeQueue::iterator it); + InactiveLiveRangeQueue::iterator InactiveToActive( + InactiveLiveRangeQueue::iterator it, LifetimePosition position); void ForwardStateTo(LifetimePosition position); @@ -1390,9 +1403,9 @@ class LinearScanAllocator final : public RegisterAllocator { void PrintRangeOverview(std::ostream& os); - LiveRangeQueue unhandled_live_ranges_; + UnhandledLiveRangeQueue unhandled_live_ranges_; ZoneVector<LiveRange*> active_live_ranges_; - ZoneVector<LiveRange*> inactive_live_ranges_; + ZoneVector<InactiveLiveRangeQueue> inactive_live_ranges_; // Approximate at what position the set of ranges will change next. // Used to avoid scanning for updates even if none are present. diff --git a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc index 4c2d862fc44..d0f97eca57b 100644 --- a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -1246,9 +1246,8 @@ void AdjustStackPointerForTailCall( } } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, Instruction* instr, - S390OperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, + S390OperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode())); if (access_mode == kMemoryAccessPoisoned) { diff --git a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc index 7f3277fc68d..7b002fe6d3b 100644 --- a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
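The register-allocator.h changes above replace the single inactive_live_ranges_ vector with one InactiveLiveRangeQueue per register, ordered by each range's cached NextStart(). Because every queue only holds ranges already assigned to that register and is sorted by next start, the linear-scan loops elsewhere in the patch can stop examining a register as soon as they reach a range that starts past the position of interest. A simplified sketch of that data-structure shape, using std::multiset in place of ZoneMultiset and an illustrative Range struct (none of these names are V8's):

    #include <set>
    #include <vector>

    struct Range {
      int next_start;  // analogous to LiveRange::NextStart()
      int end;
    };

    struct ByNextStart {
      bool operator()(const Range* a, const Range* b) const {
        return a->next_start < b->next_start;
      }
    };

    using InactiveQueue = std::multiset<Range*, ByNextStart>;

    // One queue per register; the scan can break at the first range whose next
    // start lies beyond the query position, since all later ones start even later.
    int FirstConflictEnd(const std::vector<InactiveQueue>& inactive, int reg, int pos) {
      for (const Range* r : inactive[reg]) {
        if (r->next_start > pos) break;
        if (r->end > pos) return r->end;
      }
      return -1;  // no inactive range on this register conflicts at pos
    }

    int main() {
      Range a{2, 10}, b{7, 9};
      std::vector<InactiveQueue> inactive(1);
      inactive[0].insert(&a);
      inactive[0].insert(&b);
      return FirstConflictEnd(inactive, 0, 5) == 10 ? 0 : 1;
    }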
-#include "src/base/adapters.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -436,68 +435,64 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode, #endif template <class CanCombineWithLoad> -void GenerateRightOperands( - InstructionSelector* selector, Node* node, Node* right, - InstructionCode& opcode, // NOLINT(runtime/references) - OperandModes& operand_mode, // NOLINT(runtime/references) - InstructionOperand* inputs, - size_t& input_count, // NOLINT(runtime/references) - CanCombineWithLoad canCombineWithLoad) { +void GenerateRightOperands(InstructionSelector* selector, Node* node, + Node* right, InstructionCode* opcode, + OperandModes* operand_mode, + InstructionOperand* inputs, size_t* input_count, + CanCombineWithLoad canCombineWithLoad) { S390OperandGenerator g(selector); - if ((operand_mode & OperandMode::kAllowImmediate) && - g.CanBeImmediate(right, operand_mode)) { - inputs[input_count++] = g.UseImmediate(right); + if ((*operand_mode & OperandMode::kAllowImmediate) && + g.CanBeImmediate(right, *operand_mode)) { + inputs[(*input_count)++] = g.UseImmediate(right); // Can only be RI or RRI - operand_mode &= OperandMode::kAllowImmediate; - } else if (operand_mode & OperandMode::kAllowMemoryOperand) { + *operand_mode &= OperandMode::kAllowImmediate; + } else if (*operand_mode & OperandMode::kAllowMemoryOperand) { NodeMatcher mright(right); if (mright.IsLoad() && selector->CanCover(node, right) && canCombineWithLoad(SelectLoadOpcode(right))) { AddressingMode mode = g.GetEffectiveAddressMemoryOperand( - right, inputs, &input_count, OpcodeImmMode(opcode)); - opcode |= AddressingModeField::encode(mode); - operand_mode &= ~OperandMode::kAllowImmediate; - if (operand_mode & OperandMode::kAllowRM) - operand_mode &= ~OperandMode::kAllowDistinctOps; - } else if (operand_mode & OperandMode::kAllowRM) { - DCHECK(!(operand_mode & OperandMode::kAllowRRM)); - inputs[input_count++] = g.UseAnyExceptImmediate(right); + right, inputs, input_count, OpcodeImmMode(*opcode)); + *opcode |= AddressingModeField::encode(mode); + *operand_mode &= ~OperandMode::kAllowImmediate; + if (*operand_mode & OperandMode::kAllowRM) + *operand_mode &= ~OperandMode::kAllowDistinctOps; + } else if (*operand_mode & OperandMode::kAllowRM) { + DCHECK(!(*operand_mode & OperandMode::kAllowRRM)); + inputs[(*input_count)++] = g.UseAnyExceptImmediate(right); // Can not be Immediate - operand_mode &= + *operand_mode &= ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps; - } else if (operand_mode & OperandMode::kAllowRRM) { - DCHECK(!(operand_mode & OperandMode::kAllowRM)); - inputs[input_count++] = g.UseAnyExceptImmediate(right); + } else if (*operand_mode & OperandMode::kAllowRRM) { + DCHECK(!(*operand_mode & OperandMode::kAllowRM)); + inputs[(*input_count)++] = g.UseAnyExceptImmediate(right); // Can not be Immediate - operand_mode &= ~OperandMode::kAllowImmediate; + *operand_mode &= ~OperandMode::kAllowImmediate; } else { UNREACHABLE(); } } else { - inputs[input_count++] = g.UseRegister(right); + inputs[(*input_count)++] = g.UseRegister(right); // Can only be RR or RRR - operand_mode &= OperandMode::kAllowRRR; + *operand_mode &= OperandMode::kAllowRRR; } } template <class CanCombineWithLoad> -void GenerateBinOpOperands( - InstructionSelector* selector, Node* node, Node* left, Node* right, - InstructionCode& opcode, // NOLINT(runtime/references) - OperandModes& operand_mode, // 
NOLINT(runtime/references) - InstructionOperand* inputs, - size_t& input_count, // NOLINT(runtime/references) - CanCombineWithLoad canCombineWithLoad) { +void GenerateBinOpOperands(InstructionSelector* selector, Node* node, + Node* left, Node* right, InstructionCode* opcode, + OperandModes* operand_mode, + InstructionOperand* inputs, size_t* input_count, + CanCombineWithLoad canCombineWithLoad) { S390OperandGenerator g(selector); // left is always register InstructionOperand const left_input = g.UseRegister(left); - inputs[input_count++] = left_input; + inputs[(*input_count)++] = left_input; if (left == right) { - inputs[input_count++] = left_input; + inputs[(*input_count)++] = left_input; // Can only be RR or RRR - operand_mode &= OperandMode::kAllowRRR; + *operand_mode &= OperandMode::kAllowRRR; } else { GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs, input_count, canCombineWithLoad); @@ -575,8 +570,8 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node, size_t output_count = 0; Node* input = node->InputAt(0); - GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs, - input_count, canCombineWithLoad); + GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs, + &input_count, canCombineWithLoad); bool input_is_word32 = ProduceWord32Result(input); @@ -631,8 +626,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node, std::swap(left, right); } - GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode, - inputs, input_count, canCombineWithLoad); + GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode, + inputs, &input_count, canCombineWithLoad); bool left_is_word32 = ProduceWord32Result(left); @@ -1175,6 +1170,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + // TODO(miladfar): Implement the s390 selector for reversing SIMD bytes. + // Check if the input node is a Load and do a Load Reverse at once. 
+ UNIMPLEMENTED(); +} + template <class Matcher, ArchOpcode neg_opcode> static inline bool TryMatchNegFromSub(InstructionSelector* selector, Node* node) { @@ -2691,6 +2692,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); } diff --git a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc index a4f82b153b6..44da872f26d 100644 --- a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -361,7 +361,6 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap { void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - X64OperandConverter& i, // NOLINT(runtime/references) int pc) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); @@ -370,9 +369,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, } } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - X64OperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + X64OperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -1876,30 +1875,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg); break; case kX64Movsxbl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxbl); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movzxbl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxbl); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxbq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxbq); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movzxbq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxbq); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movb: { - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); size_t index = 0; Operand operand = i.MemoryOperand(&index); if (HasImmediateInput(instr, index)) { @@ -1911,29 +1910,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64Movsxwl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxwl); __ 
AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movzxwl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxwl); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxwq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxwq); break; case kX64Movzxwq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxwq); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movw: { - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); size_t index = 0; Operand operand = i.MemoryOperand(&index); if (HasImmediateInput(instr, index)) { @@ -1945,7 +1944,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64Movl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { if (HasAddressingMode(instr)) { __ movl(i.OutputRegister(), i.MemoryOperand()); @@ -1969,7 +1968,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxlq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxlq); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; @@ -2021,7 +2020,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64Movq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { __ movq(i.OutputRegister(), i.MemoryOperand()); } else { @@ -2036,7 +2035,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movss: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { __ Movss(i.OutputDoubleRegister(), i.MemoryOperand()); } else { @@ -2046,7 +2045,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kX64Movsd: { - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); @@ -2069,7 +2068,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64Movdqu: { CpuFeatureScope sse_scope(tasm(), SSSE3); - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand()); } else { @@ -2293,6 +2292,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ movq(i.OutputDoubleRegister(), kScratchRegister); break; } + case kX64F64x2Sqrt: { + __ 
Sqrtpd(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kX64F64x2Add: { ASSEMBLE_SSE_BINOP(addpd); break; @@ -2350,22 +2353,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64F64x2Eq: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64F64x2Ne: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64F64x2Lt: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64F64x2Le: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64F64x2Qfma: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movapd(tmp, i.InputSimd128Register(2)); + __ mulpd(tmp, i.InputSimd128Register(1)); + __ addpd(i.OutputSimd128Register(), tmp); + } + break; + } + case kX64F64x2Qfms: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movapd(tmp, i.InputSimd128Register(2)); + __ mulpd(tmp, i.InputSimd128Register(1)); + __ subpd(i.OutputSimd128Register(), tmp); + } break; } // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below @@ -2445,6 +2474,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kX64F32x4Sqrt: { + __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kX64F32x4RecipApprox: { __ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; @@ -2538,6 +2571,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } + case kX64F32x4Qfma: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movaps(tmp, i.InputSimd128Register(2)); + __ mulps(tmp, i.InputSimd128Register(1)); + __ addps(i.OutputSimd128Register(), tmp); + } + break; + } + case kX64F32x4Qfms: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movaps(tmp, i.InputSimd128Register(2)); + __ mulps(tmp, i.InputSimd128Register(1)); + __ subps(i.OutputSimd128Register(), tmp); + } + break; + } case kX64I64x2Splat: { CpuFeatureScope sse_scope(tasm(), SSE3); XMMRegister dst = i.OutputSimd128Register(); @@ -2577,7 +2636,10 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( } case kX64I64x2Shl: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 64. + __ andq(shift, Immediate(63)); + __ movq(tmp, shift); __ psllq(i.OutputSimd128Register(), tmp); break; } @@ -2588,6 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); Register tmp = i.ToRegister(instr->TempAt(0)); + // Modulo 64 not required as sarq_cl will mask cl to 6 bits. // lower quadword __ pextrq(tmp, src, 0x0); @@ -2640,15 +2703,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (CpuFeatures::IsSupported(SSE4_2)) { CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(1); + XMMRegister src0 = i.InputSimd128Register(0); + XMMRegister src1 = i.InputSimd128Register(1); XMMRegister tmp = i.TempSimd128Register(0); - DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + DCHECK_EQ(tmp, xmm0); - __ movaps(tmp, src); - __ pcmpgtq(src, dst); - __ blendvpd(tmp, dst); // implicit use of xmm0 as mask - __ movaps(dst, tmp); + __ movaps(tmp, src1); + __ pcmpgtq(tmp, src0); + __ movaps(dst, src1); + __ blendvpd(dst, src0); // implicit use of xmm0 as mask } else { CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); @@ -2689,11 +2752,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister src = i.InputSimd128Register(1); XMMRegister tmp = i.TempSimd128Register(0); DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + DCHECK_EQ(tmp, xmm0); __ movaps(tmp, src); - __ pcmpgtq(src, dst); - __ blendvpd(dst, tmp); // implicit use of xmm0 as mask + __ pcmpgtq(tmp, dst); + __ blendvpd(dst, src); // implicit use of xmm0 as mask break; } case kX64I64x2Eq: { @@ -2732,7 +2795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I64x2ShrU: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 64.
+ __ andq(shift, Immediate(63)); + __ movq(tmp, shift); __ psrlq(i.OutputSimd128Register(), tmp); break; } @@ -2740,24 +2806,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(1); - XMMRegister src_tmp = i.TempSimd128Register(0); - XMMRegister dst_tmp = i.TempSimd128Register(1); - DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + XMMRegister src0 = i.InputSimd128Register(0); + XMMRegister src1 = i.InputSimd128Register(1); + XMMRegister tmp0 = i.TempSimd128Register(0); + XMMRegister tmp1 = i.TempSimd128Register(1); + DCHECK_EQ(tmp1, xmm0); - __ movaps(src_tmp, src); - __ movaps(dst_tmp, dst); + __ movaps(dst, src1); + __ movaps(tmp0, src0); - __ pcmpeqd(src, src); - __ psllq(src, 63); + __ pcmpeqd(tmp1, tmp1); + __ psllq(tmp1, 63); - __ pxor(dst_tmp, src); - __ pxor(src, src_tmp); + __ pxor(tmp0, tmp1); + __ pxor(tmp1, dst); - __ pcmpgtq(src, dst_tmp); - __ blendvpd(src_tmp, dst); // implicit use of xmm0 as mask - __ movaps(dst, src_tmp); + __ pcmpgtq(tmp1, tmp0); + __ blendvpd(dst, src0); // implicit use of xmm0 as mask break; } case kX64I64x2MaxU: { @@ -2765,22 +2830,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - XMMRegister src_tmp = i.TempSimd128Register(0); - XMMRegister dst_tmp = i.TempSimd128Register(1); + XMMRegister dst_tmp = i.TempSimd128Register(0); + XMMRegister tmp = i.TempSimd128Register(1); DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + DCHECK_EQ(tmp, xmm0); - __ movaps(src_tmp, src); __ movaps(dst_tmp, dst); - __ pcmpeqd(src, src); - __ psllq(src, 63); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 63); - __ pxor(dst_tmp, src); - __ pxor(src, src_tmp); + __ pxor(dst_tmp, tmp); + __ pxor(tmp, src); - __ pcmpgtq(src, dst_tmp); - __ blendvpd(dst, src_tmp); // implicit use of xmm0 as mask + __ pcmpgtq(tmp, dst_tmp); + __ blendvpd(dst, src); // implicit use of xmm0 as mask break; } case kX64I64x2GtU: { @@ -2820,11 +2884,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I32x4Splat: { XMMRegister dst = i.OutputSimd128Register(); if (HasRegisterInput(instr, 0)) { - __ movd(dst, i.InputRegister(0)); + __ Movd(dst, i.InputRegister(0)); } else { - __ movd(dst, i.InputOperand(0)); + __ Movd(dst, i.InputOperand(0)); } - __ pshufd(dst, dst, 0x0); + __ Pshufd(dst, dst, 0x0); break; } case kX64I32x4ExtractLane: { @@ -2878,28 +2942,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); if (dst == src) { - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psignd(dst, kScratchDoubleReg); + __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ Psignd(dst, kScratchDoubleReg); } else { - __ pxor(dst, dst); - __ psubd(dst, src); + __ Pxor(dst, dst); + __ Psubd(dst, src); } break; } case kX64I32x4Shl: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); - __ pslld(i.OutputSimd128Register(), tmp); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. 
+ __ andq(shift, Immediate(31)); + __ Movq(tmp, shift); + __ Pslld(i.OutputSimd128Register(), tmp); break; } case kX64I32x4ShrS: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); - __ psrad(i.OutputSimd128Register(), tmp); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ andq(shift, Immediate(31)); + __ Movq(tmp, shift); + __ Psrad(i.OutputSimd128Register(), tmp); break; } case kX64I32x4Add: { - __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4AddHoriz: { @@ -2908,45 +2978,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I32x4Sub: { - __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4Mul: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4MinS: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4MaxS: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4Eq: { - __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4Ne: { XMMRegister tmp = i.TempSimd128Register(0); - __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); - __ pcmpeqd(tmp, tmp); - __ pxor(i.OutputSimd128Register(), tmp); + __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pcmpeqd(tmp, tmp); + __ Pxor(i.OutputSimd128Register(), tmp); break; } case kX64I32x4GtS: { - __ pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4GeS: { CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - __ pminsd(dst, src); - __ pcmpeqd(dst, src); + __ Pminsd(dst, src); + __ Pcmpeqd(dst, src); break; } case kX64I32x4UConvertF32x4: { @@ -2992,18 +3062,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I32x4ShrU: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); - __ psrld(i.OutputSimd128Register(), tmp); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. 
+ __ andq(shift, Immediate(31)); + __ Movq(tmp, shift); + __ Psrld(i.OutputSimd128Register(), tmp); break; } case kX64I32x4MinU: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4MaxU: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4GtU: { @@ -3011,18 +3084,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); XMMRegister tmp = i.TempSimd128Register(0); - __ pmaxud(dst, src); - __ pcmpeqd(dst, src); - __ pcmpeqd(tmp, tmp); - __ pxor(dst, tmp); + __ Pmaxud(dst, src); + __ Pcmpeqd(dst, src); + __ Pcmpeqd(tmp, tmp); + __ Pxor(dst, tmp); break; } case kX64I32x4GeU: { CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - __ pminud(dst, src); - __ pcmpeqd(dst, src); + __ Pminud(dst, src); + __ Pcmpeqd(dst, src); break; } case kX64S128Zero: { @@ -3044,17 +3117,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I16x8ExtractLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); Register dst = i.OutputRegister(); - __ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsxwl(dst, dst); + __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1)); break; } case kX64I16x8ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); if (HasRegisterInput(instr, 2)) { - __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2), + __ Pinsrw(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { - __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); + __ Pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); } break; } @@ -3085,13 +3157,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I16x8Shl: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ andq(shift, Immediate(15)); + __ movq(tmp, shift); __ psllw(i.OutputSimd128Register(), tmp); break; } case kX64I16x8ShrS: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ andq(shift, Immediate(15)); + __ movq(tmp, shift); __ psraw(i.OutputSimd128Register(), tmp); break; } @@ -3173,7 +3251,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I16x8ShrU: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. 
+ __ andq(shift, Immediate(15)); + __ movq(tmp, shift); __ psrlw(i.OutputSimd128Register(), tmp); break; } @@ -3230,28 +3311,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSSE3); XMMRegister dst = i.OutputSimd128Register(); if (HasRegisterInput(instr, 0)) { - __ movd(dst, i.InputRegister(0)); + __ Movd(dst, i.InputRegister(0)); } else { - __ movd(dst, i.InputOperand(0)); + __ Movd(dst, i.InputOperand(0)); } - __ xorps(kScratchDoubleReg, kScratchDoubleReg); - __ pshufb(dst, kScratchDoubleReg); + __ Xorps(kScratchDoubleReg, kScratchDoubleReg); + __ Pshufb(dst, kScratchDoubleReg); break; } case kX64I8x16ExtractLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); Register dst = i.OutputRegister(); - __ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsxbl(dst, dst); + __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1)); break; } case kX64I8x16ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); if (HasRegisterInput(instr, 2)) { - __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2), + __ Pinsrb(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { - __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); + __ Pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); } break; } @@ -3279,15 +3359,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // Temp registers for shift mask andadditional moves to XMM registers. Register tmp = i.ToRegister(instr->TempAt(0)); XMMRegister tmp_simd = i.TempSimd128Register(1); + Register shift = i.InputRegister(1); // Mask off the unwanted bits before word-shifting. __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); - __ movq(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ andq(shift, Immediate(7)); + __ movq(tmp, shift); __ addq(tmp, Immediate(8)); __ movq(tmp_simd, tmp); __ psrlw(kScratchDoubleReg, tmp_simd); __ packuswb(kScratchDoubleReg, kScratchDoubleReg); __ pand(dst, kScratchDoubleReg); - __ movq(tmp_simd, i.InputRegister(1)); + __ movq(tmp_simd, shift); __ psllw(dst, tmp_simd); break; } @@ -3302,6 +3385,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpcklbw(dst, dst); // Prepare shift value __ movq(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ andq(tmp, Immediate(7)); __ addq(tmp, Immediate(8)); __ movq(tmp_simd, tmp); __ psraw(kScratchDoubleReg, tmp_simd); @@ -3414,6 +3499,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpcklbw(dst, dst); // Prepare shift value __ movq(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ andq(tmp, Immediate(7)); __ addq(tmp, Immediate(8)); __ movq(tmp_simd, tmp); __ psrlw(kScratchDoubleReg, tmp_simd); @@ -3422,7 +3509,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I8x16AddSaturateU: { - __ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I8x16SubSaturateU: { @@ -3487,10 +3574,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64S128Select: { // Mask used here is stored in dst. 
XMMRegister dst = i.OutputSimd128Register(); - __ movaps(kScratchDoubleReg, i.InputSimd128Register(1)); - __ xorps(kScratchDoubleReg, i.InputSimd128Register(2)); - __ andps(dst, kScratchDoubleReg); - __ xorps(dst, i.InputSimd128Register(2)); + __ Movaps(kScratchDoubleReg, i.InputSimd128Register(1)); + __ Xorps(kScratchDoubleReg, i.InputSimd128Register(2)); + __ Andps(dst, kScratchDoubleReg); + __ Xorps(dst, i.InputSimd128Register(2)); + break; + } + case kX64S8x16Swizzle: { + CpuFeatureScope sse_scope(tasm(), SSSE3); + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister mask = i.TempSimd128Register(0); + + // Out-of-range indices should return 0, add 112 so that any value > 15 + // saturates to 128 (top bit set), so pshufb will zero that lane. + __ Move(mask, static_cast<uint32_t>(0x70707070)); + __ Pshufd(mask, mask, 0x0); + __ Paddusb(mask, i.InputSimd128Register(1)); + __ Pshufb(dst, mask); break; } case kX64S8x16Shuffle: { @@ -3507,10 +3608,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } SetupShuffleMaskOnStack(tasm(), mask); - __ pshufb(dst, Operand(rsp, 0)); + __ Pshufb(dst, Operand(rsp, 0)); } else { // two input operands DCHECK_EQ(6, instr->InputCount()); - ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 0); + ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 0); uint32_t mask[4] = {}; for (int j = 5; j > 1; j--) { uint32_t lanes = i.InputUint32(j); @@ -3520,13 +3621,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } } SetupShuffleMaskOnStack(tasm(), mask); - __ pshufb(kScratchDoubleReg, Operand(rsp, 0)); + __ Pshufb(kScratchDoubleReg, Operand(rsp, 0)); uint32_t mask1[4] = {}; if (instr->InputAt(1)->IsSimd128Register()) { XMMRegister src1 = i.InputSimd128Register(1); if (src1 != dst) __ movups(dst, src1); } else { - __ movups(dst, i.InputOperand(1)); + __ Movups(dst, i.InputOperand(1)); } for (int j = 5; j > 1; j--) { uint32_t lanes = i.InputUint32(j); @@ -3536,8 +3637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } } SetupShuffleMaskOnStack(tasm(), mask1); - __ pshufb(dst, Operand(rsp, 0)); - __ por(dst, kScratchDoubleReg); + __ Pshufb(dst, Operand(rsp, 0)); + __ Por(dst, kScratchDoubleReg); } __ movq(rsp, tmp); break; diff --git a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h index 8a0a45a916a..e390c6922c8 100644 --- a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h +++ b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h @@ -160,6 +160,7 @@ namespace compiler { V(X64F64x2ReplaceLane) \ V(X64F64x2Abs) \ V(X64F64x2Neg) \ + V(X64F64x2Sqrt) \ V(X64F64x2Add) \ V(X64F64x2Sub) \ V(X64F64x2Mul) \ @@ -170,6 +171,8 @@ namespace compiler { V(X64F64x2Ne) \ V(X64F64x2Lt) \ V(X64F64x2Le) \ + V(X64F64x2Qfma) \ + V(X64F64x2Qfms) \ V(X64F32x4Splat) \ V(X64F32x4ExtractLane) \ V(X64F32x4ReplaceLane) \ @@ -177,6 +180,7 @@ namespace compiler { V(X64F32x4UConvertI32x4) \ V(X64F32x4Abs) \ V(X64F32x4Neg) \ + V(X64F32x4Sqrt) \ V(X64F32x4RecipApprox) \ V(X64F32x4RecipSqrtApprox) \ V(X64F32x4Add) \ @@ -190,6 +194,8 @@ namespace compiler { V(X64F32x4Ne) \ V(X64F32x4Lt) \ V(X64F32x4Le) \ + V(X64F32x4Qfma) \ + V(X64F32x4Qfms) \ V(X64I64x2Splat) \ V(X64I64x2ExtractLane) \ V(X64I64x2ReplaceLane) \ @@ -300,6 +306,7 @@ namespace compiler { V(X64S128Or) \ V(X64S128Xor) \ V(X64S128Select) \ + V(X64S8x16Swizzle) \ V(X64S8x16Shuffle) \ V(X64S32x4Swizzle) \ 
V(X64S32x4Shuffle) \ diff --git a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc index e9fa450c382..28a935fd916 100644 --- a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc +++ b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc @@ -129,6 +129,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F64x2ReplaceLane: case kX64F64x2Abs: case kX64F64x2Neg: + case kX64F64x2Sqrt: case kX64F64x2Add: case kX64F64x2Sub: case kX64F64x2Mul: @@ -139,6 +140,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F64x2Ne: case kX64F64x2Lt: case kX64F64x2Le: + case kX64F64x2Qfma: + case kX64F64x2Qfms: case kX64F32x4Splat: case kX64F32x4ExtractLane: case kX64F32x4ReplaceLane: @@ -148,6 +151,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F32x4RecipSqrtApprox: case kX64F32x4Abs: case kX64F32x4Neg: + case kX64F32x4Sqrt: case kX64F32x4Add: case kX64F32x4AddHoriz: case kX64F32x4Sub: @@ -159,6 +163,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F32x4Ne: case kX64F32x4Lt: case kX64F32x4Le: + case kX64F32x4Qfma: + case kX64F32x4Qfms: case kX64I64x2Splat: case kX64I64x2ExtractLane: case kX64I64x2ReplaceLane: @@ -275,6 +281,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64S1x4AllTrue: case kX64S1x8AnyTrue: case kX64S1x8AllTrue: + case kX64S8x16Swizzle: case kX64S8x16Shuffle: case kX64S32x4Swizzle: case kX64S32x4Shuffle: diff --git a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc index 5379074bac8..f5d05fdd85a 100644 --- a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -4,7 +4,7 @@ #include <algorithm> -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/base/overflowing-math.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" @@ -250,9 +250,21 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) { #else UNREACHABLE(); #endif +#ifdef V8_COMPRESS_POINTERS + case MachineRepresentation::kTaggedSigned: + opcode = kX64MovqDecompressTaggedSigned; + break; + case MachineRepresentation::kTaggedPointer: + opcode = kX64MovqDecompressTaggedPointer; + break; + case MachineRepresentation::kTagged: + opcode = kX64MovqDecompressAnyTagged; + break; +#else case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTagged: // Fall through. +#endif case MachineRepresentation::kWord64: opcode = kX64Movq; break; @@ -288,7 +300,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) { #endif case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kTagged: + return kX64MovqCompressTagged; case MachineRepresentation::kWord64: return kX64Movq; case MachineRepresentation::kSimd128: // Fall through. 
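Note on the GetLoadOpcode/GetStoreOpcode hunks above: tagged loads now select the dedicated decompressing opcodes (kX64MovqDecompressTaggedSigned, kX64MovqDecompressTaggedPointer, kX64MovqDecompressAnyTagged) when V8_COMPRESS_POINTERS is defined, tagged stores always go through kX64MovqCompressTagged, and word64 values keep using the plain kX64Movq. A minimal self-contained C++ sketch of that dispatch shape is below; the enum and function names are simplified stand-ins for illustration, not the real V8 declarations.

#include <cstdio>

// Stand-in enums for this sketch only (the real ones live in machine-type.h
// and instruction-codes-x64.h).
enum class Rep { kTaggedSigned, kTaggedPointer, kTagged, kWord64 };
enum class Op {
  kMovqDecompressTaggedSigned,
  kMovqDecompressTaggedPointer,
  kMovqDecompressAnyTagged,
  kMovqCompressTagged,
  kMovq
};

constexpr bool kCompressPointers = true;  // stands in for the V8_COMPRESS_POINTERS build flag

// Load: tagged representations decompress when pointer compression is on.
Op GetLoadOp(Rep rep) {
  switch (rep) {
    case Rep::kTaggedSigned:
      return kCompressPointers ? Op::kMovqDecompressTaggedSigned : Op::kMovq;
    case Rep::kTaggedPointer:
      return kCompressPointers ? Op::kMovqDecompressTaggedPointer : Op::kMovq;
    case Rep::kTagged:
      return kCompressPointers ? Op::kMovqDecompressAnyTagged : Op::kMovq;
    case Rep::kWord64:
      return Op::kMovq;
  }
  return Op::kMovq;
}

// Store: all tagged representations share the compressing store.
Op GetStoreOp(Rep rep) {
  switch (rep) {
    case Rep::kTaggedSigned:
    case Rep::kTaggedPointer:
    case Rep::kTagged:
      return Op::kMovqCompressTagged;
    case Rep::kWord64:
      return Op::kMovq;
  }
  return Op::kMovq;
}

int main() {
  std::printf("load(kTagged) -> %d, store(kTagged) -> %d\n",
              static_cast<int>(GetLoadOp(Rep::kTagged)),
              static_cast<int>(GetStoreOp(Rep::kTagged)));
}

The point of the split is that, under compression, a tagged slot holds a 32-bit value that must be widened against the cage base on load, whereas a full machine word is unaffected.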
@@ -875,6 +888,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitInt32Add(Node* node) { X64OperandGenerator g(this); @@ -1843,17 +1860,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, node->op()->HasProperty(Operator::kCommutative)); } -// Shared routine for 64-bit word comparison operations. -void VisitWord64Compare(InstructionSelector* selector, Node* node, - FlagsContinuation* cont) { - X64OperandGenerator g(selector); +void VisitWord64EqualImpl(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { if (selector->CanUseRootsRegister()) { + X64OperandGenerator g(selector); const RootsTable& roots_table = selector->isolate()->roots_table(); RootIndex root_index; HeapObjectBinopMatcher m(node); if (m.right().HasValue() && roots_table.IsRootHandle(m.right().Value(), &root_index)) { - if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); InstructionCode opcode = kX64Cmp | AddressingModeField::encode(kMode_Root); return VisitCompare( @@ -1861,18 +1876,30 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node, g.TempImmediate( TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), g.UseRegister(m.left().node()), cont); - } else if (m.left().HasValue() && - roots_table.IsRootHandle(m.left().Value(), &root_index)) { + } + } + VisitWordCompare(selector, node, kX64Cmp, cont); +} + +void VisitWord32EqualImpl(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + if (COMPRESS_POINTERS_BOOL && selector->CanUseRootsRegister()) { + X64OperandGenerator g(selector); + const RootsTable& roots_table = selector->isolate()->roots_table(); + RootIndex root_index; + CompressedHeapObjectBinopMatcher m(node); + if (m.right().HasValue() && + roots_table.IsRootHandle(m.right().Value(), &root_index)) { InstructionCode opcode = - kX64Cmp | AddressingModeField::encode(kMode_Root); + kX64Cmp32 | AddressingModeField::encode(kMode_Root); return VisitCompare( selector, opcode, g.TempImmediate( TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), - g.UseRegister(m.right().node()), cont); + g.UseRegister(m.left().node()), cont); } } - VisitWordCompare(selector, node, kX64Cmp, cont); + VisitWordCompare(selector, node, kX64Cmp32, cont); } // Shared routine for comparison with zero. 
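The new VisitWord32EqualImpl above only takes the roots-table fast path when pointer compression is enabled (COMPRESS_POINTERS_BOOL): the root constant is never materialized, it is compared through its fixed offset from the root register (kMode_Root), and a 32-bit compare (kX64Cmp32) suffices because under compression all heap pointers share the same upper 32 bits. A small standalone sketch of that reasoning follows, using an assumed example cage base and hypothetical helper names rather than the real V8 API.

#include <cstdint>
#include <cstdio>

// Assumed example cage base; in V8 the actual base is chosen at runtime.
constexpr uint64_t kCageBase = 0x00007f0000000000ULL;

// Compression keeps only the low 32 bits of a tagged pointer.
uint32_t Compress(uint64_t full_ptr) { return static_cast<uint32_t>(full_ptr); }

// Decompression re-attaches the shared upper bits.
uint64_t Decompress(uint32_t compressed) { return kCageBase + compressed; }

// Equivalent of emitting kX64Cmp32 on compressed values: because both
// operands decompress against the same base, equality of the low 32 bits
// implies equality of the full pointers.
bool TaggedEqual(uint64_t a, uint64_t b) { return Compress(a) == Compress(b); }

int main() {
  uint64_t root_value = Decompress(0x1234u);   // pretend root, e.g. the-hole
  uint64_t candidate = kCageBase + 0x1234u;    // value loaded from the heap
  std::printf("%d\n", TaggedEqual(candidate, root_value));  // prints 1
}

VisitWord64EqualImpl keeps the analogous 64-bit fast path for uncompressed root comparisons, while the ordered 64-bit comparisons now go through the generic VisitWordCompare with kX64Cmp.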
@@ -2048,7 +2075,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, switch (value->opcode()) { case IrOpcode::kWord32Equal: cont->OverwriteAndNegateIfEqual(kEqual); - return VisitWordCompare(this, value, kX64Cmp32, cont); + return VisitWord32EqualImpl(this, value, cont); case IrOpcode::kInt32LessThan: cont->OverwriteAndNegateIfEqual(kSignedLessThan); return VisitWordCompare(this, value, kX64Cmp32, cont); @@ -2071,7 +2098,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, if (CanCover(user, value)) { switch (value->opcode()) { case IrOpcode::kInt64Sub: - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kWord64And: return VisitWordCompare(this, value, kX64Test, cont); default: @@ -2080,20 +2107,20 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, } return VisitCompareZero(this, user, value, kX64Cmp, cont); } - return VisitWord64Compare(this, value, cont); + return VisitWord64EqualImpl(this, value, cont); } case IrOpcode::kInt64LessThan: cont->OverwriteAndNegateIfEqual(kSignedLessThan); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kInt64LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kUint64LessThan: cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kUint64LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kFloat32Equal: cont->OverwriteAndNegateIfEqual(kUnorderedEqual); return VisitFloat32Compare(this, value, cont); @@ -2221,7 +2248,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) { if (m.right().Is(0)) { return VisitWordCompareZero(m.node(), m.left().node(), &cont); } - VisitWordCompare(this, node, kX64Cmp32, &cont); + VisitWord32EqualImpl(this, node, &cont); } void InstructionSelector::VisitInt32LessThan(Node* node) { @@ -2246,7 +2273,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { VisitWordCompare(this, node, kX64Cmp32, &cont); } -void InstructionSelector::VisitWord64Equal(Node* const node) { +void InstructionSelector::VisitWord64Equal(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); Int64BinopMatcher m(node); if (m.right().Is(0)) { @@ -2256,7 +2283,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) { if (CanCover(user, value)) { switch (value->opcode()) { case IrOpcode::kInt64Sub: - return VisitWord64Compare(this, value, &cont); + return VisitWordCompare(this, value, kX64Cmp, &cont); case IrOpcode::kWord64And: return VisitWordCompare(this, value, kX64Test, &cont); default: @@ -2264,7 +2291,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) { } } } - VisitWord64Compare(this, node, &cont); + VisitWord64EqualImpl(this, node, &cont); } void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { @@ -2287,24 +2314,24 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { void InstructionSelector::VisitInt64LessThan(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); - VisitWord64Compare(this, node, &cont); + 
VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); - VisitWord64Compare(this, node, &cont); + VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitUint64LessThan(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); - VisitWord64Compare(this, node, &cont); + VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); - VisitWord64Compare(this, node, &cont); + VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitFloat32Equal(Node* node) { @@ -2685,9 +2712,11 @@ VISIT_ATOMIC_BINOP(Xor) V(I8x16GtU) #define SIMD_UNOP_LIST(V) \ + V(F64x2Sqrt) \ V(F32x4SConvertI32x4) \ V(F32x4Abs) \ V(F32x4Neg) \ + V(F32x4Sqrt) \ V(F32x4RecipApprox) \ V(F32x4RecipSqrtApprox) \ V(I64x2Neg) \ @@ -2872,6 +2901,27 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { g.UseRegister(node->InputAt(0))); } +#define VISIT_SIMD_QFMOP(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + X64OperandGenerator g(this); \ + if (CpuFeatures::IsSupported(FMA3)) { \ + Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \ + g.UseRegister(node->InputAt(2))); \ + } else { \ + InstructionOperand temps[] = {g.TempSimd128Register()}; \ + Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ + g.UseUniqueRegister(node->InputAt(0)), \ + g.UseUniqueRegister(node->InputAt(1)), \ + g.UseRegister(node->InputAt(2)), arraysize(temps), temps); \ + } \ + } +VISIT_SIMD_QFMOP(F64x2Qfma) +VISIT_SIMD_QFMOP(F64x2Qfms) +VISIT_SIMD_QFMOP(F32x4Qfma) +VISIT_SIMD_QFMOP(F32x4Qfms) +#undef VISIT_SIMD_QFMOP + void InstructionSelector::VisitI64x2ShrS(Node* node) { X64OperandGenerator g(this); InstructionOperand temps[] = {g.TempRegister()}; @@ -2893,10 +2943,10 @@ void InstructionSelector::VisitI64x2Mul(Node* node) { void InstructionSelector::VisitI64x2MinS(Node* node) { X64OperandGenerator g(this); if (this->IsSupported(SSE4_2)) { - InstructionOperand temps[] = {g.TempSimd128Register()}; - Emit(kX64I64x2MinS, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), - arraysize(temps), temps); + InstructionOperand temps[] = {g.TempFpRegister(xmm0)}; + Emit(kX64I64x2MinS, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } else { InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(), g.TempRegister()}; @@ -2908,27 +2958,27 @@ void InstructionSelector::VisitI64x2MinS(Node* node) { void InstructionSelector::VisitI64x2MaxS(Node* node) { X64OperandGenerator g(this); - InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand temps[] = {g.TempFpRegister(xmm0)}; Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } void InstructionSelector::VisitI64x2MinU(Node* node) { X64OperandGenerator g(this); InstructionOperand temps[] = {g.TempSimd128Register(), - g.TempSimd128Register()}; - Emit(kX64I64x2MinU, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), 
xmm0), - arraysize(temps), temps); + g.TempFpRegister(xmm0)}; + Emit(kX64I64x2MinU, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } void InstructionSelector::VisitI64x2MaxU(Node* node) { X64OperandGenerator g(this); InstructionOperand temps[] = {g.TempSimd128Register(), - g.TempSimd128Register()}; + g.TempFpRegister(xmm0)}; Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } @@ -3256,6 +3306,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) { Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps); } +void InstructionSelector::VisitS8x16Swizzle(Node* node) { + X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kX64S8x16Swizzle, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), + arraysize(temps), temps); +} + // static MachineOperatorBuilder::Flags InstructionSelector::SupportedMachineOperatorFlags() { diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.cc b/chromium/v8/src/compiler/bytecode-graph-builder.cc index b1051be5719..17472a305dc 100644 --- a/chromium/v8/src/compiler/bytecode-graph-builder.cc +++ b/chromium/v8/src/compiler/bytecode-graph-builder.cc @@ -24,7 +24,7 @@ #include "src/objects/literal-objects-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/smi.h" -#include "src/objects/template-objects-inl.h" +#include "src/objects/template-objects.h" namespace v8 { namespace internal { @@ -215,6 +215,9 @@ class BytecodeGraphBuilder { FeedbackSlot slot); JSTypeHintLowering::LoweringResult TryBuildSimplifiedConstruct( const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot); + JSTypeHintLowering::LoweringResult TryBuildSimplifiedGetIterator( + const Operator* op, Node* receiver, FeedbackSlot load_slot, + FeedbackSlot call_slot); JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed( const Operator* op, Node* receiver, FeedbackSlot slot); JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed( @@ -945,7 +948,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( bytecode_array().parameter_count(), bytecode_array().register_count(), shared_info.object())), bytecode_iterator_( - base::make_unique<OffHeapBytecodeArray>(bytecode_array())), + std::make_unique<OffHeapBytecodeArray>(bytecode_array())), bytecode_analysis_(broker_->GetBytecodeAnalysis( bytecode_array().object(), osr_offset, flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness, @@ -971,12 +974,12 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( if (FLAG_concurrent_inlining) { // With concurrent inlining on, the source position address doesn't change // because it's been copied from the heap. - source_position_iterator_ = base::make_unique<SourcePositionTableIterator>( + source_position_iterator_ = std::make_unique<SourcePositionTableIterator>( Vector<const byte>(bytecode_array().source_positions_address(), bytecode_array().source_positions_size())); } else { // Otherwise, we need to access the table through a handle. 
- source_position_iterator_ = base::make_unique<SourcePositionTableIterator>( + source_position_iterator_ = std::make_unique<SourcePositionTableIterator>( handle(bytecode_array().object()->SourcePositionTableIfCollected(), isolate())); } @@ -2087,12 +2090,13 @@ void BytecodeGraphBuilder::VisitCloneObject() { void BytecodeGraphBuilder::VisitGetTemplateObject() { DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); - FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1); - ObjectRef description( + FeedbackSource source = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); + TemplateObjectDescriptionRef description( broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); - JSArrayRef template_object = - shared_info().GetTemplateObject(description, feedback_vector(), slot); - environment()->BindAccumulator(jsgraph()->Constant(template_object)); + Node* template_object = NewNode(javascript()->GetTemplateObject( + description.object(), shared_info().object(), source)); + environment()->BindAccumulator(template_object); } Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters( @@ -3297,19 +3301,21 @@ void BytecodeGraphBuilder::VisitForInStep() { void BytecodeGraphBuilder::VisitGetIterator() { PrepareEagerCheckpoint(); - Node* object = + Node* receiver = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - FeedbackSource feedback = + FeedbackSource load_feedback = CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); - const Operator* op = javascript()->GetIterator(feedback); + FeedbackSource call_feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); + const Operator* op = javascript()->GetIterator(load_feedback, call_feedback); - JSTypeHintLowering::LoweringResult lowering = - TryBuildSimplifiedLoadNamed(op, object, feedback.slot); + JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedGetIterator( + op, receiver, load_feedback.slot, call_feedback.slot); if (lowering.IsExit()) return; DCHECK(!lowering.Changed()); - Node* node = NewNode(op, object); - environment()->BindAccumulator(node, Environment::kAttachFrameState); + Node* iterator = NewNode(op, receiver); + environment()->BindAccumulator(iterator, Environment::kAttachFrameState); } void BytecodeGraphBuilder::VisitSuspendGenerator() { @@ -3776,6 +3782,20 @@ BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op, } JSTypeHintLowering::LoweringResult +BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op, + Node* receiver, + FeedbackSlot load_slot, + FeedbackSlot call_slot) { + Node* effect = environment()->GetEffectDependency(); + Node* control = environment()->GetControlDependency(); + JSTypeHintLowering::LoweringResult early_reduction = + type_hint_lowering().ReduceGetIteratorOperation( + op, receiver, effect, control, load_slot, call_slot); + ApplyEarlyReduction(early_reduction); + return early_reduction; +} + +JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op, Node* receiver, FeedbackSlot slot) { diff --git a/chromium/v8/src/compiler/c-linkage.cc b/chromium/v8/src/compiler/c-linkage.cc index 428ba058a7f..4c576b771ac 100644 --- a/chromium/v8/src/compiler/c-linkage.cc +++ b/chromium/v8/src/compiler/c-linkage.cc @@ -27,7 +27,7 @@ namespace { // == x64 ==================================================================== // =========================================================================== -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // == x64 
windows ============================================================ #define STACK_SHADOW_WORDS 4 #define PARAM_REGISTERS rcx, rdx, r8, r9 @@ -39,12 +39,12 @@ namespace { (1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) | \ (1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) | \ (1 << xmm15.code()) -#else +#else // V8_TARGET_OS_WIN // == x64 other ============================================================== #define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9 #define CALLEE_SAVE_REGISTERS \ rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit() -#endif +#endif // V8_TARGET_OS_WIN #elif V8_TARGET_ARCH_ARM // =========================================================================== diff --git a/chromium/v8/src/compiler/code-assembler.cc b/chromium/v8/src/compiler/code-assembler.cc index 4f180114631..5b89e1b663e 100644 --- a/chromium/v8/src/compiler/code-assembler.cc +++ b/chromium/v8/src/compiler/code-assembler.cc @@ -29,6 +29,7 @@ namespace internal { constexpr MachineType MachineTypeOf<Smi>::value; constexpr MachineType MachineTypeOf<Object>::value; +constexpr MachineType MachineTypeOf<MaybeObject>::value; namespace compiler { @@ -1349,8 +1350,8 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor, Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode, const CallInterfaceDescriptor& descriptor, - size_t result_size, Node* target, - SloppyTNode<Object> context, + size_t result_size, TNode<Object> target, + TNode<Object> context, std::initializer_list<Node*> args) { DCHECK(call_mode == StubCallMode::kCallCodeObject || call_mode == StubCallMode::kCallBuiltinPointer); @@ -1369,7 +1370,7 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode, inputs.data()); } -Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl( +void CodeAssembler::TailCallStubThenBytecodeDispatchImpl( const CallInterfaceDescriptor& descriptor, Node* target, Node* context, std::initializer_list<Node*> args) { constexpr size_t kMaxNumArgs = 6; @@ -1389,33 +1390,33 @@ Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl( for (auto arg : args) inputs.Add(arg); inputs.Add(context); - return raw_assembler()->TailCallN(call_descriptor, inputs.size(), - inputs.data()); + raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data()); } template <class... TArgs> -Node* CodeAssembler::TailCallBytecodeDispatch( - const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) { +void CodeAssembler::TailCallBytecodeDispatch( + const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target, + TArgs... 
args) { DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args)); auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor( zone(), descriptor, descriptor.GetStackParameterCount()); Node* nodes[] = {target, args...}; CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes)); - return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes); + raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes); } // Instantiate TailCallBytecodeDispatch() for argument counts used by // CSA-generated code -template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch( - const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*, - Node*, Node*); - -TNode<Object> CodeAssembler::TailCallJSCode(TNode<Code> code, - TNode<Context> context, - TNode<JSFunction> function, - TNode<Object> new_target, - TNode<Int32T> arg_count) { +template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch( + const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target, + TNode<Object>, TNode<IntPtrT>, TNode<BytecodeArray>, + TNode<ExternalReference>); + +void CodeAssembler::TailCallJSCode(TNode<Code> code, TNode<Context> context, + TNode<JSFunction> function, + TNode<Object> new_target, + TNode<Int32T> arg_count) { JSTrampolineDescriptor descriptor; auto call_descriptor = Linkage::GetStubCallDescriptor( zone(), descriptor, descriptor.GetStackParameterCount(), @@ -1423,8 +1424,7 @@ TNode<Object> CodeAssembler::TailCallJSCode(TNode<Code> code, Node* nodes[] = {code, function, new_target, arg_count, context}; CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes)); - return UncheckedCast<Object>( - raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes)); + raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes); } Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature, @@ -1914,7 +1914,7 @@ CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler( compatibility_label_(label), exception_(exception) { if (has_handler_) { - label_ = base::make_unique<CodeAssemblerExceptionHandlerLabel>( + label_ = std::make_unique<CodeAssemblerExceptionHandlerLabel>( assembler, CodeAssemblerLabel::kDeferred); assembler_->state()->PushExceptionHandler(label_.get()); } diff --git a/chromium/v8/src/compiler/code-assembler.h b/chromium/v8/src/compiler/code-assembler.h index c9adb1601db..036b00b14d8 100644 --- a/chromium/v8/src/compiler/code-assembler.h +++ b/chromium/v8/src/compiler/code-assembler.h @@ -17,6 +17,7 @@ #include "src/codegen/code-factory.h" #include "src/codegen/machine-type.h" #include "src/codegen/source-position.h" +#include "src/codegen/tnode.h" #include "src/heap/heap.h" #include "src/objects/arguments.h" #include "src/objects/data-handler.h" @@ -79,210 +80,6 @@ TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED) template <typename T> class Signature; -struct UntaggedT {}; - -struct IntegralT : UntaggedT {}; - -struct WordT : IntegralT { - static const MachineRepresentation kMachineRepresentation = - (kSystemPointerSize == 4) ? 
MachineRepresentation::kWord32 - : MachineRepresentation::kWord64; -}; - -struct RawPtrT : WordT { - static constexpr MachineType kMachineType = MachineType::Pointer(); -}; - -template <class To> -struct RawPtr : RawPtrT {}; - -struct Word32T : IntegralT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kWord32; -}; -struct Int32T : Word32T { - static constexpr MachineType kMachineType = MachineType::Int32(); -}; -struct Uint32T : Word32T { - static constexpr MachineType kMachineType = MachineType::Uint32(); -}; -struct Int16T : Int32T { - static constexpr MachineType kMachineType = MachineType::Int16(); -}; -struct Uint16T : Uint32T, Int32T { - static constexpr MachineType kMachineType = MachineType::Uint16(); -}; -struct Int8T : Int16T { - static constexpr MachineType kMachineType = MachineType::Int8(); -}; -struct Uint8T : Uint16T, Int16T { - static constexpr MachineType kMachineType = MachineType::Uint8(); -}; - -struct Word64T : IntegralT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kWord64; -}; -struct Int64T : Word64T { - static constexpr MachineType kMachineType = MachineType::Int64(); -}; -struct Uint64T : Word64T { - static constexpr MachineType kMachineType = MachineType::Uint64(); -}; - -struct IntPtrT : WordT { - static constexpr MachineType kMachineType = MachineType::IntPtr(); -}; -struct UintPtrT : WordT { - static constexpr MachineType kMachineType = MachineType::UintPtr(); -}; - -struct Float32T : UntaggedT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kFloat32; - static constexpr MachineType kMachineType = MachineType::Float32(); -}; - -struct Float64T : UntaggedT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kFloat64; - static constexpr MachineType kMachineType = MachineType::Float64(); -}; - -#ifdef V8_COMPRESS_POINTERS -using TaggedT = Int32T; -#else -using TaggedT = IntPtrT; -#endif - -// Result of a comparison operation. -struct BoolT : Word32T {}; - -// Value type of a Turbofan node with two results. -template <class T1, class T2> -struct PairT {}; - -inline constexpr MachineType CommonMachineType(MachineType type1, - MachineType type2) { - return (type1 == type2) ? type1 - : ((type1.IsTagged() && type2.IsTagged()) - ? 
MachineType::AnyTagged() - : MachineType::None()); -} - -template <class Type, class Enable = void> -struct MachineTypeOf { - static constexpr MachineType value = Type::kMachineType; -}; - -template <class Type, class Enable> -constexpr MachineType MachineTypeOf<Type, Enable>::value; - -template <> -struct MachineTypeOf<Object> { - static constexpr MachineType value = MachineType::AnyTagged(); -}; -template <> -struct MachineTypeOf<MaybeObject> { - static constexpr MachineType value = MachineType::AnyTagged(); -}; -template <> -struct MachineTypeOf<Smi> { - static constexpr MachineType value = MachineType::TaggedSigned(); -}; -template <class HeapObjectSubtype> -struct MachineTypeOf<HeapObjectSubtype, - typename std::enable_if<std::is_base_of< - HeapObject, HeapObjectSubtype>::value>::type> { - static constexpr MachineType value = MachineType::TaggedPointer(); -}; - -template <class HeapObjectSubtype> -constexpr MachineType MachineTypeOf< - HeapObjectSubtype, typename std::enable_if<std::is_base_of< - HeapObject, HeapObjectSubtype>::value>::type>::value; - -template <class Type, class Enable = void> -struct MachineRepresentationOf { - static const MachineRepresentation value = Type::kMachineRepresentation; -}; -template <class T> -struct MachineRepresentationOf< - T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> { - static const MachineRepresentation value = - MachineTypeOf<T>::value.representation(); -}; -template <class T> -struct MachineRepresentationOf< - T, typename std::enable_if<std::is_base_of<MaybeObject, T>::value>::type> { - static const MachineRepresentation value = - MachineTypeOf<T>::value.representation(); -}; - -template <class T> -struct is_valid_type_tag { - static const bool value = std::is_base_of<Object, T>::value || - std::is_base_of<UntaggedT, T>::value || - std::is_base_of<MaybeObject, T>::value || - std::is_same<ExternalReference, T>::value; - static const bool is_tagged = std::is_base_of<Object, T>::value || - std::is_base_of<MaybeObject, T>::value; -}; - -template <class T1, class T2> -struct is_valid_type_tag<PairT<T1, T2>> { - static const bool value = - is_valid_type_tag<T1>::value && is_valid_type_tag<T2>::value; - static const bool is_tagged = false; -}; - -template <class T1, class T2> -struct UnionT; - -template <class T1, class T2> -struct is_valid_type_tag<UnionT<T1, T2>> { - static const bool is_tagged = - is_valid_type_tag<T1>::is_tagged && is_valid_type_tag<T2>::is_tagged; - static const bool value = is_tagged; -}; - -template <class T1, class T2> -struct UnionT { - static constexpr MachineType kMachineType = - CommonMachineType(MachineTypeOf<T1>::value, MachineTypeOf<T2>::value); - static const MachineRepresentation kMachineRepresentation = - kMachineType.representation(); - static_assert(kMachineRepresentation != MachineRepresentation::kNone, - "no common representation"); - static_assert(is_valid_type_tag<T1>::is_tagged && - is_valid_type_tag<T2>::is_tagged, - "union types are only possible for tagged values"); -}; - -using Number = UnionT<Smi, HeapNumber>; -using Numeric = UnionT<Number, BigInt>; - -// A pointer to a builtin function, used by Torque's function pointers. 
-using BuiltinPtr = Smi; - -class int31_t { - public: - int31_t() : value_(0) {} - int31_t(int value) : value_(value) { // NOLINT(runtime/explicit) - DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); - } - int31_t& operator=(int value) { - DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); - value_ = value; - return *this; - } - int32_t value() const { return value_; } - operator int32_t() const { return value_; } - - private: - int32_t value_; -}; - #define ENUM_ELEMENT(Name) k##Name, #define ENUM_STRUCT_ELEMENT(NAME, Name, name) k##Name, enum class ObjectType { @@ -334,6 +131,7 @@ class Undetectable; class UniqueName; class WasmCapiFunctionData; class WasmExceptionObject; +class WasmExceptionPackage; class WasmExceptionTag; class WasmExportedFunctionData; class WasmGlobalObject; @@ -396,143 +194,6 @@ using CodeAssemblerVariableList = ZoneVector<CodeAssemblerVariable*>; using CodeAssemblerCallback = std::function<void()>; -template <class T, class U> -struct is_subtype { - static const bool value = std::is_base_of<U, T>::value; -}; -template <class T1, class T2, class U> -struct is_subtype<UnionT<T1, T2>, U> { - static const bool value = - is_subtype<T1, U>::value && is_subtype<T2, U>::value; -}; -template <class T, class U1, class U2> -struct is_subtype<T, UnionT<U1, U2>> { - static const bool value = - is_subtype<T, U1>::value || is_subtype<T, U2>::value; -}; -template <class T1, class T2, class U1, class U2> -struct is_subtype<UnionT<T1, T2>, UnionT<U1, U2>> { - static const bool value = - (is_subtype<T1, U1>::value || is_subtype<T1, U2>::value) && - (is_subtype<T2, U1>::value || is_subtype<T2, U2>::value); -}; - -template <class T, class U> -struct types_have_common_values { - static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value; -}; -template <class U> -struct types_have_common_values<BoolT, U> { - static const bool value = types_have_common_values<Word32T, U>::value; -}; -template <class U> -struct types_have_common_values<Uint32T, U> { - static const bool value = types_have_common_values<Word32T, U>::value; -}; -template <class U> -struct types_have_common_values<Int32T, U> { - static const bool value = types_have_common_values<Word32T, U>::value; -}; -template <class U> -struct types_have_common_values<Uint64T, U> { - static const bool value = types_have_common_values<Word64T, U>::value; -}; -template <class U> -struct types_have_common_values<Int64T, U> { - static const bool value = types_have_common_values<Word64T, U>::value; -}; -template <class U> -struct types_have_common_values<IntPtrT, U> { - static const bool value = types_have_common_values<WordT, U>::value; -}; -template <class U> -struct types_have_common_values<UintPtrT, U> { - static const bool value = types_have_common_values<WordT, U>::value; -}; -template <class T1, class T2, class U> -struct types_have_common_values<UnionT<T1, T2>, U> { - static const bool value = types_have_common_values<T1, U>::value || - types_have_common_values<T2, U>::value; -}; - -template <class T, class U1, class U2> -struct types_have_common_values<T, UnionT<U1, U2>> { - static const bool value = types_have_common_values<T, U1>::value || - types_have_common_values<T, U2>::value; -}; -template <class T1, class T2, class U1, class U2> -struct types_have_common_values<UnionT<T1, T2>, UnionT<U1, U2>> { - static const bool value = types_have_common_values<T1, U1>::value || - types_have_common_values<T1, U2>::value || - types_have_common_values<T2, U1>::value || - types_have_common_values<T2, 
U2>::value; -}; - -template <class T> -struct types_have_common_values<T, MaybeObject> { - static const bool value = types_have_common_values<T, Object>::value; -}; - -template <class T> -struct types_have_common_values<MaybeObject, T> { - static const bool value = types_have_common_values<Object, T>::value; -}; - -// TNode<T> is an SSA value with the static type tag T, which is one of the -// following: -// - a subclass of internal::Object represents a tagged type -// - a subclass of internal::UntaggedT represents an untagged type -// - ExternalReference -// - PairT<T1, T2> for an operation returning two values, with types T1 -// and T2 -// - UnionT<T1, T2> represents either a value of type T1 or of type T2. -template <class T> -class TNode { - public: - template <class U, - typename std::enable_if<is_subtype<U, T>::value, int>::type = 0> - TNode(const TNode<U>& other) : node_(other) { - LazyTemplateChecks(); - } - TNode() : TNode(nullptr) {} - - TNode operator=(TNode other) { - DCHECK_NOT_NULL(other.node_); - node_ = other.node_; - return *this; - } - - operator compiler::Node*() const { return node_; } - - static TNode UncheckedCast(compiler::Node* node) { return TNode(node); } - - protected: - explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); } - - private: - // These checks shouldn't be checked before TNode is actually used. - void LazyTemplateChecks() { - static_assert(is_valid_type_tag<T>::value, "invalid type tag"); - } - - compiler::Node* node_; -}; - -// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from -// Node*. It is intended for function arguments as long as some call sites -// still use untyped Node* arguments. -// TODO(tebbi): Delete this class once transition is finished. -template <class T> -class SloppyTNode : public TNode<T> { - public: - SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit) - : TNode<T>(node) {} - template <class U, typename std::enable_if<is_subtype<U, T>::value, - int>::type = 0> - SloppyTNode(const TNode<U>& other) // NOLINT(runtime/explicit) - : TNode<T>(other) {} -}; - template <class... 
Types> class CodeAssemblerParameterizedLabel; @@ -627,7 +288,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b); V(Float64ExtractLowWord32, Uint32T, Float64T) \ V(Float64ExtractHighWord32, Uint32T, Float64T) \ V(BitcastTaggedToWord, IntPtrT, Object) \ - V(BitcastTaggedSignedToWord, IntPtrT, Smi) \ + V(BitcastTaggedToWordForTagAndSmiBits, IntPtrT, AnyTaggedT) \ V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \ V(BitcastWordToTagged, Object, WordT) \ V(BitcastWordToTaggedSigned, Smi, WordT) \ @@ -641,6 +302,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b); V(ChangeInt32ToInt64, Int64T, Int32T) \ V(ChangeUint32ToFloat64, Float64T, Word32T) \ V(ChangeUint32ToUint64, Uint64T, Word32T) \ + V(ChangeTaggedToCompressed, TaggedT, AnyTaggedT) \ V(BitcastInt32ToFloat32, Float32T, Word32T) \ V(BitcastFloat32ToInt32, Uint32T, Float32T) \ V(RoundFloat64ToInt32, Int32T, Float64T) \ @@ -1187,8 +849,12 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode<RawPtrT> RawPtrAdd(TNode<RawPtrT> left, TNode<IntPtrT> right) { return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right)); } - TNode<RawPtrT> RawPtrAdd(TNode<IntPtrT> left, TNode<RawPtrT> right) { - return ReinterpretCast<RawPtrT>(IntPtrAdd(left, right)); + TNode<RawPtrT> RawPtrSub(TNode<RawPtrT> left, TNode<IntPtrT> right) { + return ReinterpretCast<RawPtrT>(IntPtrSub(left, right)); + } + TNode<IntPtrT> RawPtrSub(TNode<RawPtrT> left, TNode<RawPtrT> right) { + return Signed( + IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right))); } TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift); @@ -1243,7 +909,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { template <class Dummy = void> TNode<IntPtrT> BitcastTaggedToWord(TNode<Smi> node) { static_assert(sizeof(Dummy) < 0, - "Should use BitcastTaggedSignedToWord instead."); + "Should use BitcastTaggedToWordForTagAndSmiBits instead."); } // Changes a double to an inptr_t for pointer arithmetic outside of Smi range. @@ -1363,26 +1029,26 @@ class V8_EXPORT_PRIVATE CodeAssembler { void TailCallStub(Callable const& callable, SloppyTNode<Object> context, TArgs... args) { TNode<Code> target = HeapConstant(callable.code()); - return TailCallStub(callable.descriptor(), target, context, args...); + TailCallStub(callable.descriptor(), target, context, args...); } template <class... TArgs> void TailCallStub(const CallInterfaceDescriptor& descriptor, SloppyTNode<Code> target, SloppyTNode<Object> context, TArgs... args) { - return TailCallStubImpl(descriptor, target, context, {args...}); + TailCallStubImpl(descriptor, target, context, {args...}); } template <class... TArgs> - Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor, - Node* target, TArgs... args); + void TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor, + TNode<RawPtrT> target, TArgs... args); template <class... TArgs> - Node* TailCallStubThenBytecodeDispatch( + void TailCallStubThenBytecodeDispatch( const CallInterfaceDescriptor& descriptor, Node* target, Node* context, TArgs... args) { - return TailCallStubThenBytecodeDispatchImpl(descriptor, target, context, - {args...}); + TailCallStubThenBytecodeDispatchImpl(descriptor, target, context, + {args...}); } // Tailcalls to the given code object with JSCall linkage. The JS arguments @@ -1392,14 +1058,13 @@ class V8_EXPORT_PRIVATE CodeAssembler { // Note that no arguments adaption is going on here - all the JavaScript // arguments are left on the stack unmodified. 
Therefore, this tail call can // only be used after arguments adaptation has been performed already. - TNode<Object> TailCallJSCode(TNode<Code> code, TNode<Context> context, - TNode<JSFunction> function, - TNode<Object> new_target, - TNode<Int32T> arg_count); + void TailCallJSCode(TNode<Code> code, TNode<Context> context, + TNode<JSFunction> function, TNode<Object> new_target, + TNode<Int32T> arg_count); template <class... TArgs> - Node* CallJS(Callable const& callable, Node* context, Node* function, - Node* receiver, TArgs... args) { + TNode<Object> CallJS(Callable const& callable, Node* context, Node* function, + Node* receiver, TArgs... args) { int argc = static_cast<int>(sizeof...(args)); TNode<Int32T> arity = Int32Constant(argc); return CallStub(callable, context, function, arity, receiver, args...); @@ -1511,15 +1176,14 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode<Code> target, TNode<Object> context, std::initializer_list<Node*> args); - Node* TailCallStubThenBytecodeDispatchImpl( + void TailCallStubThenBytecodeDispatchImpl( const CallInterfaceDescriptor& descriptor, Node* target, Node* context, std::initializer_list<Node*> args); Node* CallStubRImpl(StubCallMode call_mode, const CallInterfaceDescriptor& descriptor, - size_t result_size, Node* target, - SloppyTNode<Object> context, - std::initializer_list<Node*> args); + size_t result_size, TNode<Object> target, + TNode<Object> context, std::initializer_list<Node*> args); // These two don't have definitions and are here only for catching use cases // where the cast is not necessary. @@ -1810,7 +1474,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler { } // namespace compiler -#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS) +#if defined(V8_HOST_ARCH_32_BIT) #define BINT_IS_SMI using BInt = Smi; #elif defined(V8_HOST_ARCH_64_BIT) diff --git a/chromium/v8/src/compiler/compilation-dependencies.cc b/chromium/v8/src/compiler/compilation-dependencies.cc index 592d85440cc..33990dfa480 100644 --- a/chromium/v8/src/compiler/compilation-dependencies.cc +++ b/chromium/v8/src/compiler/compilation-dependencies.cc @@ -5,6 +5,7 @@ #include "src/compiler/compilation-dependencies.h" #include "src/compiler/compilation-dependency.h" +#include "src/execution/protectors.h" #include "src/handles/handles-inl.h" #include "src/objects/allocation-site-inl.h" #include "src/objects/objects-inl.h" @@ -155,7 +156,7 @@ class FieldRepresentationDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the representation. - FieldRepresentationDependency(const MapRef& owner, int descriptor, + FieldRepresentationDependency(const MapRef& owner, InternalIndex descriptor, Representation representation) : owner_(owner), descriptor_(descriptor), @@ -180,7 +181,7 @@ class FieldRepresentationDependency final : public CompilationDependency { private: MapRef owner_; - int descriptor_; + InternalIndex descriptor_; Representation representation_; }; @@ -188,7 +189,7 @@ class FieldTypeDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the type. 
- FieldTypeDependency(const MapRef& owner, int descriptor, + FieldTypeDependency(const MapRef& owner, InternalIndex descriptor, const ObjectRef& type) : owner_(owner), descriptor_(descriptor), type_(type) { DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_))); @@ -210,13 +211,13 @@ class FieldTypeDependency final : public CompilationDependency { private: MapRef owner_; - int descriptor_; + InternalIndex descriptor_; ObjectRef type_; }; class FieldConstnessDependency final : public CompilationDependency { public: - FieldConstnessDependency(const MapRef& owner, int descriptor) + FieldConstnessDependency(const MapRef& owner, InternalIndex descriptor) : owner_(owner), descriptor_(descriptor) { DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_))); DCHECK_EQ(PropertyConstness::kConst, @@ -238,7 +239,7 @@ class FieldConstnessDependency final : public CompilationDependency { private: MapRef owner_; - int descriptor_; + InternalIndex descriptor_; }; class GlobalPropertyDependency final : public CompilationDependency { @@ -282,12 +283,12 @@ class GlobalPropertyDependency final : public CompilationDependency { class ProtectorDependency final : public CompilationDependency { public: explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) { - DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid); + DCHECK_EQ(cell_.value().AsSmi(), Protectors::kProtectorValid); } bool IsValid() const override { Handle<PropertyCell> cell = cell_.object(); - return cell->value() == Smi::FromInt(Isolate::kProtectorValid); + return cell->value() == Smi::FromInt(Protectors::kProtectorValid); } void Install(const MaybeObjectHandle& code) const override { @@ -404,7 +405,7 @@ AllocationType CompilationDependencies::DependOnPretenureMode( } PropertyConstness CompilationDependencies::DependOnFieldConstness( - const MapRef& map, int descriptor) { + const MapRef& map, InternalIndex descriptor) { MapRef owner = map.FindFieldOwner(descriptor); PropertyConstness constness = owner.GetPropertyDetails(descriptor).constness(); @@ -426,13 +427,13 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness( return PropertyConstness::kConst; } -void CompilationDependencies::DependOnFieldRepresentation(const MapRef& map, - int descriptor) { +void CompilationDependencies::DependOnFieldRepresentation( + const MapRef& map, InternalIndex descriptor) { RecordDependency(FieldRepresentationDependencyOffTheRecord(map, descriptor)); } void CompilationDependencies::DependOnFieldType(const MapRef& map, - int descriptor) { + InternalIndex descriptor) { RecordDependency(FieldTypeDependencyOffTheRecord(map, descriptor)); } @@ -444,7 +445,7 @@ void CompilationDependencies::DependOnGlobalProperty( } bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) { - if (cell.value().AsSmi() != Isolate::kProtectorValid) return false; + if (cell.value().AsSmi() != Protectors::kProtectorValid) return false; RecordDependency(new (zone_) ProtectorDependency(cell)); return true; } @@ -632,7 +633,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord( CompilationDependency const* CompilationDependencies::FieldRepresentationDependencyOffTheRecord( - const MapRef& map, int descriptor) const { + const MapRef& map, InternalIndex descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); PropertyDetails details = owner.GetPropertyDetails(descriptor); DCHECK(details.representation().Equals( @@ -642,8 +643,8 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord( } CompilationDependency 
const* -CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map, - int descriptor) const { +CompilationDependencies::FieldTypeDependencyOffTheRecord( + const MapRef& map, InternalIndex descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); ObjectRef type = owner.GetFieldType(descriptor); DCHECK(type.equals(map.GetFieldType(descriptor))); diff --git a/chromium/v8/src/compiler/compilation-dependencies.h b/chromium/v8/src/compiler/compilation-dependencies.h index cb6cea0685f..0b1612487ed 100644 --- a/chromium/v8/src/compiler/compilation-dependencies.h +++ b/chromium/v8/src/compiler/compilation-dependencies.h @@ -55,11 +55,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // Record the assumption that the field representation of a field does not // change. The field is identified by the arguments. - void DependOnFieldRepresentation(const MapRef& map, int descriptor); + void DependOnFieldRepresentation(const MapRef& map, InternalIndex descriptor); // Record the assumption that the field type of a field does not change. The // field is identified by the arguments. - void DependOnFieldType(const MapRef& map, int descriptor); + void DependOnFieldType(const MapRef& map, InternalIndex descriptor); // Return a field's constness and, if kConst, record the assumption that it // remains kConst. The field is identified by the arguments. @@ -68,7 +68,8 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // kConst if the map is stable (and register stability dependency in that // case). This is to ensure that fast elements kind transitions cannot be // used to mutate fields without deoptimization of the dependent code. - PropertyConstness DependOnFieldConstness(const MapRef& map, int descriptor); + PropertyConstness DependOnFieldConstness(const MapRef& map, + InternalIndex descriptor); // Record the assumption that neither {cell}'s {CellType} changes, nor the // {IsReadOnly()} flag of {cell}'s {PropertyDetails}. @@ -119,9 +120,9 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { CompilationDependency const* TransitionDependencyOffTheRecord( const MapRef& target_map) const; CompilationDependency const* FieldRepresentationDependencyOffTheRecord( - const MapRef& map, int descriptor) const; + const MapRef& map, InternalIndex descriptor) const; CompilationDependency const* FieldTypeDependencyOffTheRecord( - const MapRef& map, int descriptor) const; + const MapRef& map, InternalIndex descriptor) const; // Exposed only for testing purposes. bool AreValid() const; diff --git a/chromium/v8/src/compiler/decompression-elimination.cc b/chromium/v8/src/compiler/decompression-elimination.cc index 537744652b9..5c0f6b1cfaa 100644 --- a/chromium/v8/src/compiler/decompression-elimination.cc +++ b/chromium/v8/src/compiler/decompression-elimination.cc @@ -67,7 +67,6 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) { Node* input_node = node->InputAt(0); IrOpcode::Value input_opcode = input_node->opcode(); if (IrOpcode::IsDecompressOpcode(input_opcode)) { - DCHECK(IsValidDecompress(node->opcode(), input_opcode)); DCHECK_EQ(input_node->InputCount(), 1); return Replace(input_node->InputAt(0)); } else if (IsReducibleConstantOpcode(input_opcode)) { @@ -167,6 +166,42 @@ Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) { return any_change ? 
Changed(node) : NoChange();
 }
 
+Reduction DecompressionElimination::ReduceWord32Equal(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kWord32Equal);
+
+  DCHECK_EQ(node->InputCount(), 2);
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  if (!IrOpcode::IsCompressOpcode(lhs->opcode()) ||
+      !IrOpcode::IsCompressOpcode(rhs->opcode())) {
+    return NoChange();
+  }
+  // Input nodes for compress operation.
+  lhs = lhs->InputAt(0);
+  rhs = rhs->InputAt(0);
+
+  bool changed = false;
+
+  if (lhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+    Node* input = lhs->InputAt(0);
+    if (IsReducibleConstantOpcode(input->opcode())) {
+      node->ReplaceInput(0, GetCompressedConstant(input));
+      changed = true;
+    }
+  }
+
+  if (rhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+    Node* input = rhs->InputAt(0);
+    if (IsReducibleConstantOpcode(input->opcode())) {
+      node->ReplaceInput(1, GetCompressedConstant(input));
+      changed = true;
+    }
+  }
+
+  return changed ? Changed(node) : NoChange();
+}
+
 Reduction DecompressionElimination::ReduceWord64Equal(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kWord64Equal);
@@ -220,6 +255,8 @@ Reduction DecompressionElimination::Reduce(Node* node) {
       return ReducePhi(node);
     case IrOpcode::kTypedStateValues:
       return ReduceTypedStateValues(node);
+    case IrOpcode::kWord32Equal:
+      return ReduceWord32Equal(node);
     case IrOpcode::kWord64Equal:
       return ReduceWord64Equal(node);
     default:
diff --git a/chromium/v8/src/compiler/decompression-elimination.h b/chromium/v8/src/compiler/decompression-elimination.h
index 85a6c98aa0b..6b2be009c6b 100644
--- a/chromium/v8/src/compiler/decompression-elimination.h
+++ b/chromium/v8/src/compiler/decompression-elimination.h
@@ -65,6 +65,11 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
   // value of that constant.
   Reduction ReduceWord64Equal(Node* node);
 
+  // This is a workaround for load elimination test.
+  // Replaces Compress -> BitcastWordToTaggedSigned -> ReducibleConstant
+  // to CompressedConstant on both inputs of Word32Equal operation.
+ Reduction ReduceWord32Equal(Node* node); + Graph* graph() const { return graph_; } MachineOperatorBuilder* machine() const { return machine_; } CommonOperatorBuilder* common() const { return common_; } diff --git a/chromium/v8/src/compiler/effect-control-linearizer.cc b/chromium/v8/src/compiler/effect-control-linearizer.cc index 8dfe356c34d..ceff453164b 100644 --- a/chromium/v8/src/compiler/effect-control-linearizer.cc +++ b/chromium/v8/src/compiler/effect-control-linearizer.cc @@ -187,8 +187,11 @@ class EffectControlLinearizer { Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state); void LowerTransitionElementsKind(Node* node); Node* LowerLoadFieldByIndex(Node* node); + Node* LowerLoadMessage(Node* node); Node* LowerLoadTypedElement(Node* node); Node* LowerLoadDataViewElement(Node* node); + Node* LowerLoadStackArgument(Node* node); + void LowerStoreMessage(Node* node); void LowerStoreTypedElement(Node* node); void LowerStoreDataViewElement(Node* node); void LowerStoreSignedSmallElement(Node* node); @@ -227,6 +230,8 @@ class EffectControlLinearizer { Node* LowerStringComparison(Callable const& callable, Node* node); Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind); + Node* BuildTypedArrayDataPointer(Node* base, Node* external); + Node* ChangeInt32ToCompressedSmi(Node* value); Node* ChangeInt32ToSmi(Node* value); Node* ChangeInt32ToIntPtr(Node* value); @@ -247,6 +252,7 @@ class EffectControlLinearizer { Node* SmiShiftBitsConstant(); void TransitionElementsTo(Node* node, Node* array, ElementsKind from, ElementsKind to); + void ConnectUnreachableToEnd(Node* effect, Node* control); Factory* factory() const { return isolate()->factory(); } Isolate* isolate() const { return jsgraph()->isolate(); } @@ -308,19 +314,8 @@ struct PendingEffectPhi { : effect_phi(effect_phi), block(block) {} }; -void ConnectUnreachableToEnd(Node* effect, Node* control, JSGraph* jsgraph) { - Graph* graph = jsgraph->graph(); - CommonOperatorBuilder* common = jsgraph->common(); - if (effect->opcode() == IrOpcode::kDead) return; - if (effect->opcode() != IrOpcode::kUnreachable) { - effect = graph->NewNode(common->Unreachable(), effect, control); - } - Node* throw_node = graph->NewNode(common->Throw(), effect, control); - NodeProperties::MergeControlToEnd(graph, common, throw_node); -} - void UpdateEffectPhi(Node* node, BasicBlock* block, - BlockEffectControlMap* block_effects, JSGraph* jsgraph) { + BlockEffectControlMap* block_effects) { // Update all inputs to an effect phi with the effects from the given // block->effect map. DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode()); @@ -607,7 +602,7 @@ void EffectControlLinearizer::Run() { // record the effect phi for later processing. pending_effect_phis.push_back(PendingEffectPhi(effect_phi, block)); } else { - UpdateEffectPhi(effect_phi, block, &block_effects, jsgraph()); + UpdateEffectPhi(effect_phi, block, &block_effects); } } @@ -649,7 +644,7 @@ void EffectControlLinearizer::Run() { if (control->opcode() == IrOpcode::kLoop) { pending_effect_phis.push_back(PendingEffectPhi(effect, block)); } else { - UpdateEffectPhi(effect, block, &block_effects, jsgraph()); + UpdateEffectPhi(effect, block, &block_effects); } } else if (control->opcode() == IrOpcode::kIfException) { // The IfException is connected into the effect chain, so we need @@ -734,7 +729,7 @@ void EffectControlLinearizer::Run() { // during the first pass (because they could have incoming back edges). 
for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) { UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block, - &block_effects, jsgraph()); + &block_effects); } } @@ -828,7 +823,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state, // Break the effect chain on {Unreachable} and reconnect to the graph end. // Mark the following code for deletion by connecting to the {Dead} node. if (node->opcode() == IrOpcode::kUnreachable) { - ConnectUnreachableToEnd(*effect, *control, jsgraph()); + ConnectUnreachableToEnd(*effect, *control); *effect = *control = jsgraph()->Dead(); } } @@ -1243,6 +1238,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kTransitionElementsKind: LowerTransitionElementsKind(node); break; + case IrOpcode::kLoadMessage: + result = LowerLoadMessage(node); + break; + case IrOpcode::kStoreMessage: + LowerStoreMessage(node); + break; case IrOpcode::kLoadFieldByIndex: result = LowerLoadFieldByIndex(node); break; @@ -1252,6 +1253,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kLoadDataViewElement: result = LowerLoadDataViewElement(node); break; + case IrOpcode::kLoadStackArgument: + result = LowerLoadStackArgument(node); + break; case IrOpcode::kStoreTypedElement: LowerStoreTypedElement(node); break; @@ -1325,6 +1329,13 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, return true; } +void EffectControlLinearizer::ConnectUnreachableToEnd(Node* effect, + Node* control) { + DCHECK_EQ(effect->opcode(), IrOpcode::kUnreachable); + Node* throw_node = graph()->NewNode(common()->Throw(), effect, control); + NodeProperties::MergeControlToEnd(graph(), common(), throw_node); +} + #define __ gasm()-> Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) { @@ -1601,7 +1612,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) { __ Bind(&if_smi); { // If {value} is a Smi, then we only need to check that it's not zero. - __ Goto(&done, __ Word32Equal(__ IntPtrEqual(value, __ IntPtrConstant(0)), + __ Goto(&done, __ Word32Equal(__ TaggedEqual(value, __ SmiConstant(0)), __ Int32Constant(0))); } @@ -1952,7 +1963,7 @@ Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined( __ LoadField(AccessBuilder::ForMapInstanceType(), value_map); // Rule out all primitives except oddballs (true, false, undefined, null). - STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE); + STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE); STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE), value_instance_type); @@ -2028,9 +2039,8 @@ Node* EffectControlLinearizer::LowerStringConcat(Node* node) { callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow); - Node* value = - __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs, - rhs, __ NoContextConstant()); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); return value; } @@ -2112,8 +2122,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node, // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have // to return -kMinInt, which is not representable as Word32. 
- Node* check_lhs_minint = graph()->NewNode(machine()->Word32Equal(), lhs, - __ Int32Constant(kMinInt)); + Node* check_lhs_minint = __ Word32Equal(lhs, __ Int32Constant(kMinInt)); __ Branch(check_lhs_minint, &if_lhs_minint, &if_lhs_notminint); __ Bind(&if_lhs_minint); @@ -2760,7 +2769,7 @@ Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) { DCHECK(machine()->Is64()); Node* value = node->InputAt(0); - Node* map = jsgraph()->HeapConstant(factory()->bigint_map()); + Node* map = __ HeapConstant(factory()->bigint_map()); // BigInts with value 0 must be of size 0 (canonical form). auto if_zerodigits = __ MakeLabel(); auto if_onedigit = __ MakeLabel(); @@ -2963,10 +2972,11 @@ Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) { Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); Node* value_instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), value_map); - STATIC_ASSERT(JS_TYPED_ARRAY_TYPE + 1 == JS_DATA_VIEW_TYPE); Node* vfalse = __ Uint32LessThan( - __ Int32Sub(value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE)), - __ Int32Constant(2)); + __ Int32Sub(value_instance_type, + __ Int32Constant(FIRST_JS_ARRAY_BUFFER_VIEW_TYPE)), + __ Int32Constant(LAST_JS_ARRAY_BUFFER_VIEW_TYPE - + FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1)); __ Goto(&done, vfalse); __ Bind(&if_smi); @@ -3521,7 +3531,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) { __ Load(MachineType::Pointer(), frame, __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset)); Node* parent_frame_type = __ Load( - MachineType::TypeCompressedTagged(), parent_frame, + MachineType::IntPtr(), parent_frame, __ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset)); __ GotoIf(__ IntPtrEqual(parent_frame_type, @@ -3541,7 +3551,7 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) { auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0)); __ GotoIf(zero_length, &done, - jsgraph()->HeapConstant(factory()->empty_fixed_array())); + __ HeapConstant(factory()->empty_fixed_array())); // Compute the effective size of the backing store. Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kDoubleSizeLog2)), @@ -3589,7 +3599,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) { auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0)); __ GotoIf(zero_length, &done, - jsgraph()->HeapConstant(factory()->empty_fixed_array())); + __ HeapConstant(factory()->empty_fixed_array())); // Compute the effective size of the backing store. 
Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kTaggedSizeLog2)), @@ -3671,10 +3681,9 @@ Node* EffectControlLinearizer::LowerNewConsString(Node* node) { __ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)), &if_twobyte, &if_onebyte); __ Bind(&if_onebyte); - __ Goto(&done, - jsgraph()->HeapConstant(factory()->cons_one_byte_string_map())); + __ Goto(&done, __ HeapConstant(factory()->cons_one_byte_string_map())); __ Bind(&if_twobyte); - __ Goto(&done, jsgraph()->HeapConstant(factory()->cons_string_map())); + __ Goto(&done, __ HeapConstant(factory()->cons_string_map())); __ Bind(&done); Node* result_map = done.PhiAt(0); @@ -4287,9 +4296,8 @@ Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) { graph()->zone(), callable.descriptor(), callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, Operator::kFoldable | Operator::kNoThrow); - Node* value = - __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs, - rhs, __ NoContextConstant()); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); // Check for exception sentinel: Smi is returned to signal BigIntTooBig. __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{}, @@ -4305,9 +4313,8 @@ Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) { graph()->zone(), callable.descriptor(), callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, Operator::kFoldable | Operator::kNoThrow); - Node* value = - __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), - node->InputAt(0), __ NoContextConstant()); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), + node->InputAt(0), __ NoContextConstant()); return value; } @@ -4746,6 +4753,20 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) { __ Bind(&done); } +Node* EffectControlLinearizer::LowerLoadMessage(Node* node) { + Node* offset = node->InputAt(0); + Node* object_pattern = + __ LoadField(AccessBuilder::ForExternalIntPtr(), offset); + return __ BitcastWordToTagged(object_pattern); +} + +void EffectControlLinearizer::LowerStoreMessage(Node* node) { + Node* offset = node->InputAt(0); + Node* object = node->InputAt(1); + Node* object_pattern = __ BitcastTaggedToWord(object); + __ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern); +} + Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { Node* object = node->InputAt(0); Node* index = node->InputAt(1); @@ -4801,6 +4822,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { // architectures, or a mutable HeapNumber. 
   __ Bind(&if_double);
   {
+    auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged);
     auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);
 
     index = __ WordSar(index, one);
@@ -4818,10 +4840,9 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
         Node* result = __ Load(MachineType::Float64(), object, offset);
         __ Goto(&done_double, result);
       } else {
-        Node* result =
+        Node* field =
             __ Load(MachineType::TypeCompressedTagged(), object, offset);
-        result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
-        __ Goto(&done_double, result);
+        __ Goto(&loaded_field, field);
       }
     }
 
@@ -4834,10 +4855,24 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
                                __ IntPtrConstant(kTaggedSizeLog2)),
                     __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
                                       kHeapObjectTag));
-      Node* result =
+      Node* field =
           __ Load(MachineType::TypeCompressedTagged(), properties, offset);
-      result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
-      __ Goto(&done_double, result);
+      __ Goto(&loaded_field, field);
+    }
+
+    __ Bind(&loaded_field);
+    {
+      Node* field = loaded_field.PhiAt(0);
+      // We may have transitioned in-place away from double, so check that
+      // this is a HeapNumber -- otherwise the load is fine and we don't need
+      // to copy anything anyway.
+      __ GotoIf(ObjectIsSmi(field), &done, field);
+      Node* field_map = __ LoadField(AccessBuilder::ForMap(), field);
+      __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done,
+                   field);
+
+      Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field);
+      __ Goto(&done_double, value);
     }
 
     __ Bind(&done_double);
@@ -4988,6 +5023,35 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
                   done.PhiAt(0));
 }
 
+// Compute the data pointer, handling the case where the {external} pointer
+// is the effective data pointer (i.e. the {base} is Smi zero).
+Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base,
+                                                          Node* external) {
+  if (IntPtrMatcher(base).Is(0)) {
+    return external;
+  } else {
+    if (COMPRESS_POINTERS_BOOL) {
+      // TurboFan does not support loading of compressed fields without
+      // decompression so we add the following operations to workaround that.
+      // We can't load the base value as word32 because in that case the
+      // value will not be marked as tagged in the pointer map and will not
+      // survive GC.
+      // Compress base value back to in order to be able to decompress by
+      // doing an unsafe add below. Both decompression and compression
+      // will be removed by the decompression elimination pass.
+      base = __ ChangeTaggedToCompressed(base);
+      base = __ BitcastTaggedToWord(base);
+      // Zero-extend Tagged_t to UintPtr according to current compression
+      // scheme so that the addition with |external_pointer| (which already
+      // contains compensated offset value) will decompress the tagged value.
+      // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
+      // details.
+      base = ChangeUint32ToUintPtr(base);
+    }
+    return __ UnsafePointerAdd(base, external);
+  }
+}
+
 Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
   Node* buffer = node->InputAt(0);
@@ -4999,17 +5063,22 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
   // ArrayBuffer (if there's any) as long as we are still operating on it.
   __ Retain(buffer);
 
-  // Compute the effective storage pointer, handling the case where the
-  // {external} pointer is the effective storage pointer (i.e.
the {base} - // is Smi zero). - Node* storage = IntPtrMatcher(base).Is(0) - ? external - : __ UnsafePointerAdd(base, external); + Node* data_ptr = BuildTypedArrayDataPointer(base, external); // Perform the actual typed element access. return __ LoadElement(AccessBuilder::ForTypedArrayElement( array_type, true, LoadSensitivity::kCritical), - storage, index); + data_ptr, index); +} + +Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) { + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + Node* argument = + __ LoadElement(AccessBuilder::ForStackArgument(), base, index); + + return __ BitcastWordToTagged(argument); } void EffectControlLinearizer::LowerStoreTypedElement(Node* node) { @@ -5024,16 +5093,11 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) { // ArrayBuffer (if there's any) as long as we are still operating on it. __ Retain(buffer); - // Compute the effective storage pointer, handling the case where the - // {external} pointer is the effective storage pointer (i.e. the {base} - // is Smi zero). - Node* storage = IntPtrMatcher(base).Is(0) - ? external - : __ UnsafePointerAdd(base, external); + Node* data_ptr = BuildTypedArrayDataPointer(base, external); // Perform the actual typed element access. __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true), - storage, index, value); + data_ptr, index, value); } void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array, @@ -5402,7 +5466,7 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) { auto call_descriptor = Linkage::GetRuntimeCallDescriptor( graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags); __ Call(call_descriptor, __ CEntryStubConstant(1), - jsgraph()->SmiConstant(static_cast<int>(reason)), + __ SmiConstant(static_cast<int>(reason)), __ ExternalConstant(ExternalReference::Create(id)), __ Int32Constant(1), __ NoContextConstant()); } diff --git a/chromium/v8/src/compiler/escape-analysis-reducer.cc b/chromium/v8/src/compiler/escape-analysis-reducer.cc index 18ae069b21a..b2fb8d10cee 100644 --- a/chromium/v8/src/compiler/escape-analysis-reducer.cc +++ b/chromium/v8/src/compiler/escape-analysis-reducer.cc @@ -326,9 +326,8 @@ void EscapeAnalysisReducer::Finalize() { TypeCache::Get()->kArgumentsLengthType); NodeProperties::ReplaceValueInput(load, arguments_frame, 0); NodeProperties::ReplaceValueInput(load, offset, 1); - NodeProperties::ChangeOp(load, - jsgraph()->simplified()->LoadElement( - AccessBuilder::ForStackArgument())); + NodeProperties::ChangeOp( + load, jsgraph()->simplified()->LoadStackArgument()); break; } case IrOpcode::kLoadField: { diff --git a/chromium/v8/src/compiler/frame-states.cc b/chromium/v8/src/compiler/frame-states.cc index 9478c08c6c1..576f6ce5427 100644 --- a/chromium/v8/src/compiler/frame-states.cc +++ b/chromium/v8/src/compiler/frame-states.cc @@ -137,13 +137,17 @@ Node* CreateStubBuiltinContinuationFrameState( // Stack parameters first. Depending on {mode}, final parameters are added // by the deoptimizer and aren't explicitly passed in the frame state. int stack_parameter_count = - descriptor.GetParameterCount() - DeoptimizerParameterCountFor(mode); - // Reserving space in the vector, except for the case where - // stack_parameter_count is -1. - actual_parameters.reserve(stack_parameter_count >= 0 - ? 
stack_parameter_count + - descriptor.GetRegisterParameterCount() - : 0); + descriptor.GetStackParameterCount() - DeoptimizerParameterCountFor(mode); + + // Ensure the parameters added by the deoptimizer are passed on the stack. + // This check prevents using TFS builtins as continuations while doing the + // lazy deopt. Use TFC or TFJ builtin as a lazy deopt continuation which + // would pass the result parameter on the stack. + DCHECK_GE(stack_parameter_count, 0); + + // Reserving space in the vector. + actual_parameters.reserve(stack_parameter_count + + descriptor.GetRegisterParameterCount()); for (int i = 0; i < stack_parameter_count; ++i) { actual_parameters.push_back( parameters[descriptor.GetRegisterParameterCount() + i]); diff --git a/chromium/v8/src/compiler/functional-list.h b/chromium/v8/src/compiler/functional-list.h index 2345f1d3605..6af63030f83 100644 --- a/chromium/v8/src/compiler/functional-list.h +++ b/chromium/v8/src/compiler/functional-list.h @@ -90,6 +90,8 @@ class FunctionalList { size_t Size() const { return elements_ ? elements_->size : 0; } + void Clear() { elements_ = nullptr; } + class iterator { public: explicit iterator(Cons* cur) : current_(cur) {} diff --git a/chromium/v8/src/compiler/graph-assembler.cc b/chromium/v8/src/compiler/graph-assembler.cc index b4ad81ecda0..5c167db9805 100644 --- a/chromium/v8/src/compiler/graph-assembler.cc +++ b/chromium/v8/src/compiler/graph-assembler.cc @@ -99,6 +99,10 @@ Node* GraphAssembler::IntPtrEqual(Node* left, Node* right) { } Node* GraphAssembler::TaggedEqual(Node* left, Node* right) { + if (COMPRESS_POINTERS_BOOL) { + return Word32Equal(ChangeTaggedToCompressed(left), + ChangeTaggedToCompressed(right)); + } return WordEqual(left, right); } @@ -232,10 +236,10 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) { current_effect_, current_control_); } -Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) { +Node* GraphAssembler::BitcastTaggedToWordForTagAndSmiBits(Node* value) { return current_effect_ = - graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value, - current_effect_, current_control_); + graph()->NewNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), + value, current_effect_, current_control_); } Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) { diff --git a/chromium/v8/src/compiler/graph-assembler.h b/chromium/v8/src/compiler/graph-assembler.h index 0088f867c54..d2df5a75f3a 100644 --- a/chromium/v8/src/compiler/graph-assembler.h +++ b/chromium/v8/src/compiler/graph-assembler.h @@ -233,7 +233,7 @@ class GraphAssembler { Node* ToNumber(Node* value); Node* BitcastWordToTagged(Node* value); Node* BitcastTaggedToWord(Node* value); - Node* BitcastTaggedSignedToWord(Node* value); + Node* BitcastTaggedToWordForTagAndSmiBits(Node* value); Node* Allocate(AllocationType allocation, Node* size); Node* LoadField(FieldAccess const&, Node* object); Node* LoadElement(ElementAccess const&, Node* object, Node* index); diff --git a/chromium/v8/src/compiler/graph-reducer.cc b/chromium/v8/src/compiler/graph-reducer.cc index 9a0dea6b260..91b4b51c91b 100644 --- a/chromium/v8/src/compiler/graph-reducer.cc +++ b/chromium/v8/src/compiler/graph-reducer.cc @@ -94,7 +94,8 @@ Reduction GraphReducer::Reduce(Node* const node) { // all the other reducers for this node, as now there may be more // opportunities for reduction. 
if (FLAG_trace_turbo_reduction) { - StdoutStream{} << "- In-place update of " << *node << " by reducer " + AllowHandleDereference allow_deref; + StdoutStream{} << "- In-place update of #" << *node << " by reducer " << (*i)->reducer_name() << std::endl; } skip = i; @@ -103,7 +104,8 @@ Reduction GraphReducer::Reduce(Node* const node) { } else { // {node} was replaced by another node. if (FLAG_trace_turbo_reduction) { - StdoutStream{} << "- Replacement of " << *node << " with " + AllowHandleDereference allow_deref; + StdoutStream{} << "- Replacement of #" << *node << " with #" << *(reduction.replacement()) << " by reducer " << (*i)->reducer_name() << std::endl; } diff --git a/chromium/v8/src/compiler/graph-visualizer.cc b/chromium/v8/src/compiler/graph-visualizer.cc index 85123261dbd..dddba7d36f6 100644 --- a/chromium/v8/src/compiler/graph-visualizer.cc +++ b/chromium/v8/src/compiler/graph-visualizer.cc @@ -163,7 +163,6 @@ void JsonPrintInlinedFunctionInfo( void JsonPrintAllSourceWithPositions(std::ostream& os, OptimizedCompilationInfo* info, Isolate* isolate) { - AllowDeferredHandleDereference allow_deference_for_print_code; os << "\"sources\" : {"; Handle<Script> script = (info->shared_info().is_null() || @@ -1055,15 +1054,9 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) { } break; } - case InstructionOperand::EXPLICIT: case InstructionOperand::ALLOCATED: { const LocationOperand* allocated = LocationOperand::cast(op); - os << "\"type\": "; - if (allocated->IsExplicit()) { - os << "\"explicit\", "; - } else { - os << "\"allocated\", "; - } + os << "\"type\": \"allocated\", "; os << "\"text\": \""; if (op->IsStackSlot()) { os << "stack:" << allocated->index(); diff --git a/chromium/v8/src/compiler/heap-refs.h b/chromium/v8/src/compiler/heap-refs.h index 9b1aa53eb91..f08e49832e2 100644 --- a/chromium/v8/src/compiler/heap-refs.h +++ b/chromium/v8/src/compiler/heap-refs.h @@ -29,7 +29,6 @@ class NativeContext; class ScriptContextTable; namespace compiler { - // Whether we are loading a property or storing to a property. // For a store during literal creation, do not walk up the prototype chain. 
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas }; @@ -95,10 +94,12 @@ enum class OddballType : uint8_t { V(PropertyCell) \ V(SharedFunctionInfo) \ V(SourceTextModule) \ + V(TemplateObjectDescription) \ /* Subtypes of Object */ \ V(HeapObject) class CompilationDependencies; +struct FeedbackSource; class JSHeapBroker; class ObjectData; class PerIsolateCompilerCache; @@ -163,8 +164,8 @@ class V8_EXPORT_PRIVATE ObjectRef { private: friend class FunctionTemplateInfoRef; friend class JSArrayData; - friend class JSGlobalProxyRef; - friend class JSGlobalProxyData; + friend class JSGlobalObjectData; + friend class JSGlobalObjectRef; friend class JSHeapBroker; friend class JSObjectData; friend class StringData; @@ -329,8 +330,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { SharedFunctionInfoRef shared() const; FeedbackVectorRef feedback_vector() const; int InitialMapInstanceSizeWithMinSlack() const; - - bool IsSerializedForCompilation() const; }; class JSRegExpRef : public JSObjectRef { @@ -344,6 +343,8 @@ class JSRegExpRef : public JSObjectRef { ObjectRef source() const; ObjectRef flags() const; ObjectRef last_index() const; + + void SerializeAsRegExpBoilerplate(); }; class HeapNumberRef : public HeapObjectRef { @@ -388,6 +389,7 @@ class ContextRef : public HeapObjectRef { V(JSFunction, object_function) \ V(JSFunction, promise_function) \ V(JSFunction, promise_then) \ + V(JSFunction, regexp_function) \ V(JSFunction, string_function) \ V(JSFunction, symbol_function) \ V(JSGlobalObject, global_object) \ @@ -496,7 +498,6 @@ class FeedbackVectorRef : public HeapObjectRef { double invocation_count() const; void Serialize(); - ObjectRef get(FeedbackSlot slot) const; FeedbackCellRef GetClosureFeedbackCell(int index) const; }; @@ -535,6 +536,9 @@ class AllocationSiteRef : public HeapObjectRef { // // If PointsToLiteral() is false, then IsFastLiteral() is also false. bool IsFastLiteral() const; + + void SerializeBoilerplate(); + // We only serialize boilerplate if IsFastLiteral is true. 
base::Optional<JSObjectRef> boilerplate() const; @@ -585,7 +589,6 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { bool is_migration_target() const; bool supports_fast_array_iteration() const; bool supports_fast_array_resize() const; - bool IsMapOfTargetGlobalProxy() const; bool is_abandoned_prototype_map() const; OddballType oddball_type() const; @@ -609,15 +612,15 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { // Concerning the underlying instance_descriptors: void SerializeOwnDescriptors(); - void SerializeOwnDescriptor(int descriptor_index); - bool serialized_own_descriptor(int descriptor_index) const; - MapRef FindFieldOwner(int descriptor_index) const; - PropertyDetails GetPropertyDetails(int descriptor_index) const; - NameRef GetPropertyKey(int descriptor_index) const; - FieldIndex GetFieldIndexFor(int descriptor_index) const; - ObjectRef GetFieldType(int descriptor_index) const; - bool IsUnboxedDoubleField(int descriptor_index) const; - ObjectRef GetStrongValue(int descriptor_number) const; + void SerializeOwnDescriptor(InternalIndex descriptor_index); + bool serialized_own_descriptor(InternalIndex descriptor_index) const; + MapRef FindFieldOwner(InternalIndex descriptor_index) const; + PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const; + NameRef GetPropertyKey(InternalIndex descriptor_index) const; + FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const; + ObjectRef GetFieldType(InternalIndex descriptor_index) const; + bool IsUnboxedDoubleField(InternalIndex descriptor_index) const; + ObjectRef GetStrongValue(InternalIndex descriptor_number) const; void SerializeRootMap(); base::Optional<MapRef> FindRootMap() const; @@ -727,7 +730,6 @@ class BytecodeArrayRef : public FixedArrayBaseRef { Address handler_table_address() const; int handler_table_size() const; - bool IsSerializedForCompilation() const; void SerializeForCompilation(); }; @@ -769,7 +771,8 @@ class ScopeInfoRef : public HeapObjectRef { V(bool, is_safe_to_skip_arguments_adaptor) \ V(bool, IsInlineable) \ V(int, StartPosition) \ - V(bool, is_compiled) + V(bool, is_compiled) \ + V(bool, IsUserJavaScript) class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { public: @@ -791,7 +794,7 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { // wraps the retrieval of the template object and creates it if // necessary. 
JSArrayRef GetTemplateObject( - ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot, + TemplateObjectDescriptionRef description, FeedbackSource const& source, SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); void SerializeFunctionTemplateInfo(); @@ -826,7 +829,7 @@ class JSTypedArrayRef : public JSObjectRef { bool is_on_heap() const; size_t length() const; - void* external_pointer() const; + void* data_ptr() const; void Serialize(); bool serialized() const; @@ -845,6 +848,13 @@ class SourceTextModuleRef : public HeapObjectRef { base::Optional<CellRef> GetCell(int cell_index) const; }; +class TemplateObjectDescriptionRef : public HeapObjectRef { + public: + DEFINE_REF_CONSTRUCTOR(TemplateObjectDescription, HeapObjectRef) + + Handle<TemplateObjectDescription> object() const; +}; + class CellRef : public HeapObjectRef { public: DEFINE_REF_CONSTRUCTOR(Cell, HeapObjectRef) @@ -859,13 +869,8 @@ class JSGlobalObjectRef : public JSObjectRef { DEFINE_REF_CONSTRUCTOR(JSGlobalObject, JSObjectRef) Handle<JSGlobalObject> object() const; -}; - -class JSGlobalProxyRef : public JSObjectRef { - public: - DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef) - Handle<JSGlobalProxy> object() const; + bool IsDetached() const; // If {serialize} is false: // If the property is known to exist as a property cell (on the global @@ -879,6 +884,13 @@ class JSGlobalProxyRef : public JSObjectRef { SerializationPolicy::kAssumeSerialized) const; }; +class JSGlobalProxyRef : public JSObjectRef { + public: + DEFINE_REF_CONSTRUCTOR(JSGlobalProxy, JSObjectRef) + + Handle<JSGlobalProxy> object() const; +}; + class CodeRef : public HeapObjectRef { public: DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef) diff --git a/chromium/v8/src/compiler/int64-lowering.h b/chromium/v8/src/compiler/int64-lowering.h index 1e2a36089b1..0190d3a9c42 100644 --- a/chromium/v8/src/compiler/int64-lowering.h +++ b/chromium/v8/src/compiler/int64-lowering.h @@ -5,6 +5,8 @@ #ifndef V8_COMPILER_INT64_LOWERING_H_ #define V8_COMPILER_INT64_LOWERING_H_ +#include <memory> + #include "src/common/globals.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" diff --git a/chromium/v8/src/compiler/js-call-reducer.cc b/chromium/v8/src/compiler/js-call-reducer.cc index 0b7b4a65f45..b86b1e6baff 100644 --- a/chromium/v8/src/compiler/js-call-reducer.cc +++ b/chromium/v8/src/compiler/js-call-reducer.cc @@ -473,10 +473,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) { return inference.NoChange(); } - if (!receiver_map.serialized_own_descriptor( - JSFunction::kLengthDescriptorIndex) || - !receiver_map.serialized_own_descriptor( - JSFunction::kNameDescriptorIndex)) { + const InternalIndex kLengthIndex(JSFunction::kLengthDescriptorIndex); + const InternalIndex kNameIndex(JSFunction::kNameDescriptorIndex); + if (!receiver_map.serialized_own_descriptor(kLengthIndex) || + !receiver_map.serialized_own_descriptor(kNameIndex)) { TRACE_BROKER_MISSING(broker(), "serialized descriptors on map " << receiver_map); return inference.NoChange(); @@ -485,14 +485,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { StringRef length_string(broker(), roots.length_string_handle()); StringRef name_string(broker(), roots.name_string_handle()); - if (!receiver_map.GetPropertyKey(JSFunction::kLengthDescriptorIndex) - .equals(length_string) || - !receiver_map.GetStrongValue(JSFunction::kLengthDescriptorIndex) - .IsAccessorInfo() || - 
!receiver_map.GetPropertyKey(JSFunction::kNameDescriptorIndex) - .equals(name_string) || - !receiver_map.GetStrongValue(JSFunction::kNameDescriptorIndex) - .IsAccessorInfo()) { + if (!receiver_map.GetPropertyKey(kLengthIndex).equals(length_string) || + !receiver_map.GetStrongValue(kLengthIndex).IsAccessorInfo() || + !receiver_map.GetPropertyKey(kNameIndex).equals(name_string) || + !receiver_map.GetStrongValue(kNameIndex).IsAccessorInfo()) { return inference.NoChange(); } } @@ -3013,12 +3009,13 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread( node->opcode() == IrOpcode::kJSConstructWithArrayLike || node->opcode() == IrOpcode::kJSConstructWithSpread); - // Check if {arguments_list} is an arguments object, and {node} is the only - // value user of {arguments_list} (except for value uses in frame states). Node* arguments_list = NodeProperties::GetValueInput(node, arity); if (arguments_list->opcode() != IrOpcode::kJSCreateArguments) { return NoChange(); } + + // Check if {node} is the only value user of {arguments_list} (except for + // value uses in frame states). If not, we give up for now. for (Edge edge : arguments_list->use_edges()) { if (!NodeProperties::IsValueEdge(edge)) continue; Node* const user = edge.from(); @@ -3704,7 +3701,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, case Builtins::kMapIteratorPrototypeNext: return ReduceCollectionIteratorPrototypeNext( node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(), - FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE); + FIRST_JS_MAP_ITERATOR_TYPE, LAST_JS_MAP_ITERATOR_TYPE); case Builtins::kSetPrototypeEntries: return ReduceCollectionIteration(node, CollectionKind::kSet, IterationKind::kEntries); @@ -3716,7 +3713,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, case Builtins::kSetIteratorPrototypeNext: return ReduceCollectionIteratorPrototypeNext( node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(), - FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE); + FIRST_JS_SET_ITERATOR_TYPE, LAST_JS_SET_ITERATOR_TYPE); case Builtins::kDatePrototypeGetTime: return ReduceDatePrototypeGetTime(node); case Builtins::kDateNow: @@ -5676,8 +5673,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - if (!FLAG_experimental_inline_promise_constructor) return NoChange(); - // Only handle builtins Promises, not subclasses. if (target != new_target) return NoChange(); @@ -7103,11 +7098,14 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) { Node* control = NodeProperties::GetControlInput(node); Node* regexp = NodeProperties::GetValueInput(node, 1); + // Only the initial JSRegExp map is valid here, since the following lastIndex + // check as well as the lowered builtin call rely on a known location of the + // lastIndex field. 
+ Handle<Map> regexp_initial_map = + native_context().regexp_function().initial_map().object(); + MapInference inference(broker(), regexp, effect); - if (!inference.HaveMaps() || - !inference.AllOfInstanceTypes(InstanceTypeChecker::IsJSRegExp)) { - return inference.NoChange(); - } + if (!inference.Is(regexp_initial_map)) return inference.NoChange(); MapHandles const& regexp_maps = inference.GetMaps(); ZoneVector<PropertyAccessInfo> access_infos(graph()->zone()); diff --git a/chromium/v8/src/compiler/js-context-specialization.cc b/chromium/v8/src/compiler/js-context-specialization.cc index 035e8b7ceb9..409fc6c9a14 100644 --- a/chromium/v8/src/compiler/js-context-specialization.cc +++ b/chromium/v8/src/compiler/js-context-specialization.cc @@ -38,7 +38,7 @@ Reduction JSContextSpecialization::ReduceParameter(Node* node) { // Constant-fold the function parameter {node}. Handle<JSFunction> function; if (closure().ToHandle(&function)) { - Node* value = jsgraph()->HeapConstant(function); + Node* value = jsgraph()->Constant(JSFunctionRef(broker_, function)); return Replace(value); } } diff --git a/chromium/v8/src/compiler/js-create-lowering.cc b/chromium/v8/src/compiler/js-create-lowering.cc index cb52ccaccb1..6ab54d793a8 100644 --- a/chromium/v8/src/compiler/js-create-lowering.cc +++ b/chromium/v8/src/compiler/js-create-lowering.cc @@ -18,6 +18,7 @@ #include "src/compiler/operator-properties.h" #include "src/compiler/simplified-operator.h" #include "src/compiler/state-values-utils.h" +#include "src/execution/protectors.h" #include "src/objects/arguments.h" #include "src/objects/hash-table-inl.h" #include "src/objects/heap-number.h" @@ -26,6 +27,7 @@ #include "src/objects/js-promise.h" #include "src/objects/js-regexp-inl.h" #include "src/objects/objects-inl.h" +#include "src/objects/template-objects.h" namespace v8 { namespace internal { @@ -84,6 +86,8 @@ Reduction JSCreateLowering::Reduce(Node* node) { return ReduceJSCreateLiteralArrayOrObject(node); case IrOpcode::kJSCreateLiteralRegExp: return ReduceJSCreateLiteralRegExp(node); + case IrOpcode::kJSGetTemplateObject: + return ReduceJSGetTemplateObject(node); case IrOpcode::kJSCreateEmptyLiteralArray: return ReduceJSCreateEmptyLiteralArray(node); case IrOpcode::kJSCreateEmptyLiteralObject: @@ -640,10 +644,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) { allocation = dependencies()->DependOnPretenureMode(*site_ref); dependencies()->DependOnElementsKind(*site_ref); } else { - CellRef array_constructor_protector( + PropertyCellRef array_constructor_protector( broker(), factory()->array_constructor_protector()); - can_inline_call = - array_constructor_protector.value().AsSmi() == Isolate::kProtectorValid; + can_inline_call = array_constructor_protector.value().AsSmi() == + Protectors::kProtectorValid; } if (arity == 0) { @@ -1073,15 +1077,10 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) { CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - - FeedbackVectorRef feedback_vector(broker(), p.feedback().vector); - ObjectRef feedback = feedback_vector.get(p.feedback().slot); - // TODO(turbofan): we should consider creating a ProcessedFeedback for - // allocation sites/boiler plates so that we use GetFeedback here. Then - // we can eventually get rid of the additional copy of feedback slots that - // we currently have in FeedbackVectorData. 
- if (feedback.IsAllocationSite()) { - AllocationSiteRef site = feedback.AsAllocationSite(); + ProcessedFeedback const& feedback = + broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback()); + if (!feedback.IsInsufficient()) { + AllocationSiteRef site = feedback.AsLiteral().value(); if (site.IsFastLiteral()) { AllocationType allocation = AllocationType::kYoung; if (FLAG_allocation_site_pretenuring) { @@ -1095,20 +1094,17 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) { return Replace(value); } } + return NoChange(); } Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) { DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode()); FeedbackParameter const& p = FeedbackParameterOf(node->op()); - FeedbackVectorRef fv(broker(), p.feedback().vector); - ObjectRef feedback = fv.get(p.feedback().slot); - // TODO(turbofan): we should consider creating a ProcessedFeedback for - // allocation sites/boiler plates so that we use GetFeedback here. Then - // we can eventually get rid of the additional copy of feedback slots that - // we currently have in FeedbackVectorData. - if (feedback.IsAllocationSite()) { - AllocationSiteRef site = feedback.AsAllocationSite(); + ProcessedFeedback const& feedback = + broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback()); + if (!feedback.IsInsufficient()) { + AllocationSiteRef site = feedback.AsLiteral().value(); DCHECK(!site.PointsToLiteral()); MapRef initial_map = native_context().GetInitialJSArrayMap(site.GetElementsKind()); @@ -1162,22 +1158,30 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) { CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - - FeedbackVectorRef feedback_vector(broker(), p.feedback().vector); - ObjectRef feedback = feedback_vector.get(p.feedback().slot); - // TODO(turbofan): we should consider creating a ProcessedFeedback for - // allocation sites/boiler plates so that we use GetFeedback here. Then - // we can eventually get rid of the additional copy of feedback slots that - // we currently have in FeedbackVectorData. 
- if (feedback.IsJSRegExp()) { - JSRegExpRef boilerplate = feedback.AsJSRegExp(); - Node* value = effect = AllocateLiteralRegExp(effect, control, boilerplate); + ProcessedFeedback const& feedback = + broker()->GetFeedbackForRegExpLiteral(p.feedback()); + if (!feedback.IsInsufficient()) { + JSRegExpRef literal = feedback.AsRegExpLiteral().value(); + Node* value = effect = AllocateLiteralRegExp(effect, control, literal); ReplaceWithValue(node, value, effect, control); return Replace(value); } return NoChange(); } +Reduction JSCreateLowering::ReduceJSGetTemplateObject(Node* node) { + DCHECK_EQ(IrOpcode::kJSGetTemplateObject, node->opcode()); + GetTemplateObjectParameters const& parameters = + GetTemplateObjectParametersOf(node->op()); + SharedFunctionInfoRef shared(broker(), parameters.shared()); + JSArrayRef template_object = shared.GetTemplateObject( + TemplateObjectDescriptionRef(broker(), parameters.description()), + parameters.feedback()); + Node* value = jsgraph()->Constant(template_object); + ReplaceWithValue(node, value); + return Replace(value); +} + Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) { DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode()); const CreateFunctionContextParameters& parameters = @@ -1628,7 +1632,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control, ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone()); inobject_fields.reserve(boilerplate_map.GetInObjectProperties()); int const boilerplate_nof = boilerplate_map.NumberOfOwnDescriptors(); - for (int i = 0; i < boilerplate_nof; ++i) { + for (InternalIndex i : InternalIndex::Range(boilerplate_nof)) { PropertyDetails const property_details = boilerplate_map.GetPropertyDetails(i); if (property_details.location() != kField) continue; diff --git a/chromium/v8/src/compiler/js-create-lowering.h b/chromium/v8/src/compiler/js-create-lowering.h index 44a3b213b76..2fb28ebfd48 100644 --- a/chromium/v8/src/compiler/js-create-lowering.h +++ b/chromium/v8/src/compiler/js-create-lowering.h @@ -67,6 +67,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final Reduction ReduceJSCreateCatchContext(Node* node); Reduction ReduceJSCreateBlockContext(Node* node); Reduction ReduceJSCreateGeneratorObject(Node* node); + Reduction ReduceJSGetTemplateObject(Node* node); Reduction ReduceNewArray( Node* node, Node* length, MapRef initial_map, ElementsKind elements_kind, AllocationType allocation, diff --git a/chromium/v8/src/compiler/js-generic-lowering.cc b/chromium/v8/src/compiler/js-generic-lowering.cc index d2a9b675f96..d419a804a57 100644 --- a/chromium/v8/src/compiler/js-generic-lowering.cc +++ b/chromium/v8/src/compiler/js-generic-lowering.cc @@ -236,14 +236,15 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) { } void JSGenericLowering::LowerJSGetIterator(Node* node) { - CallDescriptor::Flags flags = FrameStateFlagForCall(node); - const PropertyAccess& p = PropertyAccessOf(node->op()); - node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index())); - Node* vector = jsgraph()->HeapConstant(p.feedback().vector); - node->InsertInput(zone(), 2, vector); - Callable callable = - Builtins::CallableFor(isolate(), Builtins::kGetIteratorWithFeedback); - ReplaceWithStubCall(node, callable, flags); + // TODO(v8:9625): Currently, the GetIterator operator is desugared in the + // native context specialization phase. Thus, the following generic lowering + // would never be reachable. 
We can add a check in native context + // specialization to avoid desugaring the GetIterator operator when in the + // case of megamorphic feedback and here, add a call to the + // 'GetIteratorWithFeedback' builtin. This would reduce the size of the + // compiled code as it would insert 1 call to the builtin instead of 2 calls + // resulting from the generic lowering of the LoadNamed and Call operators. + UNREACHABLE(); } void JSGenericLowering::LowerJSStoreProperty(Node* node) { @@ -561,6 +562,10 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) { } } +void JSGenericLowering::LowerJSGetTemplateObject(Node* node) { + UNREACHABLE(); // Eliminated in native context specialization. +} + void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) { CallDescriptor::Flags flags = FrameStateFlagForCall(node); FeedbackParameter const& p = FeedbackParameterOf(node->op()); diff --git a/chromium/v8/src/compiler/js-heap-broker.cc b/chromium/v8/src/compiler/js-heap-broker.cc index 7466a80f851..9a725eb4e90 100644 --- a/chromium/v8/src/compiler/js-heap-broker.cc +++ b/chromium/v8/src/compiler/js-heap-broker.cc @@ -16,6 +16,7 @@ #include "src/compiler/bytecode-analysis.h" #include "src/compiler/graph-reducer.h" #include "src/compiler/per-isolate-compiler-cache.h" +#include "src/execution/protectors-inl.h" #include "src/init/bootstrapper.h" #include "src/objects/allocation-site-inl.h" #include "src/objects/api-callbacks.h" @@ -86,6 +87,11 @@ class ObjectData : public ZoneObject { ObjectDataKind kind() const { return kind_; } bool is_smi() const { return kind_ == kSmi; } +#ifdef DEBUG + enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed}; + mutable Usage used_status = Usage::kUnused; +#endif // DEBUG + private: Handle<Object> const object_; ObjectDataKind const kind_; @@ -420,7 +426,7 @@ class JSTypedArrayData : public JSObjectData { bool is_on_heap() const { return is_on_heap_; } size_t length() const { return length_; } - void* external_pointer() const { return external_pointer_; } + void* data_ptr() const { return data_ptr_; } void Serialize(JSHeapBroker* broker); bool serialized() const { return serialized_; } @@ -430,7 +436,7 @@ class JSTypedArrayData : public JSObjectData { private: bool const is_on_heap_; size_t const length_; - void* const external_pointer_; + void* const data_ptr_; bool serialized_ = false; HeapObjectData* buffer_ = nullptr; @@ -441,7 +447,7 @@ JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage, : JSObjectData(broker, storage, object), is_on_heap_(object->is_on_heap()), length_(object->length()), - external_pointer_(object->external_pointer()) {} + data_ptr_(object->DataPtr()) {} void JSTypedArrayData::Serialize(JSHeapBroker* broker) { if (serialized_) return; @@ -833,8 +839,7 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth, // Check the in-object properties. 
Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(), isolate); - int limit = boilerplate->map().NumberOfOwnDescriptors(); - for (int i = 0; i < limit; i++) { + for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) { PropertyDetails details = descriptors->GetDetails(i); if (details.location() != kField) continue; DCHECK_EQ(kData, details.kind()); @@ -962,9 +967,6 @@ class MapData : public HeapObjectData { bool supports_fast_array_resize() const { return supports_fast_array_resize_; } - bool IsMapOfTargetGlobalProxy() const { - return is_map_of_target_global_proxy_; - } bool is_abandoned_prototype_map() const { return is_abandoned_prototype_map_; } @@ -979,9 +981,10 @@ class MapData : public HeapObjectData { // Serialize a single (or all) own slot(s) of the descriptor array and recurse // on field owner(s). - void SerializeOwnDescriptor(JSHeapBroker* broker, int descriptor_index); + void SerializeOwnDescriptor(JSHeapBroker* broker, + InternalIndex descriptor_index); void SerializeOwnDescriptors(JSHeapBroker* broker); - ObjectData* GetStrongValue(int descriptor_index) const; + ObjectData* GetStrongValue(InternalIndex descriptor_index) const; DescriptorArrayData* instance_descriptors() const { return instance_descriptors_; } @@ -1027,7 +1030,6 @@ class MapData : public HeapObjectData { int const unused_property_fields_; bool const supports_fast_array_iteration_; bool const supports_fast_array_resize_; - bool const is_map_of_target_global_proxy_; bool const is_abandoned_prototype_map_; bool serialized_elements_kind_generalizations_ = false; @@ -1109,8 +1111,9 @@ bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) { DCHECK(!jsarray_map->is_dictionary_map()); Handle<Name> length_string = isolate->factory()->length_string(); DescriptorArray descriptors = jsarray_map->instance_descriptors(); - int number = descriptors.Search(*length_string, *jsarray_map); - DCHECK_NE(DescriptorArray::kNotFound, number); + // TODO(jkummerow): We could skip the search and hardcode number == 0. 
+ InternalIndex number = descriptors.Search(*length_string, *jsarray_map); + DCHECK(number.is_found()); return descriptors.GetDetails(number).IsReadOnly(); } @@ -1120,7 +1123,7 @@ bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) { map->prototype().IsJSArray() && isolate->IsAnyInitialArrayPrototype( handle(JSArray::cast(map->prototype()), isolate)) && - isolate->IsNoElementsProtectorIntact(); + Protectors::IsNoElementsIntact(isolate); } bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) { @@ -1154,8 +1157,6 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object) SupportsFastArrayIteration(broker->isolate(), object)), supports_fast_array_resize_( SupportsFastArrayResize(broker->isolate(), object)), - is_map_of_target_global_proxy_( - object->IsMapOfGlobalProxy(broker->target_native_context().object())), is_abandoned_prototype_map_(object->is_abandoned_prototype_map()), elements_kind_generalizations_(broker->zone()) {} @@ -1268,7 +1269,6 @@ class FeedbackVectorData : public HeapObjectData { double invocation_count() const { return invocation_count_; } void Serialize(JSHeapBroker* broker); - const ZoneVector<ObjectData*>& feedback() { return feedback_; } FeedbackCellData* GetClosureFeedbackCell(JSHeapBroker* broker, int index) const; @@ -1276,7 +1276,6 @@ class FeedbackVectorData : public HeapObjectData { double const invocation_count_; bool serialized_ = false; - ZoneVector<ObjectData*> feedback_; ZoneVector<ObjectData*> closure_feedback_cell_array_; }; @@ -1285,7 +1284,6 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker, Handle<FeedbackVector> object) : HeapObjectData(broker, storage, object), invocation_count_(object->invocation_count()), - feedback_(broker->zone()), closure_feedback_cell_array_(broker->zone()) {} FeedbackCellData* FeedbackVectorData::GetClosureFeedbackCell( @@ -1309,26 +1307,6 @@ void FeedbackVectorData::Serialize(JSHeapBroker* broker) { TraceScope tracer(broker, this, "FeedbackVectorData::Serialize"); Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object()); - DCHECK(feedback_.empty()); - feedback_.reserve(vector->length()); - for (int i = 0; i < vector->length(); ++i) { - MaybeObject value = vector->get(i); - ObjectData* slot_value = - value->IsObject() ? 
broker->GetOrCreateData(value->cast<Object>()) - : nullptr; - feedback_.push_back(slot_value); - if (slot_value == nullptr) continue; - - if (slot_value->IsAllocationSite() && - slot_value->AsAllocationSite()->IsFastLiteral()) { - slot_value->AsAllocationSite()->SerializeBoilerplate(broker); - } else if (slot_value->IsJSRegExp()) { - slot_value->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker); - } - } - DCHECK_EQ(vector->length(), feedback_.size()); - TRACE(broker, "Copied " << feedback_.size() << " slots"); - DCHECK(closure_feedback_cell_array_.empty()); int length = vector->closure_feedback_cell_array().length(); closure_feedback_cell_array_.reserve(length); @@ -1496,10 +1474,6 @@ class BytecodeArrayData : public FixedArrayBaseData { return *(Handle<Smi>::cast(constant_pool_[index]->object())); } - bool IsSerializedForCompilation() const { - return is_serialized_for_compilation_; - } - void SerializeForCompilation(JSHeapBroker* broker) { if (is_serialized_for_compilation_) return; @@ -1843,23 +1817,15 @@ class JSGlobalObjectData : public JSObjectData { public: JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<JSGlobalObject> object); -}; - -JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker, - ObjectData** storage, - Handle<JSGlobalObject> object) - : JSObjectData(broker, storage, object) {} - -class JSGlobalProxyData : public JSObjectData { - public: - JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSGlobalProxy> object); + bool IsDetached() const { return is_detached_; } PropertyCellData* GetPropertyCell( JSHeapBroker* broker, NameData* name, SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); private: + bool const is_detached_; + // Properties that either // (1) are known to exist as property cells on the global object, or // (2) are known not to (possibly they don't exist at all). 
@@ -1867,9 +1833,22 @@ class JSGlobalProxyData : public JSObjectData { ZoneVector<std::pair<NameData*, PropertyCellData*>> properties_; }; +JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker, + ObjectData** storage, + Handle<JSGlobalObject> object) + : JSObjectData(broker, storage, object), + is_detached_(object->IsDetached()), + properties_(broker->zone()) {} + +class JSGlobalProxyData : public JSObjectData { + public: + JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage, + Handle<JSGlobalProxy> object); +}; + JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage, Handle<JSGlobalProxy> object) - : JSObjectData(broker, storage, object), properties_(broker->zone()) {} + : JSObjectData(broker, storage, object) {} namespace { base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker, @@ -1888,7 +1867,7 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker, } } // namespace -PropertyCellData* JSGlobalProxyData::GetPropertyCell( +PropertyCellData* JSGlobalObjectData::GetPropertyCell( JSHeapBroker* broker, NameData* name, SerializationPolicy policy) { CHECK_NOT_NULL(name); for (auto const& p : properties_) { @@ -1911,6 +1890,13 @@ PropertyCellData* JSGlobalProxyData::GetPropertyCell( return result; } +class TemplateObjectDescriptionData : public HeapObjectData { + public: + TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage, + Handle<TemplateObjectDescription> object) + : HeapObjectData(broker, storage, object) {} +}; + class CodeData : public HeapObjectData { public: CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object) @@ -2001,20 +1987,20 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) { Handle<Map> map = Handle<Map>::cast(object()); int const number_of_own = map->NumberOfOwnDescriptors(); - for (int i = 0; i < number_of_own; ++i) { + for (InternalIndex i : InternalIndex::Range(number_of_own)) { SerializeOwnDescriptor(broker, i); } } -ObjectData* MapData::GetStrongValue(int descriptor_index) const { - auto data = instance_descriptors_->contents().find(descriptor_index); +ObjectData* MapData::GetStrongValue(InternalIndex descriptor_index) const { + auto data = instance_descriptors_->contents().find(descriptor_index.as_int()); if (data == instance_descriptors_->contents().end()) return nullptr; return data->second.value; } void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, - int descriptor_index) { + InternalIndex descriptor_index) { TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor"); Handle<Map> map = Handle<Map>::cast(object()); @@ -2025,8 +2011,8 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, ZoneMap<int, PropertyDescriptor>& contents = instance_descriptors()->contents(); - CHECK_LT(descriptor_index, map->NumberOfOwnDescriptors()); - if (contents.find(descriptor_index) != contents.end()) return; + CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors()); + if (contents.find(descriptor_index.as_int()) != contents.end()) return; Isolate* const isolate = broker->isolate(); auto descriptors = @@ -2051,14 +2037,14 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index)); d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index); } - contents[descriptor_index] = d; + contents[descriptor_index.as_int()] = d; if (d.details.location() == kField) { // Recurse on the owner map. 
d.field_owner->SerializeOwnDescriptor(broker, descriptor_index); } - TRACE(broker, "Copied descriptor " << descriptor_index << " into " + TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into " << instance_descriptors_ << " (" << contents.size() << " total)"); } @@ -2146,8 +2132,7 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, // Check the in-object properties. Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(), isolate); - int const limit = boilerplate->map().NumberOfOwnDescriptors(); - for (int i = 0; i < limit; i++) { + for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) { PropertyDetails details = descriptors->GetDetails(i); if (details.location() != kField) continue; DCHECK_EQ(kData, details.kind()); @@ -2210,6 +2195,12 @@ void JSRegExpData::SerializeAsRegExpBoilerplate(JSHeapBroker* broker) { } bool ObjectRef::equals(const ObjectRef& other) const { +#ifdef DEBUG + if (broker()->mode() == JSHeapBroker::kSerialized && + data_->used_status == ObjectData::Usage::kUnused) { + data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; + } +#endif // DEBUG return data_ == other.data_; } @@ -2269,7 +2260,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone, TRACE(this, "Constructing heap broker"); } -std::ostream& JSHeapBroker::Trace() { +std::ostream& JSHeapBroker::Trace() const { return trace_out_ << "[" << this << "] " << std::string(trace_indentation_ * 2, ' '); } @@ -2280,10 +2271,92 @@ void JSHeapBroker::StopSerializing() { mode_ = kSerialized; } +#ifdef DEBUG +void JSHeapBroker::PrintRefsAnalysis() const { + // Usage counts + size_t used_total = 0, unused_total = 0, identity_used_total = 0; + for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr; + ref = refs_->Next(ref)) { + switch (ref->value->used_status) { + case ObjectData::Usage::kUnused: + ++unused_total; + break; + case ObjectData::Usage::kOnlyIdentityUsed: + ++identity_used_total; + break; + case ObjectData::Usage::kDataUsed: + ++used_total; + break; + } + } + + // Ref types analysis + TRACE_BROKER_MEMORY( + this, "Refs: " << refs_->occupancy() << "; data used: " << used_total + << "; only identity used: " << identity_used_total + << "; unused: " << unused_total); + size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0; + size_t used[LAST_TYPE + 1] = {0}; + size_t unused[LAST_TYPE + 1] = {0}; + size_t identity_used[LAST_TYPE + 1] = {0}; + for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr; + ref = refs_->Next(ref)) { + if (ref->value->is_smi()) { + switch (ref->value->used_status) { + case ObjectData::Usage::kUnused: + ++unused_smis; + break; + case ObjectData::Usage::kOnlyIdentityUsed: + ++identity_used_smis; + break; + case ObjectData::Usage::kDataUsed: + ++used_smis; + break; + } + } else { + InstanceType instance_type = + static_cast<const HeapObjectData*>(ref->value) + ->map() + ->instance_type(); + CHECK_LE(FIRST_TYPE, instance_type); + CHECK_LE(instance_type, LAST_TYPE); + switch (ref->value->used_status) { + case ObjectData::Usage::kUnused: + ++unused[instance_type]; + break; + case ObjectData::Usage::kOnlyIdentityUsed: + ++identity_used[instance_type]; + break; + case ObjectData::Usage::kDataUsed: + ++used[instance_type]; + break; + } + } + } + + TRACE_BROKER_MEMORY( + this, "Smis: " << used_smis + identity_used_smis + unused_smis + << "; data used: " << used_smis << "; only identity used: " + << identity_used_smis << "; unused: " << unused_smis); + for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; 
++i) { + size_t total = used[i] + identity_used[i] + unused[i]; + if (total == 0) continue; + TRACE_BROKER_MEMORY( + this, InstanceType(i) << ": " << total << "; data used: " << used[i] + << "; only identity used: " << identity_used[i] + << "; unused: " << unused[i]); + } +} +#endif // DEBUG + void JSHeapBroker::Retire() { CHECK_EQ(mode_, kSerialized); TRACE(this, "Retiring"); mode_ = kRetired; + +#ifdef DEBUG + PrintRefsAnalysis(); +#endif // DEBUG } bool JSHeapBroker::SerializingAllowed() const { return mode() == kSerializing; } @@ -2473,6 +2546,7 @@ void JSHeapBroker::InitializeAndStartSerializing( GetOrCreateData(f->empty_fixed_array()); GetOrCreateData(f->empty_string()); GetOrCreateData(f->eval_context_map()); + GetOrCreateData(f->exec_string()); GetOrCreateData(f->false_string()); GetOrCreateData(f->false_value()); GetOrCreateData(f->fixed_array_map()); @@ -2480,11 +2554,13 @@ void JSHeapBroker::InitializeAndStartSerializing( GetOrCreateData(f->fixed_double_array_map()); GetOrCreateData(f->function_context_map()); GetOrCreateData(f->function_string()); + GetOrCreateData(f->has_instance_symbol()); GetOrCreateData(f->heap_number_map()); GetOrCreateData(f->length_string()); GetOrCreateData(f->many_closures_cell_map()); GetOrCreateData(f->minus_zero_value()); GetOrCreateData(f->name_dictionary_map()); + GetOrCreateData(f->name_string()); GetOrCreateData(f->NaN_string()); GetOrCreateData(f->null_map()); GetOrCreateData(f->null_string()); @@ -2495,6 +2571,7 @@ void JSHeapBroker::InitializeAndStartSerializing( GetOrCreateData(f->optimized_out()); GetOrCreateData(f->optimized_out_map()); GetOrCreateData(f->property_array_map()); + GetOrCreateData(f->prototype_string()); GetOrCreateData(f->ReflectHas_string()); GetOrCreateData(f->ReflectGet_string()); GetOrCreateData(f->sloppy_arguments_elements_map()); @@ -2505,6 +2582,7 @@ void JSHeapBroker::InitializeAndStartSerializing( GetOrCreateData(f->termination_exception_map()); GetOrCreateData(f->the_hole_map()); GetOrCreateData(f->the_hole_value()); + GetOrCreateData(f->then_string()); GetOrCreateData(f->true_string()); GetOrCreateData(f->true_value()); GetOrCreateData(f->undefined_map()); @@ -2517,7 +2595,9 @@ void JSHeapBroker::InitializeAndStartSerializing( GetOrCreateData(f->array_buffer_detaching_protector()) ->AsPropertyCell() ->Serialize(this); - GetOrCreateData(f->array_constructor_protector())->AsCell()->Serialize(this); + GetOrCreateData(f->array_constructor_protector()) + ->AsPropertyCell() + ->Serialize(this); GetOrCreateData(f->array_iterator_protector()) ->AsPropertyCell() ->Serialize(this); @@ -2537,7 +2617,9 @@ void JSHeapBroker::InitializeAndStartSerializing( GetOrCreateData(f->promise_then_protector()) ->AsPropertyCell() ->Serialize(this); - GetOrCreateData(f->string_length_protector())->AsCell()->Serialize(this); + GetOrCreateData(f->string_length_protector()) + ->AsPropertyCell() + ->Serialize(this); // - CEntry stub GetOrCreateData( CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true)); @@ -2719,16 +2801,6 @@ bool MapRef::supports_fast_array_resize() const { return data()->AsMap()->supports_fast_array_resize(); } -bool MapRef::IsMapOfTargetGlobalProxy() const { - if (broker()->mode() == JSHeapBroker::kDisabled) { - AllowHandleDereference allow_handle_dereference; - AllowHandleAllocation handle_allocation; - return object()->IsMapOfGlobalProxy( - broker()->target_native_context().object()); - } - return data()->AsMap()->IsMapOfTargetGlobalProxy(); -} - int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() 
const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference allow_handle_dereference; @@ -2785,18 +2857,6 @@ OddballType MapRef::oddball_type() const { return OddballType::kOther; } -ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const { - if (broker()->mode() == JSHeapBroker::kDisabled) { - AllowHandleAllocation handle_allocation; - AllowHandleDereference handle_dereference; - Handle<Object> value(object()->Get(slot)->cast<Object>(), - broker()->isolate()); - return ObjectRef(broker(), value); - } - int i = FeedbackVector::GetIndex(slot); - return ObjectRef(broker(), data()->AsFeedbackVector()->feedback().at(i)); -} - FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; @@ -2854,6 +2914,11 @@ bool AllocationSiteRef::IsFastLiteral() const { return data()->AsAllocationSite()->IsFastLiteral(); } +void AllocationSiteRef::SerializeBoilerplate() { + CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + data()->AsAllocationSite()->SerializeBoilerplate(broker()); +} + void JSObjectRef::SerializeElements() { CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); data()->AsJSObject()->SerializeElements(broker()); @@ -2880,13 +2945,13 @@ void JSObjectRef::EnsureElementsTenured() { CHECK(data()->AsJSObject()->cow_or_empty_elements_tenured()); } -FieldIndex MapRef::GetFieldIndexFor(int descriptor_index) const { +FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference allow_handle_dereference; return FieldIndex::ForDescriptor(*object(), descriptor_index); } DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors(); - return descriptors->contents().at(descriptor_index).field_index; + return descriptors->contents().at(descriptor_index.as_int()).field_index; } int MapRef::GetInObjectPropertyOffset(int i) const { @@ -2897,16 +2962,17 @@ int MapRef::GetInObjectPropertyOffset(int i) const { return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize; } -PropertyDetails MapRef::GetPropertyDetails(int descriptor_index) const { +PropertyDetails MapRef::GetPropertyDetails( + InternalIndex descriptor_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference allow_handle_dereference; return object()->instance_descriptors().GetDetails(descriptor_index); } DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors(); - return descriptors->contents().at(descriptor_index).details; + return descriptors->contents().at(descriptor_index.as_int()).details; } -NameRef MapRef::GetPropertyKey(int descriptor_index) const { +NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference allow_handle_dereference; @@ -2916,7 +2982,8 @@ NameRef MapRef::GetPropertyKey(int descriptor_index) const { broker()->isolate())); } DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors(); - return NameRef(broker(), descriptors->contents().at(descriptor_index).key); + return NameRef(broker(), + descriptors->contents().at(descriptor_index.as_int()).key); } bool MapRef::IsFixedCowArrayMap() const { @@ -2926,10 +2993,10 @@ bool MapRef::IsFixedCowArrayMap() const { } bool MapRef::IsPrimitiveMap() const { - return instance_type() <= LAST_PRIMITIVE_TYPE; + return instance_type() <= 
LAST_PRIMITIVE_HEAP_OBJECT_TYPE; } -MapRef MapRef::FindFieldOwner(int descriptor_index) const { +MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference allow_handle_dereference; @@ -2939,11 +3006,12 @@ MapRef MapRef::FindFieldOwner(int descriptor_index) const { return MapRef(broker(), owner); } DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors(); - return MapRef(broker(), - descriptors->contents().at(descriptor_index).field_owner); + return MapRef( + broker(), + descriptors->contents().at(descriptor_index.as_int()).field_owner); } -ObjectRef MapRef::GetFieldType(int descriptor_index) const { +ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference allow_handle_dereference; @@ -2953,18 +3021,21 @@ ObjectRef MapRef::GetFieldType(int descriptor_index) const { return ObjectRef(broker(), field_type); } DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors(); - return ObjectRef(broker(), - descriptors->contents().at(descriptor_index).field_type); + return ObjectRef( + broker(), + descriptors->contents().at(descriptor_index.as_int()).field_type); } -bool MapRef::IsUnboxedDoubleField(int descriptor_index) const { +bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference allow_handle_dereference; return object()->IsUnboxedDoubleField( FieldIndex::ForDescriptor(*object(), descriptor_index)); } DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors(); - return descriptors->contents().at(descriptor_index).is_unboxed_double_field; + return descriptors->contents() + .at(descriptor_index.as_int()) + .is_unboxed_double_field; } uint16_t StringRef::GetFirstChar() { @@ -3074,11 +3145,6 @@ Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const { return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index); } -bool BytecodeArrayRef::IsSerializedForCompilation() const { - if (broker()->mode() == JSHeapBroker::kDisabled) return true; - return data()->AsBytecodeArray()->IsSerializedForCompilation(); -} - void BytecodeArrayRef::SerializeForCompilation() { if (broker()->mode() == JSHeapBroker::kDisabled) return; data()->AsBytecodeArray()->SerializeForCompilation(broker()); @@ -3191,6 +3257,8 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype) BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared) BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector) +BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached) + BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap) BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length) BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer) @@ -3345,7 +3413,7 @@ BIMODAL_ACCESSOR_C(String, int, length) BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value) -ObjectRef MapRef::GetStrongValue(int descriptor_index) const { +ObjectRef MapRef::GetStrongValue(InternalIndex descriptor_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference allow_handle_dereference; return ObjectRef(broker(), @@ -3376,12 +3444,12 @@ base::Optional<MapRef> MapRef::FindRootMap() const { return base::nullopt; } -void* JSTypedArrayRef::external_pointer() const { +void* JSTypedArrayRef::data_ptr() const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleDereference 
allow_handle_dereference; - return object()->external_pointer(); + return object()->DataPtr(); } - return data()->AsJSTypedArray()->external_pointer(); + return data()->AsJSTypedArray()->data_ptr(); } bool MapRef::IsInobjectSlackTrackingInProgress() const { @@ -3774,12 +3842,37 @@ ObjectRef JSRegExpRef::source() const { return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source()); } -Handle<Object> ObjectRef::object() const { return data_->object(); } +void JSRegExpRef::SerializeAsRegExpBoilerplate() { + CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + JSObjectRef::data()->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker()); +} +Handle<Object> ObjectRef::object() const { +#ifdef DEBUG + if (broker()->mode() == JSHeapBroker::kSerialized && + data_->used_status == ObjectData::Usage::kUnused) { + data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; + } +#endif // DEBUG + return data_->object(); +} + +#ifdef DEBUG #define DEF_OBJECT_GETTER(T) \ Handle<T> T##Ref::object() const { \ + if (broker()->mode() == JSHeapBroker::kSerialized && \ + data_->used_status == ObjectData::Usage::kUnused) { \ + data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; \ + } \ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \ } +#else +#define DEF_OBJECT_GETTER(T) \ + Handle<T> T##Ref::object() const { \ + return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \ + } +#endif // DEBUG + HEAP_BROKER_OBJECT_LIST(DEF_OBJECT_GETTER) #undef DEF_OBJECT_GETTER @@ -3791,7 +3884,12 @@ ObjectData* ObjectRef::data() const { CHECK_NE(data_->kind(), kSerializedHeapObject); return data_; case JSHeapBroker::kSerializing: + CHECK_NE(data_->kind(), kUnserializedHeapObject); + return data_; case JSHeapBroker::kSerialized: +#ifdef DEBUG + data_->used_status = ObjectData::Usage::kDataUsed; +#endif // DEBUG CHECK_NE(data_->kind(), kUnserializedHeapObject); return data_; case JSHeapBroker::kRetired: @@ -3857,60 +3955,50 @@ bool JSFunctionRef::serialized() const { return data()->AsJSFunction()->serialized(); } -bool JSFunctionRef::IsSerializedForCompilation() const { - if (broker()->mode() == JSHeapBroker::kDisabled) { - return handle(object()->shared(), broker()->isolate())->HasBytecodeArray(); - } - - // We get a crash if we try to access the shared() getter without - // checking for `serialized` first. Also it's possible to have a - // JSFunctionRef without a feedback vector. - return serialized() && has_feedback_vector() && - shared().IsSerializedForCompilation(feedback_vector()); -} - JSArrayRef SharedFunctionInfoRef::GetTemplateObject( - ObjectRef description, FeedbackVectorRef vector, FeedbackSlot slot, + TemplateObjectDescriptionRef description, FeedbackSource const& source, SerializationPolicy policy) { - // Look in the feedback vector for the array. A Smi indicates that it's - // not yet cached here. - ObjectRef candidate = vector.get(slot); - if (!candidate.IsSmi()) { - return candidate.AsJSArray(); + // First, see if we have processed feedback from the vector, respecting + // the serialization policy. + ProcessedFeedback const& feedback = + policy == SerializationPolicy::kSerializeIfNeeded + ? 
broker()->ProcessFeedbackForTemplateObject(source) + : broker()->GetFeedbackForTemplateObject(source); + + if (!feedback.IsInsufficient()) { + return feedback.AsTemplateObject().value(); } if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference allow_handle_dereference; - Handle<TemplateObjectDescription> tod = - Handle<TemplateObjectDescription>::cast(description.object()); Handle<JSArray> template_object = TemplateObjectDescription::GetTemplateObject( - broker()->isolate(), broker()->target_native_context().object(), - tod, object(), slot.ToInt()); + isolate(), broker()->target_native_context().object(), + description.object(), object(), source.slot.ToInt()); return JSArrayRef(broker(), template_object); } - JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot); + JSArrayData* array = + data()->AsSharedFunctionInfo()->GetTemplateObject(source.slot); if (array != nullptr) return JSArrayRef(broker(), array); CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded); CHECK(broker()->SerializingAllowed()); - Handle<TemplateObjectDescription> tod = - Handle<TemplateObjectDescription>::cast(description.object()); Handle<JSArray> template_object = TemplateObjectDescription::GetTemplateObject( - broker()->isolate(), broker()->target_native_context().object(), tod, - object(), slot.ToInt()); + broker()->isolate(), broker()->target_native_context().object(), + description.object(), object(), source.slot.ToInt()); array = broker()->GetOrCreateData(template_object)->AsJSArray(); - data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array); + data()->AsSharedFunctionInfo()->SetTemplateObject(source.slot, array); return JSArrayRef(broker(), array); } void SharedFunctionInfoRef::SetSerializedForCompilation( FeedbackVectorRef feedback) { CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + CHECK(HasBytecodeArray()); data()->AsSharedFunctionInfo()->SetSerializedForCompilation(broker(), feedback); } @@ -3937,7 +4025,7 @@ SharedFunctionInfoRef::function_template_info() const { bool SharedFunctionInfoRef::IsSerializedForCompilation( FeedbackVectorRef feedback) const { - if (broker()->mode() == JSHeapBroker::kDisabled) return true; + if (broker()->mode() == JSHeapBroker::kDisabled) return HasBytecodeArray(); return data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback); } @@ -3953,19 +4041,19 @@ void MapRef::SerializeOwnDescriptors() { data()->AsMap()->SerializeOwnDescriptors(broker()); } -void MapRef::SerializeOwnDescriptor(int descriptor_index) { +void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) { if (broker()->mode() == JSHeapBroker::kDisabled) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index); } -bool MapRef::serialized_own_descriptor(int descriptor_index) const { - CHECK_LT(descriptor_index, NumberOfOwnDescriptors()); +bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const { + CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors()); if (broker()->mode() == JSHeapBroker::kDisabled) return true; DescriptorArrayData* desc_array_data = data()->AsMap()->instance_descriptors(); if (!desc_array_data) return false; - return desc_array_data->contents().find(descriptor_index) != + return desc_array_data->contents().find(descriptor_index.as_int()) != desc_array_data->contents().end(); } @@ -4027,14 +4115,14 @@ void FunctionTemplateInfoRef::SerializeCallCode() { 
data()->AsFunctionTemplateInfo()->SerializeCallCode(broker()); } -base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell( +base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell( NameRef const& name, SerializationPolicy policy) const { if (broker()->mode() == JSHeapBroker::kDisabled) { return GetPropertyCellFromHeap(broker(), name.object()); } PropertyCellData* property_cell_data = - data()->AsJSGlobalProxy()->GetPropertyCell(broker(), - name.data()->AsName(), policy); + data()->AsJSGlobalObject()->GetPropertyCell( + broker(), name.data()->AsName(), policy); if (property_cell_data == nullptr) return base::nullopt; return PropertyCellRef(broker(), property_cell_data); } @@ -4264,6 +4352,7 @@ void JSHeapBroker::SetFeedback(FeedbackSource const& source, } bool JSHeapBroker::HasFeedback(FeedbackSource const& source) const { + DCHECK(source.IsValid()); return feedback_.find(source) != feedback_.end(); } @@ -4314,7 +4403,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess( MapHandles maps; nexus.ExtractMaps(&maps); - DCHECK_NE(nexus.ic_state(), PREMONOMORPHIC); if (!maps.empty()) { maps = GetRelevantReceiverMaps(isolate(), maps); if (maps.empty()) return *new (zone()) InsufficientFeedback(kind); @@ -4423,6 +4511,47 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf( return *new (zone()) InstanceOfFeedback(optional_constructor, nexus.kind()); } +ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral( + FeedbackSource const& source) { + FeedbackNexus nexus(source.vector, source.slot); + HeapObject object; + if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) { + return *new (zone()) InsufficientFeedback(nexus.kind()); + } + + AllocationSiteRef site(this, handle(object, isolate())); + if (site.IsFastLiteral()) { + site.SerializeBoilerplate(); + } + + return *new (zone()) LiteralFeedback(site, nexus.kind()); +} + +ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral( + FeedbackSource const& source) { + FeedbackNexus nexus(source.vector, source.slot); + HeapObject object; + if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) { + return *new (zone()) InsufficientFeedback(nexus.kind()); + } + + JSRegExpRef regexp(this, handle(object, isolate())); + regexp.SerializeAsRegExpBoilerplate(); + return *new (zone()) RegExpLiteralFeedback(regexp, nexus.kind()); +} + +ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject( + FeedbackSource const& source) { + FeedbackNexus nexus(source.vector, source.slot); + HeapObject object; + if (nexus.IsUninitialized() || !nexus.GetFeedback()->GetHeapObject(&object)) { + return *new (zone()) InsufficientFeedback(nexus.kind()); + } + + JSArrayRef array(this, handle(object, isolate())); + return *new (zone()) TemplateObjectFeedback(array, nexus.kind()); +} + ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall( FeedbackSource const& source) { FeedbackNexus nexus(source.vector, source.slot); @@ -4494,6 +4623,50 @@ ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess( : ProcessFeedbackForGlobalAccess(source); } +ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral( + FeedbackSource const& source) { + return FLAG_concurrent_inlining + ? GetFeedback(source) + : ProcessFeedbackForArrayOrObjectLiteral(source); +} + +ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral( + FeedbackSource const& source) { + return FLAG_concurrent_inlining ? 
GetFeedback(source) + : ProcessFeedbackForRegExpLiteral(source); +} + +ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject( + FeedbackSource const& source) { + return FLAG_concurrent_inlining ? GetFeedback(source) + : ProcessFeedbackForTemplateObject(source); +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + ProcessedFeedback const& feedback = + ReadFeedbackForArrayOrObjectLiteral(source); + SetFeedback(source, &feedback); + return feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + ProcessedFeedback const& feedback = ReadFeedbackForRegExpLiteral(source); + SetFeedback(source, &feedback); + return feedback; +} + +ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForTemplateObject( + FeedbackSource const& source) { + if (HasFeedback(source)) return GetFeedback(source); + ProcessedFeedback const& feedback = ReadFeedbackForTemplateObject(source); + SetFeedback(source, &feedback); + return feedback; +} + ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForBinaryOperation( FeedbackSource const& source) { if (HasFeedback(source)) return GetFeedback(source); @@ -4649,9 +4822,10 @@ void ElementAccessFeedback::AddGroup(TransitionGroup&& group) { } std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) { - if (ref.broker()->mode() == JSHeapBroker::kDisabled) { - // If the broker is disabled we cannot be in a background thread so it's - // safe to read the heap. + if (ref.broker()->mode() == JSHeapBroker::kDisabled || + !FLAG_concurrent_recompilation) { + // We cannot be in a background thread so it's safe to read the heap. 
+ AllowHandleDereference allow_handle_dereference; return os << ref.data() << " {" << ref.object() << "}"; } else { return os << ref.data(); @@ -4733,6 +4907,21 @@ NamedAccessFeedback const& ProcessedFeedback::AsNamedAccess() const { return *static_cast<NamedAccessFeedback const*>(this); } +LiteralFeedback const& ProcessedFeedback::AsLiteral() const { + CHECK_EQ(kLiteral, kind()); + return *static_cast<LiteralFeedback const*>(this); +} + +RegExpLiteralFeedback const& ProcessedFeedback::AsRegExpLiteral() const { + CHECK_EQ(kRegExpLiteral, kind()); + return *static_cast<RegExpLiteralFeedback const*>(this); +} + +TemplateObjectFeedback const& ProcessedFeedback::AsTemplateObject() const { + CHECK_EQ(kTemplateObject, kind()); + return *static_cast<TemplateObjectFeedback const*>(this); +} + BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis( Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id, bool analyze_liveness, SerializationPolicy policy) { diff --git a/chromium/v8/src/compiler/js-heap-broker.h b/chromium/v8/src/compiler/js-heap-broker.h index 8c2622bf488..c9667a2fedf 100644 --- a/chromium/v8/src/compiler/js-heap-broker.h +++ b/chromium/v8/src/compiler/js-heap-broker.h @@ -34,6 +34,12 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); broker->Trace() << x << '\n'; \ } while (false) +#define TRACE_BROKER_MEMORY(broker, x) \ + do { \ + if (broker->tracing_enabled() && FLAG_trace_heap_broker_memory) \ + broker->Trace() << x << std::endl; \ + } while (false) + #define TRACE_BROKER_MISSING(broker, x) \ do { \ if (broker->tracing_enabled()) \ @@ -86,6 +92,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker { void Retire(); bool SerializingAllowed() const; +#ifdef DEBUG + void PrintRefsAnalysis() const; +#endif // DEBUG + // Returns nullptr iff handle unknown. ObjectData* GetData(Handle<Object>) const; // Never returns nullptr. 
@@ -125,6 +135,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker { FeedbackSource const& source); ProcessedFeedback const& GetFeedbackForInstanceOf( FeedbackSource const& source); + ProcessedFeedback const& GetFeedbackForArrayOrObjectLiteral( + FeedbackSource const& source); + ProcessedFeedback const& GetFeedbackForRegExpLiteral( + FeedbackSource const& source); + ProcessedFeedback const& GetFeedbackForTemplateObject( + FeedbackSource const& source); ProcessedFeedback const& GetFeedbackForPropertyAccess( FeedbackSource const& source, AccessMode mode, base::Optional<NameRef> static_name); @@ -143,6 +159,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker { ProcessedFeedback const& ProcessFeedbackForPropertyAccess( FeedbackSource const& source, AccessMode mode, base::Optional<NameRef> static_name); + ProcessedFeedback const& ProcessFeedbackForArrayOrObjectLiteral( + FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForRegExpLiteral( + FeedbackSource const& source); + ProcessedFeedback const& ProcessFeedbackForTemplateObject( + FeedbackSource const& source); bool FeedbackIsInsufficient(FeedbackSource const& source) const; @@ -157,7 +179,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker { StringRef GetTypedArrayStringTag(ElementsKind kind); - std::ostream& Trace(); + std::ostream& Trace() const; void IncrementTracingIndentation(); void DecrementTracingIndentation(); @@ -182,6 +204,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker { ProcessedFeedback const& ReadFeedbackForPropertyAccess( FeedbackSource const& source, AccessMode mode, base::Optional<NameRef> static_name); + ProcessedFeedback const& ReadFeedbackForArrayOrObjectLiteral( + FeedbackSource const& source); + ProcessedFeedback const& ReadFeedbackForRegExpLiteral( + FeedbackSource const& source); + ProcessedFeedback const& ReadFeedbackForTemplateObject( + FeedbackSource const& source); void InitializeRefsMap(); void CollectArrayAndObjectPrototypes(); @@ -199,7 +227,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker { array_and_object_prototypes_; BrokerMode mode_ = kDisabled; bool const tracing_enabled_; - StdoutStream trace_out_; + mutable StdoutStream trace_out_; unsigned trace_indentation_ = 0; PerIsolateCompilerCache* compiler_cache_ = nullptr; ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*, diff --git a/chromium/v8/src/compiler/js-heap-copy-reducer.cc b/chromium/v8/src/compiler/js-heap-copy-reducer.cc index bf4b79bf92c..13bd6a12828 100644 --- a/chromium/v8/src/compiler/js-heap-copy-reducer.cc +++ b/chromium/v8/src/compiler/js-heap-copy-reducer.cc @@ -12,6 +12,7 @@ #include "src/heap/factory-inl.h" #include "src/objects/map.h" #include "src/objects/scope-info.h" +#include "src/objects/template-objects.h" namespace v8 { namespace internal { @@ -27,172 +28,145 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; } Reduction JSHeapCopyReducer::Reduce(Node* node) { switch (node->opcode()) { case IrOpcode::kHeapConstant: { - if (!FLAG_concurrent_inlining) { - ObjectRef object(broker(), HeapConstantOf(node->op())); - if (object.IsJSFunction()) object.AsJSFunction().Serialize(); - if (object.IsJSObject()) { - object.AsJSObject().SerializeObjectCreateMap(); - } - if (object.IsSourceTextModule()) { - object.AsSourceTextModule().Serialize(); - } + ObjectRef object(broker(), HeapConstantOf(node->op())); + if (object.IsJSFunction()) object.AsJSFunction().Serialize(); + if (object.IsJSObject()) { + object.AsJSObject().SerializeObjectCreateMap(); + } + if (object.IsSourceTextModule()) { + object.AsSourceTextModule().Serialize(); } break; } 
case IrOpcode::kJSCreateArray: { - if (!FLAG_concurrent_inlining) { - CreateArrayParameters const& p = CreateArrayParametersOf(node->op()); - Handle<AllocationSite> site; - if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site); - } + CreateArrayParameters const& p = CreateArrayParametersOf(node->op()); + Handle<AllocationSite> site; + if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site); break; } case IrOpcode::kJSCreateArguments: { - if (!FLAG_concurrent_inlining) { - Node* const frame_state = NodeProperties::GetFrameStateInput(node); - FrameStateInfo state_info = FrameStateInfoOf(frame_state->op()); - SharedFunctionInfoRef shared( - broker(), state_info.shared_info().ToHandleChecked()); - } + Node* const frame_state = NodeProperties::GetFrameStateInput(node); + FrameStateInfo state_info = FrameStateInfoOf(frame_state->op()); + SharedFunctionInfoRef shared(broker(), + state_info.shared_info().ToHandleChecked()); break; } case IrOpcode::kJSCreateBlockContext: { - if (!FLAG_concurrent_inlining) { - ScopeInfoRef(broker(), ScopeInfoOf(node->op())); - } + ScopeInfoRef(broker(), ScopeInfoOf(node->op())); break; } case IrOpcode::kJSCreateBoundFunction: { - if (!FLAG_concurrent_inlining) { - CreateBoundFunctionParameters const& p = - CreateBoundFunctionParametersOf(node->op()); - MapRef(broker(), p.map()); - } + CreateBoundFunctionParameters const& p = + CreateBoundFunctionParametersOf(node->op()); + MapRef(broker(), p.map()); break; } case IrOpcode::kJSCreateCatchContext: { - if (!FLAG_concurrent_inlining) { - ScopeInfoRef(broker(), ScopeInfoOf(node->op())); - } + ScopeInfoRef(broker(), ScopeInfoOf(node->op())); break; } case IrOpcode::kJSCreateClosure: { - if (!FLAG_concurrent_inlining) { - CreateClosureParameters const& p = - CreateClosureParametersOf(node->op()); - SharedFunctionInfoRef(broker(), p.shared_info()); - FeedbackCellRef(broker(), p.feedback_cell()); - HeapObjectRef(broker(), p.code()); - } + CreateClosureParameters const& p = CreateClosureParametersOf(node->op()); + SharedFunctionInfoRef(broker(), p.shared_info()); + FeedbackCellRef(broker(), p.feedback_cell()); + HeapObjectRef(broker(), p.code()); break; } case IrOpcode::kJSCreateEmptyLiteralArray: { - if (!FLAG_concurrent_inlining) { - FeedbackParameter const& p = FeedbackParameterOf(node->op()); - FeedbackVectorRef(broker(), p.feedback().vector).Serialize(); + FeedbackParameter const& p = FeedbackParameterOf(node->op()); + if (p.feedback().IsValid()) { + broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback()); } break; } case IrOpcode::kJSCreateFunctionContext: { - if (!FLAG_concurrent_inlining) { - CreateFunctionContextParameters const& p = - CreateFunctionContextParametersOf(node->op()); - ScopeInfoRef(broker(), p.scope_info()); - } + CreateFunctionContextParameters const& p = + CreateFunctionContextParametersOf(node->op()); + ScopeInfoRef(broker(), p.scope_info()); break; } case IrOpcode::kJSCreateLiteralArray: case IrOpcode::kJSCreateLiteralObject: { - if (!FLAG_concurrent_inlining) { - CreateLiteralParameters const& p = - CreateLiteralParametersOf(node->op()); - FeedbackVectorRef(broker(), p.feedback().vector).Serialize(); + CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); + if (p.feedback().IsValid()) { + broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback()); } break; } case IrOpcode::kJSCreateLiteralRegExp: { - if (!FLAG_concurrent_inlining) { - CreateLiteralParameters const& p = - CreateLiteralParametersOf(node->op()); - FeedbackVectorRef(broker(), 
p.feedback().vector).Serialize(); + CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); + if (p.feedback().IsValid()) { + broker()->ProcessFeedbackForRegExpLiteral(p.feedback()); } break; } + case IrOpcode::kJSGetTemplateObject: { + GetTemplateObjectParameters const& p = + GetTemplateObjectParametersOf(node->op()); + SharedFunctionInfoRef shared(broker(), p.shared()); + TemplateObjectDescriptionRef description(broker(), p.description()); + shared.GetTemplateObject(description, p.feedback(), + SerializationPolicy::kSerializeIfNeeded); + break; + } case IrOpcode::kJSCreateWithContext: { - if (!FLAG_concurrent_inlining) { - ScopeInfoRef(broker(), ScopeInfoOf(node->op())); - } + ScopeInfoRef(broker(), ScopeInfoOf(node->op())); break; } case IrOpcode::kJSLoadNamed: { - if (!FLAG_concurrent_inlining) { - NamedAccess const& p = NamedAccessOf(node->op()); - NameRef name(broker(), p.name()); - if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForPropertyAccess(p.feedback(), - AccessMode::kLoad, name); - } + NamedAccess const& p = NamedAccessOf(node->op()); + NameRef name(broker(), p.name()); + if (p.feedback().IsValid()) { + broker()->ProcessFeedbackForPropertyAccess(p.feedback(), + AccessMode::kLoad, name); } break; } case IrOpcode::kJSStoreNamed: { - if (!FLAG_concurrent_inlining) { - NamedAccess const& p = NamedAccessOf(node->op()); - NameRef name(broker(), p.name()); - } + NamedAccess const& p = NamedAccessOf(node->op()); + NameRef name(broker(), p.name()); break; } case IrOpcode::kStoreField: case IrOpcode::kLoadField: { - if (!FLAG_concurrent_inlining) { - FieldAccess access = FieldAccessOf(node->op()); - Handle<Map> map_handle; - if (access.map.ToHandle(&map_handle)) { - MapRef(broker(), map_handle); - } - Handle<Name> name_handle; - if (access.name.ToHandle(&name_handle)) { - NameRef(broker(), name_handle); - } + FieldAccess access = FieldAccessOf(node->op()); + Handle<Map> map_handle; + if (access.map.ToHandle(&map_handle)) { + MapRef(broker(), map_handle); + } + Handle<Name> name_handle; + if (access.name.ToHandle(&name_handle)) { + NameRef(broker(), name_handle); } break; } case IrOpcode::kMapGuard: { - if (!FLAG_concurrent_inlining) { - ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op()); - for (Handle<Map> map : maps) { - MapRef(broker(), map); - } + ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op()); + for (Handle<Map> map : maps) { + MapRef(broker(), map); } break; } case IrOpcode::kCheckMaps: { - if (!FLAG_concurrent_inlining) { - ZoneHandleSet<Map> const& maps = - CheckMapsParametersOf(node->op()).maps(); - for (Handle<Map> map : maps) { - MapRef(broker(), map); - } + ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps(); + for (Handle<Map> map : maps) { + MapRef(broker(), map); } break; } case IrOpcode::kCompareMaps: { - if (!FLAG_concurrent_inlining) { - ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op()); - for (Handle<Map> map : maps) { - MapRef(broker(), map); - } + ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op()); + for (Handle<Map> map : maps) { + MapRef(broker(), map); } break; } case IrOpcode::kJSLoadProperty: { - if (!FLAG_concurrent_inlining) { - PropertyAccess const& p = PropertyAccessOf(node->op()); - AccessMode access_mode = AccessMode::kLoad; - if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode, - base::nullopt); - } + PropertyAccess const& p = PropertyAccessOf(node->op()); + AccessMode access_mode = 
AccessMode::kLoad; + if (p.feedback().IsValid()) { + broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode, + base::nullopt); } break; } diff --git a/chromium/v8/src/compiler/js-inlining-heuristic.cc b/chromium/v8/src/compiler/js-inlining-heuristic.cc index ae271b3af9e..cc3f321d6b2 100644 --- a/chromium/v8/src/compiler/js-inlining-heuristic.cc +++ b/chromium/v8/src/compiler/js-inlining-heuristic.cc @@ -22,9 +22,35 @@ namespace compiler { } while (false) namespace { -bool IsSmall(BytecodeArrayRef bytecode) { +bool IsSmall(BytecodeArrayRef const& bytecode) { return bytecode.length() <= FLAG_max_inlined_bytecode_size_small; } + +bool CanConsiderForInlining(JSHeapBroker* broker, + SharedFunctionInfoRef const& shared, + FeedbackVectorRef const& feedback_vector) { + if (!shared.IsInlineable()) return false; + DCHECK(shared.HasBytecodeArray()); + if (!shared.IsSerializedForCompilation(feedback_vector)) { + TRACE_BROKER_MISSING( + broker, "data for " << shared << " (not serialized for compilation)"); + return false; + } + return true; +} + +bool CanConsiderForInlining(JSHeapBroker* broker, + JSFunctionRef const& function) { + if (!function.has_feedback_vector()) return false; + if (!function.serialized()) { + TRACE_BROKER_MISSING( + broker, "data for " << function << " (cannot consider for inlining)"); + return false; + } + return CanConsiderForInlining(broker, function.shared(), + function.feedback_vector()); +} + } // namespace JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( @@ -38,11 +64,11 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( if (m.HasValue() && m.Ref(broker()).IsJSFunction()) { out.functions[0] = m.Ref(broker()).AsJSFunction(); JSFunctionRef function = out.functions[0].value(); - if (function.IsSerializedForCompilation()) { + if (CanConsiderForInlining(broker(), function)) { out.bytecode[0] = function.shared().GetBytecodeArray(); + out.num_functions = 1; + return out; } - out.num_functions = 1; - return out; } if (m.IsPhi()) { int const value_input_count = m.node()->op()->ValueInputCount(); @@ -59,7 +85,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( out.functions[n] = m.Ref(broker()).AsJSFunction(); JSFunctionRef function = out.functions[n].value(); - if (function.IsSerializedForCompilation()) { + if (CanConsiderForInlining(broker(), function)) { out.bytecode[n] = function.shared().GetBytecodeArray(); } } @@ -67,11 +93,14 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( return out; } if (m.IsJSCreateClosure()) { - CreateClosureParameters const& p = CreateClosureParametersOf(m.op()); DCHECK(!out.functions[0].has_value()); - out.shared_info = SharedFunctionInfoRef(broker(), p.shared_info()); - SharedFunctionInfoRef shared_info = out.shared_info.value(); - if (shared_info.HasBytecodeArray()) { + CreateClosureParameters const& p = CreateClosureParametersOf(m.op()); + FeedbackCellRef feedback_cell(broker(), p.feedback_cell()); + SharedFunctionInfoRef shared_info(broker(), p.shared_info()); + out.shared_info = shared_info; + if (feedback_cell.value().IsFeedbackVector() && + CanConsiderForInlining(broker(), shared_info, + feedback_cell.value().AsFeedbackVector())) { out.bytecode[0] = shared_info.GetBytecodeArray(); } out.num_functions = 1; @@ -135,7 +164,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { SharedFunctionInfoRef shared = candidate.functions[i].has_value() ? 
candidate.functions[i].value().shared() : candidate.shared_info.value(); - candidate.can_inline_function[i] = shared.IsInlineable(); + candidate.can_inline_function[i] = candidate.bytecode[i].has_value(); + CHECK_IMPLIES(candidate.can_inline_function[i], shared.IsInlineable()); // Do not allow direct recursion i.e. f() -> f(). We still allow indirect // recurion like f() -> g() -> f(). The indirect recursion is helpful in // cases where f() is a small dispatch function that calls the appropriate @@ -151,14 +181,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { node->id(), node->op()->mnemonic()); candidate.can_inline_function[i] = false; } - // A function reaching this point should always have its bytecode - // serialized. - BytecodeArrayRef bytecode = candidate.bytecode[i].value(); if (candidate.can_inline_function[i]) { can_inline_candidate = true; + BytecodeArrayRef bytecode = candidate.bytecode[i].value(); candidate.total_size += bytecode.length(); + candidate_is_small = candidate_is_small && IsSmall(bytecode); } - candidate_is_small = candidate_is_small && IsSmall(bytecode); } if (!can_inline_candidate) return NoChange(); diff --git a/chromium/v8/src/compiler/js-inlining.cc b/chromium/v8/src/compiler/js-inlining.cc index 51179f1956f..6c071438cc5 100644 --- a/chromium/v8/src/compiler/js-inlining.cc +++ b/chromium/v8/src/compiler/js-inlining.cc @@ -321,7 +321,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget( // TODO(turbofan): We might consider to eagerly create the feedback vector // in such a case (in {DetermineCallContext} below) eventually. - FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell())); + FeedbackCellRef cell(broker(), p.feedback_cell()); if (!cell.value().IsFeedbackVector()) return base::nullopt; return SharedFunctionInfoRef(broker(), p.shared_info()); @@ -413,11 +413,11 @@ Reduction JSInliner::ReduceJSCall(Node* node) { Node* exception_target = nullptr; NodeProperties::IsExceptionalCall(node, &exception_target); - // JSInliningHeuristic has already filtered candidates without a - // BytecodeArray by calling SharedFunctionInfoRef::IsInlineable. For the ones - // passing the IsInlineable check, The broker holds a reference to the - // bytecode array, which prevents it from getting flushed. - // Therefore, the following check should always hold true. + // JSInliningHeuristic has already filtered candidates without a BytecodeArray + // by calling SharedFunctionInfoRef::IsInlineable. For the ones passing the + // IsInlineable check, the broker holds a reference to the bytecode array, + // which prevents it from getting flushed. Therefore, the following check + // should always hold true. CHECK(shared_info->is_compiled()); if (!FLAG_concurrent_inlining && info_->is_source_positions_enabled()) { @@ -428,17 +428,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) { TRACE("Inlining " << *shared_info << " into " << outer_shared_info << ((exception_target != nullptr) ? " (inside try-block)" : "")); - // Determine the targets feedback vector and its context. + // Determine the target's feedback vector and its context. Node* context; FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context); - - if (FLAG_concurrent_inlining && - !shared_info->IsSerializedForCompilation(feedback_vector)) { - // TODO(neis): Should this be a broker message? 
- TRACE("Missed opportunity to inline a function (" - << *shared_info << " with " << feedback_vector << ")"); - return NoChange(); - } + CHECK(shared_info->IsSerializedForCompilation(feedback_vector)); // ---------------------------------------------------------------- // After this point, we've made a decision to inline this function. diff --git a/chromium/v8/src/compiler/js-native-context-specialization.cc b/chromium/v8/src/compiler/js-native-context-specialization.cc index 9f950c808c2..80c620034b2 100644 --- a/chromium/v8/src/compiler/js-native-context-specialization.cc +++ b/chromium/v8/src/compiler/js-native-context-specialization.cc @@ -784,12 +784,15 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess( Node* node, Node* receiver, Node* value, NameRef const& name, AccessMode access_mode, Node* key) { base::Optional<PropertyCellRef> cell = - native_context().global_proxy_object().GetPropertyCell(name); + native_context().global_object().GetPropertyCell(name); return cell.has_value() ? ReduceGlobalAccess(node, receiver, value, name, access_mode, key, *cell) : NoChange(); } +// TODO(neis): Try to merge this with ReduceNamedAccess by introducing a new +// PropertyAccessInfo kind for global accesses and using the existing mechanism +// for building loads/stores. Reduction JSNativeContextSpecialization::ReduceGlobalAccess( Node* node, Node* receiver, Node* value, NameRef const& name, AccessMode access_mode, Node* key, PropertyCellRef const& property_cell) { @@ -838,15 +841,16 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess( effect = BuildCheckEqualsName(name, key, effect, control); } - // Check if we have a {receiver} to validate. If so, we need to check that - // the {receiver} is actually the JSGlobalProxy for the native context that - // we are specializing to. + // If we have a {receiver} to validate, we do so by checking that its map is + // the (target) global proxy's map. This guarantees that in fact the receiver + // is the global proxy. if (receiver != nullptr) { - Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver, - jsgraph()->HeapConstant(global_proxy())); effect = graph()->NewNode( - simplified()->CheckIf(DeoptimizeReason::kReceiverNotAGlobalProxy), - check, effect, control); + simplified()->CheckMaps( + CheckMapsFlag::kNone, + ZoneHandleSet<Map>( + HeapObjectRef(broker(), global_proxy()).map().object())), + receiver, effect, control); } if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) { @@ -1050,28 +1054,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) { } } -void JSNativeContextSpecialization::FilterMapsAndGetPropertyAccessInfos( - NamedAccessFeedback const& feedback, AccessMode access_mode, Node* receiver, - Node* effect, ZoneVector<PropertyAccessInfo>* access_infos) { - ZoneVector<Handle<Map>> receiver_maps(zone()); - - // Either infer maps from the graph or use the feedback. - if (!InferReceiverMaps(receiver, effect, &receiver_maps)) { - receiver_maps = feedback.maps(); - } - RemoveImpossibleReceiverMaps(receiver, &receiver_maps); - - for (Handle<Map> map_handle : receiver_maps) { - MapRef map(broker(), map_handle); - if (map.is_deprecated()) continue; - PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( - map, feedback.name(), access_mode, dependencies(), - FLAG_concurrent_inlining ? 
SerializationPolicy::kAssumeSerialized - : SerializationPolicy::kSerializeIfNeeded); - access_infos->push_back(access_info); - } -} - Reduction JSNativeContextSpecialization::ReduceNamedAccess( Node* node, Node* value, NamedAccessFeedback const& feedback, AccessMode access_mode, Node* key) { @@ -1081,36 +1063,54 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( node->opcode() == IrOpcode::kJSStoreProperty || node->opcode() == IrOpcode::kJSStoreNamedOwn || node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral || - node->opcode() == IrOpcode::kJSHasProperty || - node->opcode() == IrOpcode::kJSGetIterator); + node->opcode() == IrOpcode::kJSHasProperty); Node* receiver = NodeProperties::GetValueInput(node, 0); Node* context = NodeProperties::GetContextInput(node); Node* frame_state = NodeProperties::GetFrameStateInput(node); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); - ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone()); - ZoneVector<PropertyAccessInfo> access_infos(zone()); - FilterMapsAndGetPropertyAccessInfos(feedback, access_mode, receiver, effect, - &access_infos_for_feedback); - AccessInfoFactory access_info_factory(broker(), dependencies(), - graph()->zone()); - if (!access_info_factory.FinalizePropertyAccessInfos( - access_infos_for_feedback, access_mode, &access_infos)) { - return NoChange(); + // Either infer maps from the graph or use the feedback. + ZoneVector<Handle<Map>> receiver_maps(zone()); + if (!InferReceiverMaps(receiver, effect, &receiver_maps)) { + receiver_maps = feedback.maps(); } + RemoveImpossibleReceiverMaps(receiver, &receiver_maps); - // Check if we have an access o.x or o.x=v where o is the current - // native contexts' global proxy, and turn that into a direct access - // to the current native context's global object instead. - if (access_infos.size() == 1 && access_infos[0].receiver_maps().size() == 1) { - MapRef receiver_map(broker(), access_infos[0].receiver_maps()[0]); - if (receiver_map.IsMapOfTargetGlobalProxy()) { + // Check if we have an access o.x or o.x=v where o is the target native + // contexts' global proxy, and turn that into a direct access to the + // corresponding global object instead. + if (receiver_maps.size() == 1) { + MapRef receiver_map(broker(), receiver_maps[0]); + if (receiver_map.equals( + broker()->target_native_context().global_proxy_object().map()) && + !broker()->target_native_context().global_object().IsDetached()) { return ReduceGlobalAccess(node, receiver, value, feedback.name(), access_mode, key); } } + ZoneVector<PropertyAccessInfo> access_infos(zone()); + { + ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone()); + for (Handle<Map> map_handle : receiver_maps) { + MapRef map(broker(), map_handle); + if (map.is_deprecated()) continue; + PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( + map, feedback.name(), access_mode, dependencies(), + FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized + : SerializationPolicy::kSerializeIfNeeded); + access_infos_for_feedback.push_back(access_info); + } + + AccessInfoFactory access_info_factory(broker(), dependencies(), + graph()->zone()); + if (!access_info_factory.FinalizePropertyAccessInfos( + access_infos_for_feedback, access_mode, &access_infos)) { + return NoChange(); + } + } + // Ensure that {key} matches the specified name (if {key} is given). 
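The ReduceGlobalAccess change above swaps the ReferenceEqual check against the global proxy constant for a CheckMaps against the global proxy's map; per the new comment, matching that map guarantees the receiver really is the global proxy. A rough stand-alone illustration of such a map-based guard (`Map` and `HeapObject` here are invented stand-ins, not V8's classes):

```cpp
#include <cassert>

struct Map {};                          // stand-in for a hidden class
struct HeapObject { const Map* map; };  // every object points at its map

// Map-based guard: equivalent to an identity check against the global proxy
// as long as no other object shares the global proxy's map.
bool ReceiverIsGlobalProxy(const HeapObject& receiver,
                           const Map* global_proxy_map) {
  return receiver.map == global_proxy_map;
}

int main() {
  Map proxy_map, other_map;
  HeapObject proxy{&proxy_map}, plain{&other_map};
  assert(ReceiverIsGlobalProxy(proxy, &proxy_map));
  assert(!ReceiverIsGlobalProxy(plain, &proxy_map));
  return 0;
}
```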
if (key != nullptr) { effect = BuildCheckEqualsName(feedback.name(), key, effect, control); @@ -1332,24 +1332,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( return Replace(value); } -Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus( - Node* node, Node* value, FeedbackSource const& source, NameRef const& name, - AccessMode access_mode) { - DCHECK(node->opcode() == IrOpcode::kJSLoadNamed || - node->opcode() == IrOpcode::kJSStoreNamed || - node->opcode() == IrOpcode::kJSStoreNamedOwn); - Node* const receiver = NodeProperties::GetValueInput(node, 0); - - // Optimize accesses to the current native context's global proxy. - HeapObjectMatcher m(receiver); - if (m.HasValue() && - m.Ref(broker()).equals(native_context().global_proxy_object())) { - return ReduceGlobalAccess(node, nullptr, value, name, access_mode); - } - - return ReducePropertyAccess(node, nullptr, name, value, source, access_mode); -} - Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode()); NamedAccess const& p = NamedAccessOf(node->op()); @@ -1388,18 +1370,134 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { } if (!p.feedback().IsValid()) return NoChange(); - return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), - FeedbackSource(p.feedback()), name, - AccessMode::kLoad); + return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(), + FeedbackSource(p.feedback()), AccessMode::kLoad); } Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) { DCHECK_EQ(IrOpcode::kJSGetIterator, node->opcode()); - PropertyAccess const& p = PropertyAccessOf(node->op()); - NameRef name(broker(), factory()->iterator_symbol()); + GetIteratorParameters const& p = GetIteratorParametersOf(node->op()); - return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(), - FeedbackSource(p.feedback()), AccessMode::kLoad); + Node* receiver = NodeProperties::GetValueInput(node, 0); + Node* context = NodeProperties::GetContextInput(node); + Node* frame_state = NodeProperties::GetFrameStateInput(node); + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + + Node* iterator_exception_node = nullptr; + Node* if_exception_merge = nullptr; + Node* if_exception_effect_phi = nullptr; + Node* if_exception_phi = nullptr; + bool has_exception_node = + NodeProperties::IsExceptionalCall(node, &iterator_exception_node); + if (has_exception_node) { + // If there exists an IfException node for the current {node}, we need + // exception handling for all the desugared nodes. Create a combination + // of Merge+Phi+EffectPhi nodes that consumes the exception paths from + // from all the desugared nodes including the original exception node. + // Usages of the original exception node are then rewired to the newly + // created combination of Merge+Phi+EffectPhi. Here, use dead_node as a + // placeholder for the original exception node until its uses are rewired. 
+ + Node* dead_node = jsgraph()->Dead(); + if_exception_merge = graph()->NewNode(common()->Merge(1), dead_node); + if_exception_effect_phi = + graph()->NewNode(common()->EffectPhi(1), dead_node, if_exception_merge); + if_exception_phi = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 1), + dead_node, if_exception_merge); + ReplaceWithValue(iterator_exception_node, if_exception_phi, + if_exception_effect_phi, if_exception_merge); + if_exception_merge->ReplaceInput(0, iterator_exception_node); + if_exception_effect_phi->ReplaceInput(0, iterator_exception_node); + if_exception_phi->ReplaceInput(0, iterator_exception_node); + } + + // Load iterator property operator + Handle<Name> iterator_symbol = factory()->iterator_symbol(); + const Operator* load_op = + javascript()->LoadNamed(iterator_symbol, p.loadFeedback()); + + // Lazy deopt of the load iterator property + Node* call_slot = jsgraph()->SmiConstant(p.callFeedback().slot.ToInt()); + Node* call_feedback = jsgraph()->HeapConstant(p.callFeedback().vector); + Node* lazy_deopt_parameters[] = {receiver, call_slot, call_feedback}; + Node* lazy_deopt_frame_state = CreateStubBuiltinContinuationFrameState( + jsgraph(), Builtins::kGetIteratorWithFeedbackLazyDeoptContinuation, + context, lazy_deopt_parameters, arraysize(lazy_deopt_parameters), + frame_state, ContinuationFrameStateMode::LAZY); + Node* load_property = graph()->NewNode( + load_op, receiver, context, lazy_deopt_frame_state, effect, control); + effect = load_property; + control = load_property; + + // Handle exception path for the load named property + if (has_exception_node) { + control = + AppendExceptionHandling(effect, control, if_exception_merge, + if_exception_phi, if_exception_effect_phi); + } + + // Eager deopt of call iterator property + Node* parameters[] = {receiver, load_property, call_slot, call_feedback}; + Node* eager_deopt_frame_state = CreateStubBuiltinContinuationFrameState( + jsgraph(), Builtins::kCallIteratorWithFeedback, context, parameters, + arraysize(parameters), frame_state, ContinuationFrameStateMode::EAGER); + Node* deopt_checkpoint = graph()->NewNode( + common()->Checkpoint(), eager_deopt_frame_state, effect, control); + effect = deopt_checkpoint; + + // Call iterator property operator + ProcessedFeedback const& feedback = + broker()->GetFeedbackForCall(p.callFeedback()); + SpeculationMode mode = feedback.IsInsufficient() + ? 
SpeculationMode::kDisallowSpeculation + : feedback.AsCall().speculation_mode(); + const Operator* call_op = + javascript()->Call(2, CallFrequency(), p.callFeedback(), + ConvertReceiverMode::kNotNullOrUndefined, mode); + Node* call_property = graph()->NewNode(call_op, load_property, receiver, + context, frame_state, effect, control); + effect = call_property; + control = call_property; + if (has_exception_node) { + control = + AppendExceptionHandling(effect, control, if_exception_merge, + if_exception_phi, if_exception_effect_phi); + } + + // Check if the call property returns a valid JSReceiver else throw an invalid + // iterator runtime exception + Node* is_receiver = + graph()->NewNode(simplified()->ObjectIsReceiver(), call_property); + Node* branch_node = graph()->NewNode( + common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), + is_receiver, control); + { + // Create a version of effect and control for the false path of the branch + Node* effect = call_property; + Node* control = call_property; + Node* if_not_receiver = graph()->NewNode(common()->IfFalse(), branch_node); + control = if_not_receiver; + const Operator* call_runtime_op = + javascript()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid, 0); + Node* call_runtime = graph()->NewNode(call_runtime_op, context, frame_state, + effect, control); + control = call_runtime; + effect = call_runtime; + if (has_exception_node) { + control = + AppendExceptionHandling(effect, control, if_exception_merge, + if_exception_phi, if_exception_effect_phi); + } + Node* throw_node = + graph()->NewNode(common()->Throw(), call_runtime, control); + NodeProperties::MergeControlToEnd(graph(), common(), throw_node); + } + + Node* if_receiver = graph()->NewNode(common()->IfTrue(), branch_node); + ReplaceWithValue(node, call_property, effect, if_receiver); + return Replace(if_receiver); } Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) { @@ -1408,9 +1506,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) { Node* const value = NodeProperties::GetValueInput(node, 1); if (!p.feedback().IsValid()) return NoChange(); - return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()), - NameRef(broker(), p.name()), - AccessMode::kStore); + return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()), value, + FeedbackSource(p.feedback()), AccessMode::kStore); } Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) { @@ -1419,9 +1516,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) { Node* const value = NodeProperties::GetValueInput(node, 1); if (!p.feedback().IsValid()) return NoChange(); - return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()), - NameRef(broker(), p.name()), - AccessMode::kStoreInLiteral); + return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()), value, + FeedbackSource(p.feedback()), + AccessMode::kStoreInLiteral); } Reduction JSNativeContextSpecialization::ReduceElementAccessOnString( @@ -1578,9 +1675,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // NoElementsProtector. 
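The new ReduceJSGetIterator body above desugars GetIterator into a named load of Symbol.iterator, a call of the loaded method, and a check that the call result is a JSReceiver, otherwise throwing via Runtime::kThrowSymbolIteratorInvalid. A toy C++ rendering of that protocol, purely for orientation; `Value`, `iterator_method`, and the error messages are made up, and none of this is V8 API:

```cpp
#include <functional>
#include <stdexcept>

struct Value {
  bool is_object = false;
  // Stand-in for obj[Symbol.iterator]; empty means the property is missing.
  std::function<Value(const Value& receiver)> iterator_method;
};

Value GetIterator(const Value& obj) {
  if (!obj.iterator_method) {
    throw std::runtime_error("TypeError: object is not iterable");
  }
  Value iterator = obj.iterator_method(obj);  // call obj[Symbol.iterator]()
  if (!iterator.is_object) {
    // Corresponds to the Runtime::kThrowSymbolIteratorInvalid branch above.
    throw std::runtime_error(
        "TypeError: result of the Symbol.iterator method is not an object");
  }
  return iterator;
}
```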
for (ElementAccessInfo const& access_info : access_infos) { if (IsFastElementsKind(access_info.elements_kind())) { - if (!isolate()->IsNoElementsProtectorIntact()) return NoChange(); - dependencies()->DependOnProtector( - PropertyCellRef(broker(), factory()->no_elements_protector())); + if (!dependencies()->DependOnNoElementsProtector()) return NoChange(); break; } } @@ -1819,8 +1914,7 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess( node->opcode() == IrOpcode::kJSHasProperty || node->opcode() == IrOpcode::kJSLoadNamed || node->opcode() == IrOpcode::kJSStoreNamed || - node->opcode() == IrOpcode::kJSStoreNamedOwn || - node->opcode() == IrOpcode::kJSGetIterator); + node->opcode() == IrOpcode::kJSStoreNamedOwn); DCHECK_GE(node->op()->ControlOutputCount(), 1); ProcessedFeedback const& feedback = @@ -2499,12 +2593,14 @@ JSNativeContextSpecialization::BuildElementAccess( if (typed_array.has_value()) { length = jsgraph()->Constant(static_cast<double>(typed_array->length())); - // Load the (known) base and external pointer for the {receiver}. The - // {external_pointer} might be invalid if the {buffer} was detached, so - // we need to make sure that any access is properly guarded. + DCHECK(!typed_array->is_on_heap()); + // Load the (known) data pointer for the {receiver} and set {base_pointer} + // and {external_pointer} to the values that will allow to generate typed + // element accesses using the known data pointer. + // The data pointer might be invalid if the {buffer} was detached, + // so we need to make sure that any access is properly guarded. base_pointer = jsgraph()->ZeroConstant(); - external_pointer = - jsgraph()->PointerConstant(typed_array->external_pointer()); + external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr()); } else { // Load the {receiver}s length. 
length = effect = graph()->NewNode( @@ -3168,6 +3264,22 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name, control); } +Node* JSNativeContextSpecialization::AppendExceptionHandling( + Node* effect, Node* control, Node* merge, Node* phi, Node* effect_phi) { + DCHECK_EQ(effect, control); + int input_count = merge->InputCount() + 1; + Node* if_exception = + graph()->NewNode(common()->IfException(), effect, control); + merge->InsertInput(graph()->zone(), 0, if_exception); + NodeProperties::ChangeOp(merge, common()->Merge(input_count)); + phi->InsertInput(graph()->zone(), 0, if_exception); + NodeProperties::ChangeOp( + phi, common()->Phi(MachineRepresentation::kTagged, input_count)); + effect_phi->InsertInput(graph()->zone(), 0, if_exception); + NodeProperties::ChangeOp(effect_phi, common()->EffectPhi(input_count)); + return graph()->NewNode(common()->IfSuccess(), control); +} + bool JSNativeContextSpecialization::CanTreatHoleAsUndefined( ZoneVector<Handle<Map>> const& receiver_maps) { // Check if all {receiver_maps} have one of the initial Array.prototype diff --git a/chromium/v8/src/compiler/js-native-context-specialization.h b/chromium/v8/src/compiler/js-native-context-specialization.h index a0707b98303..429be0bb242 100644 --- a/chromium/v8/src/compiler/js-native-context-specialization.h +++ b/chromium/v8/src/compiler/js-native-context-specialization.h @@ -101,10 +101,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final base::Optional<NameRef> static_name, Node* value, FeedbackSource const& source, AccessMode access_mode); - Reduction ReduceNamedAccessFromNexus(Node* node, Node* value, - FeedbackSource const& source, - NameRef const& name, - AccessMode access_mode); Reduction ReduceNamedAccess(Node* node, Node* value, NamedAccessFeedback const& processed, AccessMode access_mode, Node* key = nullptr); @@ -207,6 +203,12 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final Node* BuildCheckEqualsName(NameRef const& name, Node* value, Node* effect, Node* control); + // Attach a pair of success and exception paths on a given control path. + // The exception is joined to the Merge+Phi+EffectPhi nodes while the success + // path is returned. + Node* AppendExceptionHandling(Node* effect, Node* control, Node* merge, + Node* phi, Node* effect_phi); + // Checks if we can turn the hole into undefined when loading an element // from an object with one of the {receiver_maps}; sets up appropriate // code dependencies and might use the array protector cell. @@ -219,11 +221,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final ElementAccessFeedback const& feedback, Node* receiver, Node* effect) const; - void FilterMapsAndGetPropertyAccessInfos( - NamedAccessFeedback const& feedback, AccessMode access_mode, - Node* receiver, Node* effect, - ZoneVector<PropertyAccessInfo>* access_infos); - // Try to infer maps for the given {receiver} at the current {effect}. 
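AppendExceptionHandling, added above, grows a shared Merge/Phi/EffectPhi triple by one input for each throwing node in the desugared sequence and returns the IfSuccess projection to continue on. A simplified toy of that wiring, ignoring the operator/input-count updates and input ordering of the real node API; `ToyNode`, `MakeNode`, and the arena are invented for the sketch:

```cpp
#include <deque>
#include <string>
#include <utility>
#include <vector>

struct ToyNode {
  std::string op;
  std::vector<ToyNode*> inputs;
};

std::deque<ToyNode> arena;  // crude node storage, pointers stay stable

ToyNode* MakeNode(std::string op, std::vector<ToyNode*> inputs) {
  arena.push_back(ToyNode{std::move(op), std::move(inputs)});
  return &arena.back();
}

// Wires the exception edge of `throwing` into the shared merge/phi/effect-phi
// triple (one extra input each) and hands back the success projection.
ToyNode* AppendExceptionHandling(ToyNode* throwing, ToyNode* merge,
                                 ToyNode* phi, ToyNode* effect_phi) {
  ToyNode* if_exception = MakeNode("IfException", {throwing});
  merge->inputs.push_back(if_exception);
  phi->inputs.push_back(if_exception);
  effect_phi->inputs.push_back(if_exception);
  return MakeNode("IfSuccess", {throwing});
}
```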
bool InferReceiverMaps(Node* receiver, Node* effect, ZoneVector<Handle<Map>>* receiver_maps) const; diff --git a/chromium/v8/src/compiler/js-operator.cc b/chromium/v8/src/compiler/js-operator.cc index d0581b59a5a..42e5f900576 100644 --- a/chromium/v8/src/compiler/js-operator.cc +++ b/chromium/v8/src/compiler/js-operator.cc @@ -11,6 +11,7 @@ #include "src/compiler/operator.h" #include "src/handles/handles-inl.h" #include "src/objects/objects-inl.h" +#include "src/objects/template-objects.h" namespace v8 { namespace internal { @@ -284,8 +285,7 @@ bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) { PropertyAccess const& PropertyAccessOf(const Operator* op) { DCHECK(op->opcode() == IrOpcode::kJSHasProperty || op->opcode() == IrOpcode::kJSLoadProperty || - op->opcode() == IrOpcode::kJSStoreProperty || - op->opcode() == IrOpcode::kJSGetIterator); + op->opcode() == IrOpcode::kJSStoreProperty); return OpParameter<PropertyAccess>(op); } @@ -473,6 +473,34 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf( return OpParameter<CreateBoundFunctionParameters>(op); } +bool operator==(GetTemplateObjectParameters const& lhs, + GetTemplateObjectParameters const& rhs) { + return lhs.description().location() == rhs.description().location() && + lhs.shared().location() == rhs.shared().location() && + lhs.feedback() == rhs.feedback(); +} + +bool operator!=(GetTemplateObjectParameters const& lhs, + GetTemplateObjectParameters const& rhs) { + return !(lhs == rhs); +} + +size_t hash_value(GetTemplateObjectParameters const& p) { + return base::hash_combine(p.description().location(), p.shared().location(), + FeedbackSource::Hash()(p.feedback())); +} + +std::ostream& operator<<(std::ostream& os, + GetTemplateObjectParameters const& p) { + return os << Brief(*p.description()) << ", " << Brief(*p.shared()); +} + +const GetTemplateObjectParameters& GetTemplateObjectParametersOf( + const Operator* op) { + DCHECK(op->opcode() == IrOpcode::kJSGetTemplateObject); + return OpParameter<GetTemplateObjectParameters>(op); +} + bool operator==(CreateClosureParameters const& lhs, CreateClosureParameters const& rhs) { return lhs.allocation() == rhs.allocation() && @@ -562,6 +590,31 @@ const CloneObjectParameters& CloneObjectParametersOf(const Operator* op) { return OpParameter<CloneObjectParameters>(op); } +std::ostream& operator<<(std::ostream& os, GetIteratorParameters const& p) { + return os << p.loadFeedback() << ", " << p.callFeedback(); +} + +bool operator==(GetIteratorParameters const& lhs, + GetIteratorParameters const& rhs) { + return lhs.loadFeedback() == rhs.loadFeedback() && + lhs.callFeedback() == rhs.callFeedback(); +} + +bool operator!=(GetIteratorParameters const& lhs, + GetIteratorParameters const& rhs) { + return !(lhs == rhs); +} + +GetIteratorParameters const& GetIteratorParametersOf(const Operator* op) { + DCHECK(op->opcode() == IrOpcode::kJSGetIterator); + return OpParameter<GetIteratorParameters>(op); +} + +size_t hash_value(GetIteratorParameters const& p) { + return base::hash_combine(FeedbackSource::Hash()(p.loadFeedback()), + FeedbackSource::Hash()(p.callFeedback())); +} + size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); } std::ostream& operator<<(std::ostream& os, ForInMode mode) { @@ -957,9 +1010,10 @@ const Operator* JSOperatorBuilder::LoadProperty( access); // parameter } -const Operator* JSOperatorBuilder::GetIterator(FeedbackSource const& feedback) { - PropertyAccess access(LanguageMode::kSloppy, feedback); - return new (zone()) 
Operator1<PropertyAccess>( // -- +const Operator* JSOperatorBuilder::GetIterator( + FeedbackSource const& load_feedback, FeedbackSource const& call_feedback) { + GetIteratorParameters access(load_feedback, call_feedback); + return new (zone()) Operator1<GetIteratorParameters>( // -- IrOpcode::kJSGetIterator, Operator::kNoProperties, // opcode "JSGetIterator", // name 1, 1, 1, 1, 1, 2, // counts @@ -1257,6 +1311,18 @@ const Operator* JSOperatorBuilder::CreateLiteralObject( parameters); // parameter } +const Operator* JSOperatorBuilder::GetTemplateObject( + Handle<TemplateObjectDescription> description, + Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback) { + GetTemplateObjectParameters parameters(description, shared, feedback); + return new (zone()) Operator1<GetTemplateObjectParameters>( // -- + IrOpcode::kJSGetTemplateObject, // opcode + Operator::kEliminatable, // properties + "JSGetTemplateObject", // name + 0, 1, 1, 1, 1, 0, // counts + parameters); // parameter +} + const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback, int literal_flags) { CloneObjectParameters parameters(feedback, literal_flags); diff --git a/chromium/v8/src/compiler/js-operator.h b/chromium/v8/src/compiler/js-operator.h index f795a2f4029..47b0fff05a6 100644 --- a/chromium/v8/src/compiler/js-operator.h +++ b/chromium/v8/src/compiler/js-operator.h @@ -409,13 +409,13 @@ class StoreGlobalParameters final { : language_mode_(language_mode), name_(name), feedback_(feedback) {} LanguageMode language_mode() const { return language_mode_; } - const FeedbackSource& feedback() const { return feedback_; } - const Handle<Name>& name() const { return name_; } + FeedbackSource const& feedback() const { return feedback_; } + Handle<Name> const& name() const { return name_; } private: - const LanguageMode language_mode_; - const Handle<Name> name_; - const FeedbackSource feedback_; + LanguageMode const language_mode_; + Handle<Name> const name_; + FeedbackSource const feedback_; }; bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&); @@ -598,6 +598,35 @@ std::ostream& operator<<(std::ostream&, CreateClosureParameters const&); const CreateClosureParameters& CreateClosureParametersOf(const Operator* op); +class GetTemplateObjectParameters final { + public: + GetTemplateObjectParameters(Handle<TemplateObjectDescription> description, + Handle<SharedFunctionInfo> shared, + FeedbackSource const& feedback) + : description_(description), shared_(shared), feedback_(feedback) {} + + Handle<TemplateObjectDescription> description() const { return description_; } + Handle<SharedFunctionInfo> shared() const { return shared_; } + FeedbackSource const& feedback() const { return feedback_; } + + private: + Handle<TemplateObjectDescription> const description_; + Handle<SharedFunctionInfo> const shared_; + FeedbackSource const feedback_; +}; + +bool operator==(GetTemplateObjectParameters const&, + GetTemplateObjectParameters const&); +bool operator!=(GetTemplateObjectParameters const&, + GetTemplateObjectParameters const&); + +size_t hash_value(GetTemplateObjectParameters const&); + +std::ostream& operator<<(std::ostream&, GetTemplateObjectParameters const&); + +const GetTemplateObjectParameters& GetTemplateObjectParametersOf( + const Operator* op); + // Defines shared information for the literal that should be created. This is // used as parameter by JSCreateLiteralArray, JSCreateLiteralObject and // JSCreateLiteralRegExp operators. 
@@ -653,6 +682,31 @@ std::ostream& operator<<(std::ostream&, CloneObjectParameters const&); const CloneObjectParameters& CloneObjectParametersOf(const Operator* op); +// Defines the shared information for the iterator symbol thats loaded and +// called. This is used as a parameter by JSGetIterator operator. +class GetIteratorParameters final { + public: + GetIteratorParameters(const FeedbackSource& load_feedback, + const FeedbackSource& call_feedback) + : load_feedback_(load_feedback), call_feedback_(call_feedback) {} + + FeedbackSource const& loadFeedback() const { return load_feedback_; } + FeedbackSource const& callFeedback() const { return call_feedback_; } + + private: + FeedbackSource const load_feedback_; + FeedbackSource const call_feedback_; +}; + +bool operator==(GetIteratorParameters const&, GetIteratorParameters const&); +bool operator!=(GetIteratorParameters const&, GetIteratorParameters const&); + +size_t hash_value(GetIteratorParameters const&); + +std::ostream& operator<<(std::ostream&, GetIteratorParameters const&); + +const GetIteratorParameters& GetIteratorParametersOf(const Operator* op); + // Descriptor used by the JSForInPrepare and JSForInNext opcodes. enum class ForInMode : uint8_t { kUseEnumCacheKeysAndIndices, @@ -742,7 +796,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* CreateEmptyLiteralArray(FeedbackSource const& feedback); const Operator* CreateArrayFromIterable(); const Operator* CreateEmptyLiteralObject(); - const Operator* CreateLiteralObject( Handle<ObjectBoilerplateDescription> constant, FeedbackSource const& feedback, int literal_flags, @@ -753,6 +806,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final FeedbackSource const& feedback, int literal_flags); + const Operator* GetTemplateObject( + Handle<TemplateObjectDescription> description, + Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback); + const Operator* CallForwardVarargs(size_t arity, uint32_t start_index); const Operator* Call( size_t arity, CallFrequency const& frequency = CallFrequency(), @@ -856,7 +913,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* ParseInt(); const Operator* RegExpTest(); - const Operator* GetIterator(FeedbackSource const& feedback); + const Operator* GetIterator(FeedbackSource const& load_feedback, + FeedbackSource const& call_feedback); private: Zone* zone() const { return zone_; } diff --git a/chromium/v8/src/compiler/js-type-hint-lowering.cc b/chromium/v8/src/compiler/js-type-hint-lowering.cc index e1ff928cec6..9a6b367ddf1 100644 --- a/chromium/v8/src/compiler/js-type-hint-lowering.cc +++ b/chromium/v8/src/compiler/js-type-hint-lowering.cc @@ -482,12 +482,32 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation( return LoweringResult::NoChange(); } +JSTypeHintLowering::LoweringResult +JSTypeHintLowering::ReduceGetIteratorOperation(const Operator* op, + Node* receiver, Node* effect, + Node* control, + FeedbackSlot load_slot, + FeedbackSlot call_slot) const { + DCHECK_EQ(IrOpcode::kJSGetIterator, op->opcode()); + // Insert soft deopt if the load feedback is invalid. + if (Node* node = TryBuildSoftDeopt( + load_slot, effect, control, + DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) { + return LoweringResult::Exit(node); + } + // Insert soft deopt if the call feedback is invalid. 
+ if (Node* node = TryBuildSoftDeopt( + call_slot, effect, control, + DeoptimizeReason::kInsufficientTypeFeedbackForCall)) { + return LoweringResult::Exit(node); + } + return LoweringResult::NoChange(); +} + JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation( const Operator* op, Node* receiver, Node* effect, Node* control, FeedbackSlot slot) const { - // JSGetIterator involves a named load of the Symbol.iterator property. - DCHECK(op->opcode() == IrOpcode::kJSLoadNamed || - op->opcode() == IrOpcode::kJSGetIterator); + DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode()); if (Node* node = TryBuildSoftDeopt( slot, effect, control, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) { diff --git a/chromium/v8/src/compiler/js-type-hint-lowering.h b/chromium/v8/src/compiler/js-type-hint-lowering.h index 3e46fb2ec2a..303e2f8dcfa 100644 --- a/chromium/v8/src/compiler/js-type-hint-lowering.h +++ b/chromium/v8/src/compiler/js-type-hint-lowering.h @@ -134,6 +134,13 @@ class JSTypeHintLowering { int arg_count, Node* effect, Node* control, FeedbackSlot slot) const; + + // Potential reduction of property access and call operations. + LoweringResult ReduceGetIteratorOperation(const Operator* op, Node* obj, + Node* effect, Node* control, + FeedbackSlot load_slot, + FeedbackSlot call_slot) const; + // Potential reduction of property access operations. LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* obj, Node* effect, Node* control, diff --git a/chromium/v8/src/compiler/js-typed-lowering.cc b/chromium/v8/src/compiler/js-typed-lowering.cc index 8caafe6aadf..035457c62be 100644 --- a/chromium/v8/src/compiler/js-typed-lowering.cc +++ b/chromium/v8/src/compiler/js-typed-lowering.cc @@ -17,6 +17,7 @@ #include "src/compiler/operator-properties.h" #include "src/compiler/type-cache.h" #include "src/compiler/types.h" +#include "src/execution/protectors.h" #include "src/objects/js-generator.h" #include "src/objects/module-inl.h" #include "src/objects/objects-inl.h" @@ -567,9 +568,10 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) { Node* length = graph()->NewNode(simplified()->NumberAdd(), left_length, right_length); - CellRef string_length_protector(broker(), - factory()->string_length_protector()); - if (string_length_protector.value().AsSmi() == Isolate::kProtectorValid) { + PropertyCellRef string_length_protector( + broker(), factory()->string_length_protector()); + if (string_length_protector.value().AsSmi() == + Protectors::kProtectorValid) { // We can just deoptimize if the {length} is out-of-bounds. 
Besides // generating a shorter code sequence than the version below, this // has the additional benefit of not holding on to the lazy {frame_state} @@ -2025,8 +2027,7 @@ Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) { ExternalReference const ref = ExternalReference::address_of_pending_message_obj(isolate()); node->ReplaceInput(0, jsgraph()->ExternalConstant(ref)); - NodeProperties::ChangeOp( - node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue())); + NodeProperties::ChangeOp(node, simplified()->LoadMessage()); return Changed(node); } @@ -2037,8 +2038,7 @@ Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) { Node* value = NodeProperties::GetValueInput(node, 0); node->ReplaceInput(0, jsgraph()->ExternalConstant(ref)); node->ReplaceInput(1, value); - NodeProperties::ChangeOp( - node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue())); + NodeProperties::ChangeOp(node, simplified()->StoreMessage()); return Changed(node); } diff --git a/chromium/v8/src/compiler/machine-graph-verifier.cc b/chromium/v8/src/compiler/machine-graph-verifier.cc index 4c7ee1d1410..f6b747c04da 100644 --- a/chromium/v8/src/compiler/machine-graph-verifier.cc +++ b/chromium/v8/src/compiler/machine-graph-verifier.cc @@ -241,7 +241,7 @@ class MachineRepresentationInferrer { MachineType::PointerRepresentation(); break; case IrOpcode::kBitcastTaggedToWord: - case IrOpcode::kBitcastTaggedSignedToWord: + case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: representation_vector_[node->id()] = MachineType::PointerRepresentation(); break; @@ -437,7 +437,7 @@ class MachineRepresentationChecker { MachineRepresentation::kWord64); break; case IrOpcode::kBitcastTaggedToWord: - case IrOpcode::kBitcastTaggedSignedToWord: + case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: case IrOpcode::kTaggedPoisonOnSpeculation: CheckValueInputIsTagged(node, 0); break; @@ -461,7 +461,7 @@ class MachineRepresentationChecker { CheckValueInputForFloat64Op(node, 0); break; case IrOpcode::kWord64Equal: - if (Is64()) { + if (Is64() && !COMPRESS_POINTERS_BOOL) { CheckValueInputIsTaggedOrPointer(node, 0); CheckValueInputIsTaggedOrPointer(node, 1); if (!is_stub_) { @@ -1007,6 +1007,13 @@ class MachineRepresentationChecker { return IsAnyCompressed(actual); case MachineRepresentation::kTaggedSigned: case MachineRepresentation::kTaggedPointer: + // TODO(tebbi): At the moment, the machine graph doesn't contain + // reliable information if a node is kTaggedSigned, kTaggedPointer or + // kTagged, and often this is context-dependent. We should at least + // check for obvious violations: kTaggedSigned where we expect + // kTaggedPointer and the other way around, but at the moment, this + // happens in dead code. 
+ return IsAnyTagged(actual); case MachineRepresentation::kCompressedSigned: case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kFloat32: diff --git a/chromium/v8/src/compiler/machine-operator-reducer.cc b/chromium/v8/src/compiler/machine-operator-reducer.cc index 11124579f61..38013d228c5 100644 --- a/chromium/v8/src/compiler/machine-operator-reducer.cc +++ b/chromium/v8/src/compiler/machine-operator-reducer.cc @@ -681,7 +681,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { Int64Matcher m(node->InputAt(0)); if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value())); if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0)); - if (m.IsBitcastTaggedSignedToWord()) { + if (m.IsBitcastTaggedToWordForTagAndSmiBits()) { Int64Matcher n(m.node()->InputAt(0)); if (n.IsChangeCompressedToTagged()) { DCHECK(machine()->Is64() && SmiValuesAre31Bits()); @@ -725,7 +725,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { case IrOpcode::kFloat64RoundDown: return ReduceFloat64RoundDown(node); case IrOpcode::kBitcastTaggedToWord: - case IrOpcode::kBitcastTaggedSignedToWord: { + case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: { NodeMatcher m(node->InputAt(0)); if (m.IsBitcastWordToTaggedSigned()) { RelaxEffectsAndControls(node); diff --git a/chromium/v8/src/compiler/machine-operator.cc b/chromium/v8/src/compiler/machine-operator.cc index 0355534408d..b450fb60da8 100644 --- a/chromium/v8/src/compiler/machine-operator.cc +++ b/chromium/v8/src/compiler/machine-operator.cc @@ -146,7 +146,8 @@ MachineType AtomicOpType(Operator const* op) { V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \ V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ - V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \ + V(Simd128ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ + V(BitcastTaggedToWordForTagAndSmiBits, Operator::kNoProperties, 1, 0, 1) \ V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \ V(BitcastWord32ToCompressedSigned, Operator::kNoProperties, 1, 0, 1) \ V(BitcastCompressedSignedToWord32, Operator::kNoProperties, 1, 0, 1) \ @@ -255,6 +256,7 @@ MachineType AtomicOpType(Operator const* op) { V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Sqrt, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Add, Operator::kCommutative, 2, 0, 1) \ V(F64x2Sub, Operator::kNoProperties, 2, 0, 1) \ V(F64x2Mul, Operator::kCommutative, 2, 0, 1) \ @@ -265,11 +267,14 @@ MachineType AtomicOpType(Operator const* op) { V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \ V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \ V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \ + V(F64x2Qfma, Operator::kNoProperties, 3, 0, 1) \ + V(F64x2Qfms, Operator::kNoProperties, 3, 0, 1) \ V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \ V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \ V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \ V(F32x4Abs, Operator::kNoProperties, 1, 0, 1) \ V(F32x4Neg, Operator::kNoProperties, 1, 0, 1) \ + V(F32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \ V(F32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \ V(F32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \ V(F32x4Add, Operator::kCommutative, 2, 0, 1) \ @@ -283,6 +288,8 @@ MachineType AtomicOpType(Operator const* op) { V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \ V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Le, 
Operator::kNoProperties, 2, 0, 1) \ + V(F32x4Qfma, Operator::kNoProperties, 3, 0, 1) \ + V(F32x4Qfms, Operator::kNoProperties, 3, 0, 1) \ V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \ V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \ V(I64x2Shl, Operator::kNoProperties, 2, 0, 1) \ @@ -395,6 +402,7 @@ MachineType AtomicOpType(Operator const* op) { V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1) \ + V(S8x16Swizzle, Operator::kNoProperties, 2, 0, 1) \ V(StackPointerGreaterThan, Operator::kNoProperties, 1, 0, 1) // The format is: diff --git a/chromium/v8/src/compiler/machine-operator.h b/chromium/v8/src/compiler/machine-operator.h index 17db145f58e..1bd806eefbe 100644 --- a/chromium/v8/src/compiler/machine-operator.h +++ b/chromium/v8/src/compiler/machine-operator.h @@ -239,6 +239,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const OptionalOperator Word64ReverseBits(); const Operator* Word32ReverseBytes(); const Operator* Word64ReverseBytes(); + const Operator* Simd128ReverseBytes(); const OptionalOperator Int32AbsWithOverflow(); const OptionalOperator Int64AbsWithOverflow(); @@ -301,8 +302,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final // This operator reinterprets the bits of a tagged pointer as a word. const Operator* BitcastTaggedToWord(); - // This operator reinterprets the bits of a Smi as a word. - const Operator* BitcastTaggedSignedToWord(); + // This operator reinterprets the bits of a tagged value as a word preserving + // non-pointer bits (all the bits that are not modified by GC): + // 1) smi tag + // 2) weak tag + // 3) smi payload if the tagged value is a smi. + // Note, that it's illegal to "look" at the pointer bits of non-smi values. + const Operator* BitcastTaggedToWordForTagAndSmiBits(); // This operator reinterprets the bits of a tagged MaybeObject pointer as // word. 
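The renamed BitcastTaggedToWordForTagAndSmiBits is documented above as exposing only the tag bits and, for Smis, the payload bits of a tagged word. A small self-contained example of why those bits are meaningful while the remaining bits of a non-Smi must not be interpreted, assuming a simple 32-bit Smi encoding (value << 1, tag bit 0 clear for Smis); V8's real layout depends on Smi size and pointer compression:

```cpp
#include <cassert>
#include <cstdint>

constexpr uint32_t kSmiTagMask = 0x1;  // lowest bit distinguishes Smi/pointer

bool IsSmi(uint32_t tagged_word) { return (tagged_word & kSmiTagMask) == 0; }

int32_t SmiValue(uint32_t tagged_word) {
  assert(IsSmi(tagged_word));
  return static_cast<int32_t>(tagged_word) >> 1;  // recover the payload
}

int main() {
  uint32_t forty_two = 42u << 1;  // Smi encoding of 42
  assert(IsSmi(forty_two) && SmiValue(forty_two) == 42);
  uint32_t pointer_like = 0x1000 | 0x1;  // tag bit set: not a Smi
  assert(!IsSmi(pointer_like));
  // The tag bit of pointer_like is meaningful; its other bits are not to be
  // "looked at" here, matching the operator comment above.
  return 0;
}
```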
@@ -477,6 +483,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* F64x2Splat(); const Operator* F64x2Abs(); const Operator* F64x2Neg(); + const Operator* F64x2Sqrt(); const Operator* F64x2Add(); const Operator* F64x2Sub(); const Operator* F64x2Mul(); @@ -489,6 +496,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* F64x2Ne(); const Operator* F64x2Lt(); const Operator* F64x2Le(); + const Operator* F64x2Qfma(); + const Operator* F64x2Qfms(); const Operator* F32x4Splat(); const Operator* F32x4ExtractLane(int32_t); @@ -497,6 +506,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* F32x4UConvertI32x4(); const Operator* F32x4Abs(); const Operator* F32x4Neg(); + const Operator* F32x4Sqrt(); const Operator* F32x4RecipApprox(); const Operator* F32x4RecipSqrtApprox(); const Operator* F32x4Add(); @@ -510,6 +520,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* F32x4Ne(); const Operator* F32x4Lt(); const Operator* F32x4Le(); + const Operator* F32x4Qfma(); + const Operator* F32x4Qfms(); const Operator* I64x2Splat(); const Operator* I64x2ExtractLane(int32_t); @@ -632,6 +644,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* S128Not(); const Operator* S128Select(); + const Operator* S8x16Swizzle(); const Operator* S8x16Shuffle(const uint8_t shuffle[16]); const Operator* S1x2AnyTrue(); diff --git a/chromium/v8/src/compiler/map-inference.cc b/chromium/v8/src/compiler/map-inference.cc index 1e2434f4ae6..6ce036aa0bd 100644 --- a/chromium/v8/src/compiler/map-inference.cc +++ b/chromium/v8/src/compiler/map-inference.cc @@ -91,6 +91,13 @@ MapHandles const& MapInference::GetMaps() { return maps_; } +bool MapInference::Is(Handle<Map> expected_map) { + if (!HaveMaps()) return false; + const MapHandles& maps = GetMaps(); + if (maps.size() != 1) return false; + return maps[0].equals(expected_map); +} + void MapInference::InsertMapChecks(JSGraph* jsgraph, Node** effect, Node* control, const FeedbackSource& feedback) { diff --git a/chromium/v8/src/compiler/map-inference.h b/chromium/v8/src/compiler/map-inference.h index acba2eb0f2f..498b6bc15e7 100644 --- a/chromium/v8/src/compiler/map-inference.h +++ b/chromium/v8/src/compiler/map-inference.h @@ -55,6 +55,7 @@ class MapInference { V8_WARN_UNUSED_RESULT MapHandles const& GetMaps(); V8_WARN_UNUSED_RESULT bool AllOfInstanceTypes( std::function<bool(InstanceType)> f); + V8_WARN_UNUSED_RESULT bool Is(Handle<Map> expected_map); // These methods provide a guard. // diff --git a/chromium/v8/src/compiler/memory-lowering.cc b/chromium/v8/src/compiler/memory-lowering.cc new file mode 100644 index 00000000000..1e112e8e824 --- /dev/null +++ b/chromium/v8/src/compiler/memory-lowering.cc @@ -0,0 +1,551 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/memory-lowering.h" + +#include "src/codegen/interface-descriptors.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/linkage.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node.h" +#include "src/compiler/simplified-operator.h" +#include "src/roots/roots-inl.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// An allocation group represents a set of allocations that have been folded +// together. 
+class MemoryLowering::AllocationGroup final : public ZoneObject { + public: + AllocationGroup(Node* node, AllocationType allocation, Zone* zone); + AllocationGroup(Node* node, AllocationType allocation, Node* size, + Zone* zone); + ~AllocationGroup() = default; + + void Add(Node* object); + bool Contains(Node* object) const; + bool IsYoungGenerationAllocation() const { + return allocation() == AllocationType::kYoung; + } + + AllocationType allocation() const { return allocation_; } + Node* size() const { return size_; } + + private: + ZoneSet<NodeId> node_ids_; + AllocationType const allocation_; + Node* const size_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup); +}; + +MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone, + PoisoningMitigationLevel poisoning_level, + AllocationFolding allocation_folding, + WriteBarrierAssertFailedCallback callback, + const char* function_debug_name) + : jsgraph_(jsgraph), + zone_(zone), + graph_assembler_(jsgraph, nullptr, nullptr, zone), + allocation_folding_(allocation_folding), + poisoning_level_(poisoning_level), + write_barrier_assert_failed_(callback), + function_debug_name_(function_debug_name) {} + +Reduction MemoryLowering::Reduce(Node* node) { + switch (node->opcode()) { + case IrOpcode::kAllocate: + // Allocate nodes were purged from the graph in effect-control + // linearization. + UNREACHABLE(); + case IrOpcode::kAllocateRaw: + return ReduceAllocateRaw(node); + case IrOpcode::kLoadFromObject: + return ReduceLoadFromObject(node); + case IrOpcode::kLoadElement: + return ReduceLoadElement(node); + case IrOpcode::kLoadField: + return ReduceLoadField(node); + case IrOpcode::kStoreToObject: + return ReduceStoreToObject(node); + case IrOpcode::kStoreElement: + return ReduceStoreElement(node); + case IrOpcode::kStoreField: + return ReduceStoreField(node); + case IrOpcode::kStore: + return ReduceStore(node); + default: + return NoChange(); + } +} + +#define __ gasm()-> + +Reduction MemoryLowering::ReduceAllocateRaw( + Node* node, AllocationType allocation_type, + AllowLargeObjects allow_large_objects, AllocationState const** state_ptr) { + DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode()); + DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding, + state_ptr != nullptr); + Node* value; + Node* size = node->InputAt(0); + Node* effect = node->InputAt(1); + Node* control = node->InputAt(2); + + gasm()->Reset(effect, control); + + Node* allocate_builtin; + if (allocation_type == AllocationType::kYoung) { + if (allow_large_objects == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInYoungGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant(); + } + } else { + if (allow_large_objects == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInOldGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInOldGenerationStubConstant(); + } + } + + // Determine the top/limit addresses. + Node* top_address = __ ExternalConstant( + allocation_type == AllocationType::kYoung + ? ExternalReference::new_space_allocation_top_address(isolate()) + : ExternalReference::old_space_allocation_top_address(isolate())); + Node* limit_address = __ ExternalConstant( + allocation_type == AllocationType::kYoung + ? 
ExternalReference::new_space_allocation_limit_address(isolate()) + : ExternalReference::old_space_allocation_limit_address(isolate())); + + // Check if we can fold this allocation into a previous allocation represented + // by the incoming {state}. + IntPtrMatcher m(size); + if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new && + allocation_folding_ == AllocationFolding::kDoAllocationFolding) { + intptr_t const object_size = m.Value(); + AllocationState const* state = *state_ptr; + if (state->size() <= kMaxRegularHeapObjectSize - object_size && + state->group()->allocation() == allocation_type) { + // We can fold this Allocate {node} into the allocation {group} + // represented by the given {state}. Compute the upper bound for + // the new {state}. + intptr_t const state_size = state->size() + object_size; + + // Update the reservation check to the actual maximum upper bound. + AllocationGroup* const group = state->group(); + if (machine()->Is64()) { + if (OpParameter<int64_t>(group->size()->op()) < state_size) { + NodeProperties::ChangeOp(group->size(), + common()->Int64Constant(state_size)); + } + } else { + if (OpParameter<int32_t>(group->size()->op()) < state_size) { + NodeProperties::ChangeOp( + group->size(), + common()->Int32Constant(static_cast<int32_t>(state_size))); + } + } + + // Update the allocation top with the new object allocation. + // TODO(bmeurer): Defer writing back top as much as possible. + Node* top = __ IntAdd(state->top(), size); + __ Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + top_address, __ IntPtrConstant(0), top); + + // Compute the effective inner allocated address. + value = __ BitcastWordToTagged( + __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag))); + effect = __ ExtractCurrentEffect(); + control = __ ExtractCurrentControl(); + + // Extend the allocation {group}. + group->Add(value); + *state_ptr = + AllocationState::Open(group, state_size, top, effect, zone()); + } else { + auto call_runtime = __ MakeDeferredLabel(); + auto done = __ MakeLabel(MachineType::PointerRepresentation()); + + // Setup a mutable reservation size node; will be patched as we fold + // additional allocations into this new group. + Node* size = __ UniqueIntPtrConstant(object_size); + + // Load allocation top and limit. + Node* top = + __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0)); + Node* limit = + __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0)); + + // Check if we need to collect garbage before we can start bump pointer + // allocation (always done for folded allocations). + Node* check = __ UintLessThan(__ IntAdd(top, size), limit); + + __ GotoIfNot(check, &call_runtime); + __ Goto(&done, top); + + __ Bind(&call_runtime); + { + if (!allocate_operator_.is_set()) { + auto descriptor = AllocateDescriptor{}; + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), descriptor, descriptor.GetStackParameterCount(), + CallDescriptor::kCanUseRoots, Operator::kNoThrow); + allocate_operator_.set(common()->Call(call_descriptor)); + } + Node* vfalse = __ BitcastTaggedToWord( + __ Call(allocate_operator_.get(), allocate_builtin, size)); + vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag)); + __ Goto(&done, vfalse); + } + + __ Bind(&done); + + // Compute the new top and write it back. 
+ top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size)); + __ Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + top_address, __ IntPtrConstant(0), top); + + // Compute the initial object address. + value = __ BitcastWordToTagged( + __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag))); + effect = __ ExtractCurrentEffect(); + control = __ ExtractCurrentControl(); + + // Start a new allocation group. + AllocationGroup* group = + new (zone()) AllocationGroup(value, allocation_type, size, zone()); + *state_ptr = + AllocationState::Open(group, object_size, top, effect, zone()); + } + } else { + auto call_runtime = __ MakeDeferredLabel(); + auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); + + // Load allocation top and limit. + Node* top = + __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0)); + Node* limit = + __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0)); + + // Compute the new top. + Node* new_top = __ IntAdd(top, size); + + // Check if we can do bump pointer allocation here. + Node* check = __ UintLessThan(new_top, limit); + __ GotoIfNot(check, &call_runtime); + if (allow_large_objects == AllowLargeObjects::kTrue) { + __ GotoIfNot( + __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)), + &call_runtime); + } + __ Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + top_address, __ IntPtrConstant(0), new_top); + __ Goto(&done, __ BitcastWordToTagged( + __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag)))); + + __ Bind(&call_runtime); + if (!allocate_operator_.is_set()) { + auto descriptor = AllocateDescriptor{}; + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), descriptor, descriptor.GetStackParameterCount(), + CallDescriptor::kCanUseRoots, Operator::kNoThrow); + allocate_operator_.set(common()->Call(call_descriptor)); + } + __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size)); + + __ Bind(&done); + value = done.PhiAt(0); + effect = __ ExtractCurrentEffect(); + control = __ ExtractCurrentControl(); + + if (state_ptr) { + // Create an unfoldable allocation group. + AllocationGroup* group = + new (zone()) AllocationGroup(value, allocation_type, zone()); + *state_ptr = AllocationState::Closed(group, effect, zone()); + } + } + + // Replace all effect uses of {node} with the {effect} and replace + // all value uses of {node} with the {value}. + for (Edge edge : node->use_edges()) { + if (NodeProperties::IsEffectEdge(edge)) { + edge.UpdateTo(effect); + } else if (NodeProperties::IsValueEdge(edge)) { + edge.UpdateTo(value); + } else { + DCHECK(NodeProperties::IsControlEdge(edge)); + edge.UpdateTo(control); + } + } + + // Kill the {node} to make sure we don't leave dangling dead uses. 
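ReduceAllocateRaw in the new memory-lowering.cc emits the classic bump-pointer fast path as graph nodes: load top and limit, check top + size against the limit, bump top, and otherwise branch to the allocate builtin. The same control flow in ordinary C++, as a sketch only; `Space` and `TryBumpPointerAllocate` are invented, and kHeapObjectTag tagging and the GC/runtime fallback are omitted:

```cpp
#include <cstddef>
#include <cstdint>

struct Space {
  uintptr_t top;    // current allocation top
  uintptr_t limit;  // end of the currently reserved region
};

// Returns the allocated address, or 0 if the slow path (GC / runtime call,
// i.e. the __ GotoIfNot(check, &call_runtime) branch above) would be taken.
uintptr_t TryBumpPointerAllocate(Space& space, size_t size) {
  uintptr_t new_top = space.top + size;
  if (new_top >= space.limit) return 0;  // needs a runtime allocation
  uintptr_t result = space.top;
  space.top = new_top;  // write the new top back
  return result;
}
```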
+ node->Kill(); + + return Replace(value); +} + +Reduction MemoryLowering::ReduceLoadFromObject(Node* node) { + DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode()); + ObjectAccess const& access = ObjectAccessOf(node->op()); + NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); + return Changed(node); +} + +Reduction MemoryLowering::ReduceLoadElement(Node* node) { + DCHECK_EQ(IrOpcode::kLoadElement, node->opcode()); + ElementAccess const& access = ElementAccessOf(node->op()); + Node* index = node->InputAt(1); + node->ReplaceInput(1, ComputeIndex(access, index)); + MachineType type = access.machine_type; + if (NeedsPoisoning(access.load_sensitivity)) { + NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); + } else { + NodeProperties::ChangeOp(node, machine()->Load(type)); + } + return Changed(node); +} + +Reduction MemoryLowering::ReduceLoadField(Node* node) { + DCHECK_EQ(IrOpcode::kLoadField, node->opcode()); + FieldAccess const& access = FieldAccessOf(node->op()); + Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); + node->InsertInput(graph()->zone(), 1, offset); + MachineType type = access.machine_type; + if (NeedsPoisoning(access.load_sensitivity)) { + NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); + } else { + NodeProperties::ChangeOp(node, machine()->Load(type)); + } + return Changed(node); +} + +Reduction MemoryLowering::ReduceStoreToObject(Node* node, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode()); + ObjectAccess const& access = ObjectAccessOf(node->op()); + Node* object = node->InputAt(0); + Node* value = node->InputAt(2); + WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( + node, object, value, state, access.write_barrier_kind); + NodeProperties::ChangeOp( + node, machine()->Store(StoreRepresentation( + access.machine_type.representation(), write_barrier_kind))); + return Changed(node); +} + +Reduction MemoryLowering::ReduceStoreElement(Node* node, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kStoreElement, node->opcode()); + ElementAccess const& access = ElementAccessOf(node->op()); + Node* object = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + node->ReplaceInput(1, ComputeIndex(access, index)); + WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( + node, object, value, state, access.write_barrier_kind); + NodeProperties::ChangeOp( + node, machine()->Store(StoreRepresentation( + access.machine_type.representation(), write_barrier_kind))); + return Changed(node); +} + +Reduction MemoryLowering::ReduceStoreField(Node* node, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kStoreField, node->opcode()); + FieldAccess const& access = FieldAccessOf(node->op()); + Node* object = node->InputAt(0); + Node* value = node->InputAt(1); + WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( + node, object, value, state, access.write_barrier_kind); + Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); + node->InsertInput(graph()->zone(), 1, offset); + NodeProperties::ChangeOp( + node, machine()->Store(StoreRepresentation( + access.machine_type.representation(), write_barrier_kind))); + return Changed(node); +} + +Reduction MemoryLowering::ReduceStore(Node* node, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kStore, node->opcode()); + StoreRepresentation representation = StoreRepresentationOf(node->op()); + Node* object = node->InputAt(0); + Node* value = 
node->InputAt(2); + WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( + node, object, value, state, representation.write_barrier_kind()); + if (write_barrier_kind != representation.write_barrier_kind()) { + NodeProperties::ChangeOp( + node, machine()->Store(StoreRepresentation( + representation.representation(), write_barrier_kind))); + return Changed(node); + } + return NoChange(); +} + +Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) { + int const element_size_shift = + ElementSizeLog2Of(access.machine_type.representation()); + if (element_size_shift) { + index = __ WordShl(index, __ IntPtrConstant(element_size_shift)); + } + int const fixed_offset = access.header_size - access.tag(); + if (fixed_offset) { + index = __ IntAdd(index, __ IntPtrConstant(fixed_offset)); + } + return index; +} + +#undef __ + +namespace { + +bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) { + while (true) { + switch (value->opcode()) { + case IrOpcode::kBitcastWordToTaggedSigned: + case IrOpcode::kChangeTaggedSignedToCompressedSigned: + case IrOpcode::kChangeTaggedToCompressedSigned: + return false; + case IrOpcode::kChangeTaggedPointerToCompressedPointer: + case IrOpcode::kChangeTaggedToCompressed: + value = NodeProperties::GetValueInput(value, 0); + continue; + case IrOpcode::kHeapConstant: { + RootIndex root_index; + if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()), + &root_index) && + RootsTable::IsImmortalImmovable(root_index)) { + return false; + } + break; + } + default: + break; + } + return true; + } +} + +} // namespace + +Reduction MemoryLowering::ReduceAllocateRaw(Node* node) { + DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode()); + const AllocateParameters& allocation = AllocateParametersOf(node->op()); + return ReduceAllocateRaw(node, allocation.allocation_type(), + allocation.allow_large_objects(), nullptr); +} + +WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind( + Node* node, Node* object, Node* value, AllocationState const* state, + WriteBarrierKind write_barrier_kind) { + if (state && state->IsYoungGenerationAllocation() && + state->group()->Contains(object)) { + write_barrier_kind = kNoWriteBarrier; + } + if (!ValueNeedsWriteBarrier(value, isolate())) { + write_barrier_kind = kNoWriteBarrier; + } + if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) { + write_barrier_assert_failed_(node, object, function_debug_name_, zone()); + } + return write_barrier_kind; +} + +bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const { + // Safe loads do not need poisoning. 
+ if (load_sensitivity == LoadSensitivity::kSafe) return false; + + switch (poisoning_level_) { + case PoisoningMitigationLevel::kDontPoison: + return false; + case PoisoningMitigationLevel::kPoisonAll: + return true; + case PoisoningMitigationLevel::kPoisonCriticalOnly: + return load_sensitivity == LoadSensitivity::kCritical; + } + UNREACHABLE(); +} + +MemoryLowering::AllocationGroup::AllocationGroup(Node* node, + AllocationType allocation, + Zone* zone) + : node_ids_(zone), allocation_(allocation), size_(nullptr) { + node_ids_.insert(node->id()); +} + +MemoryLowering::AllocationGroup::AllocationGroup(Node* node, + AllocationType allocation, + Node* size, Zone* zone) + : node_ids_(zone), allocation_(allocation), size_(size) { + node_ids_.insert(node->id()); +} + +void MemoryLowering::AllocationGroup::Add(Node* node) { + node_ids_.insert(node->id()); +} + +bool MemoryLowering::AllocationGroup::Contains(Node* node) const { + // Additions should stay within the same allocated object, so it's safe to + // ignore them. + while (node_ids_.find(node->id()) == node_ids_.end()) { + switch (node->opcode()) { + case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastWordToTagged: + case IrOpcode::kInt32Add: + case IrOpcode::kInt64Add: + node = NodeProperties::GetValueInput(node, 0); + break; + default: + return false; + } + } + return true; +} + +MemoryLowering::AllocationState::AllocationState() + : group_(nullptr), + size_(std::numeric_limits<int>::max()), + top_(nullptr), + effect_(nullptr) {} + +MemoryLowering::AllocationState::AllocationState(AllocationGroup* group, + Node* effect) + : group_(group), + size_(std::numeric_limits<int>::max()), + top_(nullptr), + effect_(effect) {} + +MemoryLowering::AllocationState::AllocationState(AllocationGroup* group, + intptr_t size, Node* top, + Node* effect) + : group_(group), size_(size), top_(top), effect_(effect) {} + +bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const { + return group() && group()->IsYoungGenerationAllocation(); +} + +Graph* MemoryLowering::graph() const { return jsgraph()->graph(); } + +Isolate* MemoryLowering::isolate() const { return jsgraph()->isolate(); } + +CommonOperatorBuilder* MemoryLowering::common() const { + return jsgraph()->common(); +} + +MachineOperatorBuilder* MemoryLowering::machine() const { + return jsgraph()->machine(); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/compiler/memory-lowering.h b/chromium/v8/src/compiler/memory-lowering.h new file mode 100644 index 00000000000..a1f1fc18618 --- /dev/null +++ b/chromium/v8/src/compiler/memory-lowering.h @@ -0,0 +1,136 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_MEMORY_LOWERING_H_ +#define V8_COMPILER_MEMORY_LOWERING_H_ + +#include "src/compiler/graph-assembler.h" +#include "src/compiler/graph-reducer.h" + +namespace v8 { +namespace internal { +namespace compiler { + +// Forward declarations. +class CommonOperatorBuilder; +struct ElementAccess; +class Graph; +class JSGraph; +class MachineOperatorBuilder; +class Node; +class Operator; + +// Provides operations to lower all simplified memory access and allocation +// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine +// operators. 
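// (Editorial sketch, not part of the upstream header; the offsets below are
// made-up examples and kHeapObjectTag is V8's usual pointer tag of 1.)
// Conceptually, the reductions declared below rewrite simplified accesses
// into raw machine loads/stores, e.g.:
//
//   before:  x = LoadField[offset = 12, tagged base](obj)
//   after:   x = Load[rep](obj, IntPtrConstant(12 - kHeapObjectTag))
//
//   before:  v = LoadElement[header_size = 16, element shift = 3](arr, i)
//   after:   v = Load[rep](arr, IntAdd(WordShl(i, IntPtrConstant(3)),
//                                      IntPtrConstant(16 - kHeapObjectTag)))
//
// Field offsets drop the heap-object tag, and element indices are scaled and
// biased by the header size (see ReduceLoadField/ReduceLoadElement and
// ComputeIndex in memory-lowering.cc above).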
+class MemoryLowering final : public Reducer { + public: + enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding }; + class AllocationGroup; + + // An allocation state is propagated on the effect paths through the graph. + class AllocationState final : public ZoneObject { + public: + static AllocationState const* Empty(Zone* zone) { + return new (zone) AllocationState(); + } + static AllocationState const* Closed(AllocationGroup* group, Node* effect, + Zone* zone) { + return new (zone) AllocationState(group, effect); + } + static AllocationState const* Open(AllocationGroup* group, intptr_t size, + Node* top, Node* effect, Zone* zone) { + return new (zone) AllocationState(group, size, top, effect); + } + + bool IsYoungGenerationAllocation() const; + + AllocationGroup* group() const { return group_; } + Node* top() const { return top_; } + Node* effect() const { return effect_; } + intptr_t size() const { return size_; } + + private: + AllocationState(); + explicit AllocationState(AllocationGroup* group, Node* effect); + AllocationState(AllocationGroup* group, intptr_t size, Node* top, + Node* effect); + + AllocationGroup* const group_; + // The upper bound of the combined allocated object size on the current path + // (max int if allocation folding is impossible on this path). + intptr_t const size_; + Node* const top_; + Node* const effect_; + + DISALLOW_COPY_AND_ASSIGN(AllocationState); + }; + + using WriteBarrierAssertFailedCallback = std::function<void( + Node* node, Node* object, const char* name, Zone* temp_zone)>; + + MemoryLowering( + JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, + AllocationFolding allocation_folding = + AllocationFolding::kDontAllocationFolding, + WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*, + Zone*) { UNREACHABLE(); }, + const char* function_debug_name = nullptr); + ~MemoryLowering() = default; + + const char* reducer_name() const override { return "MemoryReducer"; } + + // Perform memory lowering reduction on the given Node. + Reduction Reduce(Node* node) override; + + // Specific reducers for each optype to enable keeping track of + // AllocationState by the MemoryOptimizer. 
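  // (Editorial note on the optional state argument, inferred from the default
  // values below and from memory-optimizer.cc/pipeline.cc in this patch.)
  // When the lowering runs stand-alone as a plain reducer (the mid-tier
  // MidTierMachineLoweringPhase), the nullptr default is used and write
  // barriers are only removed based on the stored value; when driven by
  // MemoryOptimizer, the tracked AllocationState additionally lets barriers
  // be elided for stores into freshly allocated objects, e.g.:
  //
  //   lowering.ReduceStoreField(node);         // stand-alone, conservative
  //   lowering.ReduceStoreField(node, state);  // as MemoryOptimizer does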
+ Reduction ReduceAllocateRaw(Node* node, AllocationType allocation_type, + AllowLargeObjects allow_large_objects, + AllocationState const** state); + Reduction ReduceLoadFromObject(Node* node); + Reduction ReduceLoadElement(Node* node); + Reduction ReduceLoadField(Node* node); + Reduction ReduceStoreToObject(Node* node, + AllocationState const* state = nullptr); + Reduction ReduceStoreElement(Node* node, + AllocationState const* state = nullptr); + Reduction ReduceStoreField(Node* node, + AllocationState const* state = nullptr); + Reduction ReduceStore(Node* node, AllocationState const* state = nullptr); + + private: + Reduction ReduceAllocateRaw(Node* node); + WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object, + Node* value, + AllocationState const* state, + WriteBarrierKind); + Node* ComputeIndex(ElementAccess const& access, Node* node); + bool NeedsPoisoning(LoadSensitivity load_sensitivity) const; + + Graph* graph() const; + Isolate* isolate() const; + Zone* zone() const { return zone_; } + JSGraph* jsgraph() const { return jsgraph_; } + CommonOperatorBuilder* common() const; + MachineOperatorBuilder* machine() const; + GraphAssembler* gasm() { return &graph_assembler_; } + + SetOncePointer<const Operator> allocate_operator_; + JSGraph* const jsgraph_; + Zone* zone_; + GraphAssembler graph_assembler_; + AllocationFolding allocation_folding_; + PoisoningMitigationLevel poisoning_level_; + WriteBarrierAssertFailedCallback write_barrier_assert_failed_; + const char* function_debug_name_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryLowering); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_MEMORY_LOWERING_H_ diff --git a/chromium/v8/src/compiler/memory-optimizer.cc b/chromium/v8/src/compiler/memory-optimizer.cc index 8684f2ce3cf..6527dfb2877 100644 --- a/chromium/v8/src/compiler/memory-optimizer.cc +++ b/chromium/v8/src/compiler/memory-optimizer.cc @@ -11,90 +11,12 @@ #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" #include "src/compiler/node.h" -#include "src/compiler/simplified-operator.h" #include "src/roots/roots-inl.h" namespace v8 { namespace internal { namespace compiler { -MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone, - PoisoningMitigationLevel poisoning_level, - AllocationFolding allocation_folding, - const char* function_debug_name, - TickCounter* tick_counter) - : jsgraph_(jsgraph), - empty_state_(AllocationState::Empty(zone)), - pending_(zone), - tokens_(zone), - zone_(zone), - graph_assembler_(jsgraph, nullptr, nullptr, zone), - poisoning_level_(poisoning_level), - allocation_folding_(allocation_folding), - function_debug_name_(function_debug_name), - tick_counter_(tick_counter) {} - -void MemoryOptimizer::Optimize() { - EnqueueUses(graph()->start(), empty_state()); - while (!tokens_.empty()) { - Token const token = tokens_.front(); - tokens_.pop(); - VisitNode(token.node, token.state); - } - DCHECK(pending_.empty()); - DCHECK(tokens_.empty()); -} - -MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node, - AllocationType allocation, - Zone* zone) - : node_ids_(zone), allocation_(allocation), size_(nullptr) { - node_ids_.insert(node->id()); -} - -MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node, - AllocationType allocation, - Node* size, Zone* zone) - : node_ids_(zone), allocation_(allocation), size_(size) { - node_ids_.insert(node->id()); -} - -void MemoryOptimizer::AllocationGroup::Add(Node* node) { - node_ids_.insert(node->id()); -} - -bool 
MemoryOptimizer::AllocationGroup::Contains(Node* node) const { - // Additions should stay within the same allocated object, so it's safe to - // ignore them. - while (node_ids_.find(node->id()) == node_ids_.end()) { - switch (node->opcode()) { - case IrOpcode::kBitcastTaggedToWord: - case IrOpcode::kBitcastWordToTagged: - case IrOpcode::kInt32Add: - case IrOpcode::kInt64Add: - node = NodeProperties::GetValueInput(node, 0); - break; - default: - return false; - } - } - return true; -} - -MemoryOptimizer::AllocationState::AllocationState() - : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {} - -MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group) - : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {} - -MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group, - intptr_t size, Node* top) - : group_(group), size_(size), top_(top) {} - -bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const { - return group() && group()->IsYoungGenerationAllocation(); -} - namespace { bool CanAllocate(const Node* node) { @@ -221,8 +143,67 @@ Node* EffectPhiForPhi(Node* phi) { return nullptr; } +void WriteBarrierAssertFailed(Node* node, Node* object, const char* name, + Zone* temp_zone) { + std::stringstream str; + str << "MemoryOptimizer could not remove write barrier for node #" + << node->id() << "\n"; + str << " Run mksnapshot with --csa-trap-on-node=" << name << "," + << node->id() << " to break in CSA code.\n"; + Node* object_position = object; + if (object_position->opcode() == IrOpcode::kPhi) { + object_position = EffectPhiForPhi(object_position); + } + Node* allocating_node = nullptr; + if (object_position && object_position->op()->EffectOutputCount() > 0) { + allocating_node = SearchAllocatingNode(node, object_position, temp_zone); + } + if (allocating_node) { + str << "\n There is a potentially allocating node in between:\n"; + str << " " << *allocating_node << "\n"; + str << " Run mksnapshot with --csa-trap-on-node=" << name << "," + << allocating_node->id() << " to break there.\n"; + if (allocating_node->opcode() == IrOpcode::kCall) { + str << " If this is a never-allocating runtime call, you can add an " + "exception to Runtime::MayAllocate.\n"; + } + } else { + str << "\n It seems the store happened to something different than a " + "direct " + "allocation:\n"; + str << " " << *object << "\n"; + str << " Run mksnapshot with --csa-trap-on-node=" << name << "," + << object->id() << " to break there.\n"; + } + FATAL("%s", str.str().c_str()); +} + } // namespace +MemoryOptimizer::MemoryOptimizer( + JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, + MemoryLowering::AllocationFolding allocation_folding, + const char* function_debug_name, TickCounter* tick_counter) + : memory_lowering_(jsgraph, zone, poisoning_level, allocation_folding, + WriteBarrierAssertFailed, function_debug_name), + jsgraph_(jsgraph), + empty_state_(AllocationState::Empty(zone)), + pending_(zone), + tokens_(zone), + zone_(zone), + tick_counter_(tick_counter) {} + +void MemoryOptimizer::Optimize() { + EnqueueUses(graph()->start(), empty_state()); + while (!tokens_.empty()) { + Token const token = tokens_.front(); + tokens_.pop(); + VisitNode(token.node, token.state); + } + DCHECK(pending_.empty()); + DCHECK(tokens_.empty()); +} + void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) { tick_counter_->DoTick(); DCHECK(!node->IsDead()); @@ -259,8 +240,6 @@ void 
MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) { DCHECK_EQ(0, node->op()->EffectOutputCount()); } -#define __ gasm()-> - bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node, const Edge edge) { if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) { @@ -293,13 +272,6 @@ bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node, void MemoryOptimizer::VisitAllocateRaw(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode()); - Node* value; - Node* size = node->InputAt(0); - Node* effect = node->InputAt(1); - Node* control = node->InputAt(2); - - gasm()->Reset(effect, control); - const AllocateParameters& allocation = AllocateParametersOf(node->op()); AllocationType allocation_type = allocation.allocation_type(); @@ -310,7 +282,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, if (allocation_type == AllocationType::kOld) { for (Edge const edge : node->use_edges()) { Node* const user = edge.from(); - if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) { Node* child = user->InputAt(1); // In Pointer Compression we might have a Compress node between an @@ -339,299 +310,62 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, } } - Node* allocate_builtin; - if (allocation_type == AllocationType::kYoung) { - if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) { - allocate_builtin = __ AllocateInYoungGenerationStubConstant(); - } else { - allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant(); - } - } else { - if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) { - allocate_builtin = __ AllocateInOldGenerationStubConstant(); - } else { - allocate_builtin = __ AllocateRegularInOldGenerationStubConstant(); - } - } - - // Determine the top/limit addresses. - Node* top_address = __ ExternalConstant( - allocation_type == AllocationType::kYoung - ? ExternalReference::new_space_allocation_top_address(isolate()) - : ExternalReference::old_space_allocation_top_address(isolate())); - Node* limit_address = __ ExternalConstant( - allocation_type == AllocationType::kYoung - ? ExternalReference::new_space_allocation_limit_address(isolate()) - : ExternalReference::old_space_allocation_limit_address(isolate())); - - // Check if we can fold this allocation into a previous allocation represented - // by the incoming {state}. - IntPtrMatcher m(size); - if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) { - intptr_t const object_size = m.Value(); - if (allocation_folding_ == AllocationFolding::kDoAllocationFolding && - state->size() <= kMaxRegularHeapObjectSize - object_size && - state->group()->allocation() == allocation_type) { - // We can fold this Allocate {node} into the allocation {group} - // represented by the given {state}. Compute the upper bound for - // the new {state}. - intptr_t const state_size = state->size() + object_size; - - // Update the reservation check to the actual maximum upper bound. - AllocationGroup* const group = state->group(); - if (machine()->Is64()) { - if (OpParameter<int64_t>(group->size()->op()) < state_size) { - NodeProperties::ChangeOp(group->size(), - common()->Int64Constant(state_size)); - } - } else { - if (OpParameter<int32_t>(group->size()->op()) < state_size) { - NodeProperties::ChangeOp( - group->size(), - common()->Int32Constant(static_cast<int32_t>(state_size))); - } - } - - // Update the allocation top with the new object allocation. 
- // TODO(bmeurer): Defer writing back top as much as possible. - Node* top = __ IntAdd(state->top(), size); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - top_address, __ IntPtrConstant(0), top); - - // Compute the effective inner allocated address. - value = __ BitcastWordToTagged( - __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag))); - - // Extend the allocation {group}. - group->Add(value); - state = AllocationState::Open(group, state_size, top, zone()); - } else { - auto call_runtime = __ MakeDeferredLabel(); - auto done = __ MakeLabel(MachineType::PointerRepresentation()); - - // Setup a mutable reservation size node; will be patched as we fold - // additional allocations into this new group. - Node* size = __ UniqueIntPtrConstant(object_size); - - // Load allocation top and limit. - Node* top = - __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0)); - Node* limit = - __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0)); - - // Check if we need to collect garbage before we can start bump pointer - // allocation (always done for folded allocations). - Node* check = __ UintLessThan(__ IntAdd(top, size), limit); - - __ GotoIfNot(check, &call_runtime); - __ Goto(&done, top); - - __ Bind(&call_runtime); - { - if (!allocate_operator_.is_set()) { - auto descriptor = AllocateDescriptor{}; - auto call_descriptor = Linkage::GetStubCallDescriptor( - graph()->zone(), descriptor, descriptor.GetStackParameterCount(), - CallDescriptor::kCanUseRoots, Operator::kNoThrow); - allocate_operator_.set(common()->Call(call_descriptor)); - } - Node* vfalse = __ BitcastTaggedToWord( - __ Call(allocate_operator_.get(), allocate_builtin, size)); - vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag)); - __ Goto(&done, vfalse); - } - - __ Bind(&done); - - // Compute the new top and write it back. - top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size)); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - top_address, __ IntPtrConstant(0), top); - - // Compute the initial object address. - value = __ BitcastWordToTagged( - __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag))); - - // Start a new allocation group. - AllocationGroup* group = - new (zone()) AllocationGroup(value, allocation_type, size, zone()); - state = AllocationState::Open(group, object_size, top, zone()); - } - } else { - auto call_runtime = __ MakeDeferredLabel(); - auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); - - // Load allocation top and limit. - Node* top = - __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0)); - Node* limit = - __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0)); - - // Compute the new top. - Node* new_top = __ IntAdd(top, size); - - // Check if we can do bump pointer allocation here. 
- Node* check = __ UintLessThan(new_top, limit); - __ GotoIfNot(check, &call_runtime); - if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) { - __ GotoIfNot( - __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)), - &call_runtime); - } - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - top_address, __ IntPtrConstant(0), new_top); - __ Goto(&done, __ BitcastWordToTagged( - __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag)))); - - __ Bind(&call_runtime); - if (!allocate_operator_.is_set()) { - auto descriptor = AllocateDescriptor{}; - auto call_descriptor = Linkage::GetStubCallDescriptor( - graph()->zone(), descriptor, descriptor.GetStackParameterCount(), - CallDescriptor::kCanUseRoots, Operator::kNoThrow); - allocate_operator_.set(common()->Call(call_descriptor)); - } - __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size)); - - __ Bind(&done); - value = done.PhiAt(0); - - // Create an unfoldable allocation group. - AllocationGroup* group = - new (zone()) AllocationGroup(value, allocation_type, zone()); - state = AllocationState::Closed(group, zone()); - } - - effect = __ ExtractCurrentEffect(); - control = __ ExtractCurrentControl(); - - // Replace all effect uses of {node} with the {effect}, enqueue the - // effect uses for further processing, and replace all value uses of - // {node} with the {value}. - for (Edge edge : node->use_edges()) { - if (NodeProperties::IsEffectEdge(edge)) { - EnqueueUse(edge.from(), edge.index(), state); - edge.UpdateTo(effect); - } else if (NodeProperties::IsValueEdge(edge)) { - edge.UpdateTo(value); - } else { - DCHECK(NodeProperties::IsControlEdge(edge)); - edge.UpdateTo(control); - } - } - - // Kill the {node} to make sure we don't leave dangling dead uses. - node->Kill(); + memory_lowering()->ReduceAllocateRaw( + node, allocation_type, allocation.allow_large_objects(), &state); + EnqueueUses(state->effect(), state); } void MemoryOptimizer::VisitLoadFromObject(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode()); - ObjectAccess const& access = ObjectAccessOf(node->op()); - NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); + memory_lowering()->ReduceLoadFromObject(node); EnqueueUses(node, state); } void MemoryOptimizer::VisitStoreToObject(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode()); - ObjectAccess const& access = ObjectAccessOf(node->op()); - Node* object = node->InputAt(0); - Node* value = node->InputAt(2); - WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( - node, object, value, state, access.write_barrier_kind); - NodeProperties::ChangeOp( - node, machine()->Store(StoreRepresentation( - access.machine_type.representation(), write_barrier_kind))); - EnqueueUses(node, state); -} - -#undef __ - -void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) { - DCHECK_EQ(IrOpcode::kCall, node->opcode()); - // If the call can allocate, we start with a fresh state. 
- if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) { - state = empty_state(); - } + memory_lowering()->ReduceStoreToObject(node, state); EnqueueUses(node, state); } void MemoryOptimizer::VisitLoadElement(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kLoadElement, node->opcode()); - ElementAccess const& access = ElementAccessOf(node->op()); - Node* index = node->InputAt(1); - node->ReplaceInput(1, ComputeIndex(access, index)); - MachineType type = access.machine_type; - if (NeedsPoisoning(access.load_sensitivity)) { - NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); - } else { - NodeProperties::ChangeOp(node, machine()->Load(type)); - } + memory_lowering()->ReduceLoadElement(node); EnqueueUses(node, state); } void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kLoadField, node->opcode()); - FieldAccess const& access = FieldAccessOf(node->op()); - Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); - node->InsertInput(graph()->zone(), 1, offset); - MachineType type = access.machine_type; - if (NeedsPoisoning(access.load_sensitivity)) { - NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); - } else { - NodeProperties::ChangeOp(node, machine()->Load(type)); - } + memory_lowering()->ReduceLoadField(node); EnqueueUses(node, state); } void MemoryOptimizer::VisitStoreElement(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kStoreElement, node->opcode()); - ElementAccess const& access = ElementAccessOf(node->op()); - Node* object = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( - node, object, value, state, access.write_barrier_kind); - node->ReplaceInput(1, ComputeIndex(access, index)); - NodeProperties::ChangeOp( - node, machine()->Store(StoreRepresentation( - access.machine_type.representation(), write_barrier_kind))); + memory_lowering()->ReduceStoreElement(node, state); EnqueueUses(node, state); } void MemoryOptimizer::VisitStoreField(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kStoreField, node->opcode()); - FieldAccess const& access = FieldAccessOf(node->op()); - Node* object = node->InputAt(0); - Node* value = node->InputAt(1); - WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( - node, object, value, state, access.write_barrier_kind); - Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); - node->InsertInput(graph()->zone(), 1, offset); - NodeProperties::ChangeOp( - node, machine()->Store(StoreRepresentation( - access.machine_type.representation(), write_barrier_kind))); + memory_lowering()->ReduceStoreField(node, state); EnqueueUses(node, state); } - void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kStore, node->opcode()); - StoreRepresentation representation = StoreRepresentationOf(node->op()); - Node* object = node->InputAt(0); - Node* value = node->InputAt(2); - WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( - node, object, value, state, representation.write_barrier_kind()); - if (write_barrier_kind != representation.write_barrier_kind()) { - NodeProperties::ChangeOp( - node, machine()->Store(StoreRepresentation( - representation.representation(), write_barrier_kind))); + memory_lowering()->ReduceStore(node, state); + EnqueueUses(node, state); +} + +void MemoryOptimizer::VisitCall(Node* node, AllocationState const* 
state) { + DCHECK_EQ(IrOpcode::kCall, node->opcode()); + // If the call can allocate, we start with a fresh state. + if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) { + state = empty_state(); } EnqueueUses(node, state); } @@ -641,109 +375,12 @@ void MemoryOptimizer::VisitOtherEffect(Node* node, EnqueueUses(node, state); } -Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) { - int const element_size_shift = - ElementSizeLog2Of(access.machine_type.representation()); - if (element_size_shift) { - index = graph()->NewNode(machine()->WordShl(), index, - jsgraph()->IntPtrConstant(element_size_shift)); - } - int const fixed_offset = access.header_size - access.tag(); - if (fixed_offset) { - index = graph()->NewNode(machine()->IntAdd(), index, - jsgraph()->IntPtrConstant(fixed_offset)); - } - return index; -} - -namespace { - -bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) { - while (true) { - switch (value->opcode()) { - case IrOpcode::kBitcastWordToTaggedSigned: - case IrOpcode::kChangeTaggedSignedToCompressedSigned: - case IrOpcode::kChangeTaggedToCompressedSigned: - return false; - case IrOpcode::kChangeTaggedPointerToCompressedPointer: - case IrOpcode::kChangeTaggedToCompressed: - value = NodeProperties::GetValueInput(value, 0); - continue; - case IrOpcode::kHeapConstant: { - RootIndex root_index; - if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()), - &root_index) && - RootsTable::IsImmortalImmovable(root_index)) { - return false; - } - break; - } - default: - break; - } - return true; - } -} - -void WriteBarrierAssertFailed(Node* node, Node* object, const char* name, - Zone* temp_zone) { - std::stringstream str; - str << "MemoryOptimizer could not remove write barrier for node #" - << node->id() << "\n"; - str << " Run mksnapshot with --csa-trap-on-node=" << name << "," - << node->id() << " to break in CSA code.\n"; - Node* object_position = object; - if (object_position->opcode() == IrOpcode::kPhi) { - object_position = EffectPhiForPhi(object_position); - } - Node* allocating_node = nullptr; - if (object_position && object_position->op()->EffectOutputCount() > 0) { - allocating_node = SearchAllocatingNode(node, object_position, temp_zone); - } - if (allocating_node) { - str << "\n There is a potentially allocating node in between:\n"; - str << " " << *allocating_node << "\n"; - str << " Run mksnapshot with --csa-trap-on-node=" << name << "," - << allocating_node->id() << " to break there.\n"; - if (allocating_node->opcode() == IrOpcode::kCall) { - str << " If this is a never-allocating runtime call, you can add an " - "exception to Runtime::MayAllocate.\n"; - } - } else { - str << "\n It seems the store happened to something different than a " - "direct " - "allocation:\n"; - str << " " << *object << "\n"; - str << " Run mksnapshot with --csa-trap-on-node=" << name << "," - << object->id() << " to break there.\n"; - } - FATAL("%s", str.str().c_str()); -} - -} // namespace - -WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind( - Node* node, Node* object, Node* value, AllocationState const* state, - WriteBarrierKind write_barrier_kind) { - if (state->IsYoungGenerationAllocation() && - state->group()->Contains(object)) { - write_barrier_kind = kNoWriteBarrier; - } - if (!ValueNeedsWriteBarrier(value, isolate())) { - write_barrier_kind = kNoWriteBarrier; - } - if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) { - WriteBarrierAssertFailed(node, object, function_debug_name_, zone()); - } 
- return write_barrier_kind; -} - MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates( AllocationStates const& states) { // Check if all states are the same; or at least if all allocation // states belong to the same allocation group. AllocationState const* state = states.front(); - AllocationGroup* group = state->group(); + MemoryLowering::AllocationGroup* group = state->group(); for (size_t i = 1; i < states.size(); ++i) { if (states[i] != state) state = nullptr; if (states[i]->group() != group) group = nullptr; @@ -755,7 +392,7 @@ MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates( // TODO(bmeurer): We could potentially just create a Phi here to merge // the various tops; but we need to pay special attention not to create // an unschedulable graph. - state = AllocationState::Closed(group, zone()); + state = AllocationState::Closed(group, nullptr, zone()); } else { // The states are from different allocation groups. state = empty_state(); @@ -830,31 +467,6 @@ void MemoryOptimizer::EnqueueUse(Node* node, int index, Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); } -Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); } - -CommonOperatorBuilder* MemoryOptimizer::common() const { - return jsgraph()->common(); -} - -MachineOperatorBuilder* MemoryOptimizer::machine() const { - return jsgraph()->machine(); -} - -bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const { - // Safe loads do not need poisoning. - if (load_sensitivity == LoadSensitivity::kSafe) return false; - - switch (poisoning_level_) { - case PoisoningMitigationLevel::kDontPoison: - return false; - case PoisoningMitigationLevel::kPoisonAll: - return true; - case PoisoningMitigationLevel::kPoisonCriticalOnly: - return load_sensitivity == LoadSensitivity::kCritical; - } - UNREACHABLE(); -} - } // namespace compiler } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/compiler/memory-optimizer.h b/chromium/v8/src/compiler/memory-optimizer.h index a663bf07ed6..0e0fc5684c0 100644 --- a/chromium/v8/src/compiler/memory-optimizer.h +++ b/chromium/v8/src/compiler/memory-optimizer.h @@ -5,7 +5,7 @@ #ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_ #define V8_COMPILER_MEMORY_OPTIMIZER_H_ -#include "src/compiler/graph-assembler.h" +#include "src/compiler/memory-lowering.h" #include "src/zone/zone-containers.h" namespace v8 { @@ -15,95 +15,29 @@ class TickCounter; namespace compiler { -// Forward declarations. -class CommonOperatorBuilder; -struct ElementAccess; -class Graph; class JSGraph; -class MachineOperatorBuilder; -class Node; -class Operator; +class Graph; // NodeIds are identifying numbers for nodes that can be used to index auxiliary // out-of-line data associated with each node. using NodeId = uint32_t; -// Lowers all simplified memory access and allocation related nodes (i.e. -// Allocate, LoadField, StoreField and friends) to machine operators. // Performs allocation folding and store write barrier elimination -// implicitly. +// implicitly, while lowering all simplified memory access and allocation +// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine +// operators. 
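// (Editorial illustration of the folding described above, using made-up
// sizes.) Two young-generation AllocateRaw nodes of 16 and 32 bytes on the
// same effect chain can share a single bump-pointer reservation:
//
//   size  = UniqueIntPtrConstant(16)          // later patched to 16 + 32 = 48
//   top   = Load(top_address)
//   check = UintLessThan(IntAdd(top, size), Load(limit_address))
//   obj1  = BitcastWordToTagged(IntAdd(top, kHeapObjectTag))
//   obj2  = BitcastWordToTagged(IntAdd(top + 16, kHeapObjectTag))
//
// When the second allocation is folded in, the reservation constant is
// rewritten from 16 to 48 and both objects join the same AllocationGroup, so
// initializing stores into either of them can skip the write barrier.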
class MemoryOptimizer final { public: - enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding }; - MemoryOptimizer(JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, - AllocationFolding allocation_folding, + MemoryLowering::AllocationFolding allocation_folding, const char* function_debug_name, TickCounter* tick_counter); ~MemoryOptimizer() = default; void Optimize(); private: - // An allocation group represents a set of allocations that have been folded - // together. - class AllocationGroup final : public ZoneObject { - public: - AllocationGroup(Node* node, AllocationType allocation, Zone* zone); - AllocationGroup(Node* node, AllocationType allocation, Node* size, - Zone* zone); - ~AllocationGroup() = default; - - void Add(Node* object); - bool Contains(Node* object) const; - bool IsYoungGenerationAllocation() const { - return allocation() == AllocationType::kYoung; - } - - AllocationType allocation() const { return allocation_; } - Node* size() const { return size_; } - - private: - ZoneSet<NodeId> node_ids_; - AllocationType const allocation_; - Node* const size_; - - DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup); - }; - - // An allocation state is propagated on the effect paths through the graph. - class AllocationState final : public ZoneObject { - public: - static AllocationState const* Empty(Zone* zone) { - return new (zone) AllocationState(); - } - static AllocationState const* Closed(AllocationGroup* group, Zone* zone) { - return new (zone) AllocationState(group); - } - static AllocationState const* Open(AllocationGroup* group, intptr_t size, - Node* top, Zone* zone) { - return new (zone) AllocationState(group, size, top); - } - - bool IsYoungGenerationAllocation() const; - - AllocationGroup* group() const { return group_; } - Node* top() const { return top_; } - intptr_t size() const { return size_; } - - private: - AllocationState(); - explicit AllocationState(AllocationGroup* group); - AllocationState(AllocationGroup* group, intptr_t size, Node* top); - - AllocationGroup* const group_; - // The upper bound of the combined allocated object size on the current path - // (max int if allocation folding is impossible on this path). - intptr_t const size_; - Node* const top_; - - DISALLOW_COPY_AND_ASSIGN(AllocationState); - }; + using AllocationState = MemoryLowering::AllocationState; // An array of allocation states used to collect states on merges. using AllocationStates = ZoneVector<AllocationState const*>; @@ -127,44 +61,29 @@ class MemoryOptimizer final { void VisitStore(Node*, AllocationState const*); void VisitOtherEffect(Node*, AllocationState const*); - Node* ComputeIndex(ElementAccess const&, Node*); - WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object, - Node* value, - AllocationState const* state, - WriteBarrierKind); - AllocationState const* MergeStates(AllocationStates const& states); void EnqueueMerge(Node*, int, AllocationState const*); void EnqueueUses(Node*, AllocationState const*); void EnqueueUse(Node*, int, AllocationState const*); - bool NeedsPoisoning(LoadSensitivity load_sensitivity) const; - // Returns true if the AllocationType of the current AllocateRaw node that we // are visiting needs to be updated to kOld, due to propagation of tenuring // from outer to inner allocations. 
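  // (Editorial example.) If an outer AllocateRaw is pretenured to
  // AllocationType::kOld and a StoreField initializes one of its fields with
  // the result of an inner AllocateRaw that is still AllocationType::kYoung,
  // VisitAllocateRaw (memory-optimizer.cc above) walks the StoreField users
  // of the old allocation and retags the inner allocation as kOld, keeping
  // parent and child in the same generation.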
bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge); AllocationState const* empty_state() const { return empty_state_; } + MemoryLowering* memory_lowering() { return &memory_lowering_; } Graph* graph() const; - Isolate* isolate() const; JSGraph* jsgraph() const { return jsgraph_; } - CommonOperatorBuilder* common() const; - MachineOperatorBuilder* machine() const; Zone* zone() const { return zone_; } - GraphAssembler* gasm() { return &graph_assembler_; } - SetOncePointer<const Operator> allocate_operator_; - JSGraph* const jsgraph_; + MemoryLowering memory_lowering_; + JSGraph* jsgraph_; AllocationState const* const empty_state_; ZoneMap<NodeId, AllocationStates> pending_; ZoneQueue<Token> tokens_; Zone* const zone_; - GraphAssembler graph_assembler_; - PoisoningMitigationLevel poisoning_level_; - AllocationFolding allocation_folding_; - const char* function_debug_name_; TickCounter* const tick_counter_; DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer); diff --git a/chromium/v8/src/compiler/node-matchers.h b/chromium/v8/src/compiler/node-matchers.h index 20698f4cd6d..82bc1795193 100644 --- a/chromium/v8/src/compiler/node-matchers.h +++ b/chromium/v8/src/compiler/node-matchers.h @@ -187,10 +187,11 @@ using Float64Matcher = FloatMatcher<double, IrOpcode::kFloat64Constant>; using NumberMatcher = FloatMatcher<double, IrOpcode::kNumberConstant>; // A pattern matcher for heap object constants. -struct HeapObjectMatcher final - : public ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant> { - explicit HeapObjectMatcher(Node* node) - : ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>(node) {} +template <IrOpcode::Value kHeapConstantOpcode> +struct HeapObjectMatcherImpl final + : public ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode> { + explicit HeapObjectMatcherImpl(Node* node) + : ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode>(node) {} bool Is(Handle<HeapObject> const& value) const { return this->HasValue() && this->Value().address() == value.address(); @@ -201,6 +202,9 @@ struct HeapObjectMatcher final } }; +using HeapObjectMatcher = HeapObjectMatcherImpl<IrOpcode::kHeapConstant>; +using CompressedHeapObjectMatcher = + HeapObjectMatcherImpl<IrOpcode::kCompressedHeapConstant>; // A pattern matcher for external reference constants. struct ExternalReferenceMatcher final @@ -295,6 +299,8 @@ using Float64BinopMatcher = BinopMatcher<Float64Matcher, Float64Matcher>; using NumberBinopMatcher = BinopMatcher<NumberMatcher, NumberMatcher>; using HeapObjectBinopMatcher = BinopMatcher<HeapObjectMatcher, HeapObjectMatcher>; +using CompressedHeapObjectBinopMatcher = + BinopMatcher<CompressedHeapObjectMatcher, CompressedHeapObjectMatcher>; template <class BinopMatcher, IrOpcode::Value kMulOpcode, IrOpcode::Value kShiftOpcode> diff --git a/chromium/v8/src/compiler/node.h b/chromium/v8/src/compiler/node.h index 76ea4bb1a9e..b4ff5f7185f 100644 --- a/chromium/v8/src/compiler/node.h +++ b/chromium/v8/src/compiler/node.h @@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE Node final { Uses uses() { return Uses(this); } - // Returns true if {owner} is the user of {this} node. + // Returns true if {owner} is the only user of {this} node. 
bool OwnedBy(Node* owner) const { return first_use_ && first_use_->from() == owner && !first_use_->next; } diff --git a/chromium/v8/src/compiler/opcodes.h b/chromium/v8/src/compiler/opcodes.h index fe45d9276ac..76c6bfec2fe 100644 --- a/chromium/v8/src/compiler/opcodes.h +++ b/chromium/v8/src/compiler/opcodes.h @@ -156,7 +156,8 @@ V(JSCreateObject) \ V(JSCreatePromise) \ V(JSCreateStringIterator) \ - V(JSCreateTypedArray) + V(JSCreateTypedArray) \ + V(JSGetTemplateObject) #define JS_OBJECT_OP_LIST(V) \ JS_CREATE_OP_LIST(V) \ @@ -425,11 +426,14 @@ V(LoadFieldByIndex) \ V(LoadField) \ V(LoadElement) \ + V(LoadMessage) \ V(LoadTypedElement) \ V(LoadFromObject) \ V(LoadDataViewElement) \ + V(LoadStackArgument) \ V(StoreField) \ V(StoreElement) \ + V(StoreMessage) \ V(StoreTypedElement) \ V(StoreToObject) \ V(StoreDataViewElement) \ @@ -669,9 +673,10 @@ V(Word64Ctz) \ V(Word64ReverseBits) \ V(Word64ReverseBytes) \ + V(Simd128ReverseBytes) \ V(Int64AbsWithOverflow) \ V(BitcastTaggedToWord) \ - V(BitcastTaggedSignedToWord) \ + V(BitcastTaggedToWordForTagAndSmiBits) \ V(BitcastWordToTagged) \ V(BitcastWordToTaggedSigned) \ V(BitcastWord32ToCompressedSigned) \ @@ -749,6 +754,7 @@ V(F64x2ReplaceLane) \ V(F64x2Abs) \ V(F64x2Neg) \ + V(F64x2Sqrt) \ V(F64x2Add) \ V(F64x2Sub) \ V(F64x2Mul) \ @@ -759,6 +765,8 @@ V(F64x2Ne) \ V(F64x2Lt) \ V(F64x2Le) \ + V(F64x2Qfma) \ + V(F64x2Qfms) \ V(F32x4Splat) \ V(F32x4ExtractLane) \ V(F32x4ReplaceLane) \ @@ -766,6 +774,7 @@ V(F32x4UConvertI32x4) \ V(F32x4Abs) \ V(F32x4Neg) \ + V(F32x4Sqrt) \ V(F32x4RecipApprox) \ V(F32x4RecipSqrtApprox) \ V(F32x4Add) \ @@ -781,6 +790,8 @@ V(F32x4Le) \ V(F32x4Gt) \ V(F32x4Ge) \ + V(F32x4Qfma) \ + V(F32x4Qfms) \ V(I64x2Splat) \ V(I64x2ExtractLane) \ V(I64x2ReplaceLane) \ @@ -905,6 +916,7 @@ V(S128Or) \ V(S128Xor) \ V(S128Select) \ + V(S8x16Swizzle) \ V(S8x16Shuffle) \ V(S1x2AnyTrue) \ V(S1x2AllTrue) \ diff --git a/chromium/v8/src/compiler/operator-properties.cc b/chromium/v8/src/compiler/operator-properties.cc index 1fcc12291d9..731a6c8496c 100644 --- a/chromium/v8/src/compiler/operator-properties.cc +++ b/chromium/v8/src/compiler/operator-properties.cc @@ -41,6 +41,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) { case IrOpcode::kJSCreateEmptyLiteralObject: case IrOpcode::kJSCreateArrayFromIterable: case IrOpcode::kJSCreateLiteralRegExp: + case IrOpcode::kJSGetTemplateObject: case IrOpcode::kJSForInEnumerate: case IrOpcode::kJSForInNext: case IrOpcode::kJSForInPrepare: diff --git a/chromium/v8/src/compiler/pipeline.cc b/chromium/v8/src/compiler/pipeline.cc index 8b2f4247898..b9648d91955 100644 --- a/chromium/v8/src/compiler/pipeline.cc +++ b/chromium/v8/src/compiler/pipeline.cc @@ -9,7 +9,6 @@ #include <memory> #include <sstream> -#include "src/base/adapters.h" #include "src/base/optional.h" #include "src/base/platform/elapsed-timer.h" #include "src/codegen/assembler-inl.h" @@ -97,6 +96,35 @@ namespace v8 { namespace internal { namespace compiler { +static constexpr char kCodegenZoneName[] = "codegen-zone"; +static constexpr char kGraphZoneName[] = "graph-zone"; +static constexpr char kInstructionZoneName[] = "instruction-zone"; +static constexpr char kMachineGraphVerifierZoneName[] = + "machine-graph-verifier-zone"; +static constexpr char kPipelineCompilationJobZoneName[] = + "pipeline-compilation-job-zone"; +static constexpr char kRegisterAllocationZoneName[] = + "register-allocation-zone"; +static constexpr char kRegisterAllocatorVerifierZoneName[] = + "register-allocator-verifier-zone"; +namespace { + 
+Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) { + Context current = closure->context(); + size_t distance = 0; + while (!current.IsNativeContext()) { + if (current.IsModuleContext()) { + return Just( + OuterContext(handle(current, current.GetIsolate()), distance)); + } + current = current.previous(); + distance++; + } + return Nothing<OuterContext>(); +} + +} // anonymous namespace + class PipelineData { public: // For main entry point. @@ -113,15 +141,16 @@ class PipelineData { roots_relative_addressing_enabled_( !isolate->serializer_enabled() && !isolate->IsGeneratingEmbeddedBuiltins()), - graph_zone_scope_(zone_stats_, ZONE_NAME), + graph_zone_scope_(zone_stats_, kGraphZoneName), graph_zone_(graph_zone_scope_.zone()), - instruction_zone_scope_(zone_stats_, ZONE_NAME), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), instruction_zone_(instruction_zone_scope_.zone()), - codegen_zone_scope_(zone_stats_, ZONE_NAME), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), codegen_zone_(codegen_zone_scope_.zone()), broker_(new JSHeapBroker(isolate_, info_->zone(), info_->trace_heap_broker_enabled())), - register_allocation_zone_scope_(zone_stats_, ZONE_NAME), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), register_allocation_zone_(register_allocation_zone_scope_.zone()), assembler_options_(AssemblerOptions::Default(isolate)) { PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData"); @@ -158,7 +187,7 @@ class PipelineData { may_have_unverifiable_graph_(false), zone_stats_(zone_stats), pipeline_statistics_(pipeline_statistics), - graph_zone_scope_(zone_stats_, ZONE_NAME), + graph_zone_scope_(zone_stats_, kGraphZoneName), graph_zone_(graph_zone_scope_.zone()), graph_(mcgraph->graph()), source_positions_(source_positions), @@ -166,11 +195,12 @@ class PipelineData { machine_(mcgraph->machine()), common_(mcgraph->common()), mcgraph_(mcgraph), - instruction_zone_scope_(zone_stats_, ZONE_NAME), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), instruction_zone_(instruction_zone_scope_.zone()), - codegen_zone_scope_(zone_stats_, ZONE_NAME), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), codegen_zone_(codegen_zone_scope_.zone()), - register_allocation_zone_scope_(zone_stats_, ZONE_NAME), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), register_allocation_zone_(register_allocation_zone_scope_.zone()), assembler_options_(assembler_options) {} @@ -185,17 +215,18 @@ class PipelineData { info_(info), debug_name_(info_->GetDebugName()), zone_stats_(zone_stats), - graph_zone_scope_(zone_stats_, ZONE_NAME), + graph_zone_scope_(zone_stats_, kGraphZoneName), graph_zone_(graph_zone_scope_.zone()), graph_(graph), source_positions_(source_positions), node_origins_(node_origins), schedule_(schedule), - instruction_zone_scope_(zone_stats_, ZONE_NAME), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), instruction_zone_(instruction_zone_scope_.zone()), - codegen_zone_scope_(zone_stats_, ZONE_NAME), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), codegen_zone_(codegen_zone_scope_.zone()), - register_allocation_zone_scope_(zone_stats_, ZONE_NAME), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), register_allocation_zone_(register_allocation_zone_scope_.zone()), jump_optimization_info_(jump_opt), assembler_options_(assembler_options) { @@ -218,13 +249,14 @@ class PipelineData { info_(info), debug_name_(info_->GetDebugName()), zone_stats_(zone_stats), - 
graph_zone_scope_(zone_stats_, ZONE_NAME), - instruction_zone_scope_(zone_stats_, ZONE_NAME), + graph_zone_scope_(zone_stats_, kGraphZoneName), + instruction_zone_scope_(zone_stats_, kInstructionZoneName), instruction_zone_(sequence->zone()), sequence_(sequence), - codegen_zone_scope_(zone_stats_, ZONE_NAME), + codegen_zone_scope_(zone_stats_, kCodegenZoneName), codegen_zone_(codegen_zone_scope_.zone()), - register_allocation_zone_scope_(zone_stats_, ZONE_NAME), + register_allocation_zone_scope_(zone_stats_, + kRegisterAllocationZoneName), register_allocation_zone_(register_allocation_zone_scope_.zone()), assembler_options_(AssemblerOptions::Default(isolate)) {} @@ -323,6 +355,20 @@ class PipelineData { return assembler_options_; } + void ChooseSpecializationContext() { + if (info()->is_function_context_specializing()) { + DCHECK(info()->has_context()); + specialization_context_ = + Just(OuterContext(handle(info()->context(), isolate()), 0)); + } else { + specialization_context_ = GetModuleContext(info()->closure()); + } + } + + Maybe<OuterContext> specialization_context() const { + return specialization_context_; + } + size_t* address_of_max_unoptimized_frame_height() { return &max_unoptimized_frame_height_; } @@ -531,6 +577,7 @@ class PipelineData { JumpOptimizationInfo* jump_optimization_info_ = nullptr; AssemblerOptions assembler_options_; + Maybe<OuterContext> specialization_context_ = Nothing<OuterContext>(); // The maximal combined height of all inlined frames in their unoptimized // state. Calculated during instruction selection, applied during code @@ -548,12 +595,19 @@ class PipelineImpl final { template <typename Phase, typename... Args> void Run(Args&&... args); - // Step A. Run the graph creation and initial optimization passes. + // Step A.1. Serialize the data needed for the compilation front-end. + void Serialize(); + + // Step A.2. Run the graph creation and initial optimization passes. bool CreateGraph(); - // B. Run the concurrent optimization passes. + // Step B. Run the concurrent optimization passes. bool OptimizeGraph(Linkage* linkage); + // Alternative step B. Run minimal concurrent optimization passes for + // mid-tier. + bool OptimizeGraphForMidTier(Linkage* linkage); + // Substep B.1. Produce a scheduled graph. void ComputeScheduledGraph(); @@ -642,8 +696,6 @@ void PrintInlinedFunctionInfo( // compilation. For inlined functions print source position of their inlining. 
void PrintParticipatingSource(OptimizedCompilationInfo* info, Isolate* isolate) { - AllowDeferredHandleDereference allow_deference_for_print_code; - SourceIdAssigner id_assigner(info->inlined_functions().size()); PrintFunctionSource(info, isolate, -1, info->shared_info()); const auto& inlined = info->inlined_functions(); @@ -662,7 +714,6 @@ void PrintCode(Isolate* isolate, Handle<Code> code, } #ifdef ENABLE_DISASSEMBLER - AllowDeferredHandleDereference allow_deference_for_print_code; bool print_code = FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code && @@ -703,7 +754,7 @@ void PrintCode(Isolate* isolate, Handle<Code> code, Handle<SharedFunctionInfo> shared = info->shared_info(); os << "source_position = " << shared->StartPosition() << "\n"; } - code->Disassemble(debug_name.get(), os); + code->Disassemble(debug_name.get(), os, isolate); os << "--- End code ---\n"; } #endif // ENABLE_DISASSEMBLER @@ -800,8 +851,10 @@ class PipelineRunScope { public: PipelineRunScope(PipelineData* data, const char* phase_name) : phase_scope_(data->pipeline_statistics(), phase_name), - zone_scope_(data->zone_stats(), ZONE_NAME), - origin_scope_(data->node_origins(), phase_name) {} + zone_scope_(data->zone_stats(), phase_name), + origin_scope_(data->node_origins(), phase_name) { + DCHECK_NOT_NULL(phase_name); + } Zone* zone() { return zone_scope_.zone(); } @@ -886,7 +939,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob { PipelineCompilationJob(Isolate* isolate, Handle<SharedFunctionInfo> shared_info, Handle<JSFunction> function); - ~PipelineCompilationJob(); + ~PipelineCompilationJob() final; protected: Status PrepareJobImpl(Isolate* isolate) final; @@ -915,7 +968,8 @@ PipelineCompilationJob::PipelineCompilationJob( // we pass it to the CompilationJob constructor, but it is not // dereferenced there. : OptimizedCompilationJob(&compilation_info_, "TurboFan"), - zone_(function->GetIsolate()->allocator(), ZONE_NAME), + zone_(function->GetIsolate()->allocator(), + kPipelineCompilationJobZoneName), zone_stats_(function->GetIsolate()->allocator()), compilation_info_(&zone_, function->GetIsolate(), shared_info, function), pipeline_statistics_(CreatePipelineStatistics( @@ -976,9 +1030,16 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl( compilation_info()->MarkAsAllocationFoldingEnabled(); } + // Determine whether to specialize the code for the function's context. + // We can't do this in the case of OSR, because we want to cache the + // generated code on the native context keyed on SharedFunctionInfo. + // TODO(mythria): Check if it is better to key the OSR cache on JSFunction and + // allow context specialization for OSR code. if (compilation_info()->closure()->raw_feedback_cell().map() == - ReadOnlyRoots(isolate).one_closure_cell_map()) { + ReadOnlyRoots(isolate).one_closure_cell_map() && + !compilation_info()->is_osr()) { compilation_info()->MarkAsFunctionContextSpecializing(); + data_.ChooseSpecializationContext(); } if (compilation_info()->is_source_positions_enabled()) { @@ -999,9 +1060,13 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl( // assembly. 
Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate); - if (!pipeline_.CreateGraph()) { - CHECK(!isolate->has_pending_exception()); - return AbortOptimization(BailoutReason::kGraphBuildingFailed); + pipeline_.Serialize(); + + if (!FLAG_concurrent_inlining) { + if (!pipeline_.CreateGraph()) { + CHECK(!isolate->has_pending_exception()); + return AbortOptimization(BailoutReason::kGraphBuildingFailed); + } } return SUCCEEDED; @@ -1012,7 +1077,21 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() { TRACE_DISABLED_BY_DEFAULT("v8.compile"), "v8.optimizingCompile.execute", this, TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "function", compilation_info()->shared_info()->TraceIDRef()); - if (!pipeline_.OptimizeGraph(linkage_)) return FAILED; + + if (FLAG_concurrent_inlining) { + if (!pipeline_.CreateGraph()) { + return AbortOptimization(BailoutReason::kGraphBuildingFailed); + } + } + + bool success; + if (FLAG_turboprop) { + success = pipeline_.OptimizeGraphForMidTier(linkage_); + } else { + success = pipeline_.OptimizeGraph(linkage_); + } + if (!success) return FAILED; + pipeline_.AssembleCode(linkage_); return SUCCEEDED; } @@ -1091,8 +1170,6 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob { pipeline_(&data_), wasm_engine_(wasm_engine) {} - ~WasmHeapStubCompilationJob() = default; - protected: Status PrepareJobImpl(Isolate* isolate) final; Status ExecuteJobImpl() final; @@ -1119,7 +1196,7 @@ Pipeline::NewWasmHeapStubCompilationJob( CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind, std::unique_ptr<char[]> debug_name, const AssemblerOptions& options, SourcePositionTable* source_positions) { - return base::make_unique<WasmHeapStubCompilationJob>( + return std::make_unique<WasmHeapStubCompilationJob>( isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind, std::move(debug_name), options, source_positions); } @@ -1175,7 +1252,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl( if (FLAG_print_opt_code) { CodeTracer::Scope tracing_scope(isolate->GetCodeTracer()); OFStream os(tracing_scope.file()); - code->Disassemble(compilation_info()->GetDebugName().get(), os); + code->Disassemble(compilation_info()->GetDebugName().get(), os, isolate); } #endif return SUCCEEDED; @@ -1212,38 +1289,10 @@ struct GraphBuilderPhase { } }; -namespace { - -Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) { - Context current = closure->context(); - size_t distance = 0; - while (!current.IsNativeContext()) { - if (current.IsModuleContext()) { - return Just( - OuterContext(handle(current, current.GetIsolate()), distance)); - } - current = current.previous(); - distance++; - } - return Nothing<OuterContext>(); -} - -Maybe<OuterContext> ChooseSpecializationContext( - Isolate* isolate, OptimizedCompilationInfo* info) { - if (info->is_function_context_specializing()) { - DCHECK(info->has_context()); - return Just(OuterContext(handle(info->context(), isolate), 0)); - } - return GetModuleContext(info->closure()); -} - -} // anonymous namespace - struct InliningPhase { static const char* phase_name() { return "V8.TFInlining"; } void Run(PipelineData* data, Zone* temp_zone) { - Isolate* isolate = data->isolate(); OptimizedCompilationInfo* info = data->info(); GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(), data->jsgraph()->Dead()); @@ -1260,7 +1309,7 @@ struct InliningPhase { data->dependencies()); JSContextSpecialization context_specialization( 
&graph_reducer, data->jsgraph(), data->broker(), - ChooseSpecializationContext(isolate, data->info()), + data->specialization_context(), data->info()->is_function_context_specializing() ? data->info()->closure() : MaybeHandle<JSFunction>()); @@ -1389,9 +1438,13 @@ struct SerializationPhase { flags |= SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness; } - RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(), - temp_zone, data->info()->closure(), - flags, data->info()->osr_offset()); + RunSerializerForBackgroundCompilation( + data->zone_stats(), data->broker(), data->dependencies(), + data->info()->closure(), flags, data->info()->osr_offset()); + if (data->specialization_context().IsJust()) { + ContextRef(data->broker(), + data->specialization_context().FromJust().context); + } } }; @@ -1682,8 +1735,8 @@ struct MemoryOptimizationPhase { MemoryOptimizer optimizer( data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(), data->info()->is_allocation_folding_enabled() - ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding - : MemoryOptimizer::AllocationFolding::kDontAllocationFolding, + ? MemoryLowering::AllocationFolding::kDoAllocationFolding + : MemoryLowering::AllocationFolding::kDontAllocationFolding, data->debug_name(), &data->info()->tick_counter()); optimizer.Optimize(); } @@ -1705,13 +1758,15 @@ struct LateOptimizationPhase { CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), data->broker(), data->common(), data->machine(), temp_zone); - SelectLowering select_lowering(data->jsgraph()->graph(), - data->jsgraph()->common()); -#ifdef V8_COMPRESS_POINTERS + SelectLowering select_lowering(data->jsgraph(), temp_zone); + // TODO(v8:7703, solanes): go back to using #if guards once + // FLAG_turbo_decompression_elimination gets removed. DecompressionElimination decompression_elimination( &graph_reducer, data->graph(), data->machine(), data->common()); - AddReducer(data, &graph_reducer, &decompression_elimination); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + AddReducer(data, &graph_reducer, &decompression_elimination); + } + USE(decompression_elimination); AddReducer(data, &graph_reducer, &branch_condition_elimination); AddReducer(data, &graph_reducer, &dead_code_elimination); AddReducer(data, &graph_reducer, &machine_reducer); @@ -1738,6 +1793,23 @@ struct MachineOperatorOptimizationPhase { } }; +struct MidTierMachineLoweringPhase { + static const char* phase_name() { return "V8.TFMidTierMachineLoweringPhase"; } + + void Run(PipelineData* data, Zone* temp_zone) { + GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), + data->jsgraph()->Dead()); + SelectLowering select_lowering(data->jsgraph(), temp_zone); + MemoryLowering memory_lowering(data->jsgraph(), temp_zone, + data->info()->GetPoisoningMitigationLevel()); + + AddReducer(data, &graph_reducer, &memory_lowering); + AddReducer(data, &graph_reducer, &select_lowering); + graph_reducer.ReduceGraph(); + } +}; + struct CsaEarlyOptimizationPhase { static const char* phase_name() { return "V8.CSAEarlyOptimization"; } @@ -1779,11 +1851,14 @@ struct CsaOptimizationPhase { CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), data->broker(), data->common(), data->machine(), temp_zone); -#ifdef V8_COMPRESS_POINTERS + // TODO(v8:7703, solanes): go back to using #if guards once + // FLAG_turbo_decompression_elimination gets removed. 
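// Both this LateOptimizationPhase hunk and the CsaOptimizationPhase hunk that
// follows replace the compile-time "#ifdef V8_COMPRESS_POINTERS" guard around
// DecompressionElimination with a runtime check, so the reducer is now
// constructed unconditionally, registered only when the flag combination is
// on, and handed to USE() so it does not trip unused-variable warnings
// otherwise. A minimal standalone sketch of that pattern (the names below are
// illustrative stand-ins, and USE is a local approximation of the real macro):
#include <iostream>
#include <vector>

#define USE(x) ((void)(x))  // local stand-in for the real USE() macro

struct Reducer {
  const char* name;
};

int main() {
  bool compress_pointers = true;           // stand-in for COMPRESS_POINTERS_BOOL
  bool decompression_elimination = false;  // stand-in for the runtime flag

  std::vector<Reducer*> reducers;
  Reducer decompression{"DecompressionElimination"};  // always constructed
  if (compress_pointers && decompression_elimination) {
    reducers.push_back(&decompression);  // only registered when enabled
  }
  USE(decompression);  // keeps the object "used" when it is not registered

  for (Reducer* r : reducers) std::cout << r->name << "\n";
  return 0;
}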
DecompressionElimination decompression_elimination( &graph_reducer, data->graph(), data->machine(), data->common()); - AddReducer(data, &graph_reducer, &decompression_elimination); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + AddReducer(data, &graph_reducer, &decompression_elimination); + } + USE(decompression_elimination); AddReducer(data, &graph_reducer, &branch_condition_elimination); AddReducer(data, &graph_reducer, &dead_code_elimination); AddReducer(data, &graph_reducer, &machine_reducer); @@ -2077,7 +2152,7 @@ struct JumpThreadingPhase { void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) { ZoneVector<RpoNumber> result(temp_zone); - if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(), + if (JumpThreading::ComputeForwarding(temp_zone, &result, data->sequence(), frame_at_start)) { JumpThreading::ApplyForwarding(temp_zone, result, data->sequence()); } @@ -2102,7 +2177,7 @@ struct FinalizeCodePhase { struct PrintGraphPhase { - static const char* phase_name() { return nullptr; } + static const char* phase_name() { return "V8.TFPrintGraph"; } void Run(PipelineData* data, Zone* temp_zone, const char* phase) { OptimizedCompilationInfo* info = data->info(); @@ -2143,7 +2218,7 @@ struct PrintGraphPhase { struct VerifyGraphPhase { - static const char* phase_name() { return nullptr; } + static const char* phase_name() { return "V8.TFVerifyGraph"; } void Run(PipelineData* data, Zone* temp_zone, const bool untyped, bool values_only = false) { @@ -2176,10 +2251,10 @@ void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) { } } -bool PipelineImpl::CreateGraph() { +void PipelineImpl::Serialize() { PipelineData* data = this->data_; - data->BeginPhaseKind("V8.TFGraphCreation"); + data->BeginPhaseKind("V8.TFBrokerInitAndSerialization"); if (info()->trace_turbo_json_enabled() || info()->trace_turbo_graph_enabled()) { @@ -2203,15 +2278,19 @@ bool PipelineImpl::CreateGraph() { if (FLAG_concurrent_inlining) { Run<HeapBrokerInitializationPhase>(); Run<SerializationPhase>(); + data->broker()->StopSerializing(); } + data->EndPhaseKind(); +} + +bool PipelineImpl::CreateGraph() { + PipelineData* data = this->data_; + + data->BeginPhaseKind("V8.TFGraphCreation"); Run<GraphBuilderPhase>(); RunPrintAndVerify(GraphBuilderPhase::phase_name(), true); - if (FLAG_concurrent_inlining) { - Run<CopyMetadataForConcurrentCompilePhase>(); - } - // Perform function context specialization and inlining (if enabled). Run<InliningPhase>(); RunPrintAndVerify(InliningPhase::phase_name(), true); @@ -2222,12 +2301,13 @@ bool PipelineImpl::CreateGraph() { // Determine the Typer operation flags. { - if (is_sloppy(info()->shared_info()->language_mode()) && - info()->shared_info()->IsUserJavaScript()) { + SharedFunctionInfoRef shared_info(data->broker(), info()->shared_info()); + if (is_sloppy(shared_info.language_mode()) && + shared_info.IsUserJavaScript()) { // Sloppy mode functions always have an Object for this. data->AddTyperFlag(Typer::kThisIsReceiver); } - if (IsClassConstructor(info()->shared_info()->kind())) { + if (IsClassConstructor(shared_info.kind())) { // Class constructors cannot be [[Call]]ed. data->AddTyperFlag(Typer::kNewTargetIsReceiver); } @@ -2235,12 +2315,7 @@ bool PipelineImpl::CreateGraph() { // Run the type-sensitive lowerings and optimizations on the graph. 
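// Together with the PipelineCompilationJob hunks earlier in this diff, the new
// shape of the job is: PrepareJobImpl always runs pipeline_.Serialize(); the
// graph is built eagerly there only when --concurrent-inlining is off,
// otherwise CreateGraph() is deferred to ExecuteJobImpl (which typically runs
// off the main thread), and ExecuteJobImpl picks OptimizeGraphForMidTier()
// when --turboprop is set. A standalone sketch of that control flow only; the
// names and types below are hypothetical, not the real CompilationJob API:
#include <iostream>

struct Flags {
  bool concurrent_inlining;
  bool turboprop;
};

enum class Status { kSucceeded, kFailed };

Status PrepareJob(const Flags& flags) {
  std::cout << "Serialize()\n";  // broker initialization + serialization, always
  if (!flags.concurrent_inlining) {
    std::cout << "CreateGraph() (main thread)\n";
  }
  return Status::kSucceeded;
}

Status ExecuteJob(const Flags& flags) {
  if (flags.concurrent_inlining) {
    std::cout << "CreateGraph() (background)\n";
  }
  if (flags.turboprop) {
    std::cout << "OptimizeGraphForMidTier()\n";
  } else {
    std::cout << "OptimizeGraph()\n";
  }
  std::cout << "AssembleCode()\n";
  return Status::kSucceeded;
}

int main() {
  Flags flags{/*concurrent_inlining=*/true, /*turboprop=*/false};
  if (PrepareJob(flags) == Status::kSucceeded) ExecuteJob(flags);
  return 0;
}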
{ - if (FLAG_concurrent_inlining) { - // TODO(neis): Remove CopyMetadataForConcurrentCompilePhase call once - // brokerization of JSNativeContextSpecialization is complete. - Run<CopyMetadataForConcurrentCompilePhase>(); - data->broker()->StopSerializing(); - } else { + if (!FLAG_concurrent_inlining) { Run<HeapBrokerInitializationPhase>(); Run<CopyMetadataForConcurrentCompilePhase>(); data->broker()->StopSerializing(); @@ -2359,6 +2434,70 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { return SelectInstructions(linkage); } +bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) { + PipelineData* data = this->data_; + + data->BeginPhaseKind("V8.TFLowering"); + + // Type the graph and keep the Typer running such that new nodes get + // automatically typed when they are created. + Run<TyperPhase>(data->CreateTyper()); + RunPrintAndVerify(TyperPhase::phase_name()); + Run<TypedLoweringPhase>(); + RunPrintAndVerify(TypedLoweringPhase::phase_name()); + + // TODO(9684): Consider rolling this into the preceeding phase or not creating + // LoopExit nodes at all. + Run<LoopExitEliminationPhase>(); + RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true); + + data->DeleteTyper(); + + if (FLAG_assert_types) { + Run<TypeAssertionsPhase>(); + RunPrintAndVerify(TypeAssertionsPhase::phase_name()); + } + + // Perform simplified lowering. This has to run w/o the Typer decorator, + // because we cannot compute meaningful types anyways, and the computed types + // might even conflict with the representation/truncation logic. + Run<SimplifiedLoweringPhase>(); + RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true); + + // From now on it is invalid to look at types on the nodes, because the types + // on the nodes might not make sense after representation selection due to the + // way we handle truncations; if we'd want to look at types afterwards we'd + // essentially need to re-type (large portions of) the graph. + + // In order to catch bugs related to type access after this point, we now + // remove the types from the nodes (currently only in Debug builds). +#ifdef DEBUG + Run<UntyperPhase>(); + RunPrintAndVerify(UntyperPhase::phase_name(), true); +#endif + + // Run generic lowering pass. 
+ Run<GenericLoweringPhase>(); + RunPrintAndVerify(GenericLoweringPhase::phase_name(), true); + + data->BeginPhaseKind("V8.TFBlockBuilding"); + + Run<EffectControlLinearizationPhase>(); + RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true); + + Run<MidTierMachineLoweringPhase>(); + RunPrintAndVerify(MidTierMachineLoweringPhase::phase_name(), true); + + data->source_positions()->RemoveDecorator(); + if (data->info()->trace_turbo_json_enabled()) { + data->node_origins()->RemoveDecorator(); + } + + ComputeScheduledGraph(); + + return SelectInstructions(linkage); +} + MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub( Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, SourcePositionTable* source_positions, Code::Kind kind, @@ -2571,6 +2710,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting( Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info)); Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate); + pipeline.Serialize(); if (!pipeline.CreateGraph()) return MaybeHandle<Code>(); if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>(); pipeline.AssembleCode(&linkage); @@ -2628,7 +2768,7 @@ std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob( Isolate* isolate, Handle<JSFunction> function, bool has_script) { Handle<SharedFunctionInfo> shared = handle(function->shared(), function->GetIsolate()); - return base::make_unique<PipelineCompilationJob>(isolate, shared, function); + return std::make_unique<PipelineCompilationJob>(isolate, shared, function); } // static @@ -2709,7 +2849,7 @@ void Pipeline::GenerateCodeForWasmFunction( if (!pipeline.SelectInstructions(&linkage)) return; pipeline.AssembleCode(&linkage, instruction_buffer->CreateView()); - auto result = base::make_unique<wasm::WasmCompilationResult>(); + auto result = std::make_unique<wasm::WasmCompilationResult>(); CodeGenerator* code_generator = pipeline.code_generator(); code_generator->tasm()->GetCode( nullptr, &result->code_desc, code_generator->safepoint_table_builder(), @@ -2818,7 +2958,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) { << "--- End of " << data->debug_name() << " generated by TurboFan\n" << "--------------------------------------------------\n"; } - Zone temp_zone(data->allocator(), ZONE_NAME); + Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName); MachineGraphVerifier::Run( data->graph(), data->schedule(), linkage, data->info()->IsNotOptimizedFunctionOrWasmFunction(), @@ -2993,6 +3133,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage, MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) { PipelineData* data = this->data_; + data->BeginPhaseKind("V8.TFFinalizeCode"); if (data->broker() && retire_broker) { data->broker()->Retire(); } @@ -3007,7 +3148,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) { if (data->profiler_data()) { #ifdef ENABLE_DISASSEMBLER std::ostringstream os; - code->Disassemble(nullptr, os); + code->Disassemble(nullptr, os, isolate()); data->profiler_data()->SetCode(&os); #endif // ENABLE_DISASSEMBLER } @@ -3023,7 +3164,7 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) { << "\"data\":\""; #ifdef ENABLE_DISASSEMBLER std::stringstream disassembly_stream; - code->Disassemble(nullptr, disassembly_stream); + code->Disassemble(nullptr, disassembly_stream, isolate()); std::string disassembly_string(disassembly_stream.str()); for (const auto& c : disassembly_string) { json_of << AsEscapedUC16ForJSON(c); @@ -3043,6 +3184,7 @@ 
MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) { << "Finished compiling method " << info()->GetDebugName().get() << " using TurboFan" << std::endl; } + data->EndPhaseKind(); return code; } @@ -3100,7 +3242,8 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config, std::unique_ptr<Zone> verifier_zone; RegisterAllocatorVerifier* verifier = nullptr; if (run_verifier) { - verifier_zone.reset(new Zone(data->allocator(), ZONE_NAME)); + verifier_zone.reset( + new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName)); verifier = new (verifier_zone.get()) RegisterAllocatorVerifier( verifier_zone.get(), config, data->sequence()); } diff --git a/chromium/v8/src/compiler/pipeline.h b/chromium/v8/src/compiler/pipeline.h index 3707bfb06e5..42f31472a9b 100644 --- a/chromium/v8/src/compiler/pipeline.h +++ b/chromium/v8/src/compiler/pipeline.h @@ -5,6 +5,8 @@ #ifndef V8_COMPILER_PIPELINE_H_ #define V8_COMPILER_PIPELINE_H_ +#include <memory> + // Clients of this interface shouldn't depend on lots of compiler internals. // Do not include anything from src/compiler here! #include "src/common/globals.h" diff --git a/chromium/v8/src/compiler/processed-feedback.h b/chromium/v8/src/compiler/processed-feedback.h index 17829863de1..1d1ee538d8b 100644 --- a/chromium/v8/src/compiler/processed-feedback.h +++ b/chromium/v8/src/compiler/processed-feedback.h @@ -18,7 +18,10 @@ class ElementAccessFeedback; class ForInFeedback; class GlobalAccessFeedback; class InstanceOfFeedback; +class LiteralFeedback; class NamedAccessFeedback; +class RegExpLiteralFeedback; +class TemplateObjectFeedback; class ProcessedFeedback : public ZoneObject { public: @@ -31,7 +34,10 @@ class ProcessedFeedback : public ZoneObject { kForIn, kGlobalAccess, kInstanceOf, + kLiteral, kNamedAccess, + kRegExpLiteral, + kTemplateObject, }; Kind kind() const { return kind_; } @@ -46,6 +52,9 @@ class ProcessedFeedback : public ZoneObject { GlobalAccessFeedback const& AsGlobalAccess() const; InstanceOfFeedback const& AsInstanceOf() const; NamedAccessFeedback const& AsNamedAccess() const; + LiteralFeedback const& AsLiteral() const; + RegExpLiteralFeedback const& AsRegExpLiteral() const; + TemplateObjectFeedback const& AsTemplateObject() const; protected: ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind); @@ -187,7 +196,9 @@ class SingleValueFeedback : public ProcessedFeedback { (K == kBinaryOperation && slot_kind == FeedbackSlotKind::kBinaryOp) || (K == kCompareOperation && slot_kind == FeedbackSlotKind::kCompareOp) || (K == kForIn && slot_kind == FeedbackSlotKind::kForIn) || - (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf)); + (K == kInstanceOf && slot_kind == FeedbackSlotKind::kInstanceOf) || + ((K == kLiteral || K == kRegExpLiteral || K == kTemplateObject) && + slot_kind == FeedbackSlotKind::kLiteral)); } T value() const { return value_; } @@ -202,6 +213,24 @@ class InstanceOfFeedback using SingleValueFeedback::SingleValueFeedback; }; +class LiteralFeedback + : public SingleValueFeedback<AllocationSiteRef, + ProcessedFeedback::kLiteral> { + using SingleValueFeedback::SingleValueFeedback; +}; + +class RegExpLiteralFeedback + : public SingleValueFeedback<JSRegExpRef, + ProcessedFeedback::kRegExpLiteral> { + using SingleValueFeedback::SingleValueFeedback; +}; + +class TemplateObjectFeedback + : public SingleValueFeedback<JSArrayRef, + ProcessedFeedback::kTemplateObject> { + using SingleValueFeedback::SingleValueFeedback; +}; + class BinaryOperationFeedback : public 
SingleValueFeedback<BinaryOperationHint, ProcessedFeedback::kBinaryOperation> { diff --git a/chromium/v8/src/compiler/raw-machine-assembler.cc b/chromium/v8/src/compiler/raw-machine-assembler.cc index e399b9c4f6b..c709729081c 100644 --- a/chromium/v8/src/compiler/raw-machine-assembler.cc +++ b/chromium/v8/src/compiler/raw-machine-assembler.cc @@ -690,15 +690,14 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* call_descriptor, return AddNode(common()->Call(call_descriptor), input_count, inputs); } -Node* RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor, - int input_count, Node* const* inputs) { +void RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor, + int input_count, Node* const* inputs) { // +1 is for target. DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 1); Node* tail_call = MakeNode(common()->TailCall(call_descriptor), input_count, inputs); schedule()->AddTailCall(CurrentBlock(), tail_call); current_block_ = nullptr; - return tail_call; } namespace { diff --git a/chromium/v8/src/compiler/raw-machine-assembler.h b/chromium/v8/src/compiler/raw-machine-assembler.h index 46940df44f8..cbbb719d54d 100644 --- a/chromium/v8/src/compiler/raw-machine-assembler.h +++ b/chromium/v8/src/compiler/raw-machine-assembler.h @@ -131,7 +131,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { std::pair<MachineType, const Operator*> InsertDecompressionIfNeeded( MachineType type) { const Operator* decompress_op = nullptr; - if (COMPRESS_POINTERS_BOOL) { + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { switch (type.representation()) { case MachineRepresentation::kTaggedPointer: type = MachineType::CompressedPointer(); @@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { std::pair<MachineRepresentation, Node*> InsertCompressionIfNeeded( MachineRepresentation rep, Node* value) { - if (COMPRESS_POINTERS_BOOL) { + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { switch (rep) { case MachineRepresentation::kTaggedPointer: rep = MachineRepresentation::kCompressedPointer; @@ -237,7 +237,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { object, value); } void OptimizedStoreMap(Node* object, Node* value) { - if (COMPRESS_POINTERS_BOOL) { + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { DCHECK(AccessBuilder::ForMap().machine_type.IsCompressedPointer()); value = AddNode(machine()->ChangeTaggedPointerToCompressedPointer(), value); @@ -736,8 +736,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { Node* BitcastTaggedToWord(Node* a) { return AddNode(machine()->BitcastTaggedToWord(), a); } - Node* BitcastTaggedSignedToWord(Node* a) { - return AddNode(machine()->BitcastTaggedSignedToWord(), a); + Node* BitcastTaggedToWordForTagAndSmiBits(Node* a) { + return AddNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), a); } Node* BitcastMaybeObjectToWord(Node* a) { return AddNode(machine()->BitcastMaybeObjectToWord(), a); @@ -965,8 +965,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { // Tail call a given call descriptor and the given arguments. // The call target is passed as part of the {inputs} array. - Node* TailCallN(CallDescriptor* call_descriptor, int input_count, - Node* const* inputs); + void TailCallN(CallDescriptor* call_descriptor, int input_count, + Node* const* inputs); // Type representing C function argument with type info. 
using CFunctionArg = std::pair<MachineType, Node*>; diff --git a/chromium/v8/src/compiler/representation-change.cc b/chromium/v8/src/compiler/representation-change.cc index fd0cbabe668..ca1b1e221f3 100644 --- a/chromium/v8/src/compiler/representation-change.cc +++ b/chromium/v8/src/compiler/representation-change.cc @@ -1272,8 +1272,13 @@ Node* RepresentationChanger::GetBitRepresentationFor( } } } else if (output_rep == MachineRepresentation::kTaggedSigned) { - node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node, - jsgraph()->IntPtrConstant(0)); + if (COMPRESS_POINTERS_BOOL) { + node = jsgraph()->graph()->NewNode(machine()->Word32Equal(), node, + jsgraph()->Int32Constant(0)); + } else { + node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node, + jsgraph()->IntPtrConstant(0)); + } return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node, jsgraph()->Int32Constant(0)); } else if (output_rep == MachineRepresentation::kCompressed) { @@ -1546,14 +1551,17 @@ const Operator* RepresentationChanger::TaggedSignedOperatorFor( IrOpcode::Value opcode) { switch (opcode) { case IrOpcode::kSpeculativeNumberLessThan: - return machine()->Is32() ? machine()->Int32LessThan() - : machine()->Int64LessThan(); + return (COMPRESS_POINTERS_BOOL || machine()->Is32()) + ? machine()->Int32LessThan() + : machine()->Int64LessThan(); case IrOpcode::kSpeculativeNumberLessThanOrEqual: - return machine()->Is32() ? machine()->Int32LessThanOrEqual() - : machine()->Int64LessThanOrEqual(); + return (COMPRESS_POINTERS_BOOL || machine()->Is32()) + ? machine()->Int32LessThanOrEqual() + : machine()->Int64LessThanOrEqual(); case IrOpcode::kSpeculativeNumberEqual: - return machine()->Is32() ? machine()->Word32Equal() - : machine()->Word64Equal(); + return (COMPRESS_POINTERS_BOOL || machine()->Is32()) + ? machine()->Word32Equal() + : machine()->Word64Equal(); default: UNREACHABLE(); } diff --git a/chromium/v8/src/compiler/scheduler.cc b/chromium/v8/src/compiler/scheduler.cc index bf23e436f68..2999cbfcd6e 100644 --- a/chromium/v8/src/compiler/scheduler.cc +++ b/chromium/v8/src/compiler/scheduler.cc @@ -6,7 +6,7 @@ #include <iomanip> -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/control-equivalence.h" diff --git a/chromium/v8/src/compiler/select-lowering.cc b/chromium/v8/src/compiler/select-lowering.cc index 4d5bb99053f..290306a966b 100644 --- a/chromium/v8/src/compiler/select-lowering.cc +++ b/chromium/v8/src/compiler/select-lowering.cc @@ -14,29 +14,39 @@ namespace v8 { namespace internal { namespace compiler { -SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common) - : common_(common), graph_(graph) {} +SelectLowering::SelectLowering(JSGraph* jsgraph, Zone* zone) + : graph_assembler_(jsgraph, nullptr, nullptr, zone), + start_(jsgraph->graph()->start()) {} SelectLowering::~SelectLowering() = default; - Reduction SelectLowering::Reduce(Node* node) { if (node->opcode() != IrOpcode::kSelect) return NoChange(); + return Changed(LowerSelect(node)); +} + +#define __ gasm()-> + +Node* SelectLowering::LowerSelect(Node* node) { SelectParameters const p = SelectParametersOf(node->op()); - Node* cond = node->InputAt(0); - Node* vthen = node->InputAt(1); - Node* velse = node->InputAt(2); - - // Create a diamond and a phi. 
- Diamond d(graph(), common(), cond, p.hint()); - node->ReplaceInput(0, vthen); - node->ReplaceInput(1, velse); - node->ReplaceInput(2, d.merge); - NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2)); - return Changed(node); + Node* condition = node->InputAt(0); + Node* vtrue = node->InputAt(1); + Node* vfalse = node->InputAt(2); + + gasm()->Reset(start(), start()); + + auto done = __ MakeLabel(p.representation()); + + __ GotoIf(condition, &done, vtrue); + __ Goto(&done, vfalse); + __ Bind(&done); + + return done.PhiAt(0); } +#undef __ + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/compiler/select-lowering.h b/chromium/v8/src/compiler/select-lowering.h index d8c12d4d546..53890a78981 100644 --- a/chromium/v8/src/compiler/select-lowering.h +++ b/chromium/v8/src/compiler/select-lowering.h @@ -5,33 +5,31 @@ #ifndef V8_COMPILER_SELECT_LOWERING_H_ #define V8_COMPILER_SELECT_LOWERING_H_ +#include "src/compiler/graph-assembler.h" #include "src/compiler/graph-reducer.h" namespace v8 { namespace internal { namespace compiler { -// Forward declarations. -class CommonOperatorBuilder; -class Graph; - - // Lowers Select nodes to diamonds. class SelectLowering final : public Reducer { public: - SelectLowering(Graph* graph, CommonOperatorBuilder* common); + SelectLowering(JSGraph* jsgraph, Zone* zone); ~SelectLowering() override; const char* reducer_name() const override { return "SelectLowering"; } Reduction Reduce(Node* node) override; + Node* LowerSelect(Node* node); + private: - CommonOperatorBuilder* common() const { return common_; } - Graph* graph() const { return graph_; } + GraphAssembler* gasm() { return &graph_assembler_; } + Node* start() { return start_; } - CommonOperatorBuilder* common_; - Graph* graph_; + GraphAssembler graph_assembler_; + Node* start_; }; } // namespace compiler diff --git a/chromium/v8/src/compiler/serializer-for-background-compilation.cc b/chromium/v8/src/compiler/serializer-for-background-compilation.cc index 20d405b7757..0391e8742d6 100644 --- a/chromium/v8/src/compiler/serializer-for-background-compilation.cc +++ b/chromium/v8/src/compiler/serializer-for-background-compilation.cc @@ -10,7 +10,9 @@ #include "src/compiler/access-info.h" #include "src/compiler/bytecode-analysis.h" #include "src/compiler/compilation-dependencies.h" +#include "src/compiler/functional-list.h" #include "src/compiler/js-heap-broker.h" +#include "src/compiler/zone-stats.h" #include "src/handles/handles-inl.h" #include "src/ic/call-optimization.h" #include "src/interpreter/bytecode-array-iterator.h" @@ -41,7 +43,6 @@ namespace compiler { V(CallRuntime) \ V(CloneObject) \ V(CreateArrayFromIterable) \ - V(CreateEmptyArrayLiteral) \ V(CreateEmptyObjectLiteral) \ V(CreateMappedArguments) \ V(CreateRestParameter) \ @@ -160,6 +161,7 @@ namespace compiler { V(CreateBlockContext) \ V(CreateCatchContext) \ V(CreateClosure) \ + V(CreateEmptyArrayLiteral) \ V(CreateEvalContext) \ V(CreateFunctionContext) \ V(CreateObjectLiteral) \ @@ -230,13 +232,41 @@ namespace compiler { UNCONDITIONAL_JUMPS_LIST(V) \ UNREACHABLE_BYTECODE_LIST(V) -template <typename T> -struct HandleComparator { - bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const { - return lhs.address() < rhs.address(); +template <typename T, typename EqualTo> +class FunctionalSet { + public: + void Add(T const& elem, Zone* zone) { + for (auto const& l : data_) { + if (equal_to(l, elem)) return; + } + data_.PushFront(elem, zone); + } + + bool Includes(FunctionalSet<T, 
EqualTo> const& other) const { + return std::all_of(other.begin(), other.end(), [&](T const& other_elem) { + return std::any_of(this->begin(), this->end(), [&](T const& this_elem) { + return equal_to(this_elem, other_elem); + }); + }); } + + bool IsEmpty() const { return data_.begin() == data_.end(); } + + void Clear() { data_.Clear(); } + + using iterator = typename FunctionalList<T>::iterator; + + iterator begin() const { return data_.begin(); } + iterator end() const { return data_.end(); } + + private: + static EqualTo equal_to; + FunctionalList<T> data_; }; +template <typename T, typename EqualTo> +EqualTo FunctionalSet<T, EqualTo>::equal_to; + struct VirtualContext { unsigned int distance; Handle<Context> context; @@ -245,21 +275,22 @@ struct VirtualContext { : distance(distance_in), context(context_in) { CHECK_GT(distance, 0); } - bool operator<(const VirtualContext& other) const { - return HandleComparator<Context>()(context, other.context) && - distance < other.distance; + bool operator==(const VirtualContext& other) const { + return context.equals(other.context) && distance == other.distance; } }; class FunctionBlueprint; -using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>; -using VirtualContextsSet = ZoneSet<VirtualContext>; -using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>; -using BlueprintsSet = ZoneSet<FunctionBlueprint>; +using ConstantsSet = FunctionalSet<Handle<Object>, Handle<Object>::equal_to>; +using VirtualContextsSet = + FunctionalSet<VirtualContext, std::equal_to<VirtualContext>>; +using MapsSet = FunctionalSet<Handle<Map>, Handle<Map>::equal_to>; +using BlueprintsSet = + FunctionalSet<FunctionBlueprint, std::equal_to<FunctionBlueprint>>; class Hints { public: - explicit Hints(Zone* zone); + Hints() = default; static Hints SingleConstant(Handle<Object> constant, Zone* zone); @@ -268,12 +299,13 @@ class Hints { const BlueprintsSet& function_blueprints() const; const VirtualContextsSet& virtual_contexts() const; - void AddConstant(Handle<Object> constant); - void AddMap(Handle<Map> map); - void AddFunctionBlueprint(FunctionBlueprint function_blueprint); - void AddVirtualContext(VirtualContext virtual_context); + void AddConstant(Handle<Object> constant, Zone* zone); + void AddMap(Handle<Map> map, Zone* zone); + void AddFunctionBlueprint(FunctionBlueprint function_blueprint, Zone* zone); + void AddVirtualContext(VirtualContext virtual_context, Zone* zone); - void Add(const Hints& other); + void Add(const Hints& other, Zone* zone); + void AddFromChildSerializer(const Hints& other, Zone* zone); void Clear(); bool IsEmpty() const; @@ -292,6 +324,8 @@ class Hints { using HintsVector = ZoneVector<Hints>; +// A FunctionBlueprint is a SharedFunctionInfo and a FeedbackVector, plus +// Hints about the context in which a closure will be created from them. class FunctionBlueprint { public: FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone); @@ -304,13 +338,23 @@ class FunctionBlueprint { Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; } const Hints& context_hints() const { return context_hints_; } - bool operator<(const FunctionBlueprint& other) const { - // A feedback vector is never used for more than one SFI, so it can - // be used for strict ordering of blueprints. + bool operator==(const FunctionBlueprint& other) const { + // A feedback vector is never used for more than one SFI. 
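// The FunctionalSet introduced above is a thin wrapper over FunctionalList:
// Add() does a linear membership scan and then pushes the element onto the
// front of a persistent, zone-allocated list, so copies of a set share their
// tails, and Includes() is a quadratic scan; this replaces the earlier
// ZoneSet/HandleComparator representation. A minimal standalone sketch of the
// same idea, using std::shared_ptr to stand in for the structural sharing that
// the Zone provides in the real code (the class below is illustrative, not
// V8's):
#include <cassert>
#include <functional>
#include <memory>

template <typename T, typename EqualTo = std::equal_to<T>>
class PersistentSet {
 public:
  void Add(const T& elem) {
    for (const Node* n = head_.get(); n != nullptr; n = n->next.get()) {
      if (EqualTo()(n->value, elem)) return;  // already present, keep list as is
    }
    head_ = std::make_shared<Node>(Node{elem, head_});  // new head shares old tail
  }

  bool Contains(const T& elem) const {
    for (const Node* n = head_.get(); n != nullptr; n = n->next.get()) {
      if (EqualTo()(n->value, elem)) return true;
    }
    return false;
  }

 private:
  struct Node {
    T value;
    std::shared_ptr<Node> next;
  };
  std::shared_ptr<Node> head_;
};

int main() {
  PersistentSet<int> a;
  a.Add(1);
  a.Add(2);
  PersistentSet<int> b = a;  // cheap copy: b shares a's nodes
  b.Add(3);                  // extends b without disturbing a
  assert(a.Contains(1) && !a.Contains(3));
  assert(b.Contains(1) && b.Contains(3));
  return 0;
}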
Moreover, we can + // never have two blueprints with identical feedback vector (and SFI) but + // different hints, because: + // (1) A blueprint originates either (i) from the data associated with a + // CreateClosure bytecode, in which case two different CreateClosure + // bytecodes never have the same feedback vector, or (ii) from a + // JSFunction, in which case the hints are determined by the closure. + // (2) We never extend a blueprint's hints after construction. + // + // It is therefore sufficient to look at the feedback vector in order to + // decide equality. DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_), shared_.equals(other.shared_)); - return HandleComparator<FeedbackVector>()(feedback_vector_, - other.feedback_vector_); + SLOW_DCHECK(!feedback_vector_.equals(other.feedback_vector_) || + context_hints_.Equals(other.context_hints_)); + return feedback_vector_.equals(other.feedback_vector_); } private: @@ -319,6 +363,8 @@ class FunctionBlueprint { Hints context_hints_; }; +// A CompilationSubject is a FunctionBlueprint, optionally with a matching +// closure. class CompilationSubject { public: explicit CompilationSubject(FunctionBlueprint blueprint) @@ -336,24 +382,65 @@ class CompilationSubject { MaybeHandle<JSFunction> closure_; }; +// A Callee is either a JSFunction (which may not have a feedback vector), or a +// FunctionBlueprint. Note that this is different from CompilationSubject, which +// always has a FunctionBlueprint. +class Callee { + public: + explicit Callee(Handle<JSFunction> jsfunction) : jsfunction_(jsfunction) {} + explicit Callee(FunctionBlueprint const& blueprint) : blueprint_(blueprint) {} + + Handle<SharedFunctionInfo> shared(Isolate* isolate) const { + return blueprint_.has_value() + ? blueprint_->shared() + : handle(jsfunction_.ToHandleChecked()->shared(), isolate); + } + + bool HasFeedbackVector() const { + Handle<JSFunction> function; + return blueprint_.has_value() || + jsfunction_.ToHandleChecked()->has_feedback_vector(); + } + + CompilationSubject ToCompilationSubject(Isolate* isolate, Zone* zone) const { + CHECK(HasFeedbackVector()); + return blueprint_.has_value() + ? CompilationSubject(*blueprint_) + : CompilationSubject(jsfunction_.ToHandleChecked(), isolate, + zone); + } + + private: + MaybeHandle<JSFunction> const jsfunction_; + base::Optional<FunctionBlueprint> const blueprint_; +}; + +// If a list of arguments (hints) is shorter than the function's parameter +// count, this enum expresses what we know about the missing arguments. +enum MissingArgumentsPolicy { + kMissingArgumentsAreUndefined, // ... as in the JS undefined value + kMissingArgumentsAreUnknown, +}; + // The SerializerForBackgroundCompilation makes sure that the relevant function // data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later // optimizations in the compiler, is copied to the heap broker. class SerializerForBackgroundCompilation { public: SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags, - BailoutId osr_offset); + ZoneStats* zone_stats, JSHeapBroker* broker, + CompilationDependencies* dependencies, Handle<JSFunction> closure, + SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset); Hints Run(); // NOTE: Returns empty for an already-serialized function. 
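// MissingArgumentsPolicy above decides what the serializer assumes about
// parameters that a call site does not actually pass: with
// kMissingArgumentsAreUndefined the environment pads the remaining parameter
// hints with the JS undefined value (the Environment constructor later in this
// file does exactly that), with kMissingArgumentsAreUnknown it leaves them
// empty. A standalone sketch of that padding step; "Hint" and the string
// values are illustrative stand-ins, not the real Hints machinery:
#include <cassert>
#include <string>
#include <vector>

enum MissingArgumentsPolicy {
  kMissingArgumentsAreUndefined,  // missing arguments are the JS undefined value
  kMissingArgumentsAreUnknown,    // nothing is known about missing arguments
};

using Hint = std::string;  // empty string stands for "no hints"

std::vector<Hint> PadArguments(const std::vector<Hint>& args, size_t param_count,
                               MissingArgumentsPolicy padding) {
  std::vector<Hint> result = args;
  result.resize(param_count);  // extra args are dropped, missing slots start empty
  if (padding == kMissingArgumentsAreUndefined) {
    for (size_t i = args.size(); i < param_count; ++i) {
      result[i] = "constant:undefined";
    }
  }
  return result;
}

int main() {
  // One argument passed to a function declared with three parameters.
  auto padded = PadArguments({"constant:42"}, 3, kMissingArgumentsAreUndefined);
  assert(padded[1] == "constant:undefined" && padded[2] == "constant:undefined");

  auto unknown = PadArguments({"constant:42"}, 3, kMissingArgumentsAreUnknown);
  assert(unknown[1].empty() && unknown[2].empty());
  return 0;
}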
class Environment; private: SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - CompilationSubject function, base::Optional<Hints> new_target, - const HintsVector& arguments, + ZoneStats* zone_stats, JSHeapBroker* broker, + CompilationDependencies* dependencies, CompilationSubject function, + base::Optional<Hints> new_target, const HintsVector& arguments, + MissingArgumentsPolicy padding, SerializerForBackgroundCompilationFlags flags); bool BailoutOnUninitialized(ProcessedFeedback const& feedback); @@ -365,36 +452,39 @@ class SerializerForBackgroundCompilation { SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE) #undef DECLARE_VISIT_BYTECODE - // Returns whether the callee with the given SFI should be processed further, - // i.e. whether it's inlineable. - bool ProcessSFIForCallOrConstruct(Handle<SharedFunctionInfo> shared, + void ProcessSFIForCallOrConstruct(Callee const& callee, + base::Optional<Hints> new_target, const HintsVector& arguments, - SpeculationMode speculation_mode); - // Returns whether {function} should be serialized for compilation. - bool ProcessCalleeForCallOrConstruct(Handle<JSFunction> function, + SpeculationMode speculation_mode, + MissingArgumentsPolicy padding); + void ProcessCalleeForCallOrConstruct(Handle<Object> callee, + base::Optional<Hints> new_target, const HintsVector& arguments, - SpeculationMode speculation_mode); + SpeculationMode speculation_mode, + MissingArgumentsPolicy padding); void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target, const HintsVector& arguments, FeedbackSlot slot, - bool with_spread = false); - void ProcessCallVarArgs(ConvertReceiverMode receiver_mode, - Hints const& callee, interpreter::Register first_reg, - int reg_count, FeedbackSlot slot, - bool with_spread = false); + MissingArgumentsPolicy padding); + void ProcessCallVarArgs( + ConvertReceiverMode receiver_mode, Hints const& callee, + interpreter::Register first_reg, int reg_count, FeedbackSlot slot, + MissingArgumentsPolicy padding = kMissingArgumentsAreUndefined); void ProcessApiCall(Handle<SharedFunctionInfo> target, const HintsVector& arguments); void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target, Handle<Map> receiver); void ProcessBuiltinCall(Handle<SharedFunctionInfo> target, + base::Optional<Hints> new_target, const HintsVector& arguments, - SpeculationMode speculation_mode); + SpeculationMode speculation_mode, + MissingArgumentsPolicy padding); void ProcessJump(interpreter::BytecodeArrayIterator* iterator); void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key, FeedbackSlot slot, AccessMode access_mode, bool honor_bailout_on_uninitialized); - void ProcessNamedPropertyAccess(Hints receiver, NameRef const& name, + void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name, FeedbackSlot slot, AccessMode access_mode); void ProcessNamedAccess(Hints receiver, NamedAccessFeedback const& feedback, AccessMode access_mode, Hints* new_accumulator_hints); @@ -411,7 +501,6 @@ class SerializerForBackgroundCompilation { void ProcessHintsForHasInPrototypeChain(Hints const& instance_hints); void ProcessHintsForRegExpTest(Hints const& regexp_hints); PropertyAccessInfo ProcessMapForRegExpTest(MapRef map); - void ProcessHintsForFunctionCall(Hints const& target_hints); void ProcessHintsForFunctionBind(Hints const& receiver_hints); void ProcessHintsForObjectGetPrototype(Hints const& object_hints); void ProcessConstantForOrdinaryHasInstance(HeapObjectRef const& 
constructor, @@ -456,7 +545,8 @@ class SerializerForBackgroundCompilation { Hints RunChildSerializer(CompilationSubject function, base::Optional<Hints> new_target, - const HintsVector& arguments, bool with_spread); + const HintsVector& arguments, + MissingArgumentsPolicy padding); // When (forward-)branching bytecodes are encountered, e.g. a conditional // jump, we call ContributeToJumpTargetEnvironment to "remember" the current @@ -475,14 +565,14 @@ class SerializerForBackgroundCompilation { JSHeapBroker* broker() const { return broker_; } CompilationDependencies* dependencies() const { return dependencies_; } - Zone* zone() const { return zone_; } + Zone* zone() { return zone_scope_.zone(); } Environment* environment() const { return environment_; } SerializerForBackgroundCompilationFlags flags() const { return flags_; } BailoutId osr_offset() const { return osr_offset_; } JSHeapBroker* const broker_; CompilationDependencies* const dependencies_; - Zone* const zone_; + ZoneStats::Scope zone_scope_; Environment* const environment_; ZoneUnorderedMap<int, Environment*> jump_target_environments_; SerializerForBackgroundCompilationFlags const flags_; @@ -490,11 +580,11 @@ class SerializerForBackgroundCompilation { }; void RunSerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags, - BailoutId osr_offset) { - SerializerForBackgroundCompilation serializer(broker, dependencies, zone, - closure, flags, osr_offset); + ZoneStats* zone_stats, JSHeapBroker* broker, + CompilationDependencies* dependencies, Handle<JSFunction> closure, + SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset) { + SerializerForBackgroundCompilation serializer( + zone_stats, broker, dependencies, closure, flags, osr_offset); serializer.Run(); } @@ -505,14 +595,19 @@ FunctionBlueprint::FunctionBlueprint(Handle<SharedFunctionInfo> shared, const Hints& context_hints) : shared_(shared), feedback_vector_(feedback_vector), - context_hints_(context_hints) {} + context_hints_(context_hints) { + // The checked invariant rules out recursion and thus avoids complexity. + CHECK(context_hints_.function_blueprints().IsEmpty()); +} FunctionBlueprint::FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone) : shared_(handle(function->shared(), isolate)), - feedback_vector_(handle(function->feedback_vector(), isolate)), - context_hints_(zone) { - context_hints_.AddConstant(handle(function->context(), isolate)); + feedback_vector_(function->feedback_vector(), isolate), + context_hints_() { + context_hints_.AddConstant(handle(function->context(), isolate), zone); + // The checked invariant rules out recursion and thus avoids complexity. 
+ CHECK(context_hints_.function_blueprints().IsEmpty()); } CompilationSubject::CompilationSubject(Handle<JSFunction> closure, @@ -521,25 +616,11 @@ CompilationSubject::CompilationSubject(Handle<JSFunction> closure, CHECK(closure->has_feedback_vector()); } -Hints::Hints(Zone* zone) - : virtual_contexts_(zone), - constants_(zone), - maps_(zone), - function_blueprints_(zone) {} - #ifdef ENABLE_SLOW_DCHECKS -namespace { -template <typename K, typename Compare> -bool SetIncludes(ZoneSet<K, Compare> const& lhs, - ZoneSet<K, Compare> const& rhs) { - return std::all_of(rhs.cbegin(), rhs.cend(), - [&](K const& x) { return lhs.find(x) != lhs.cend(); }); -} -} // namespace bool Hints::Includes(Hints const& other) const { - return SetIncludes(constants(), other.constants()) && - SetIncludes(function_blueprints(), other.function_blueprints()) && - SetIncludes(maps(), other.maps()); + return constants().Includes(other.constants()) && + function_blueprints().Includes(other.function_blueprints()) && + maps().Includes(other.maps()); } bool Hints::Equals(Hints const& other) const { return this->Includes(other) && other.Includes(*this); @@ -547,8 +628,8 @@ bool Hints::Equals(Hints const& other) const { #endif Hints Hints::SingleConstant(Handle<Object> constant, Zone* zone) { - Hints result(zone); - result.AddConstant(constant); + Hints result; + result.AddConstant(constant, zone); return result; } @@ -564,30 +645,49 @@ const VirtualContextsSet& Hints::virtual_contexts() const { return virtual_contexts_; } -void Hints::AddVirtualContext(VirtualContext virtual_context) { - virtual_contexts_.insert(virtual_context); +void Hints::AddVirtualContext(VirtualContext virtual_context, Zone* zone) { + virtual_contexts_.Add(virtual_context, zone); } -void Hints::AddConstant(Handle<Object> constant) { - constants_.insert(constant); +void Hints::AddConstant(Handle<Object> constant, Zone* zone) { + constants_.Add(constant, zone); } -void Hints::AddMap(Handle<Map> map) { maps_.insert(map); } +void Hints::AddMap(Handle<Map> map, Zone* zone) { maps_.Add(map, zone); } + +void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint, + Zone* zone) { + function_blueprints_.Add(function_blueprint, zone); +} -void Hints::AddFunctionBlueprint(FunctionBlueprint function_blueprint) { - function_blueprints_.insert(function_blueprint); +void Hints::Add(const Hints& other, Zone* zone) { + for (auto x : other.constants()) AddConstant(x, zone); + for (auto x : other.maps()) AddMap(x, zone); + for (auto x : other.function_blueprints()) AddFunctionBlueprint(x, zone); + for (auto x : other.virtual_contexts()) AddVirtualContext(x, zone); } -void Hints::Add(const Hints& other) { - for (auto x : other.constants()) AddConstant(x); - for (auto x : other.maps()) AddMap(x); - for (auto x : other.function_blueprints()) AddFunctionBlueprint(x); - for (auto x : other.virtual_contexts()) AddVirtualContext(x); +void Hints::AddFromChildSerializer(const Hints& other, Zone* zone) { + for (auto x : other.constants()) AddConstant(x, zone); + for (auto x : other.maps()) AddMap(x, zone); + for (auto x : other.virtual_contexts()) AddVirtualContext(x, zone); + + // Adding hints from a child serializer run means copying data out from + // a zone that's being destroyed. FunctionBlueprints have zone allocated + // data, so we've got to make a deep copy to eliminate traces of the + // dying zone. 
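// The comment above is the heart of AddFromChildSerializer: hints collected by
// a child serializer live in a zone that is torn down when the child run
// finishes, so blueprints have to be rebuilt with freshly copied hints in the
// surviving zone instead of keeping references into the old one. A tiny
// standalone illustration of the hazard and the fix, using a toy arena; none
// of the names below are the real Zone API:
#include <cassert>
#include <deque>
#include <string>
#include <vector>

struct ToyZone {
  std::deque<std::string> storage;  // owns everything "allocated" in this zone
  const std::string* Allocate(std::string s) {
    storage.push_back(std::move(s));
    return &storage.back();  // deque push_back does not move existing elements
  }
};

int main() {
  ToyZone parent_zone;
  std::vector<const std::string*> shallow;  // pointers into the child zone
  std::vector<const std::string*> deep;     // copies re-allocated in the parent zone
  {
    ToyZone child_zone;  // destroyed at the end of this scope
    const std::string* hint = child_zone.Allocate("map:JS_ARRAY_TYPE");
    shallow.push_back(hint);                      // will dangle
    deep.push_back(parent_zone.Allocate(*hint));  // deep copy into the parent zone
  }
  assert(*deep[0] == "map:JS_ARRAY_TYPE");  // safe: data lives in parent_zone
  // Dereferencing shallow[0] here would be a use-after-free.
  (void)shallow;
  return 0;
}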
+ for (auto x : other.function_blueprints()) { + Hints new_blueprint_hints; + new_blueprint_hints.AddFromChildSerializer(x.context_hints(), zone); + FunctionBlueprint new_blueprint(x.shared(), x.feedback_vector(), + new_blueprint_hints); + AddFunctionBlueprint(new_blueprint, zone); + } } bool Hints::IsEmpty() const { - return constants().empty() && maps().empty() && - function_blueprints().empty() && virtual_contexts().empty(); + return constants().IsEmpty() && maps().IsEmpty() && + function_blueprints().IsEmpty() && virtual_contexts().IsEmpty(); } std::ostream& operator<<(std::ostream& out, @@ -625,10 +725,10 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) { } void Hints::Clear() { - virtual_contexts_.clear(); - constants_.clear(); - maps_.clear(); - function_blueprints_.clear(); + virtual_contexts_.Clear(); + constants_.Clear(); + maps_.Clear(); + function_blueprints_.Clear(); DCHECK(IsEmpty()); } @@ -636,7 +736,8 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { public: Environment(Zone* zone, CompilationSubject function); Environment(Zone* zone, Isolate* isolate, CompilationSubject function, - base::Optional<Hints> new_target, const HintsVector& arguments); + base::Optional<Hints> new_target, const HintsVector& arguments, + MissingArgumentsPolicy padding); bool IsDead() const { return ephemeral_hints_.empty(); } @@ -648,7 +749,7 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { void Revive() { DCHECK(IsDead()); - ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone())); + ephemeral_hints_.resize(ephemeral_hints_size(), Hints()); DCHECK(!IsDead()); } @@ -691,7 +792,6 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { int RegisterToLocalIndex(interpreter::Register reg) const; - Zone* zone() const { return zone_; } int parameter_count() const { return parameter_count_; } int register_count() const { return register_count_; } @@ -722,24 +822,25 @@ SerializerForBackgroundCompilation::Environment::Environment( parameter_count_( function_.shared()->GetBytecodeArray().parameter_count()), register_count_(function_.shared()->GetBytecodeArray().register_count()), - closure_hints_(zone), - current_context_hints_(zone), - return_value_hints_(zone), - ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) { + closure_hints_(), + current_context_hints_(), + return_value_hints_(), + ephemeral_hints_(ephemeral_hints_size(), Hints(), zone) { Handle<JSFunction> closure; if (function.closure().ToHandle(&closure)) { - closure_hints_.AddConstant(closure); + closure_hints_.AddConstant(closure, zone); } else { - closure_hints_.AddFunctionBlueprint(function.blueprint()); + closure_hints_.AddFunctionBlueprint(function.blueprint(), zone); } // Consume blueprint context hint information. - current_context_hints().Add(function.blueprint().context_hints()); + current_context_hints().Add(function.blueprint().context_hints(), zone); } SerializerForBackgroundCompilation::Environment::Environment( Zone* zone, Isolate* isolate, CompilationSubject function, - base::Optional<Hints> new_target, const HintsVector& arguments) + base::Optional<Hints> new_target, const HintsVector& arguments, + MissingArgumentsPolicy padding) : Environment(zone, function) { // Copy the hints for the actually passed arguments, at most up to // the parameter_count. @@ -748,11 +849,14 @@ SerializerForBackgroundCompilation::Environment::Environment( ephemeral_hints_[i] = arguments[i]; } - // Pad the rest with "undefined". 
- Hints undefined_hint = - Hints::SingleConstant(isolate->factory()->undefined_value(), zone); - for (size_t i = arguments.size(); i < param_count; ++i) { - ephemeral_hints_[i] = undefined_hint; + if (padding == kMissingArgumentsAreUndefined) { + Hints undefined_hint = + Hints::SingleConstant(isolate->factory()->undefined_value(), zone); + for (size_t i = arguments.size(); i < param_count; ++i) { + ephemeral_hints_[i] = undefined_hint; + } + } else { + DCHECK_EQ(padding, kMissingArgumentsAreUnknown); } interpreter::Register new_target_reg = @@ -762,7 +866,7 @@ SerializerForBackgroundCompilation::Environment::Environment( if (new_target_reg.is_valid()) { DCHECK(register_hints(new_target_reg).IsEmpty()); if (new_target.has_value()) { - register_hints(new_target_reg).Add(*new_target); + register_hints(new_target_reg).Add(*new_target, zone); } } } @@ -785,10 +889,10 @@ void SerializerForBackgroundCompilation::Environment::Merge( CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size()); for (size_t i = 0; i < ephemeral_hints_.size(); ++i) { - ephemeral_hints_[i].Add(other->ephemeral_hints_[i]); + ephemeral_hints_[i].Add(other->ephemeral_hints_[i], zone_); } - return_value_hints_.Add(other->return_value_hints_); + return_value_hints_.Add(other->return_value_hints_, zone_); } std::ostream& operator<<( @@ -845,30 +949,33 @@ int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex( } SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags, - BailoutId osr_offset) + ZoneStats* zone_stats, JSHeapBroker* broker, + CompilationDependencies* dependencies, Handle<JSFunction> closure, + SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset) : broker_(broker), dependencies_(dependencies), - zone_(zone), - environment_(new (zone) Environment( - zone, CompilationSubject(closure, broker_->isolate(), zone))), - jump_target_environments_(zone), + zone_scope_(zone_stats, ZONE_NAME), + environment_(new (zone()) Environment( + zone(), CompilationSubject(closure, broker_->isolate(), zone()))), + jump_target_environments_(zone()), flags_(flags), osr_offset_(osr_offset) { JSFunctionRef(broker, closure).Serialize(); } SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - CompilationSubject function, base::Optional<Hints> new_target, - const HintsVector& arguments, SerializerForBackgroundCompilationFlags flags) + ZoneStats* zone_stats, JSHeapBroker* broker, + CompilationDependencies* dependencies, CompilationSubject function, + base::Optional<Hints> new_target, const HintsVector& arguments, + MissingArgumentsPolicy padding, + SerializerForBackgroundCompilationFlags flags) : broker_(broker), dependencies_(dependencies), - zone_(zone), - environment_(new (zone) Environment(zone, broker_->isolate(), function, - new_target, arguments)), - jump_target_environments_(zone), + zone_scope_(zone_stats, ZONE_NAME), + environment_(new (zone()) + Environment(zone(), broker_->isolate(), function, + new_target, arguments, padding)), + jump_target_environments_(zone()), flags_(flags), osr_offset_(BailoutId::None()) { TraceScope tracer( @@ -902,13 +1009,15 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized( Hints SerializerForBackgroundCompilation::Run() { TraceScope tracer(broker(), this, 
"SerializerForBackgroundCompilation::Run"); + TRACE_BROKER_MEMORY(broker(), "[serializer start] Broker zone usage: " + << broker()->zone()->allocation_size()); SharedFunctionInfoRef shared(broker(), environment()->function().shared()); FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector()); if (shared.IsSerializedForCompilation(feedback_vector_ref)) { TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo " << Brief(*shared.object()) << ", bailing out.\n"); - return Hints(zone()); + return Hints(); } shared.SetSerializedForCompilation(feedback_vector_ref); @@ -923,6 +1032,9 @@ Hints SerializerForBackgroundCompilation::Run() { feedback_vector_ref.Serialize(); TraverseBytecode(); + + TRACE_BROKER_MEMORY(broker(), "[serializer end] Broker zone usage: " + << broker()->zone()->allocation_size()); return environment()->return_value_hints(); } @@ -1036,12 +1148,19 @@ void SerializerForBackgroundCompilation::TraverseBytecode() { void SerializerForBackgroundCompilation::VisitGetIterator( BytecodeArrayIterator* iterator) { - AccessMode mode = AccessMode::kLoad; Hints const& receiver = environment()->register_hints(iterator->GetRegisterOperand(0)); Handle<Name> name = broker()->isolate()->factory()->iterator_symbol(); - FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode); + FeedbackSlot load_slot = iterator->GetSlotOperand(1); + ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), load_slot, + AccessMode::kLoad); + if (environment()->IsDead()) return; + + const Hints& callee = Hints(); + FeedbackSlot call_slot = iterator->GetSlotOperand(2); + HintsVector parameters({receiver}, zone()); + ProcessCallOrConstruct(callee, base::nullopt, parameters, call_slot, + kMissingArgumentsAreUndefined); } void SerializerForBackgroundCompilation::VisitGetSuperConstructor( @@ -1057,72 +1176,74 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor( map.SerializePrototype(); ObjectRef proto = map.prototype(); if (proto.IsHeapObject() && proto.AsHeapObject().map().is_constructor()) { - environment()->register_hints(dst).AddConstant(proto.object()); + environment()->register_hints(dst).AddConstant(proto.object(), zone()); } } } void SerializerForBackgroundCompilation::VisitGetTemplateObject( BytecodeArrayIterator* iterator) { - ObjectRef description( + TemplateObjectDescriptionRef description( broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate())); FeedbackSlot slot = iterator->GetSlotOperand(1); - FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector()); + FeedbackSource source(feedback_vector(), slot); SharedFunctionInfoRef shared(broker(), environment()->function().shared()); - JSArrayRef template_object = - shared.GetTemplateObject(description, feedback_vector_ref, slot, - SerializationPolicy::kSerializeIfNeeded); + JSArrayRef template_object = shared.GetTemplateObject( + description, source, SerializationPolicy::kSerializeIfNeeded); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().AddConstant(template_object.object()); + environment()->accumulator_hints().AddConstant(template_object.object(), + zone()); } void SerializerForBackgroundCompilation::VisitLdaTrue( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - broker()->isolate()->factory()->true_value()); + broker()->isolate()->factory()->true_value(), zone()); } void 
SerializerForBackgroundCompilation::VisitLdaFalse( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - broker()->isolate()->factory()->false_value()); + broker()->isolate()->factory()->false_value(), zone()); } void SerializerForBackgroundCompilation::VisitLdaTheHole( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - broker()->isolate()->factory()->the_hole_value()); + broker()->isolate()->factory()->the_hole_value(), zone()); } void SerializerForBackgroundCompilation::VisitLdaUndefined( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - broker()->isolate()->factory()->undefined_value()); + broker()->isolate()->factory()->undefined_value(), zone()); } void SerializerForBackgroundCompilation::VisitLdaNull( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - broker()->isolate()->factory()->null_value()); + broker()->isolate()->factory()->null_value(), zone()); } void SerializerForBackgroundCompilation::VisitLdaZero( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - handle(Smi::FromInt(0), broker()->isolate())); + handle(Smi::FromInt(0), broker()->isolate()), zone()); } void SerializerForBackgroundCompilation::VisitLdaSmi( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().AddConstant(handle( - Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate())); + environment()->accumulator_hints().AddConstant( + handle(Smi::FromInt(iterator->GetImmediateOperand(0)), + broker()->isolate()), + zone()); } void SerializerForBackgroundCompilation::VisitInvokeIntrinsic( @@ -1215,7 +1336,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant( ObjectRef object( broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate())); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().AddConstant(object.object()); + environment()->accumulator_hints().AddConstant(object.object(), zone()); } void SerializerForBackgroundCompilation::VisitPushContext( @@ -1225,12 +1346,12 @@ void SerializerForBackgroundCompilation::VisitPushContext( Hints& saved_context_hints = environment()->register_hints(iterator->GetRegisterOperand(0)); saved_context_hints.Clear(); - saved_context_hints.Add(current_context_hints); + saved_context_hints.Add(current_context_hints, zone()); // New context is in the accumulator. Put those hints into the current context // register hints. 
current_context_hints.Clear(); - current_context_hints.Add(environment()->accumulator_hints()); + current_context_hints.Add(environment()->accumulator_hints(), zone()); } void SerializerForBackgroundCompilation::VisitPopContext( @@ -1239,7 +1360,7 @@ void SerializerForBackgroundCompilation::VisitPopContext( Hints& new_context_hints = environment()->register_hints(iterator->GetRegisterOperand(0)); environment()->current_context_hints().Clear(); - environment()->current_context_hints().Add(new_context_hints); + environment()->current_context_hints().Add(new_context_hints, zone()); } void SerializerForBackgroundCompilation::ProcessImmutableLoad( @@ -1251,7 +1372,7 @@ void SerializerForBackgroundCompilation::ProcessImmutableLoad( // If requested, record the object as a hint for the result value. if (result_hints != nullptr && slot_value.has_value()) { - result_hints->AddConstant(slot_value.value().object()); + result_hints->AddConstant(slot_value.value().object(), zone()); } } @@ -1294,11 +1415,11 @@ void SerializerForBackgroundCompilation::VisitLdaContextSlot( environment()->register_hints(iterator->GetRegisterOperand(0)); const int slot = iterator->GetIndexOperand(1); const int depth = iterator->GetUnsignedImmediateOperand(2); - Hints new_accumulator_hints(zone()); + Hints new_accumulator_hints; ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot, &new_accumulator_hints); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().Add(new_accumulator_hints); + environment()->accumulator_hints().Add(new_accumulator_hints, zone()); } void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot( @@ -1306,11 +1427,11 @@ void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot( const int slot = iterator->GetIndexOperand(0); const int depth = 0; Hints const& context_hints = environment()->current_context_hints(); - Hints new_accumulator_hints(zone()); + Hints new_accumulator_hints; ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot, &new_accumulator_hints); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().Add(new_accumulator_hints); + environment()->accumulator_hints().Add(new_accumulator_hints, zone()); } void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot( @@ -1319,11 +1440,11 @@ void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot( const int depth = iterator->GetUnsignedImmediateOperand(2); Hints const& context_hints = environment()->register_hints(iterator->GetRegisterOperand(0)); - Hints new_accumulator_hints(zone()); + Hints new_accumulator_hints; ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, &new_accumulator_hints); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().Add(new_accumulator_hints); + environment()->accumulator_hints().Add(new_accumulator_hints, zone()); } void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot( @@ -1331,11 +1452,11 @@ void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot( const int slot = iterator->GetIndexOperand(0); const int depth = 0; Hints const& context_hints = environment()->current_context_hints(); - Hints new_accumulator_hints(zone()); + Hints new_accumulator_hints; ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, &new_accumulator_hints); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().Add(new_accumulator_hints); + environment()->accumulator_hints().Add(new_accumulator_hints, zone()); } void 
SerializerForBackgroundCompilation::ProcessModuleVariableAccess( @@ -1344,7 +1465,7 @@ void SerializerForBackgroundCompilation::ProcessModuleVariableAccess( const int depth = iterator->GetUnsignedImmediateOperand(1); Hints const& context_hints = environment()->current_context_hints(); - Hints result_hints(zone()); + Hints result_hints; ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, &result_hints); for (Handle<Object> constant : result_hints.constants()) { @@ -1392,14 +1513,15 @@ void SerializerForBackgroundCompilation::VisitLdar( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().Add( - environment()->register_hints(iterator->GetRegisterOperand(0))); + environment()->register_hints(iterator->GetRegisterOperand(0)), zone()); } void SerializerForBackgroundCompilation::VisitStar( BytecodeArrayIterator* iterator) { interpreter::Register reg = iterator->GetRegisterOperand(0); environment()->register_hints(reg).Clear(); - environment()->register_hints(reg).Add(environment()->accumulator_hints()); + environment()->register_hints(reg).Add(environment()->accumulator_hints(), + zone()); } void SerializerForBackgroundCompilation::VisitMov( @@ -1407,7 +1529,8 @@ void SerializerForBackgroundCompilation::VisitMov( interpreter::Register src = iterator->GetRegisterOperand(0); interpreter::Register dst = iterator->GetRegisterOperand(1); environment()->register_hints(dst).Clear(); - environment()->register_hints(dst).Add(environment()->register_hints(src)); + environment()->register_hints(dst).Add(environment()->register_hints(src), + zone()); } void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral( @@ -1415,6 +1538,9 @@ void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral( Handle<String> constant_pattern = Handle<String>::cast( iterator->GetConstantForIndexOperand(0, broker()->isolate())); StringRef description(broker(), constant_pattern); + FeedbackSlot slot = iterator->GetSlotOperand(1); + FeedbackSource source(feedback_vector(), slot); + broker()->ProcessFeedbackForRegExpLiteral(source); environment()->accumulator_hints().Clear(); } @@ -1425,6 +1551,17 @@ void SerializerForBackgroundCompilation::VisitCreateArrayLiteral( iterator->GetConstantForIndexOperand(0, broker()->isolate())); ArrayBoilerplateDescriptionRef description(broker(), array_boilerplate_description); + FeedbackSlot slot = iterator->GetSlotOperand(1); + FeedbackSource source(feedback_vector(), slot); + broker()->ProcessFeedbackForArrayOrObjectLiteral(source); + environment()->accumulator_hints().Clear(); +} + +void SerializerForBackgroundCompilation::VisitCreateEmptyArrayLiteral( + BytecodeArrayIterator* iterator) { + FeedbackSlot slot = iterator->GetSlotOperand(0); + FeedbackSource source(feedback_vector(), slot); + broker()->ProcessFeedbackForArrayOrObjectLiteral(source); environment()->accumulator_hints().Clear(); } @@ -1434,6 +1571,9 @@ void SerializerForBackgroundCompilation::VisitCreateObjectLiteral( Handle<ObjectBoilerplateDescription>::cast( iterator->GetConstantForIndexOperand(0, broker()->isolate())); ObjectBoilerplateDescriptionRef description(broker(), constant_properties); + FeedbackSlot slot = iterator->GetSlotOperand(1); + FeedbackSource source(feedback_vector(), slot); + broker()->ProcessFeedbackForArrayOrObjectLiteral(source); environment()->accumulator_hints().Clear(); } @@ -1490,7 +1630,8 @@ void SerializerForBackgroundCompilation::ProcessCreateContext( for (auto x : current_context_hints.constants()) { if 
(x->IsContext()) { Handle<Context> as_context(Handle<Context>::cast(x)); - accumulator_hints.AddVirtualContext(VirtualContext(1, as_context)); + accumulator_hints.AddVirtualContext(VirtualContext(1, as_context), + zone()); } } @@ -1498,7 +1639,7 @@ void SerializerForBackgroundCompilation::ProcessCreateContext( // it of distance {existing distance} + 1. for (auto x : current_context_hints.virtual_contexts()) { accumulator_hints.AddVirtualContext( - VirtualContext(x.distance + 1, x.context)); + VirtualContext(x.distance + 1, x.context), zone()); } } @@ -1518,7 +1659,7 @@ void SerializerForBackgroundCompilation::VisitCreateClosure( FunctionBlueprint blueprint(shared, Handle<FeedbackVector>::cast(cell_value), environment()->current_context_hints()); - environment()->accumulator_hints().AddFunctionBlueprint(blueprint); + environment()->accumulator_hints().AddFunctionBlueprint(blueprint, zone()); } } @@ -1542,7 +1683,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0( Hints receiver = Hints::SingleConstant( broker()->isolate()->factory()->undefined_value(), zone()); HintsVector parameters({receiver}, zone()); - ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); + ProcessCallOrConstruct(callee, base::nullopt, parameters, slot, + kMissingArgumentsAreUndefined); } void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1( @@ -1556,7 +1698,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1( Hints receiver = Hints::SingleConstant( broker()->isolate()->factory()->undefined_value(), zone()); HintsVector parameters({receiver, arg0}, zone()); - ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); + ProcessCallOrConstruct(callee, base::nullopt, parameters, slot, + kMissingArgumentsAreUndefined); } void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2( @@ -1572,7 +1715,8 @@ void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2( Hints receiver = Hints::SingleConstant( broker()->isolate()->factory()->undefined_value(), zone()); HintsVector parameters({receiver, arg0, arg1}, zone()); - ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); + ProcessCallOrConstruct(callee, base::nullopt, parameters, slot, + kMissingArgumentsAreUndefined); } void SerializerForBackgroundCompilation::VisitCallAnyReceiver( @@ -1616,7 +1760,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty0( FeedbackSlot slot = iterator->GetSlotOperand(2); HintsVector parameters({receiver}, zone()); - ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); + ProcessCallOrConstruct(callee, base::nullopt, parameters, slot, + kMissingArgumentsAreUndefined); } void SerializerForBackgroundCompilation::VisitCallProperty1( @@ -1630,7 +1775,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty1( FeedbackSlot slot = iterator->GetSlotOperand(3); HintsVector parameters({receiver, arg0}, zone()); - ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); + ProcessCallOrConstruct(callee, base::nullopt, parameters, slot, + kMissingArgumentsAreUndefined); } void SerializerForBackgroundCompilation::VisitCallProperty2( @@ -1646,7 +1792,8 @@ void SerializerForBackgroundCompilation::VisitCallProperty2( FeedbackSlot slot = iterator->GetSlotOperand(4); HintsVector parameters({receiver, arg0, arg1}, zone()); - ProcessCallOrConstruct(callee, base::nullopt, parameters, slot); + ProcessCallOrConstruct(callee, base::nullopt, parameters, slot, + kMissingArgumentsAreUndefined); } void 
SerializerForBackgroundCompilation::VisitCallWithSpread( @@ -1657,7 +1804,7 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread( int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); FeedbackSlot slot = iterator->GetSlotOperand(3); ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count, - slot, true); + slot, kMissingArgumentsAreUnknown); } void SerializerForBackgroundCompilation::VisitCallJSRuntime( @@ -1677,61 +1824,45 @@ void SerializerForBackgroundCompilation::VisitCallJSRuntime( Hints SerializerForBackgroundCompilation::RunChildSerializer( CompilationSubject function, base::Optional<Hints> new_target, - const HintsVector& arguments, bool with_spread) { - if (with_spread) { - DCHECK_LT(0, arguments.size()); - // Pad the missing arguments in case we were called with spread operator. - // Drop the last actually passed argument, which contains the spread. - // We don't know what the spread element produces. Therefore we pretend - // that the function is called with the maximal number of parameters and - // that we have no information about the parameters that were not - // explicitly provided. - HintsVector padded = arguments; - padded.pop_back(); // Remove the spread element. - // Fill the rest with empty hints. - padded.resize( - function.blueprint().shared()->GetBytecodeArray().parameter_count(), - Hints(zone())); - return RunChildSerializer(function, new_target, padded, false); - } - + const HintsVector& arguments, MissingArgumentsPolicy padding) { SerializerForBackgroundCompilation child_serializer( - broker(), dependencies(), zone(), function, new_target, arguments, - flags()); - return child_serializer.Run(); -} - -bool SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct( - Handle<SharedFunctionInfo> shared, const HintsVector& arguments, - SpeculationMode speculation_mode) { + zone_scope_.zone_stats(), broker(), dependencies(), function, new_target, + arguments, padding, flags()); + // The Hints returned by the call to Run are allocated in the zone + // created by the child serializer. Adding those hints to a hints + // object created in our zone will preserve the information. 
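A standalone sketch of the lifetime rule behind this hunk: the child serializer's result hints live in its own zone, which dies when RunChildSerializer returns, so they must be copied into the caller's zone first (this is also why Hints::Add and AddConstant now take a Zone* argument). The sketch uses std::pmr arenas in place of V8 zones; every identifier in it is made up for illustration.

#include <cstdio>
#include <memory_resource>
#include <vector>

// Result is allocated in the short-lived child arena.
std::pmr::vector<int> RunChild(std::pmr::memory_resource* child_zone) {
  return std::pmr::vector<int>({1, 2, 3}, child_zone);
}

int main() {
  std::pmr::monotonic_buffer_resource parent_zone;
  std::pmr::vector<int> surviving(&parent_zone);
  {
    std::pmr::monotonic_buffer_resource child_zone;  // dies at end of scope
    std::pmr::vector<int> child_result = RunChild(&child_zone);
    // Copy element-wise into storage owned by the parent zone before the
    // child zone goes away.
    surviving.assign(child_result.begin(), child_result.end());
  }  // child_zone released; 'surviving' remains valid
  std::printf("%zu hints survive the child zone\n", surviving.size());
}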
+ Hints hints; + hints.AddFromChildSerializer(child_serializer.Run(), zone()); + return hints; +} + +void SerializerForBackgroundCompilation::ProcessSFIForCallOrConstruct( + Callee const& callee, base::Optional<Hints> new_target, + const HintsVector& arguments, SpeculationMode speculation_mode, + MissingArgumentsPolicy padding) { + Handle<SharedFunctionInfo> shared = callee.shared(broker()->isolate()); if (shared->IsApiFunction()) { ProcessApiCall(shared, arguments); DCHECK(!shared->IsInlineable()); } else if (shared->HasBuiltinId()) { - ProcessBuiltinCall(shared, arguments, speculation_mode); + ProcessBuiltinCall(shared, new_target, arguments, speculation_mode, + padding); DCHECK(!shared->IsInlineable()); + } else if (shared->IsInlineable() && callee.HasFeedbackVector()) { + CompilationSubject subject = + callee.ToCompilationSubject(broker()->isolate(), zone()); + environment()->accumulator_hints().Add( + RunChildSerializer(subject, new_target, arguments, padding), zone()); } - return shared->IsInlineable(); -} - -bool SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct( - Handle<JSFunction> function, const HintsVector& arguments, - SpeculationMode speculation_mode) { - JSFunctionRef(broker(), function).Serialize(); - - Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate()); - - return ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode) && - function->has_feedback_vector(); } namespace { -// Returns the innermost bound target, if it's a JSFunction and inserts -// all bound arguments and {original_arguments} into {expanded_arguments} -// in the appropriate order. -MaybeHandle<JSFunction> UnrollBoundFunction( - JSBoundFunctionRef const& bound_function, JSHeapBroker* broker, - const HintsVector& original_arguments, HintsVector* expanded_arguments) { +// Returns the innermost bound target and inserts all bound arguments and +// {original_arguments} into {expanded_arguments} in the appropriate order. 
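A standalone analogue of the unrolling described in the comment above: UnrollBoundFunction now returns the innermost bound target as a JSReceiverRef and leaves the is-it-a-JSFunction check to the caller, while the argument expansion is unchanged: bound arguments of inner binds come first, then those of outer binds, then the call-site arguments. All names in the sketch are made up; only the ordering is taken from the patch.

#include <cstdio>
#include <string>
#include <vector>

struct Callable {
  std::string name;                // used when this is the innermost target
  const Callable* bound_target;    // non-null for "bound" callables
  std::vector<int> bound_args;     // arguments fixed by this bind
};

// Walks to the innermost target and builds the effective argument list.
const Callable& Unroll(const Callable& callee,
                       const std::vector<int>& call_args,
                       std::vector<int>* expanded) {
  const Callable* target = &callee;
  std::vector<int> reversed;  // collected back-to-front, outermost bind first
  while (target->bound_target != nullptr) {
    for (auto it = target->bound_args.rbegin();
         it != target->bound_args.rend(); ++it) {
      reversed.push_back(*it);
    }
    target = target->bound_target;
  }
  // One final reversal puts the innermost bind's arguments first, matching
  // Function.prototype.bind semantics.
  expanded->assign(reversed.rbegin(), reversed.rend());
  expanded->insert(expanded->end(), call_args.begin(), call_args.end());
  return *target;
}

int main() {
  Callable f{"f", nullptr, {}};
  Callable g{"g", &f, {1, 2}};  // like const g = f.bind(receiver, 1, 2)
  Callable h{"h", &g, {0}};     // like const h = g.bind(receiver, 0)
  std::vector<int> expanded;
  const Callable& target = Unroll(h, {3, 4}, &expanded);
  std::printf("%s(", target.name.c_str());
  for (int a : expanded) std::printf(" %d", a);  // prints: f( 1 2 0 3 4 )
  std::printf(" )\n");
}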
+JSReceiverRef UnrollBoundFunction(JSBoundFunctionRef const& bound_function, + JSHeapBroker* broker, + const HintsVector& original_arguments, + HintsVector* expanded_arguments) { DCHECK(expanded_arguments->empty()); JSReceiverRef target = bound_function.AsJSReceiver(); @@ -1750,8 +1881,6 @@ MaybeHandle<JSFunction> UnrollBoundFunction( reversed_bound_arguments.push_back(arg); } - if (!target.IsJSFunction()) return MaybeHandle<JSFunction>(); - expanded_arguments->insert(expanded_arguments->end(), reversed_bound_arguments.rbegin(), reversed_bound_arguments.rend()); @@ -1759,13 +1888,38 @@ MaybeHandle<JSFunction> UnrollBoundFunction( original_arguments.begin(), original_arguments.end()); - return target.AsJSFunction().object(); + return target; } } // namespace +void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct( + Handle<Object> callee, base::Optional<Hints> new_target, + const HintsVector& arguments, SpeculationMode speculation_mode, + MissingArgumentsPolicy padding) { + const HintsVector* actual_arguments = &arguments; + HintsVector expanded_arguments(zone()); + if (callee->IsJSBoundFunction()) { + JSBoundFunctionRef bound_function(broker(), + Handle<JSBoundFunction>::cast(callee)); + bound_function.Serialize(); + callee = UnrollBoundFunction(bound_function, broker(), arguments, + &expanded_arguments) + .object(); + actual_arguments = &expanded_arguments; + } + if (!callee->IsJSFunction()) return; + + JSFunctionRef function(broker(), Handle<JSFunction>::cast(callee)); + function.Serialize(); + Callee new_callee(function.object()); + ProcessSFIForCallOrConstruct(new_callee, new_target, *actual_arguments, + speculation_mode, padding); +} + void SerializerForBackgroundCompilation::ProcessCallOrConstruct( Hints callee, base::Optional<Hints> new_target, - const HintsVector& arguments, FeedbackSlot slot, bool with_spread) { + const HintsVector& arguments, FeedbackSlot slot, + MissingArgumentsPolicy padding) { SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation; if (!slot.IsInvalid()) { FeedbackSource source(feedback_vector(), slot); @@ -1782,11 +1936,11 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct( // site, and it may make sense to add the Array JSFunction constant. if (new_target.has_value()) { // Construct; feedback is new_target, which often is also the callee. - new_target->AddConstant(target->object()); - callee.AddConstant(target->object()); + new_target->AddConstant(target->object(), zone()); + callee.AddConstant(target->object(), zone()); } else { // Call; target is callee. - callee.AddConstant(target->object()); + callee.AddConstant(target->object(), zone()); } } } @@ -1795,50 +1949,22 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct( environment()->accumulator_hints().Clear(); // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct. 
- for (auto hint : callee.constants()) { - const HintsVector* actual_arguments = &arguments; - Handle<JSFunction> function; - HintsVector expanded_arguments(zone()); - if (hint->IsJSBoundFunction()) { - JSBoundFunctionRef bound_function(broker(), - Handle<JSBoundFunction>::cast(hint)); - bound_function.Serialize(); - - MaybeHandle<JSFunction> maybe_function = UnrollBoundFunction( - bound_function, broker(), arguments, &expanded_arguments); - if (maybe_function.is_null()) continue; - function = maybe_function.ToHandleChecked(); - actual_arguments = &expanded_arguments; - } else if (hint->IsJSFunction()) { - function = Handle<JSFunction>::cast(hint); - } else { - continue; - } - - if (ProcessCalleeForCallOrConstruct(function, *actual_arguments, - speculation_mode)) { - environment()->accumulator_hints().Add(RunChildSerializer( - CompilationSubject(function, broker()->isolate(), zone()), new_target, - *actual_arguments, with_spread)); - } + for (auto constant : callee.constants()) { + ProcessCalleeForCallOrConstruct(constant, new_target, arguments, + speculation_mode, padding); } // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct. for (auto hint : callee.function_blueprints()) { - Handle<SharedFunctionInfo> shared = hint.shared(); - if (!ProcessSFIForCallOrConstruct(shared, arguments, speculation_mode)) { - continue; - } - - environment()->accumulator_hints().Add(RunChildSerializer( - CompilationSubject(hint), new_target, arguments, with_spread)); + ProcessSFIForCallOrConstruct(Callee(hint), new_target, arguments, + speculation_mode, padding); } } void SerializerForBackgroundCompilation::ProcessCallVarArgs( ConvertReceiverMode receiver_mode, Hints const& callee, interpreter::Register first_reg, int reg_count, FeedbackSlot slot, - bool with_spread) { + MissingArgumentsPolicy padding) { HintsVector arguments(zone()); // The receiver is either given in the first register or it is implicitly // the {undefined} value. 
@@ -1848,7 +1974,7 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs( } environment()->ExportRegisterHints(first_reg, reg_count, &arguments); - ProcessCallOrConstruct(callee, base::nullopt, arguments, slot); + ProcessCallOrConstruct(callee, base::nullopt, arguments, slot, padding); } void SerializerForBackgroundCompilation::ProcessApiCall( @@ -1866,17 +1992,17 @@ void SerializerForBackgroundCompilation::ProcessApiCall( FunctionTemplateInfoRef target_template_info( broker(), handle(target->function_data(), broker()->isolate())); if (!target_template_info.has_call_code()) return; - target_template_info.SerializeCallCode(); SharedFunctionInfoRef target_ref(broker(), target); target_ref.SerializeFunctionTemplateInfo(); if (target_template_info.accept_any_receiver() && - target_template_info.is_signature_undefined()) + target_template_info.is_signature_undefined()) { return; + } - CHECK_GE(arguments.size(), 1); + if (arguments.empty()) return; Hints const& receiver_hints = arguments[0]; for (auto hint : receiver_hints.constants()) { if (hint->IsUndefined()) { @@ -1920,8 +2046,9 @@ void SerializerForBackgroundCompilation::ProcessHintsForObjectCreate( } void SerializerForBackgroundCompilation::ProcessBuiltinCall( - Handle<SharedFunctionInfo> target, const HintsVector& arguments, - SpeculationMode speculation_mode) { + Handle<SharedFunctionInfo> target, base::Optional<Hints> new_target, + const HintsVector& arguments, SpeculationMode speculation_mode, + MissingArgumentsPolicy padding) { DCHECK(target->HasBuiltinId()); const int builtin_id = target->builtin_id(); const char* name = Builtins::name(builtin_id); @@ -1963,20 +2090,31 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall( case Builtins::kPromiseResolveTrampoline: // For JSCallReducer::ReducePromiseInternalResolve and // JSNativeContextSpecialization::ReduceJSResolvePromise. - if (arguments.size() >= 2) { - Hints const& resolution_hints = arguments[1]; + if (arguments.size() >= 1) { + Hints const& resolution_hints = + arguments.size() >= 2 + ? arguments[1] + : Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), + zone()); ProcessHintsForPromiseResolve(resolution_hints); } break; case Builtins::kPromiseInternalResolve: // For JSCallReducer::ReducePromiseInternalResolve and // JSNativeContextSpecialization::ReduceJSResolvePromise. - if (arguments.size() >= 3) { - Hints const& resolution_hints = arguments[2]; + if (arguments.size() >= 2) { + Hints const& resolution_hints = + arguments.size() >= 3 + ? arguments[2] + : Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), + zone()); ProcessHintsForPromiseResolve(resolution_hints); } break; case Builtins::kRegExpPrototypeTest: + case Builtins::kRegExpPrototypeTestFast: // For JSCallReducer::ReduceRegExpPrototypeTest. if (arguments.size() >= 1 && speculation_mode != SpeculationMode::kDisallowSpeculation) { @@ -1990,35 +2128,105 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall( case Builtins::kArrayPrototypeFind: case Builtins::kArrayPrototypeFindIndex: case Builtins::kArrayMap: + case Builtins::kArraySome: + if (arguments.size() >= 2 && + speculation_mode != SpeculationMode::kDisallowSpeculation) { + Hints const& callback = arguments[1]; + // "Call(callbackfn, T, « kValue, k, O »)" + HintsVector new_arguments(zone()); + new_arguments.push_back( + arguments.size() < 3 + ? 
Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone()) + : arguments[2]); // T + new_arguments.push_back(Hints()); // kValue + new_arguments.push_back(Hints()); // k + new_arguments.push_back(arguments[0]); // O + for (auto constant : callback.constants()) { + ProcessCalleeForCallOrConstruct(constant, base::nullopt, + new_arguments, + SpeculationMode::kDisallowSpeculation, + kMissingArgumentsAreUndefined); + } + } + break; case Builtins::kArrayReduce: case Builtins::kArrayReduceRight: - case Builtins::kArraySome: if (arguments.size() >= 2 && speculation_mode != SpeculationMode::kDisallowSpeculation) { - Hints const& callback_hints = arguments[1]; - ProcessHintsForFunctionCall(callback_hints); + Hints const& callback = arguments[1]; + // "Call(callbackfn, undefined, « accumulator, kValue, k, O »)" + HintsVector new_arguments(zone()); + new_arguments.push_back(Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone())); + new_arguments.push_back(Hints()); // accumulator + new_arguments.push_back(Hints()); // kValue + new_arguments.push_back(Hints()); // k + new_arguments.push_back(arguments[0]); // O + for (auto constant : callback.constants()) { + ProcessCalleeForCallOrConstruct(constant, base::nullopt, + new_arguments, + SpeculationMode::kDisallowSpeculation, + kMissingArgumentsAreUndefined); + } } break; + // TODO(neis): At least for Array* we should look at blueprints too. + // TODO(neis): Might need something like a FunctionBlueprint but for + // creating bound functions rather than creating closures. case Builtins::kFunctionPrototypeApply: - case Builtins::kFunctionPrototypeCall: + if (arguments.size() >= 1) { + // Drop hints for all arguments except the user-given receiver. + Hints new_receiver = + arguments.size() >= 2 + ? arguments[1] + : Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), + zone()); + HintsVector new_arguments({new_receiver}, zone()); + for (auto constant : arguments[0].constants()) { + ProcessCalleeForCallOrConstruct(constant, base::nullopt, + new_arguments, + SpeculationMode::kDisallowSpeculation, + kMissingArgumentsAreUnknown); + } + } + break; case Builtins::kPromiseConstructor: - // TODO(mslekova): Since the reducer for all these introduce a - // JSCall/JSConstruct that will again get optimized by the JSCallReducer, - // we basically might have to do all the serialization that we do for that - // here as well. The only difference is that the new JSCall/JSConstruct - // has speculation disabled, causing the JSCallReducer to do much less - // work. To account for that, ProcessCallOrConstruct should have a way of - // taking the speculation mode as an argument rather than getting that - // from the feedback. (Also applies to Reflect.apply and - // Reflect.construct.) 
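For the Array iterating builtins handled above (for example Builtins::kArraySome and the reduce variants), the serializer no longer just serializes the callback; it re-enters the callee-processing path with a synthesized argument list mirroring the spec call Call(callbackfn, T, « kValue, k, O »). A simplified standalone sketch of how that argument vector is assembled, with strings standing in for Hints and every name made up for illustration:

#include <cstdio>
#include <string>
#include <vector>

using Hint = std::string;
using HintsVector = std::vector<Hint>;

// Builds the hints for "Call(callbackfn, T, « kValue, k, O »)".
// 'args' are the hints at the Array.prototype.some/map/... call site:
// args[0] = receiver O, args[1] = callbackfn, args[2] = optional thisArg T.
HintsVector CallbackArguments(const HintsVector& args) {
  HintsVector new_arguments;
  new_arguments.push_back(args.size() < 3 ? Hint("undefined") : args[2]);  // T
  new_arguments.push_back(Hint());   // kValue: the element, unknown
  new_arguments.push_back(Hint());   // k: the index, unknown
  new_arguments.push_back(args[0]);  // O: the array receiver
  return new_arguments;
}

int main() {
  HintsVector at_call_site = {"the_array", "the_callback", "this_arg"};
  for (const Hint& h : CallbackArguments(at_call_site)) {
    std::printf("[%s] ", h.empty() ? "<unknown>" : h.c_str());
  }
  std::printf("\n");  // prints: [this_arg] [<unknown>] [<unknown>] [the_array]
}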
if (arguments.size() >= 1) { - ProcessHintsForFunctionCall(arguments[0]); + // "Call(executor, undefined, « resolvingFunctions.[[Resolve]], + // resolvingFunctions.[[Reject]] »)" + HintsVector new_arguments( + {Hints::SingleConstant( + broker()->isolate()->factory()->undefined_value(), zone())}, + zone()); + for (auto constant : arguments[0].constants()) { + ProcessCalleeForCallOrConstruct(constant, base::nullopt, + new_arguments, + SpeculationMode::kDisallowSpeculation, + kMissingArgumentsAreUnknown); + } + } + break; + case Builtins::kFunctionPrototypeCall: + if (arguments.size() >= 1) { + HintsVector new_arguments(arguments.begin() + 1, arguments.end(), + zone()); + for (auto constant : arguments[0].constants()) { + ProcessCalleeForCallOrConstruct( + constant, base::nullopt, new_arguments, + SpeculationMode::kDisallowSpeculation, padding); + } } break; case Builtins::kReflectApply: case Builtins::kReflectConstruct: if (arguments.size() >= 2) { - ProcessHintsForFunctionCall(arguments[1]); + for (auto constant : arguments[1].constants()) { + if (constant->IsJSFunction()) { + JSFunctionRef(broker(), constant).Serialize(); + } + } } break; case Builtins::kObjectPrototypeIsPrototypeOf: @@ -2181,13 +2389,6 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest( } } -void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall( - Hints const& target_hints) { - for (auto constant : target_hints.constants()) { - if (constant->IsJSFunction()) JSFunctionRef(broker(), constant).Serialize(); - } -} - namespace { void ProcessMapForFunctionBind(MapRef map) { map.SerializePrototype(); @@ -2195,8 +2396,9 @@ void ProcessMapForFunctionBind(MapRef map) { JSFunction::kNameDescriptorIndex) + 1; if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) { - map.SerializeOwnDescriptor(JSFunction::kLengthDescriptorIndex); - map.SerializeOwnDescriptor(JSFunction::kNameDescriptorIndex); + map.SerializeOwnDescriptor( + InternalIndex(JSFunction::kLengthDescriptorIndex)); + map.SerializeOwnDescriptor(InternalIndex(JSFunction::kNameDescriptorIndex)); } } } // namespace @@ -2261,7 +2463,8 @@ void SerializerForBackgroundCompilation::ProcessJump( void SerializerForBackgroundCompilation::VisitReturn( BytecodeArrayIterator* iterator) { - environment()->return_value_hints().Add(environment()->accumulator_hints()); + environment()->return_value_hints().Add(environment()->accumulator_hints(), + zone()); environment()->ClearEphemeralHints(); } @@ -2301,7 +2504,8 @@ void SerializerForBackgroundCompilation::VisitConstruct( HintsVector arguments(zone()); environment()->ExportRegisterHints(first_reg, reg_count, &arguments); - ProcessCallOrConstruct(callee, new_target, arguments, slot); + ProcessCallOrConstruct(callee, new_target, arguments, slot, + kMissingArgumentsAreUndefined); } void SerializerForBackgroundCompilation::VisitConstructWithSpread( @@ -2315,8 +2519,10 @@ void SerializerForBackgroundCompilation::VisitConstructWithSpread( HintsVector arguments(zone()); environment()->ExportRegisterHints(first_reg, reg_count, &arguments); - - ProcessCallOrConstruct(callee, new_target, arguments, slot, true); + DCHECK(!arguments.empty()); + arguments.pop_back(); // Remove the spread element. 
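The construct-with-spread path above now drops the spread element itself and, in the call just below, passes kMissingArgumentsAreUnknown, whereas ordinary calls and constructs pass kMissingArgumentsAreUndefined; the old padding logic inside RunChildSerializer is gone. A standalone sketch of what the two policies mean for the parameters the callee is assumed to see (strings stand in for Hints; the helper name is made up for illustration):

#include <cstdio>
#include <string>
#include <vector>

enum MissingArgumentsPolicy {
  kMissingArgumentsAreUndefined,  // pad with the 'undefined' constant
  kMissingArgumentsAreUnknown     // pad with empty, no-information hints
};

std::vector<std::string> PadArguments(std::vector<std::string> args,
                                      size_t parameter_count,
                                      MissingArgumentsPolicy padding) {
  while (args.size() < parameter_count) {
    args.push_back(padding == kMissingArgumentsAreUndefined ? "undefined"
                                                            : "<unknown>");
  }
  return args;
}

int main() {
  auto a = PadArguments({"receiver", "x"}, 4, kMissingArgumentsAreUndefined);
  auto b = PadArguments({"receiver", "x"}, 4, kMissingArgumentsAreUnknown);
  for (const auto& s : a) std::printf("%s ", s.c_str());  // receiver x undefined undefined
  std::printf("\n");
  for (const auto& s : b) std::printf("%s ", s.c_str());  // receiver x <unknown> <unknown>
  std::printf("\n");
}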
+ ProcessCallOrConstruct(callee, new_target, arguments, slot, + kMissingArgumentsAreUnknown); } void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot, @@ -2333,7 +2539,7 @@ void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot, base::Optional<ObjectRef> value = feedback.AsGlobalAccess().GetConstantHint(); if (value.has_value()) { - environment()->accumulator_hints().AddConstant(value->object()); + environment()->accumulator_hints().AddConstant(value->object(), zone()); } } else { DCHECK(feedback.IsInsufficient()); @@ -2480,9 +2686,16 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess( receiver_map.SerializeRootMap(); // For JSNativeContextSpecialization::ReduceNamedAccess. - if (receiver_map.IsMapOfTargetGlobalProxy()) { - broker()->target_native_context().global_proxy_object().GetPropertyCell( + JSGlobalProxyRef global_proxy = + broker()->target_native_context().global_proxy_object(); + JSGlobalObjectRef global_object = + broker()->target_native_context().global_object(); + if (receiver_map.equals(global_proxy.map())) { + base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell( name, SerializationPolicy::kSerializeIfNeeded); + if (access_mode == AccessMode::kLoad && cell.has_value()) { + new_accumulator_hints->AddConstant(cell->value().object(), zone()); + } } PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( @@ -2515,6 +2728,10 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess( FunctionTemplateInfoRef fti(broker(), access_info.constant()); if (fti.has_call_code()) fti.SerializeCallCode(); } + } else if (access_info.IsModuleExport()) { + // For JSNativeContextSpecialization::BuildPropertyLoad + DCHECK(!access_info.constant().is_null()); + CellRef(broker(), access_info.constant()); } // For PropertyAccessBuilder::TryBuildLoadConstantDataField @@ -2535,7 +2752,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess( access_info.field_representation(), access_info.field_index(), SerializationPolicy::kSerializeIfNeeded)); if (constant.has_value()) { - new_accumulator_hints->AddConstant(constant->object()); + new_accumulator_hints->AddConstant(constant->object(), zone()); } } } @@ -2565,7 +2782,7 @@ void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess( return; } - Hints new_accumulator_hints(zone()); + Hints new_accumulator_hints; switch (feedback.kind()) { case ProcessedFeedback::kElementAccess: ProcessElementAccess(receiver, key, feedback.AsElementAccess(), @@ -2583,14 +2800,14 @@ void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess( if (access_mode == AccessMode::kLoad) { environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().Add(new_accumulator_hints); + environment()->accumulator_hints().Add(new_accumulator_hints, zone()); } else { DCHECK(new_accumulator_hints.IsEmpty()); } } void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( - Hints receiver, NameRef const& name, FeedbackSlot slot, + Hints const& receiver, NameRef const& name, FeedbackSlot slot, AccessMode access_mode) { if (slot.IsInvalid() || feedback_vector().is_null()) return; FeedbackSource source(feedback_vector(), slot); @@ -2598,12 +2815,13 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name); if (BailoutOnUninitialized(feedback)) return; - Hints new_accumulator_hints(zone()); + Hints new_accumulator_hints; switch (feedback.kind()) 
{ case ProcessedFeedback::kNamedAccess: DCHECK(name.equals(feedback.AsNamedAccess().name())); ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode, &new_accumulator_hints); + // TODO(neis): Propagate feedback maps to receiver hints. break; case ProcessedFeedback::kInsufficient: break; @@ -2613,7 +2831,7 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( if (access_mode == AccessMode::kLoad) { environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().Add(new_accumulator_hints); + environment()->accumulator_hints().Add(new_accumulator_hints, zone()); } else { DCHECK(new_accumulator_hints.IsEmpty()); } @@ -2622,7 +2840,7 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( void SerializerForBackgroundCompilation::ProcessNamedAccess( Hints receiver, NamedAccessFeedback const& feedback, AccessMode access_mode, Hints* new_accumulator_hints) { - for (Handle<Map> map : feedback.AsNamedAccess().maps()) { + for (Handle<Map> map : feedback.maps()) { MapRef map_ref(broker(), map); ProcessMapForNamedPropertyAccess(map_ref, feedback.name(), access_mode, base::nullopt, new_accumulator_hints); @@ -2635,8 +2853,6 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess( base::nullopt, new_accumulator_hints); } - JSGlobalProxyRef global_proxy = - broker()->target_native_context().global_proxy_object(); for (Handle<Object> hint : receiver.constants()) { ObjectRef object(broker(), hint); if (access_mode == AccessMode::kLoad && object.IsJSObject()) { @@ -2645,13 +2861,6 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess( object.AsJSObject(), new_accumulator_hints); } - // For JSNativeContextSpecialization::ReduceNamedAccessFromNexus. - if (object.equals(global_proxy)) { - // TODO(neis): Record accumulator hint? Also for string.length and maybe - // more. - global_proxy.GetPropertyCell(feedback.name(), - SerializationPolicy::kSerializeIfNeeded); - } // For JSNativeContextSpecialization::ReduceJSLoadNamed. if (access_mode == AccessMode::kLoad && object.IsJSFunction() && feedback.name().equals(ObjectRef( @@ -2659,9 +2868,12 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess( JSFunctionRef function = object.AsJSFunction(); function.Serialize(); if (new_accumulator_hints != nullptr && function.has_prototype()) { - new_accumulator_hints->AddConstant(function.prototype().object()); + new_accumulator_hints->AddConstant(function.prototype().object(), + zone()); } } + // TODO(neis): Also record accumulator hint for string.length and maybe + // more? 
} } @@ -2841,7 +3053,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf( environment()->register_hints(iterator->GetRegisterOperand(0)); Hints rhs = environment()->accumulator_hints(); FeedbackSlot slot = iterator->GetSlotOperand(1); - Hints new_accumulator_hints(zone()); + Hints new_accumulator_hints; if (slot.IsInvalid() || feedback_vector().is_null()) return; FeedbackSource source(feedback_vector(), slot); @@ -2853,7 +3065,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf( InstanceOfFeedback const& rhs_feedback = feedback.AsInstanceOf(); if (rhs_feedback.value().has_value()) { Handle<JSObject> constructor = rhs_feedback.value()->object(); - rhs.AddConstant(constructor); + rhs.AddConstant(constructor, zone()); } } @@ -2865,7 +3077,7 @@ void SerializerForBackgroundCompilation::VisitTestInstanceOf( if (walk_prototypes) ProcessHintsForHasInPrototypeChain(lhs); environment()->accumulator_hints().Clear(); - environment()->accumulator_hints().Add(new_accumulator_hints); + environment()->accumulator_hints().Add(new_accumulator_hints, zone()); } void SerializerForBackgroundCompilation::VisitToNumeric( diff --git a/chromium/v8/src/compiler/serializer-for-background-compilation.h b/chromium/v8/src/compiler/serializer-for-background-compilation.h index 881ed61a555..8f7883eeba7 100644 --- a/chromium/v8/src/compiler/serializer-for-background-compilation.h +++ b/chromium/v8/src/compiler/serializer-for-background-compilation.h @@ -17,6 +17,7 @@ namespace compiler { class CompilationDependencies; class JSHeapBroker; +class ZoneStats; enum class SerializerForBackgroundCompilationFlag : uint8_t { kBailoutOnUninitialized = 1 << 0, @@ -27,9 +28,9 @@ using SerializerForBackgroundCompilationFlags = base::Flags<SerializerForBackgroundCompilationFlag>; void RunSerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags, - BailoutId osr_offset); + ZoneStats* zone_stats, JSHeapBroker* broker, + CompilationDependencies* dependencies, Handle<JSFunction> closure, + SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset); } // namespace compiler } // namespace internal diff --git a/chromium/v8/src/compiler/simd-scalar-lowering.cc b/chromium/v8/src/compiler/simd-scalar-lowering.cc index 783f3bcc113..2781cc248f0 100644 --- a/chromium/v8/src/compiler/simd-scalar-lowering.cc +++ b/chromium/v8/src/compiler/simd-scalar-lowering.cc @@ -132,6 +132,7 @@ void SimdScalarLowering::LowerGraph() { V(F32x4UConvertI32x4) \ V(F32x4Abs) \ V(F32x4Neg) \ + V(F32x4Sqrt) \ V(F32x4RecipApprox) \ V(F32x4RecipSqrtApprox) \ V(F32x4Add) \ @@ -210,6 +211,7 @@ void SimdScalarLowering::LowerGraph() { V(I8x16LeS) \ V(I8x16LtU) \ V(I8x16LeU) \ + V(S8x16Swizzle) \ V(S8x16Shuffle) MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) { @@ -940,6 +942,28 @@ void SimdScalarLowering::LowerNode(Node* node) { } break; } + case IrOpcode::kSimd128ReverseBytes: { + DCHECK_EQ(1, node->InputCount()); + bool is_float = ReplacementType(node->InputAt(0)) == SimdType::kFloat32x4; + replacements_[node->id()].type = + is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4; + Node** rep = GetReplacementsWithType( + node->InputAt(0), + is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4); + Node* rep_node[kNumLanes32]; + for (int i = 0; i < kNumLanes32; ++i) { + Node* temp = is_float ? 
graph()->NewNode( + machine()->BitcastFloat32ToInt32(), rep[i]) + : rep[i]; + temp = graph()->NewNode(machine()->Word32ReverseBytes(), temp); + rep_node[kNumLanes32 - 1 - i] = + is_float + ? graph()->NewNode(machine()->BitcastInt32ToFloat32(), temp) + : temp; + } + ReplaceNode(node, rep_node, kNumLanes32); + break; + } case IrOpcode::kLoad: case IrOpcode::kUnalignedLoad: case IrOpcode::kProtectedLoad: { @@ -1219,6 +1243,7 @@ void SimdScalarLowering::LowerNode(Node* node) { } F32X4_UNOP_CASE(Abs) F32X4_UNOP_CASE(Neg) + F32X4_UNOP_CASE(Sqrt) #undef F32X4_UNOP_CASE case IrOpcode::kF32x4RecipApprox: case IrOpcode::kF32x4RecipSqrtApprox: { @@ -1368,6 +1393,45 @@ void SimdScalarLowering::LowerNode(Node* node) { ReplaceNode(node, rep_node, num_lanes); break; } + case IrOpcode::kS8x16Swizzle: { + DCHECK_EQ(2, node->InputCount()); + Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type); + Node** indices = GetReplacementsWithType(node->InputAt(1), rep_type); + Node** rep_nodes = zone()->NewArray<Node*>(num_lanes); + Node* stack_slot = graph()->NewNode( + machine()->StackSlot(MachineRepresentation::kSimd128)); + + // Push all num_lanes values into stack slot. + const Operator* store_op = machine()->Store( + StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier)); + Node* effect_input = graph()->start(); + for (int i = num_lanes - 1; i >= 0; i--) { + // We want all the stores to happen first before any of the loads + // below, so connect them via effect edge from i-1 to i. + Node* store = + graph()->NewNode(store_op, stack_slot, mcgraph_->Int32Constant(i), + rep_left[i], effect_input, graph()->start()); + effect_input = store; + } + + for (int i = num_lanes - 1; i >= 0; i--) { + // Only select lane when index is < num_lanes, otherwise write 0 to + // lane. Use Uint32 to take care of negative indices. + Diamond d(graph(), common(), + graph()->NewNode(machine()->Uint32LessThan(), indices[i], + mcgraph_->Int32Constant(num_lanes))); + + Node* load = + graph()->NewNode(machine()->Load(LoadRepresentation::Uint8()), + stack_slot, indices[i], effect_input, d.if_true); + + rep_nodes[i] = d.Phi(MachineRepresentation::kWord8, load, + mcgraph_->Int32Constant(0)); + } + + ReplaceNode(node, rep_nodes, num_lanes); + break; + } case IrOpcode::kS8x16Shuffle: { DCHECK_EQ(2, node->InputCount()); const uint8_t* shuffle = S8x16ShuffleOf(node->op()); diff --git a/chromium/v8/src/compiler/simplified-lowering.cc b/chromium/v8/src/compiler/simplified-lowering.cc index 87f29346b4f..24d241435a9 100644 --- a/chromium/v8/src/compiler/simplified-lowering.cc +++ b/chromium/v8/src/compiler/simplified-lowering.cc @@ -1261,7 +1261,13 @@ class RepresentationSelector { void VisitObjectState(Node* node) { if (propagate()) { for (int i = 0; i < node->InputCount(); i++) { - EnqueueInput(node, i, UseInfo::Any()); + // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize + // truncated BigInts. + if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) { + EnqueueInput(node, i, UseInfo::AnyTagged()); + } else { + EnqueueInput(node, i, UseInfo::Any()); + } } } else if (lower()) { Zone* zone = jsgraph_->zone(); @@ -1272,6 +1278,11 @@ class RepresentationSelector { Node* input = node->InputAt(i); (*types)[i] = DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input)); + // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize + // truncated BigInts. 
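Back in simd-scalar-lowering.cc, the new kS8x16Swizzle case above lowers the SIMD swizzle by spilling the sixteen input lanes to a stack slot and loading each output lane by its index, with an unsigned comparison (the Uint32LessThan diamond) selecting 0 for out-of-range or negative indices. A scalar reference implementation of that behaviour, written as ordinary C++ purely for illustration:

#include <cstdint>
#include <cstdio>

void S8x16SwizzleScalar(const uint8_t input[16], const int32_t indices[16],
                        uint8_t output[16]) {
  uint8_t stack_slot[16];  // analogue of the spilled 128-bit value
  for (int i = 0; i < 16; ++i) stack_slot[i] = input[i];
  for (int i = 0; i < 16; ++i) {
    // The unsigned comparison also rejects negative indices, as in the
    // lowering's Uint32LessThan check.
    output[i] = static_cast<uint32_t>(indices[i]) < 16u
                    ? stack_slot[indices[i]]
                    : 0;
  }
}

int main() {
  uint8_t in[16], out[16];
  int32_t idx[16];
  for (int i = 0; i < 16; ++i) {
    in[i] = static_cast<uint8_t>(i + 1);
    idx[i] = 15 - i;  // reverse the lanes
  }
  idx[0] = -1;   // out of range: lane 0 becomes 0
  idx[1] = 100;  // out of range: lane 1 becomes 0
  S8x16SwizzleScalar(in, idx, out);
  for (int i = 0; i < 16; ++i) std::printf("%d ", out[i]);
  std::printf("\n");  // 0 0 14 13 ... 2 1
}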
+ if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) { + ConvertInput(node, i, UseInfo::AnyTagged()); + } } NodeProperties::ChangeOp(node, jsgraph_->common()->TypedObjectState( ObjectIdOf(node->op()), types)); @@ -1564,7 +1575,7 @@ class RepresentationSelector { } else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) { VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32, Type::Unsigned32()); - if (lower()) DeferReplacement(node, lowering->Uint32Mod(node)); + if (lower()) ChangeToUint32OverflowOp(node); } else { VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32, Type::Signed32()); @@ -2678,7 +2689,11 @@ class RepresentationSelector { case IrOpcode::kReferenceEqual: { VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit); if (lower()) { - NodeProperties::ChangeOp(node, lowering->machine()->WordEqual()); + if (COMPRESS_POINTERS_BOOL) { + NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal()); + } else { + NodeProperties::ChangeOp(node, lowering->machine()->WordEqual()); + } } return; } @@ -2905,6 +2920,18 @@ class RepresentationSelector { SetOutput(node, MachineRepresentation::kTaggedPointer); return; } + case IrOpcode::kLoadMessage: { + if (truncation.IsUnused()) return VisitUnused(node); + VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTagged); + return; + } + case IrOpcode::kStoreMessage: { + ProcessInput(node, 0, UseInfo::Word()); + ProcessInput(node, 1, UseInfo::AnyTagged()); + ProcessRemainingInputs(node, 2); + SetOutput(node, MachineRepresentation::kNone); + return; + } case IrOpcode::kLoadFieldByIndex: { if (truncation.IsUnused()) return VisitUnused(node); VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(), @@ -2956,6 +2983,11 @@ class RepresentationSelector { access.machine_type.representation()); return; } + case IrOpcode::kLoadStackArgument: { + if (truncation.IsUnused()) return VisitUnused(node); + VisitBinop(node, UseInfo::Word(), MachineRepresentation::kTagged); + return; + } case IrOpcode::kStoreElement: { ElementAccess access = ElementAccessOf(node->op()); Node* value_node = node->InputAt(2); diff --git a/chromium/v8/src/compiler/simplified-operator-reducer.cc b/chromium/v8/src/compiler/simplified-operator-reducer.cc index 885a86286eb..0f293d2b38b 100644 --- a/chromium/v8/src/compiler/simplified-operator-reducer.cc +++ b/chromium/v8/src/compiler/simplified-operator-reducer.cc @@ -155,23 +155,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) { Node* new_node = graph()->NewNode( simplified()->ChangeInt31ToCompressedSigned(), m.InputAt(0)); return Replace(new_node); - } else if (m.IsCheckedInt32ToTaggedSigned()) { - // Create a new checked node that outputs CompressedSigned values, with - // an explicit decompression after it. - Node* new_checked = graph()->CloneNode(m.node()); - NodeProperties::ChangeOp( - new_checked, simplified()->CheckedInt32ToCompressedSigned( - CheckParametersOf(m.node()->op()).feedback())); - Node* new_decompression = graph()->NewNode( - machine()->ChangeCompressedSignedToTaggedSigned(), new_checked); - - // For all uses of the old checked node, instead insert the new "checked - // + decompression". Also, update control and effect. - ReplaceWithValue(m.node(), new_decompression, new_checked, new_checked); - - // In the current node, we can skip the decompression since we are going - // to have a Decompression + Compression combo. 
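The kReferenceEqual change above is a pointer-compression detail: when COMPRESS_POINTERS_BOOL is set, two tagged values denote the same object exactly when their 32-bit compressed representations match, so the comparison lowers to Word32Equal instead of a full-width WordEqual. A toy illustration; the cage base and helper names are invented for the example:

#include <cstdint>
#include <cstdio>

// Hypothetical heap cage base; within one cage only the low 32 bits of a
// tagged pointer identify the object.
constexpr uint64_t kCageBase = 0x100000000000ull;

uint32_t Compress(uint64_t tagged) { return static_cast<uint32_t>(tagged); }

// ReferenceEqual lowered for compressed pointers: a 32-bit compare.
bool ReferenceEqualCompressed(uint64_t a, uint64_t b) {
  return Compress(a) == Compress(b);
}

int main() {
  uint64_t obj = kCageBase + 0x1234;
  std::printf("%d\n", ReferenceEqualCompressed(obj, kCageBase + 0x1234));  // 1
  std::printf("%d\n", ReferenceEqualCompressed(obj, kCageBase + 0x5678));  // 0
}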
- return Replace(new_checked); } break; } diff --git a/chromium/v8/src/compiler/simplified-operator.cc b/chromium/v8/src/compiler/simplified-operator.cc index 6b86a95e01b..63d24274ece 100644 --- a/chromium/v8/src/compiler/simplified-operator.cc +++ b/chromium/v8/src/compiler/simplified-operator.cc @@ -1149,6 +1149,17 @@ struct SimplifiedOperatorGlobalCache final { }; LoadFieldByIndexOperator kLoadFieldByIndex; + struct LoadStackArgumentOperator final : public Operator { + LoadStackArgumentOperator() + : Operator( // -- + IrOpcode::kLoadStackArgument, // opcode + Operator::kNoDeopt | Operator::kNoThrow | + Operator::kNoWrite, // flags + "LoadStackArgument", // name + 2, 1, 1, 1, 1, 0) {} // counts + }; + LoadStackArgumentOperator kLoadStackArgument; + #define SPECULATIVE_NUMBER_BINOP(Name) \ template <NumberOperationHint kHint> \ struct Name##Operator final : public Operator1<NumberOperationHint> { \ @@ -1754,6 +1765,24 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP) ACCESS_OP_LIST(ACCESS) #undef ACCESS +const Operator* SimplifiedOperatorBuilder::LoadMessage() { + return new (zone()) + Operator(IrOpcode::kLoadMessage, + Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, + "LoadMessage", 1, 1, 1, 1, 1, 0); +} + +const Operator* SimplifiedOperatorBuilder::StoreMessage() { + return new (zone()) + Operator(IrOpcode::kStoreMessage, + Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoRead, + "StoreMessage", 2, 1, 1, 0, 1, 0); +} + +const Operator* SimplifiedOperatorBuilder::LoadStackArgument() { + return &cache_.kLoadStackArgument; +} + const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement( Handle<Map> double_map, Handle<Map> fast_map) { TransitionAndStoreElementParameters parameters(double_map, fast_map); diff --git a/chromium/v8/src/compiler/simplified-operator.h b/chromium/v8/src/compiler/simplified-operator.h index 58e9bfdffbb..a1438cdce0a 100644 --- a/chromium/v8/src/compiler/simplified-operator.h +++ b/chromium/v8/src/compiler/simplified-operator.h @@ -876,6 +876,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final Type type, AllocationType allocation = AllocationType::kYoung, AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse); + const Operator* LoadMessage(); + const Operator* StoreMessage(); + const Operator* LoadFieldByIndex(); const Operator* LoadField(FieldAccess const&); const Operator* StoreField(FieldAccess const&); @@ -883,6 +886,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final // load-element [base + index] const Operator* LoadElement(ElementAccess const&); + // load-stack-argument [base + index] + const Operator* LoadStackArgument(); + // store-element [base + index], value const Operator* StoreElement(ElementAccess const&); diff --git a/chromium/v8/src/compiler/store-store-elimination.cc b/chromium/v8/src/compiler/store-store-elimination.cc index bd53fb895fa..08accd61c5c 100644 --- a/chromium/v8/src/compiler/store-store-elimination.cc +++ b/chromium/v8/src/compiler/store-store-elimination.cc @@ -2,14 +2,16 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
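The store-store-elimination changes that follow are a restructuring: the helper classes move out of StoreStoreElimination into an anonymous namespace and the set representation changes, but the algorithm stays the same. Its core idea: walk the effect chain backwards, track which (node, offset) stores are currently unobservable, and remove a store whose field is overwritten later with nothing observing it in between. A much-simplified straight-line sketch of that idea (the real pass also handles branches via set intersection and conservatively drops same-offset entries because distinct nodes may alias; all names below are made up):

#include <cstdint>
#include <cstdio>
#include <set>
#include <utility>
#include <vector>

struct Op {
  enum Kind { kStoreField, kObserveEverything } kind;
  int object;       // id of the stored-to node
  uint32_t offset;  // field offset
};

// Returns indices of stores made unobservable by a later store to the same
// (object, offset), assuming one straight-line effect chain.
std::vector<size_t> FindRedundantStores(const std::vector<Op>& effect_chain) {
  std::set<std::pair<int, uint32_t>> unobservable;  // (object, offset)
  std::vector<size_t> to_remove;
  for (size_t i = effect_chain.size(); i-- > 0;) {  // walk backwards
    const Op& op = effect_chain[i];
    if (op.kind == Op::kObserveEverything) {
      unobservable.clear();  // e.g. a call or a deopt can observe anything
    } else {
      auto key = std::make_pair(op.object, op.offset);
      if (unobservable.count(key)) to_remove.push_back(i);  // overwritten later
      unobservable.insert(key);
    }
  }
  return to_remove;
}

int main() {
  std::vector<Op> chain = {
      {Op::kStoreField, 263, 24},      // redundant: overwritten by index 2
      {Op::kStoreField, 263, 32},
      {Op::kStoreField, 263, 24},
      {Op::kObserveEverything, 0, 0},  // everything becomes observable again
      {Op::kStoreField, 263, 24},      // not redundant
  };
  for (size_t i : FindRedundantStores(chain)) {
    std::printf("redundant store at index %zu\n", i);  // index 0
  }
}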
-#include <iterator> - #include "src/compiler/store-store-elimination.h" #include "src/codegen/tick-counter.h" #include "src/compiler/all-nodes.h" +#include "src/compiler/common-operator.h" #include "src/compiler/js-graph.h" #include "src/compiler/node-properties.h" +#include "src/compiler/persistent-map.h" +#include "src/compiler/simplified-operator.h" +#include "src/zone/zone-containers.h" namespace v8 { namespace internal { @@ -41,7 +43,199 @@ namespace compiler { #define DCHECK_EXTRA(condition, fmt, ...) ((void)0) #endif -void StoreStoreElimination::RedundantStoreFinder::Find() { +namespace { + +using StoreOffset = uint32_t; + +struct UnobservableStore { + NodeId id_; + StoreOffset offset_; + + bool operator==(const UnobservableStore other) const { + return (id_ == other.id_) && (offset_ == other.offset_); + } + + bool operator<(const UnobservableStore other) const { + return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_); + } +}; + +size_t hash_value(const UnobservableStore& p) { + return base::hash_combine(p.id_, p.offset_); +} + +// Instances of UnobservablesSet are immutable. They represent either a set of +// UnobservableStores, or the "unvisited empty set". +// +// We apply some sharing to save memory. The class UnobservablesSet is only a +// pointer wide, and a copy does not use any heap (or temp_zone) memory. Most +// changes to an UnobservablesSet might allocate in the temp_zone. +// +// The size of an instance should be the size of a pointer, plus additional +// space in the zone in the case of non-unvisited UnobservablesSets. Copying +// an UnobservablesSet allocates no memory. +class UnobservablesSet final { + private: + using KeyT = UnobservableStore; + using ValueT = bool; // Emulates set semantics in the map. + + // The PersistentMap uses a special value to signify 'not present'. We use + // a boolean value to emulate set semantics. + static constexpr ValueT kNotPresent = false; + static constexpr ValueT kPresent = true; + + public: + using SetT = PersistentMap<KeyT, ValueT>; + + // Creates a new UnobservablesSet, with the null set. + static UnobservablesSet Unvisited() { return UnobservablesSet(); } + + // Create a new empty UnobservablesSet. This allocates in the zone, and + // can probably be optimized to use a global singleton. + static UnobservablesSet VisitedEmpty(Zone* zone); + UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default; + + // Computes the intersection of two UnobservablesSets. If one of the sets is + // empty, will return empty. + UnobservablesSet Intersect(const UnobservablesSet& other, + const UnobservablesSet& empty, Zone* zone) const; + + // Returns a set that it is the current one, plus the observation obs passed + // as parameter. If said obs it's already in the set, we don't have to + // create a new one. + UnobservablesSet Add(UnobservableStore obs, Zone* zone) const; + + // Returns a set that it is the current one, except for all of the + // observations with offset off. This is done by creating a new set and + // copying all observations with different offsets. + // This can probably be done better if the observations are stored first by + // offset and then by node. + // We are removing all nodes with offset off since different nodes may + // alias one another, and we currently we don't have the means to know if + // two nodes are definitely the same value. 
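As the comments above describe, the new representation emulates a set with a PersistentMap whose default value means "not present": SetAdd stores kPresent, SetErase stores kNotPresent back, and Contains is a plain lookup. A standalone analogue using an ordinary map with an explicit default; unlike PersistentMap it does not share structure between copies, and all names are illustrative:

#include <cstdint>
#include <cstdio>
#include <map>
#include <utility>

using Key = std::pair<int, uint32_t>;  // (node id, store offset)

class SetOverMap {
 public:
  static constexpr bool kNotPresent = false;
  static constexpr bool kPresent = true;

  bool Get(const Key& k) const {
    auto it = map_.find(k);
    return it == map_.end() ? kNotPresent : it->second;  // default: absent
  }
  void SetAdd(const Key& k) { map_[k] = kPresent; }
  void SetErase(const Key& k) { map_[k] = kNotPresent; }
  bool Contains(const Key& k) const { return Get(k) != kNotPresent; }

 private:
  std::map<Key, bool> map_;
};

int main() {
  SetOverMap set;
  set.SetAdd({263, 24});
  set.SetAdd({263, 32});
  set.SetErase({263, 32});
  std::printf("%d %d\n", set.Contains({263, 24}), set.Contains({263, 32}));  // 1 0
}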
+ UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const; + + const SetT* set() const { return set_; } + + bool IsUnvisited() const { return set_ == nullptr; } + bool IsEmpty() const { + return set_ == nullptr || set_->begin() == set_->end(); + } + bool Contains(UnobservableStore obs) const { + return set_ != nullptr && set_->Get(obs) != kNotPresent; + } + + bool operator==(const UnobservablesSet& other) const { + if (IsUnvisited() || other.IsUnvisited()) { + return IsEmpty() && other.IsEmpty(); + } else { + // Both pointers guaranteed not to be nullptrs. + return *set() == *(other.set()); + } + } + + bool operator!=(const UnobservablesSet& other) const { + return !(*this == other); + } + + private: + UnobservablesSet() = default; + explicit UnobservablesSet(const SetT* set) : set_(set) {} + + static SetT* NewSet(Zone* zone) { + return new (zone->New(sizeof(UnobservablesSet::SetT))) + UnobservablesSet::SetT(zone, kNotPresent); + } + + static void SetAdd(SetT* set, const KeyT& key) { set->Set(key, kPresent); } + static void SetErase(SetT* set, const KeyT& key) { + set->Set(key, kNotPresent); + } + + const SetT* set_ = nullptr; +}; + +class RedundantStoreFinder final { + public: + // Note that we Initialize unobservable_ with js_graph->graph->NodeCount() + // amount of empty sets. + RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone) + : jsgraph_(js_graph), + tick_counter_(tick_counter), + temp_zone_(temp_zone), + revisit_(temp_zone), + in_revisit_(js_graph->graph()->NodeCount(), temp_zone), + unobservable_(js_graph->graph()->NodeCount(), + UnobservablesSet::Unvisited(), temp_zone), + to_remove_(temp_zone), + unobservables_visited_empty_( + UnobservablesSet::VisitedEmpty(temp_zone)) {} + + // Crawls from the end of the graph to the beginning, with the objective of + // finding redundant stores. + void Find(); + + // This method is used for const correctness to go through the final list of + // redundant stores that are replaced on the graph. + const ZoneSet<Node*>& to_remove_const() { return to_remove_; } + + private: + // Assumption: All effectful nodes are reachable from End via a sequence of + // control, then a sequence of effect edges. + // Visit goes through the control chain, visiting effectful nodes that it + // encounters. + void Visit(Node* node); + + // Marks effect inputs for visiting, if we are able to update this path of + // the graph. + void VisitEffectfulNode(Node* node); + + // Compute the intersection of the UnobservablesSets of all effect uses and + // return it. + // The result UnobservablesSet will never be null. + UnobservablesSet RecomputeUseIntersection(Node* node); + + // Recompute unobservables-set for a node. Will also mark superfluous nodes + // as to be removed. + UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses); + + // Returns true if node's opcode cannot observe StoreFields. + static bool CannotObserveStoreField(Node* node); + + void MarkForRevisit(Node* node); + bool HasBeenVisited(Node* node); + + // To safely cast an offset from a FieldAccess, which has a potentially + // wider range (namely int). 
+ StoreOffset ToOffset(const FieldAccess& access) { + DCHECK_GE(access.offset, 0); + return static_cast<StoreOffset>(access.offset); + } + + JSGraph* jsgraph() const { return jsgraph_; } + Isolate* isolate() { return jsgraph()->isolate(); } + Zone* temp_zone() const { return temp_zone_; } + UnobservablesSet& unobservable_for_id(NodeId id) { + DCHECK_LT(id, unobservable_.size()); + return unobservable_[id]; + } + ZoneSet<Node*>& to_remove() { return to_remove_; } + + JSGraph* const jsgraph_; + TickCounter* const tick_counter_; + Zone* const temp_zone_; + + ZoneStack<Node*> revisit_; + ZoneVector<bool> in_revisit_; + + // Maps node IDs to UnobservableNodeSets. + ZoneVector<UnobservablesSet> unobservable_; + ZoneSet<Node*> to_remove_; + const UnobservablesSet unobservables_visited_empty_; +}; + +void RedundantStoreFinder::Find() { Visit(jsgraph()->graph()->end()); while (!revisit_.empty()) { @@ -65,7 +259,7 @@ void StoreStoreElimination::RedundantStoreFinder::Find() { #endif } -void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) { +void RedundantStoreFinder::MarkForRevisit(Node* node) { DCHECK_LT(node->id(), in_revisit_.size()); if (!in_revisit_[node->id()]) { revisit_.push(node); @@ -73,32 +267,12 @@ void StoreStoreElimination::RedundantStoreFinder::MarkForRevisit(Node* node) { } } -bool StoreStoreElimination::RedundantStoreFinder::HasBeenVisited(Node* node) { +bool RedundantStoreFinder::HasBeenVisited(Node* node) { return !unobservable_for_id(node->id()).IsUnvisited(); } -void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter, - Zone* temp_zone) { - // Find superfluous nodes - RedundantStoreFinder finder(js_graph, tick_counter, temp_zone); - finder.Find(); - - // Remove superfluous nodes - for (Node* node : finder.to_remove_const()) { - if (FLAG_trace_store_elimination) { - PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n", - node->id(), node->op()->mnemonic()); - } - Node* previous_effect = NodeProperties::GetEffectInput(node); - NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr, - nullptr); - node->Kill(); - } -} - -StoreStoreElimination::UnobservablesSet -StoreStoreElimination::RedundantStoreFinder::RecomputeSet( - Node* node, const StoreStoreElimination::UnobservablesSet& uses) { +UnobservablesSet RedundantStoreFinder::RecomputeSet( + Node* node, const UnobservablesSet& uses) { switch (node->op()->opcode()) { case IrOpcode::kStoreField: { Node* stored_to = node->InputAt(0); @@ -150,8 +324,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeSet( UNREACHABLE(); } -bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField( - Node* node) { +bool RedundantStoreFinder::CannotObserveStoreField(Node* node) { IrOpcode::Value opcode = node->opcode(); return opcode == IrOpcode::kLoadElement || opcode == IrOpcode::kLoad || opcode == IrOpcode::kStore || opcode == IrOpcode::kEffectPhi || @@ -159,7 +332,7 @@ bool StoreStoreElimination::RedundantStoreFinder::CannotObserveStoreField( opcode == IrOpcode::kUnsafePointerAdd || opcode == IrOpcode::kRetain; } -void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) { +void RedundantStoreFinder::Visit(Node* node) { if (!HasBeenVisited(node)) { for (int i = 0; i < node->op()->ControlInputCount(); i++) { Node* control_input = NodeProperties::GetControlInput(node, i); @@ -180,19 +353,15 @@ void StoreStoreElimination::RedundantStoreFinder::Visit(Node* node) { } } -void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode( - Node* node) { 
+void RedundantStoreFinder::VisitEffectfulNode(Node* node) { if (HasBeenVisited(node)) { TRACE("- Revisiting: #%d:%s", node->id(), node->op()->mnemonic()); } - StoreStoreElimination::UnobservablesSet after_set = - RecomputeUseIntersection(node); - StoreStoreElimination::UnobservablesSet before_set = - RecomputeSet(node, after_set); + UnobservablesSet after_set = RecomputeUseIntersection(node); + UnobservablesSet before_set = RecomputeSet(node, after_set); DCHECK(!before_set.IsUnvisited()); - StoreStoreElimination::UnobservablesSet stores_for_node = - unobservable_for_id(node->id()); + UnobservablesSet stores_for_node = unobservable_for_id(node->id()); bool cur_set_changed = stores_for_node.IsUnvisited() || stores_for_node != before_set; if (!cur_set_changed) { @@ -212,9 +381,7 @@ void StoreStoreElimination::RedundantStoreFinder::VisitEffectfulNode( } } -StoreStoreElimination::UnobservablesSet -StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection( - Node* node) { +UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) { // There were no effect uses. Break early. if (node->op()->EffectOutputCount() == 0) { IrOpcode::Value opcode = node->opcode(); @@ -236,8 +403,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection( // {first} == false indicates that cur_set is the intersection of at least one // thing. bool first = true; - StoreStoreElimination::UnobservablesSet cur_set = - StoreStoreElimination::UnobservablesSet::Unvisited(); // irrelevant + UnobservablesSet cur_set = UnobservablesSet::Unvisited(); // irrelevant for (Edge edge : node->use_edges()) { if (!NodeProperties::IsEffectEdge(edge)) { continue; @@ -245,8 +411,7 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection( // Intersect with the new use node. Node* use = edge.from(); - StoreStoreElimination::UnobservablesSet new_set = - unobservable_for_id(use->id()); + UnobservablesSet new_set = unobservable_for_id(use->id()); if (first) { first = false; cur_set = new_set; @@ -268,72 +433,70 @@ StoreStoreElimination::RedundantStoreFinder::RecomputeUseIntersection( return cur_set; } -StoreStoreElimination::UnobservablesSet::UnobservablesSet() : set_(nullptr) {} - -StoreStoreElimination::UnobservablesSet -StoreStoreElimination::UnobservablesSet::VisitedEmpty(Zone* zone) { - ZoneSet<UnobservableStore>* empty_set = - new (zone->New(sizeof(ZoneSet<UnobservableStore>))) - ZoneSet<UnobservableStore>(zone); - return StoreStoreElimination::UnobservablesSet(empty_set); +UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) { + return UnobservablesSet(NewSet(zone)); } -StoreStoreElimination::UnobservablesSet -StoreStoreElimination::UnobservablesSet::Intersect( - const StoreStoreElimination::UnobservablesSet& other, - const StoreStoreElimination::UnobservablesSet& empty, Zone* zone) const { - if (IsEmpty() || other.IsEmpty()) { - return empty; - } else { - ZoneSet<UnobservableStore>* intersection = - new (zone->New(sizeof(ZoneSet<UnobservableStore>))) - ZoneSet<UnobservableStore>(zone); - // Put the intersection of set() and other.set() in intersection. 
- set_intersection(set()->begin(), set()->end(), other.set()->begin(), - other.set()->end(), - std::inserter(*intersection, intersection->end())); - - return StoreStoreElimination::UnobservablesSet(intersection); +UnobservablesSet UnobservablesSet::Intersect(const UnobservablesSet& other, + const UnobservablesSet& empty, + Zone* zone) const { + if (IsEmpty() || other.IsEmpty()) return empty; + + UnobservablesSet::SetT* intersection = NewSet(zone); + for (const auto& triple : set()->Zip(*other.set())) { + if (std::get<1>(triple) && std::get<2>(triple)) { + intersection->Set(std::get<0>(triple), kPresent); + } } + + return UnobservablesSet(intersection); } -StoreStoreElimination::UnobservablesSet -StoreStoreElimination::UnobservablesSet::Add(UnobservableStore obs, - Zone* zone) const { - bool found = set()->find(obs) != set()->end(); - if (found) { - return *this; - } else { - // Make a new empty set. - ZoneSet<UnobservableStore>* new_set = - new (zone->New(sizeof(ZoneSet<UnobservableStore>))) - ZoneSet<UnobservableStore>(zone); - // Copy the old elements over. - *new_set = *set(); - // Add the new element. - bool inserted = new_set->insert(obs).second; - DCHECK(inserted); - USE(inserted); // silence warning about unused variable - - return StoreStoreElimination::UnobservablesSet(new_set); +UnobservablesSet UnobservablesSet::Add(UnobservableStore obs, + Zone* zone) const { + if (set()->Get(obs) != kNotPresent) return *this; + + UnobservablesSet::SetT* new_set = NewSet(zone); + *new_set = *set(); + SetAdd(new_set, obs); + + return UnobservablesSet(new_set); +} + +UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset, + Zone* zone) const { + UnobservablesSet::SetT* new_set = NewSet(zone); + *new_set = *set(); + + // Remove elements with the given offset. + for (const auto& entry : *new_set) { + const UnobservableStore& obs = entry.first; + if (obs.offset_ == offset) SetErase(new_set, obs); } + + return UnobservablesSet(new_set); } -StoreStoreElimination::UnobservablesSet -StoreStoreElimination::UnobservablesSet::RemoveSameOffset(StoreOffset offset, - Zone* zone) const { - // Make a new empty set. - ZoneSet<UnobservableStore>* new_set = - new (zone->New(sizeof(ZoneSet<UnobservableStore>))) - ZoneSet<UnobservableStore>(zone); - // Copy all elements over that have a different offset. 
- for (auto obs : *set()) { - if (obs.offset_ != offset) { - new_set->insert(obs); +} // namespace + +// static +void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone) { + // Find superfluous nodes + RedundantStoreFinder finder(js_graph, tick_counter, temp_zone); + finder.Find(); + + // Remove superfluous nodes + for (Node* node : finder.to_remove_const()) { + if (FLAG_trace_store_elimination) { + PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n", + node->id(), node->op()->mnemonic()); } + Node* previous_effect = NodeProperties::GetEffectInput(node); + NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr, + nullptr); + node->Kill(); } - - return StoreStoreElimination::UnobservablesSet(new_set); } #undef TRACE diff --git a/chromium/v8/src/compiler/store-store-elimination.h b/chromium/v8/src/compiler/store-store-elimination.h index 7704938fc0d..0813adb1f0c 100644 --- a/chromium/v8/src/compiler/store-store-elimination.h +++ b/chromium/v8/src/compiler/store-store-elimination.h @@ -5,18 +5,18 @@ #ifndef V8_COMPILER_STORE_STORE_ELIMINATION_H_ #define V8_COMPILER_STORE_STORE_ELIMINATION_H_ -#include "src/compiler/common-operator.h" -#include "src/compiler/js-graph.h" -#include "src/compiler/simplified-operator.h" -#include "src/zone/zone-containers.h" +#include "src/common/globals.h" namespace v8 { namespace internal { class TickCounter; +class Zone; namespace compiler { +class JSGraph; + // Store-store elimination. // // The aim of this optimization is to detect the following pattern in the @@ -44,176 +44,10 @@ namespace compiler { // // This implementation needs all dead nodes removed from the graph, and the // graph should be trimmed. -class StoreStoreElimination final { +class StoreStoreElimination final : public AllStatic { public: static void Run(JSGraph* js_graph, TickCounter* tick_counter, Zone* temp_zone); - - private: - using StoreOffset = uint32_t; - - struct UnobservableStore { - NodeId id_; - StoreOffset offset_; - - bool operator==(const UnobservableStore other) const { - return (id_ == other.id_) && (offset_ == other.offset_); - } - - bool operator<(const UnobservableStore other) const { - return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_); - } - }; - - // Instances of UnobservablesSet are immutable. They represent either a set of - // UnobservableStores, or the "unvisited empty set". - // - // We apply some sharing to save memory. The class UnobservablesSet is only a - // pointer wide, and a copy does not use any heap (or temp_zone) memory. Most - // changes to an UnobservablesSet might allocate in the temp_zone. - // - // The size of an instance should be the size of a pointer, plus additional - // space in the zone in the case of non-unvisited UnobservablesSets. Copying - // an UnobservablesSet allocates no memory. - class UnobservablesSet final { - public: - // Creates a new UnobservablesSet, with the null set. - static UnobservablesSet Unvisited() { return UnobservablesSet(); } - - // Create a new empty UnobservablesSet. This allocates in the zone, and - // can probably be optimized to use a global singleton. - static UnobservablesSet VisitedEmpty(Zone* zone); - UnobservablesSet(const UnobservablesSet& other) V8_NOEXCEPT = default; - - // Computes the intersection of two UnobservablesSets. If one of the sets is - // empty, will return empty. 
- UnobservablesSet Intersect(const UnobservablesSet& other, - const UnobservablesSet& empty, Zone* zone) const; - - // Returns a set that it is the current one, plus the observation obs passed - // as parameter. If said obs it's already in the set, we don't have to - // create a new one. - UnobservablesSet Add(UnobservableStore obs, Zone* zone) const; - - // Returns a set that it is the current one, except for all of the - // observations with offset off. This is done by creating a new set and - // copying all observations with different offsets. - // This can probably be done better if the observations are stored first by - // offset and then by node. - // We are removing all nodes with offset off since different nodes may - // alias one another, and we currently we don't have the means to know if - // two nodes are definitely the same value. - UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const; - - const ZoneSet<UnobservableStore>* set() const { return set_; } - - bool IsUnvisited() const { return set_ == nullptr; } - bool IsEmpty() const { return set_ == nullptr || set_->empty(); } - bool Contains(UnobservableStore obs) const { - return set_ != nullptr && (set_->find(obs) != set_->end()); - } - - bool operator==(const UnobservablesSet& other) const { - if (IsUnvisited() || other.IsUnvisited()) { - return IsEmpty() && other.IsEmpty(); - } else { - // Both pointers guaranteed not to be nullptrs. - return *set() == *(other.set()); - } - } - - bool operator!=(const UnobservablesSet& other) const { - return !(*this == other); - } - - private: - UnobservablesSet(); - explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set) - : set_(set) {} - const ZoneSet<UnobservableStore>* set_; - }; - - class RedundantStoreFinder final { - public: - // Note that we Initialize unobservable_ with js_graph->graph->NodeCount() - // amount of empty sets. - RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter, - Zone* temp_zone) - : jsgraph_(js_graph), - tick_counter_(tick_counter), - temp_zone_(temp_zone), - revisit_(temp_zone), - in_revisit_(js_graph->graph()->NodeCount(), temp_zone), - unobservable_(js_graph->graph()->NodeCount(), - StoreStoreElimination::UnobservablesSet::Unvisited(), - temp_zone), - to_remove_(temp_zone), - unobservables_visited_empty_( - StoreStoreElimination::UnobservablesSet::VisitedEmpty( - temp_zone)) {} - - // Crawls from the end of the graph to the beginning, with the objective of - // finding redundant stores. - void Find(); - - // This method is used for const correctness to go through the final list of - // redundant stores that are replaced on the graph. - const ZoneSet<Node*>& to_remove_const() { return to_remove_; } - - private: - // Assumption: All effectful nodes are reachable from End via a sequence of - // control, then a sequence of effect edges. - // Visit goes through the control chain, visiting effectful nodes that it - // encounters. - void Visit(Node* node); - - // Marks effect inputs for visiting, if we are able to update this path of - // the graph. - void VisitEffectfulNode(Node* node); - - // Compute the intersection of the UnobservablesSets of all effect uses and - // return it. - // The result UnobservablesSet will never be null. - UnobservablesSet RecomputeUseIntersection(Node* node); - - // Recompute unobservables-set for a node. Will also mark superfluous nodes - // as to be removed. - UnobservablesSet RecomputeSet(Node* node, const UnobservablesSet& uses); - - // Returns true if node's opcode cannot observe StoreFields. 
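// Illustrative aside (not part of the patch): a heavily simplified, straight-line
// version of the idea behind this pass. Walking backwards, a store to
// (object, offset) is redundant if the same slot is overwritten later with no
// potentially-observing operation in between. The real pass works on the effect
// graph and intersects UnobservablesSets at merges; this sketch only handles a
// flat list of operations and is not V8 code.
#include <cstdint>
#include <set>
#include <utility>
#include <vector>

enum class OpKind { kStore, kLoad, kCall };

struct Op {
  OpKind kind;
  int object = 0;       // which object the store/load touches
  uint32_t offset = 0;  // field offset within that object
};

// Returns the indices of stores that can be removed.
std::vector<size_t> FindRedundantStores(const std::vector<Op>& ops) {
  std::vector<size_t> to_remove;
  std::set<std::pair<int, uint32_t>> unobservable;  // (object, offset) slots
  for (size_t i = ops.size(); i-- > 0;) {
    const Op& op = ops[i];
    switch (op.kind) {
      case OpKind::kStore: {
        auto slot = std::make_pair(op.object, op.offset);
        if (unobservable.count(slot)) {
          to_remove.push_back(i);  // overwritten later, nothing observed it
        } else {
          unobservable.insert(slot);
        }
        break;
      }
      case OpKind::kLoad:
        // A load may alias any object at this offset, so stores to that offset
        // become observable again (mirrors RemoveSameOffset above).
        for (auto it = unobservable.begin(); it != unobservable.end();) {
          if (it->second == op.offset) {
            it = unobservable.erase(it);
          } else {
            ++it;
          }
        }
        break;
      case OpKind::kCall:
        unobservable.clear();  // a call may observe anything
        break;
    }
  }
  return to_remove;
}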
- static bool CannotObserveStoreField(Node* node); - - void MarkForRevisit(Node* node); - bool HasBeenVisited(Node* node); - - // To safely cast an offset from a FieldAccess, which has a potentially - // wider range (namely int). - StoreOffset ToOffset(const FieldAccess& access) { - DCHECK_GE(access.offset, 0); - return static_cast<StoreOffset>(access.offset); - } - - JSGraph* jsgraph() const { return jsgraph_; } - Isolate* isolate() { return jsgraph()->isolate(); } - Zone* temp_zone() const { return temp_zone_; } - UnobservablesSet& unobservable_for_id(NodeId id) { - DCHECK_LT(id, unobservable_.size()); - return unobservable_[id]; - } - ZoneSet<Node*>& to_remove() { return to_remove_; } - - JSGraph* const jsgraph_; - TickCounter* const tick_counter_; - Zone* const temp_zone_; - - ZoneStack<Node*> revisit_; - ZoneVector<bool> in_revisit_; - - // Maps node IDs to UnobservableNodeSets. - ZoneVector<UnobservablesSet> unobservable_; - ZoneSet<Node*> to_remove_; - const UnobservablesSet unobservables_visited_empty_; - }; }; } // namespace compiler diff --git a/chromium/v8/src/compiler/typer.cc b/chromium/v8/src/compiler/typer.cc index 6ba1b39431b..76768ccfc22 100644 --- a/chromium/v8/src/compiler/typer.cc +++ b/chromium/v8/src/compiler/typer.cc @@ -847,13 +847,30 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) { DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(node)->opcode()); DCHECK_EQ(2, NodeProperties::GetControlInput(node)->InputCount()); + auto res = induction_vars_->induction_variables().find(node->id()); + DCHECK(res != induction_vars_->induction_variables().end()); + InductionVariable* induction_var = res->second; + InductionVariable::ArithmeticType arithmetic_type = induction_var->Type(); Type initial_type = Operand(node, 0); Type increment_type = Operand(node, 2); + const bool both_types_integer = initial_type.Is(typer_->cache_->kInteger) && + increment_type.Is(typer_->cache_->kInteger); + bool maybe_nan = false; + // The addition or subtraction could still produce a NaN, if the integer + // ranges touch infinity. + if (both_types_integer) { + Type resultant_type = + (arithmetic_type == InductionVariable::ArithmeticType::kAddition) + ? typer_->operation_typer()->NumberAdd(initial_type, increment_type) + : typer_->operation_typer()->NumberSubtract(initial_type, + increment_type); + maybe_nan = resultant_type.Maybe(Type::NaN()); + } + // We only handle integer induction variables (otherwise ranges // do not apply and we cannot do anything). - if (!initial_type.Is(typer_->cache_->kInteger) || - !increment_type.Is(typer_->cache_->kInteger)) { + if (!both_types_integer || maybe_nan) { // Fallback to normal phi typing, but ensure monotonicity. // (Unfortunately, without baking in the previous type, monotonicity might // be violated because we might not yet have retyped the incrementing @@ -874,12 +891,6 @@ Type Typer::Visitor::TypeInductionVariablePhi(Node* node) { } // Now process the bounds. 
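// Illustrative aside (not part of the patch): the maybe_nan check added above
// guards range-based typing of induction variables against NaN. Concretely, if
// the initial value's range reaches -infinity and the increment's range reaches
// +infinity (or vice versa), the addition may evaluate (-inf) + (+inf) == NaN,
// so the typer must fall back to ordinary phi typing. A minimal standalone
// sketch of that condition for the addition case:
#include <limits>

struct Range { double min; double max; };  // closed interval over the doubles

// True if adding a value from |a| to a value from |b| can yield NaN.
inline bool AdditionMayProduceNaN(const Range& a, const Range& b) {
  const double kInf = std::numeric_limits<double>::infinity();
  return (a.min == -kInf && b.max == kInf) || (a.max == kInf && b.min == -kInf);
}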
- auto res = induction_vars_->induction_variables().find(node->id()); - DCHECK(res != induction_vars_->induction_variables().end()); - InductionVariable* induction_var = res->second; - - InductionVariable::ArithmeticType arithmetic_type = induction_var->Type(); - double min = -V8_INFINITY; double max = V8_INFINITY; @@ -1339,6 +1350,10 @@ Type Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) { return Type::OtherObject(); } +Type Typer::Visitor::TypeJSGetTemplateObject(Node* node) { + return Type::Array(); +} + Type Typer::Visitor::TypeJSLoadProperty(Node* node) { return Type::NonInternal(); } @@ -2192,10 +2207,16 @@ Type Typer::Visitor::TypeLoadField(Node* node) { return FieldAccessOf(node->op()).type; } +Type Typer::Visitor::TypeLoadMessage(Node* node) { return Type::Any(); } + Type Typer::Visitor::TypeLoadElement(Node* node) { return ElementAccessOf(node->op()).type; } +Type Typer::Visitor::TypeLoadStackArgument(Node* node) { + return Type::NonInternal(); +} + Type Typer::Visitor::TypeLoadFromObject(Node* node) { UNREACHABLE(); } Type Typer::Visitor::TypeLoadTypedElement(Node* node) { @@ -2222,6 +2243,8 @@ Type Typer::Visitor::TypeLoadDataViewElement(Node* node) { Type Typer::Visitor::TypeStoreField(Node* node) { UNREACHABLE(); } +Type Typer::Visitor::TypeStoreMessage(Node* node) { UNREACHABLE(); } + Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); } Type Typer::Visitor::TypeStoreToObject(Node* node) { UNREACHABLE(); } diff --git a/chromium/v8/src/compiler/types.cc b/chromium/v8/src/compiler/types.cc index 018c54c3d57..caa086bbd3b 100644 --- a/chromium/v8/src/compiler/types.cc +++ b/chromium/v8/src/compiler/types.cc @@ -183,7 +183,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case HEAP_NUMBER_TYPE: return kNumber; case JS_OBJECT_TYPE: - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: case JS_ERROR_TYPE: case JS_GLOBAL_OBJECT_TYPE: case JS_GLOBAL_PROXY_TYPE: @@ -207,16 +207,16 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case JS_MESSAGE_OBJECT_TYPE: case JS_DATE_TYPE: #ifdef V8_INTL_SUPPORT - case JS_INTL_V8_BREAK_ITERATOR_TYPE: - case JS_INTL_COLLATOR_TYPE: - case JS_INTL_DATE_TIME_FORMAT_TYPE: - case JS_INTL_LIST_FORMAT_TYPE: - case JS_INTL_LOCALE_TYPE: - case JS_INTL_NUMBER_FORMAT_TYPE: - case JS_INTL_PLURAL_RULES_TYPE: - case JS_INTL_RELATIVE_TIME_FORMAT_TYPE: - case JS_INTL_SEGMENT_ITERATOR_TYPE: - case JS_INTL_SEGMENTER_TYPE: + case JS_V8_BREAK_ITERATOR_TYPE: + case JS_COLLATOR_TYPE: + case JS_DATE_TIME_FORMAT_TYPE: + case JS_LIST_FORMAT_TYPE: + case JS_LOCALE_TYPE: + case JS_NUMBER_FORMAT_TYPE: + case JS_PLURAL_RULES_TYPE: + case JS_RELATIVE_TIME_FORMAT_TYPE: + case JS_SEGMENT_ITERATOR_TYPE: + case JS_SEGMENTER_TYPE: #endif // V8_INTL_SUPPORT case JS_CONTEXT_EXTENSION_OBJECT_TYPE: case JS_GENERATOR_OBJECT_TYPE: @@ -225,8 +225,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case JS_MODULE_NAMESPACE_TYPE: case JS_ARRAY_BUFFER_TYPE: case JS_ARRAY_ITERATOR_TYPE: - case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type. 
- case JS_REGEXP_STRING_ITERATOR_TYPE: + case JS_REG_EXP_TYPE: + case JS_REG_EXP_STRING_ITERATOR_TYPE: case JS_TYPED_ARRAY_TYPE: case JS_DATA_VIEW_TYPE: case JS_SET_TYPE: @@ -244,12 +244,12 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case JS_WEAK_REF_TYPE: case JS_WEAK_SET_TYPE: case JS_PROMISE_TYPE: - case WASM_EXCEPTION_TYPE: - case WASM_GLOBAL_TYPE: - case WASM_INSTANCE_TYPE: - case WASM_MEMORY_TYPE: - case WASM_MODULE_TYPE: - case WASM_TABLE_TYPE: + case WASM_EXCEPTION_OBJECT_TYPE: + case WASM_GLOBAL_OBJECT_TYPE: + case WASM_INSTANCE_OBJECT_TYPE: + case WASM_MEMORY_OBJECT_TYPE: + case WASM_MODULE_OBJECT_TYPE: + case WASM_TABLE_OBJECT_TYPE: case WEAK_CELL_TYPE: DCHECK(!map.is_callable()); DCHECK(!map.is_undetectable()); @@ -365,7 +365,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case PROMISE_REJECT_REACTION_JOB_TASK_TYPE: case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE: #define MAKE_TORQUE_CLASS_TYPE(V) case V: - TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE) + TORQUE_INTERNAL_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE) #undef MAKE_TORQUE_CLASS_TYPE UNREACHABLE(); } diff --git a/chromium/v8/src/compiler/verifier.cc b/chromium/v8/src/compiler/verifier.cc index 608d6ffee68..d7fdd4269eb 100644 --- a/chromium/v8/src/compiler/verifier.cc +++ b/chromium/v8/src/compiler/verifier.cc @@ -732,6 +732,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { // Type is OtherObject. CheckTypeIs(node, Type::OtherObject()); break; + case IrOpcode::kJSGetTemplateObject: + // Type is Array + CheckTypeIs(node, Type::Array()); + break; case IrOpcode::kJSLoadProperty: // Type can be anything. CheckTypeIs(node, Type::Any()); @@ -1594,12 +1598,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckTypeIs(node, Type::NonInternal()); break; case IrOpcode::kLoadField: + case IrOpcode::kLoadMessage: // Object -> fieldtype // TODO(rossberg): activate once machine ops are typed. // CheckValueInputIs(node, 0, Type::Object()); // CheckTypeIs(node, FieldAccessOf(node->op()).type)); break; case IrOpcode::kLoadElement: + case IrOpcode::kLoadStackArgument: // Object -> elementtype // TODO(rossberg): activate once machine ops are typed. // CheckValueInputIs(node, 0, Type::Object()); @@ -1613,6 +1619,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kLoadDataViewElement: break; case IrOpcode::kStoreField: + case IrOpcode::kStoreMessage: // (Object, fieldtype) -> _|_ // TODO(rossberg): activate once machine ops are typed. 
// CheckValueInputIs(node, 0, Type::Object()); @@ -1700,6 +1707,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kWord64Ctz: case IrOpcode::kWord64ReverseBits: case IrOpcode::kWord64ReverseBytes: + case IrOpcode::kSimd128ReverseBytes: case IrOpcode::kInt64AbsWithOverflow: case IrOpcode::kWord64Equal: case IrOpcode::kInt32Add: @@ -1801,7 +1809,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kBitcastInt32ToFloat32: case IrOpcode::kBitcastInt64ToFloat64: case IrOpcode::kBitcastTaggedToWord: - case IrOpcode::kBitcastTaggedSignedToWord: + case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: case IrOpcode::kBitcastWordToTagged: case IrOpcode::kBitcastWordToTaggedSigned: case IrOpcode::kBitcastWord32ToCompressedSigned: diff --git a/chromium/v8/src/compiler/wasm-compiler.cc b/chromium/v8/src/compiler/wasm-compiler.cc index 28f9943e591..ddc97ce5031 100644 --- a/chromium/v8/src/compiler/wasm-compiler.cc +++ b/chromium/v8/src/compiler/wasm-compiler.cc @@ -176,8 +176,6 @@ WasmGraphBuilder::WasmGraphBuilder( : zone_(zone), mcgraph_(mcgraph), env_(env), - cur_buffer_(def_buffer_), - cur_bufsize_(kDefaultBufferSize), has_simd_(ContainsSimd(sig)), untrusted_code_mitigations_(FLAG_untrusted_code_mitigations), sig_(sig), @@ -255,24 +253,19 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) { return graph()->NewNode(mcgraph()->common()->Merge(count), count, controls); } -Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals, - Node* control) { - DCHECK(IrOpcode::IsMergeOpcode(control->opcode())); - Vector<Node*> buf = Realloc(vals, count, count + 1); - buf[count] = control; +Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, + Node** vals_and_control) { + DCHECK(IrOpcode::IsMergeOpcode(vals_and_control[count]->opcode())); return graph()->NewNode( mcgraph()->common()->Phi(wasm::ValueTypes::MachineRepresentationFor(type), count), - count + 1, buf.begin()); + count + 1, vals_and_control); } -Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects, - Node* control) { - DCHECK(IrOpcode::IsMergeOpcode(control->opcode())); - Vector<Node*> buf = Realloc(effects, count, count + 1); - buf[count] = control; +Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) { + DCHECK(IrOpcode::IsMergeOpcode(effects_and_control[count]->opcode())); return graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1, - buf.begin()); + effects_and_control); } Node* WasmGraphBuilder::RefNull() { @@ -1114,6 +1107,10 @@ Node* WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node, } Node* WasmGraphBuilder::Switch(unsigned count, Node* key) { + // The instruction selector will use {kArchTableSwitch} for large switches, + // which has limited input count, see {InstructionSelector::EmitTableSwitch}. + DCHECK_LE(count, Instruction::kMaxInputCount - 2); // value_range + 2 + DCHECK_LE(count, wasm::kV8MaxWasmFunctionBrTableSize + 1); // plus IfDefault return graph()->NewNode(mcgraph()->common()->Switch(count), key, Control()); } @@ -1266,27 +1263,9 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore( case 8: result = graph()->NewNode(m->Word64ReverseBytes(), value); break; - case 16: { - Node* byte_reversed_lanes[4]; - for (int lane = 0; lane < 4; lane++) { - byte_reversed_lanes[lane] = graph()->NewNode( - m->Word32ReverseBytes(), - graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane), - value)); - } - - // This is making a copy of the value. 
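// Illustrative aside (not part of the patch): the 16-byte case removed here
// built a full byte reversal out of 32-bit pieces: reverse the bytes inside
// each 32-bit lane, then write the lanes back in mirrored order (0<->3, 1<->2).
// The replacement below emits the single Simd128ReverseBytes machine operator
// instead. A scalar sketch of the equivalent transformation:
#include <array>
#include <cstdint>
#include <cstring>

inline uint32_t ReverseBytes32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) |
         (v << 24);
}

// Reverses all 16 bytes of a SIMD value: per-lane byte reversal plus lane swap.
inline std::array<uint8_t, 16> ReverseSimd128(
    const std::array<uint8_t, 16>& in) {
  std::array<uint8_t, 16> out;
  for (int lane = 0; lane < 4; lane++) {
    uint32_t v;
    std::memcpy(&v, in.data() + 4 * lane, 4);
    v = ReverseBytes32(v);
    std::memcpy(out.data() + 4 * (3 - lane), &v, 4);
  }
  return out;  // out[i] == in[15 - i]
}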
- result = - graph()->NewNode(mcgraph()->machine()->S128And(), value, value); - - for (int lane = 0; lane < 4; lane++) { - result = - graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(3 - lane), - result, byte_reversed_lanes[lane]); - } - + case 16: + result = graph()->NewNode(m->Simd128ReverseBytes(), value); break; - } default: UNREACHABLE(); break; @@ -1405,27 +1384,9 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node, case 8: result = graph()->NewNode(m->Word64ReverseBytes(), value); break; - case 16: { - Node* byte_reversed_lanes[4]; - for (int lane = 0; lane < 4; lane++) { - byte_reversed_lanes[lane] = graph()->NewNode( - m->Word32ReverseBytes(), - graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane), - value)); - } - - // This is making a copy of the value. - result = - graph()->NewNode(mcgraph()->machine()->S128And(), value, value); - - for (int lane = 0; lane < 4; lane++) { - result = - graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(3 - lane), - result, byte_reversed_lanes[lane]); - } - + case 16: + result = graph()->NewNode(m->Simd128ReverseBytes(), value); break; - } default: UNREACHABLE(); } @@ -2295,13 +2256,14 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) { return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1); } -Vector<Node*> WasmGraphBuilder::GetExceptionValues( - Node* except_obj, const wasm::WasmException* exception) { +Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj, + const wasm::WasmException* exception, + Vector<Node*> values) { Node* values_array = BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1); uint32_t index = 0; const wasm::WasmExceptionSig* sig = exception->sig; - Vector<Node*> values = Buffer(sig->parameter_count()); + DCHECK_EQ(sig->parameter_count(), values.size()); for (size_t i = 0; i < sig->parameter_count(); ++i) { Node* value; switch (sig->GetParam(i)) { @@ -2347,7 +2309,7 @@ Vector<Node*> WasmGraphBuilder::GetExceptionValues( values[i] = value; } DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception)); - return values; + return values_array; } Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right, @@ -2682,7 +2644,8 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function, return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args)); } -Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args, +Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, + Vector<Node*> args, wasm::WasmCodePosition position, Node* instance_node, const Operator* op) { if (instance_node == nullptr) { @@ -2695,25 +2658,28 @@ Node* WasmGraphBuilder::BuildCallNode(wasm::FunctionSig* sig, Node** args, const size_t count = 1 + params + extra; // Reallocate the buffer to make space for extra inputs. - args = Realloc(args, 1 + params, count).begin(); + base::SmallVector<Node*, 16 + extra> inputs(count); + DCHECK_EQ(1 + params, args.size()); // Make room for the instance_node parameter at index 1, just after code. - memmove(&args[2], &args[1], params * sizeof(Node*)); - args[1] = instance_node; + inputs[0] = args[0]; // code + inputs[1] = instance_node; + if (params > 0) memcpy(&inputs[2], &args[1], params * sizeof(Node*)); // Add effect and control inputs. 
- args[params + 2] = Effect(); - args[params + 3] = Control(); + inputs[params + 2] = Effect(); + inputs[params + 3] = Control(); - Node* call = SetEffect(graph()->NewNode(op, static_cast<int>(count), args)); + Node* call = + SetEffect(graph()->NewNode(op, static_cast<int>(count), inputs.begin())); DCHECK(position == wasm::kNoCodePosition || position > 0); if (position > 0) SetSourcePosition(call, position); return call; } -Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args, - Node*** rets, +Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, + Vector<Node*> args, Vector<Node*> rets, wasm::WasmCodePosition position, Node* instance_node, UseRetpoline use_retpoline) { @@ -2725,21 +2691,22 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args, size_t ret_count = sig->return_count(); if (ret_count == 0) return call; // No return value. - *rets = Buffer(ret_count).begin(); + DCHECK_EQ(ret_count, rets.size()); if (ret_count == 1) { // Only a single return value. - (*rets)[0] = call; + rets[0] = call; } else { // Create projections for all return values. for (size_t i = 0; i < ret_count; i++) { - (*rets)[i] = graph()->NewNode(mcgraph()->common()->Projection(i), call, - graph()->start()); + rets[i] = graph()->NewNode(mcgraph()->common()->Projection(i), call, + graph()->start()); } } return call; } -Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args, +Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig, + Vector<Node*> args, wasm::WasmCodePosition position, Node* instance_node, UseRetpoline use_retpoline) { @@ -2753,8 +2720,8 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args, return call; } -Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args, - Node*** rets, +Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, + Vector<Node*> args, Vector<Node*> rets, wasm::WasmCodePosition position, int func_index, IsReturnCall continuation) { @@ -2779,13 +2746,13 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args, case kCallContinues: return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline); case kReturnCall: - DCHECK_NULL(rets); + DCHECK(rets.empty()); return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline); } } -Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args, - Node*** rets, +Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, + Vector<Node*> args, Vector<Node*> rets, wasm::WasmCodePosition position, Node* func_index, IsReturnCall continuation) { @@ -2829,12 +2796,13 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args, case kCallContinues: return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline); case kReturnCall: - DCHECK_NULL(rets); + DCHECK(rets.empty()); return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline); } } -Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets, +Node* WasmGraphBuilder::CallDirect(uint32_t index, Vector<Node*> args, + Vector<Node*> rets, wasm::WasmCodePosition position) { DCHECK_NULL(args[0]); wasm::FunctionSig* sig = env_->module->functions[index].sig; @@ -2853,7 +2821,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets, } Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index, - Node** args, Node*** rets, + Vector<Node*> args, Vector<Node*> rets, wasm::WasmCodePosition position) { 
return BuildIndirectCall(table_index, sig_index, args, rets, position, kCallContinues); @@ -2902,8 +2870,9 @@ void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index, } Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, - uint32_t sig_index, Node** args, - Node*** rets, + uint32_t sig_index, + Vector<Node*> args, + Vector<Node*> rets, wasm::WasmCodePosition position, IsReturnCall continuation) { DCHECK_NOT_NULL(args[0]); @@ -2993,14 +2962,14 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, } } -Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args, +Node* WasmGraphBuilder::ReturnCall(uint32_t index, Vector<Node*> args, wasm::WasmCodePosition position) { DCHECK_NULL(args[0]); wasm::FunctionSig* sig = env_->module->functions[index].sig; if (env_ && index < env_->module->num_imported_functions) { // Return Call to an imported function. - return BuildImportCall(sig, args, nullptr, position, index, kReturnCall); + return BuildImportCall(sig, args, {}, position, index, kReturnCall); } // A direct tail call to a wasm function defined in this module. @@ -3013,9 +2982,10 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args, } Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index, - uint32_t sig_index, Node** args, + uint32_t sig_index, + Vector<Node*> args, wasm::WasmCodePosition position) { - return BuildIndirectCall(table_index, sig_index, args, nullptr, position, + return BuildIndirectCall(table_index, sig_index, args, {}, position, kReturnCall); } @@ -3062,6 +3032,14 @@ bool CanCover(Node* value, IrOpcode::Value opcode) { return true; } +Node* WasmGraphBuilder::BuildTruncateIntPtrToInt32(Node* value) { + if (mcgraph()->machine()->Is64()) { + value = + graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value); + } + return value; +} + Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) { if (mcgraph()->machine()->Is64()) { value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value); @@ -3070,12 +3048,20 @@ Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) { } Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) { + if (COMPRESS_POINTERS_BOOL) { + return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value, + BuildSmiShiftBitsConstant32()); + } value = BuildChangeInt32ToIntPtr(value); return graph()->NewNode(mcgraph()->machine()->WordShl(), value, BuildSmiShiftBitsConstant()); } Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) { + if (COMPRESS_POINTERS_BOOL) { + return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value, + BuildSmiShiftBitsConstant32()); + } return graph()->NewNode(mcgraph()->machine()->WordShl(), Uint32ToUintptr(value), BuildSmiShiftBitsConstant()); } @@ -3084,16 +3070,32 @@ Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() { return mcgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize); } +Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() { + return mcgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize); +} + Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) { - value = graph()->NewNode(mcgraph()->machine()->WordSar(), value, - BuildSmiShiftBitsConstant()); - if (mcgraph()->machine()->Is64()) { + if (COMPRESS_POINTERS_BOOL) { value = graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value); + value = graph()->NewNode(mcgraph()->machine()->Word32Sar(), value, + BuildSmiShiftBitsConstant32()); + } else { + value = BuildChangeSmiToIntPtr(value); + value = BuildTruncateIntPtrToInt32(value); } return 
value; } +Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) { + if (COMPRESS_POINTERS_BOOL) { + value = BuildChangeSmiToInt32(value); + return BuildChangeInt32ToIntPtr(value); + } + return graph()->NewNode(mcgraph()->machine()->WordSar(), value, + BuildSmiShiftBitsConstant()); +} + Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value, uint32_t maxval) { DCHECK(Smi::IsValid(maxval)); @@ -3181,14 +3183,16 @@ Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep, if (IsPhiWithMerge(tnode, merge)) { AppendToPhi(tnode, fnode); } else if (tnode != fnode) { + // Note that it is not safe to use {Buffer} here since this method is used + // via {CheckForException} while the {Buffer} is in use by another method. uint32_t count = merge->InputCount(); // + 1 for the merge node. - Vector<Node*> vals = Buffer(count + 1); - for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode; - vals[count - 1] = fnode; - vals[count] = merge; - return graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1, - vals.begin()); + base::SmallVector<Node*, 9> inputs(count + 1); + for (uint32_t j = 0; j < count - 1; j++) inputs[j] = tnode; + inputs[count - 1] = fnode; + inputs[count] = merge; + tnode = graph()->NewNode(mcgraph()->common()->Phi(rep, count), count + 1, + inputs.begin()); } return tnode; } @@ -3198,13 +3202,18 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, if (IsPhiWithMerge(tnode, merge)) { AppendToPhi(tnode, fnode); } else if (tnode != fnode) { + // Note that it is not safe to use {Buffer} here since this method is used + // via {CheckForException} while the {Buffer} is in use by another method. uint32_t count = merge->InputCount(); - Vector<Node*> effects = Buffer(count); + // + 1 for the merge node. 
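// Illustrative aside (not part of the patch): the Smi helpers changed a few
// hunks above (BuildChangeInt32ToSmi / BuildChangeSmiToInt32 / the new
// BuildChangeSmiToIntPtr) shift by kSmiShiftSize + kSmiTagSize. Assuming the
// usual V8 layout (kSmiTagSize == 1; kSmiShiftSize == 0 with pointer
// compression, 31 on uncompressed 64-bit), the encoding they implement looks
// like this standalone sketch; the names here are hypothetical, not V8 API.
#include <cstdint>

constexpr int kTagSize = 1;

// With pointer compression the Smi lives in the low 32 bits: value << 1.
inline uint32_t Int32ToCompressedSmi(int32_t value) {
  return static_cast<uint32_t>(value) << kTagSize;
}
inline int32_t CompressedSmiToInt32(uint32_t smi) {
  // Arithmetic shift right restores the sign, mirroring Word32Sar above.
  return static_cast<int32_t>(smi) >> kTagSize;
}

// On uncompressed 64-bit the 32-bit payload sits in the upper half instead.
inline uint64_t Int32ToFullSmi(int32_t value) {
  return static_cast<uint64_t>(static_cast<int64_t>(value)) << (31 + kTagSize);
}
inline int32_t FullSmiToInt32(uint64_t smi) {
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> (31 + kTagSize));
}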
+ base::SmallVector<Node*, 9> inputs(count + 1); for (uint32_t j = 0; j < count - 1; j++) { - effects[j] = tnode; + inputs[j] = tnode; } - effects[count - 1] = fnode; - tnode = EffectPhi(count, effects.begin(), merge); + inputs[count - 1] = fnode; + inputs[count] = merge; + tnode = graph()->NewNode(mcgraph()->common()->EffectPhi(count), count + 1, + inputs.begin()); } return tnode; } @@ -3310,10 +3319,7 @@ Node* WasmGraphBuilder::CurrentMemoryPages() { Node* result = graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size, mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2)); - if (mcgraph()->machine()->Is64()) { - result = - graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), result); - } + result = BuildTruncateIntPtrToInt32(result); return result; } @@ -3365,7 +3371,7 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f, parameter_count, effect_, Control()); } -Node* WasmGraphBuilder::GetGlobal(uint32_t index) { +Node* WasmGraphBuilder::GlobalGet(uint32_t index) { const wasm::WasmGlobal& global = env_->module->globals[index]; if (wasm::ValueTypes::IsReferenceType(global.type)) { if (global.mutability && global.imported) { @@ -3395,7 +3401,7 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) { return result; } -Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) { +Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) { const wasm::WasmGlobal& global = env_->module->globals[index]; if (wasm::ValueTypes::IsReferenceType(global.type)) { if (global.mutability && global.imported) { @@ -4008,6 +4014,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]); case wasm::kExprF64x2Neg: return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]); + case wasm::kExprF64x2Sqrt: + return graph()->NewNode(mcgraph()->machine()->F64x2Sqrt(), inputs[0]); case wasm::kExprF64x2Add: return graph()->NewNode(mcgraph()->machine()->F64x2Add(), inputs[0], inputs[1]); @@ -4044,6 +4052,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprF64x2Ge: return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1], inputs[0]); + case wasm::kExprF64x2Qfma: + return graph()->NewNode(mcgraph()->machine()->F64x2Qfma(), inputs[0], + inputs[1], inputs[2]); + case wasm::kExprF64x2Qfms: + return graph()->NewNode(mcgraph()->machine()->F64x2Qfms(), inputs[0], + inputs[1], inputs[2]); case wasm::kExprF32x4Splat: return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]); case wasm::kExprF32x4SConvertI32x4: @@ -4056,6 +4070,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { return graph()->NewNode(mcgraph()->machine()->F32x4Abs(), inputs[0]); case wasm::kExprF32x4Neg: return graph()->NewNode(mcgraph()->machine()->F32x4Neg(), inputs[0]); + case wasm::kExprF32x4Sqrt: + return graph()->NewNode(mcgraph()->machine()->F32x4Sqrt(), inputs[0]); case wasm::kExprF32x4RecipApprox: return graph()->NewNode(mcgraph()->machine()->F32x4RecipApprox(), inputs[0]); @@ -4101,6 +4117,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprF32x4Ge: return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1], inputs[0]); + case wasm::kExprF32x4Qfma: + return graph()->NewNode(mcgraph()->machine()->F32x4Qfma(), inputs[0], + inputs[1], inputs[2]); + case wasm::kExprF32x4Qfms: + return graph()->NewNode(mcgraph()->machine()->F32x4Qfms(), inputs[0], + inputs[1], 
inputs[2]); case wasm::kExprI64x2Splat: return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]); case wasm::kExprI64x2Neg: @@ -4459,6 +4481,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { return graph()->NewNode(mcgraph()->machine()->S1x16AnyTrue(), inputs[0]); case wasm::kExprS1x16AllTrue: return graph()->NewNode(mcgraph()->machine()->S1x16AllTrue(), inputs[0]); + case wasm::kExprS8x16Swizzle: + return graph()->NewNode(mcgraph()->machine()->S8x16Swizzle(), inputs[0], + inputs[1]); default: FATAL_UNSUPPORTED_OPCODE(opcode); } @@ -4492,13 +4517,23 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane, case wasm::kExprI32x4ReplaceLane: return graph()->NewNode(mcgraph()->machine()->I32x4ReplaceLane(lane), inputs[0], inputs[1]); - case wasm::kExprI16x8ExtractLane: + case wasm::kExprI16x8ExtractLaneS: + return graph()->NewNode( + mcgraph()->machine()->SignExtendWord16ToInt32(), + graph()->NewNode(mcgraph()->machine()->I16x8ExtractLane(lane), + inputs[0])); + case wasm::kExprI16x8ExtractLaneU: return graph()->NewNode(mcgraph()->machine()->I16x8ExtractLane(lane), inputs[0]); case wasm::kExprI16x8ReplaceLane: return graph()->NewNode(mcgraph()->machine()->I16x8ReplaceLane(lane), inputs[0], inputs[1]); - case wasm::kExprI8x16ExtractLane: + case wasm::kExprI8x16ExtractLaneS: + return graph()->NewNode( + mcgraph()->machine()->SignExtendWord8ToInt32(), + graph()->NewNode(mcgraph()->machine()->I8x16ExtractLane(lane), + inputs[0])); + case wasm::kExprI8x16ExtractLaneU: return graph()->NewNode(mcgraph()->machine()->I8x16ExtractLane(lane), inputs[0]); case wasm::kExprI8x16ReplaceLane: @@ -5076,7 +5111,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { CallDescriptor* GetI64ToBigIntCallDescriptor() { if (!lowering_special_case_) { - lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>(); + lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>(); } if (lowering_special_case_->i64_to_bigint_call_descriptor) { @@ -5112,7 +5147,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { CallDescriptor* GetBigIntToI64CallDescriptor() { if (!lowering_special_case_) { - lowering_special_case_ = base::make_unique<Int64LoweringSpecialCase>(); + lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>(); } if (lowering_special_case_->bigint_to_i64_call_descriptor) { @@ -5613,7 +5648,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* function_index_smi = LOAD_RAW( function_data, WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag, - MachineType::TypeCompressedTagged()); + MachineType::TypeCompressedTaggedSigned()); Node* function_index = BuildChangeSmiToInt32(function_index_smi); return function_index; } @@ -5622,13 +5657,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* jump_table_offset_smi = LOAD_RAW( function_data, WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag, - MachineType::TypeCompressedTagged()); - Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi); + MachineType::TypeCompressedTaggedSigned()); + Node* jump_table_offset = BuildChangeSmiToIntPtr(jump_table_offset_smi); return jump_table_offset; } + Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig, + Node* iterable, Node* context) { + Node* iterable_to_fixed_array = + BuildLoadBuiltinFromIsolateRoot(Builtins::kIterableToFixedArrayForWasm); + IterableToFixedArrayForWasmDescriptor interface_descriptor; + Node* length = 
BuildChangeUint31ToSmi( + Uint32Constant(static_cast<uint32_t>(sig->return_count()))); + auto call_descriptor = Linkage::GetStubCallDescriptor( + mcgraph()->zone(), interface_descriptor, + interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags, + Operator::kNoProperties, StubCallMode::kCallCodeObject); + return SetEffect(graph()->NewNode( + mcgraph()->common()->Call(call_descriptor), iterable_to_fixed_array, + iterable, length, context, Effect(), Control())); + } + void BuildJSToWasmWrapper(bool is_import) { const int wasm_count = static_cast<int>(sig_->parameter_count()); + const int rets_count = static_cast<int>(sig_->return_count()); // Build the start and the JS parameter nodes. SetEffect(SetControl(Start(wasm_count + 5))); @@ -5662,8 +5714,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } const int args_count = wasm_count + 1; // +1 for wasm_code. - Vector<Node*> args = Buffer(args_count); - Node** rets; + base::SmallVector<Node*, 16> args(args_count); + base::SmallVector<Node*, 1> rets(rets_count); // Convert JS parameters to wasm numbers. for (int i = 0; i < wasm_count; ++i) { @@ -5680,8 +5732,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // Load function index from {WasmExportedFunctionData}. Node* function_index = BuildLoadFunctionIndexFromExportedFunctionData(function_data); - BuildImportCall(sig_, args.begin(), &rets, wasm::kNoCodePosition, - function_index, kCallContinues); + BuildImportCall(sig_, VectorOf(args), VectorOf(rets), + wasm::kNoCodePosition, function_index, kCallContinues); } else { // Call to a wasm function defined in this module. // The call target is the jump table slot for that function. @@ -5693,8 +5745,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset); args[0] = jump_table_slot; - BuildWasmCall(sig_, args.begin(), &rets, wasm::kNoCodePosition, nullptr, - kNoRetpoline); + BuildWasmCall(sig_, VectorOf(args), VectorOf(rets), wasm::kNoCodePosition, + nullptr, kNoRetpoline); } // Clear the ThreadInWasm flag. @@ -5765,7 +5817,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { sloppy_receiver = false; V8_FALLTHROUGH; // fallthru case WasmImportCallKind::kJSFunctionArityMatchSloppy: { - Vector<Node*> args = Buffer(wasm_count + 7); + base::SmallVector<Node*, 16> args(wasm_count + 7); int pos = 0; Node* function_context = LOAD_RAW(callable_node, @@ -5785,7 +5837,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags); // Convert wasm numbers to JS values. - pos = AddArgumentNodes(args, pos, wasm_count, sig_); + pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_); args[pos++] = undefined_node; // new target args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count @@ -5805,7 +5857,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { sloppy_receiver = false; V8_FALLTHROUGH; // fallthru case WasmImportCallKind::kJSFunctionArityMismatchSloppy: { - Vector<Node*> args = Buffer(wasm_count + 9); + base::SmallVector<Node*, 16> args(wasm_count + 9); int pos = 0; Node* function_context = LOAD_RAW(callable_node, @@ -5852,7 +5904,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { flags, Operator::kNoProperties); // Convert wasm numbers to JS values. 
- pos = AddArgumentNodes(args, pos, wasm_count, sig_); + pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_); args[pos++] = function_context; args[pos++] = Effect(); args[pos++] = Control(); @@ -5866,7 +5918,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // === General case of unknown callable ================================== // ======================================================================= case WasmImportCallKind::kUseCallBuiltin: { - Vector<Node*> args = Buffer(wasm_count + 7); + base::SmallVector<Node*, 16> args(wasm_count + 7); int pos = 0; args[pos++] = BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny); @@ -5879,7 +5931,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { CallDescriptor::kNoFlags, Operator::kNoProperties); // Convert wasm numbers to JS values. - pos = AddArgumentNodes(args, pos, wasm_count, sig_); + pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_); // The native_context is sufficient here, because all kind of callables // which depend on the context provide their own context. The context @@ -5903,15 +5955,24 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { SetEffect(call); SetSourcePosition(call, 0); - // Convert the return value back. - Node* val = sig_->return_count() == 0 - ? mcgraph()->Int32Constant(0) - : FromJS(call, native_context, sig_->GetReturn()); - - // Set the ThreadInWasm flag again. - BuildModifyThreadInWasmFlag(true); - - Return(val); + // Convert the return value(s) back. + if (sig_->return_count() <= 1) { + Node* val = sig_->return_count() == 0 + ? mcgraph()->Int32Constant(0) + : FromJS(call, native_context, sig_->GetReturn()); + BuildModifyThreadInWasmFlag(true); + Return(val); + } else { + Node* fixed_array = + BuildMultiReturnFixedArrayFromIterable(sig_, call, native_context); + base::SmallVector<Node*, 8> wasm_values(sig_->return_count()); + for (unsigned i = 0; i < sig_->return_count(); ++i) { + wasm_values[i] = FromJS(LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i), + native_context, sig_->GetReturn(i)); + } + BuildModifyThreadInWasmFlag(true); + Return(VectorOf(wasm_values)); + } if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm); return true; @@ -6006,7 +6067,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { if (return_count == 0) { Return(Int32Constant(0)); } else { - Vector<Node*> returns = Buffer(return_count); + base::SmallVector<Node*, 8> returns(return_count); offset = 0; for (size_t i = 0; i < return_count; ++i) { wasm::ValueType type = sig_->GetReturn(i); @@ -6016,7 +6077,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { returns[i] = val; offset += wasm::ValueTypes::ElementSizeInBytes(type); } - Return(returns); + Return(VectorOf(returns)); } if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm); @@ -6078,7 +6139,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { if (return_count == 0) { Return(Int32Constant(0)); } else { - Vector<Node*> returns = Buffer(return_count); + base::SmallVector<Node*, 8> returns(return_count); offset = 0; for (size_t i = 0; i < return_count; ++i) { wasm::ValueType type = sig_->GetReturn(i); @@ -6088,7 +6149,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { returns[i] = val; offset += wasm::ValueTypes::ElementSizeInBytes(type); } - Return(returns); + Return(VectorOf(returns)); } if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm); @@ -6130,10 +6191,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset)); 
// Call the underlying closure. - Vector<Node*> args = Buffer(wasm_count + 7); + base::SmallVector<Node*, 16> args(wasm_count + 7); int pos = 0; - args[pos++] = graph()->NewNode(mcgraph()->common()->HeapConstant( - BUILTIN_CODE(isolate, Call_ReceiverIsAny))); + args[pos++] = + BuildLoadBuiltinFromIsolateRoot(Builtins::kCall_ReceiverIsAny); args[pos++] = callable; args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count args[pos++] = BuildLoadUndefinedValueFromInstance(); // receiver @@ -6158,14 +6219,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* call = SetEffect(graph()->NewNode( mcgraph()->common()->Call(call_descriptor), pos, args.begin())); - // TODO(wasm): Extend this to support multi-return. - DCHECK_LE(sig_->return_count(), 1); - // Convert return JS values to wasm numbers and back to JS values. - Node* jsval = - sig_->return_count() == 0 - ? BuildLoadUndefinedValueFromInstance() - : ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn()); + Node* jsval; + if (sig_->return_count() == 0) { + jsval = BuildLoadUndefinedValueFromInstance(); + } else if (sig_->return_count() == 1) { + jsval = ToJS(FromJS(call, context, sig_->GetReturn()), sig_->GetReturn()); + } else { + Node* fixed_array = + BuildMultiReturnFixedArrayFromIterable(sig_, call, context); + int32_t return_count = static_cast<int32_t>(sig_->return_count()); + Node* size = + graph()->NewNode(mcgraph()->common()->NumberConstant(return_count)); + Node* result_fixed_array = + BuildCallToRuntime(Runtime::kWasmNewMultiReturnFixedArray, &size, 1); + for (unsigned i = 0; i < sig_->return_count(); ++i) { + const auto& type = sig_->GetReturn(i); + Node* elem = LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i); + Node* cast = ToJS(FromJS(elem, context, type), type); + STORE_FIXED_ARRAY_SLOT_ANY(result_fixed_array, i, cast); + } + jsval = BuildCallToRuntimeWithContext(Runtime::kWasmNewMultiReturnJSArray, + context, &result_fixed_array, 1, + effect_, Control()); + } Return(jsval); } @@ -6184,7 +6261,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { kNoWriteBarrier); int wasm_arg_count = static_cast<int>(sig_->parameter_count()); - Vector<Node*> args = Buffer(wasm_arg_count + 4); + base::SmallVector<Node*, 16> args(wasm_arg_count + 4); int pos = 0; args[pos++] = code_entry; @@ -6222,14 +6299,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { pos = 0; offset = 0; for (wasm::ValueType type : sig_->returns()) { - StoreRepresentation store_rep( - wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier); Node* value = sig_->return_count() == 1 ? call : graph()->NewNode(mcgraph()->common()->Projection(pos), call, Control()); - SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep), - arg_buffer, Int32Constant(offset), value, + SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer, + Int32Constant(offset), value, Effect(), Control())); offset += wasm::ValueTypes::ElementSizeInBytes(type); pos++; @@ -6287,7 +6362,7 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob( // Create the Graph. 
//---------------------------------------------------------------------------- std::unique_ptr<Zone> zone = - base::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME); + std::make_unique<Zone>(wasm_engine->allocator(), ZONE_NAME); Graph* graph = new (zone.get()) Graph(zone.get()); CommonOperatorBuilder common(zone.get()); MachineOperatorBuilder machine( @@ -6702,7 +6777,7 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry( MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate, wasm::FunctionSig* sig) { std::unique_ptr<Zone> zone = - base::make_unique<Zone>(isolate->allocator(), ZONE_NAME); + std::make_unique<Zone>(isolate->allocator(), ZONE_NAME); Graph* graph = new (zone.get()) Graph(zone.get()); CommonOperatorBuilder common(zone.get()); MachineOperatorBuilder machine( @@ -6749,7 +6824,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate, MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) { std::unique_ptr<Zone> zone = - base::make_unique<Zone>(isolate->allocator(), ZONE_NAME); + std::make_unique<Zone>(isolate->allocator(), ZONE_NAME); Graph* graph = new (zone.get()) Graph(zone.get()); CommonOperatorBuilder common(zone.get()); MachineOperatorBuilder machine( @@ -6916,6 +6991,7 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation( counters->wasm_compile_function_peak_memory_bytes()->AddSample( static_cast<int>(mcgraph->graph()->zone()->allocation_size())); auto result = info.ReleaseWasmCompilationResult(); + CHECK_NOT_NULL(result); // Compilation expected to succeed. DCHECK_EQ(wasm::ExecutionTier::kTurbofan, result->result_tier); return std::move(*result); } diff --git a/chromium/v8/src/compiler/wasm-compiler.h b/chromium/v8/src/compiler/wasm-compiler.h index dd86ea14997..de0ca58c23a 100644 --- a/chromium/v8/src/compiler/wasm-compiler.h +++ b/chromium/v8/src/compiler/wasm-compiler.h @@ -179,16 +179,6 @@ class WasmGraphBuilder { wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph, wasm::FunctionSig* sig, compiler::SourcePositionTable* spt = nullptr); - Vector<Node*> Buffer(size_t count) { - if (count > cur_bufsize_) { - size_t new_size = count + cur_bufsize_ + 5; - cur_buffer_ = - reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*))); - cur_bufsize_ = new_size; - } - return {cur_buffer_, count}; - } - //----------------------------------------------------------------------- // Operations independent of {control} or {effect}. 
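// Illustrative aside (not part of the patch): the Buffer() scratch helper
// removed just above (and the Realloc() helper removed further down) handed
// out one shared, per-builder buffer, which the patch notes is unsafe when two
// methods are live at once (see the CreateOrMergeIntoPhi comment about
// CheckForException). The replacement, a per-call base::SmallVector<Node*, N>,
// gives every call site its own storage with inline capacity for the common
// small case. A minimal sketch of that container shape, not the real
// base::SmallVector:
#include <cstddef>
#include <memory>

template <typename T, size_t kInlineCapacity>
class TinySmallVector {
 public:
  explicit TinySmallVector(size_t size) : size_(size) {
    // Only sizes beyond the inline capacity pay for a heap allocation.
    if (size_ > kInlineCapacity) heap_.reset(new T[size_]());
  }
  T* begin() { return heap_ ? heap_.get() : inline_storage_; }
  T* end() { return begin() + size_; }
  T& operator[](size_t i) { return begin()[i]; }
  size_t size() const { return size_; }

 private:
  size_t size_;
  T inline_storage_[kInlineCapacity] = {};  // common case: no allocation
  std::unique_ptr<T[]> heap_;
};

// Usage mirroring the patch, e.g.: TinySmallVector<Node*, 16> args(args_count);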
//----------------------------------------------------------------------- @@ -199,11 +189,11 @@ class WasmGraphBuilder { Node* TerminateLoop(Node* effect, Node* control); Node* TerminateThrow(Node* effect, Node* control); Node* Merge(unsigned count, Node** controls); - Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control); + Node* Phi(wasm::ValueType type, unsigned count, Node** vals_and_control); Node* CreateOrMergeIntoPhi(MachineRepresentation rep, Node* merge, Node* tnode, Node* fnode); Node* CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, Node* fnode); - Node* EffectPhi(unsigned count, Node** effects, Node* control); + Node* EffectPhi(unsigned count, Node** effects_and_control); Node* RefNull(); Node* RefFunc(uint32_t function_index); Node* Uint32Constant(uint32_t value); @@ -223,8 +213,9 @@ class WasmGraphBuilder { Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag); Node* LoadExceptionTagFromTable(uint32_t exception_index); Node* GetExceptionTag(Node* except_obj); - Vector<Node*> GetExceptionValues(Node* except_obj, - const wasm::WasmException* exception); + Node* GetExceptionValues(Node* except_obj, + const wasm::WasmException* exception, + Vector<Node*> values_out); bool IsPhiWithMerge(Node* phi, Node* merge); bool ThrowsException(Node* node, Node** if_success, Node** if_exception); void AppendToMerge(Node* merge, Node* from); @@ -275,20 +266,21 @@ class WasmGraphBuilder { } Node* Unreachable(wasm::WasmCodePosition position); - Node* CallDirect(uint32_t index, Node** args, Node*** rets, + Node* CallDirect(uint32_t index, Vector<Node*> args, Vector<Node*> rets, wasm::WasmCodePosition position); - Node* CallIndirect(uint32_t table_index, uint32_t sig_index, Node** args, - Node*** rets, wasm::WasmCodePosition position); + Node* CallIndirect(uint32_t table_index, uint32_t sig_index, + Vector<Node*> args, Vector<Node*> rets, + wasm::WasmCodePosition position); - Node* ReturnCall(uint32_t index, Node** args, + Node* ReturnCall(uint32_t index, Vector<Node*> args, wasm::WasmCodePosition position); Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index, - Node** args, wasm::WasmCodePosition position); + Vector<Node*> args, wasm::WasmCodePosition position); Node* Invert(Node* node); - Node* GetGlobal(uint32_t index); - Node* SetGlobal(uint32_t index, Node* val); + Node* GlobalGet(uint32_t index); + Node* GlobalSet(uint32_t index, Node* val); Node* TableGet(uint32_t table_index, Node* index, wasm::WasmCodePosition position); Node* TableSet(uint32_t table_index, Node* index, Node* val, @@ -427,8 +419,6 @@ class WasmGraphBuilder { void RemoveBytecodePositionDecorator(); protected: - static const int kDefaultBufferSize = 16; - Zone* const zone_; MachineGraph* const mcgraph_; wasm::CompilationEnv* const env_; @@ -444,9 +434,6 @@ class WasmGraphBuilder { SetOncePointer<Node> isolate_root_node_; SetOncePointer<const Operator> stack_check_call_operator_; - Node** cur_buffer_; - size_t cur_bufsize_; - Node* def_buffer_[kDefaultBufferSize]; bool has_simd_ = false; bool needs_stack_check_ = false; const bool untrusted_code_mitigations_ = true; @@ -496,28 +483,29 @@ class WasmGraphBuilder { template <typename... Args> Node* BuildCCall(MachineSignature* sig, Node* function, Args... args); - Node* BuildCallNode(wasm::FunctionSig* sig, Node** args, + Node* BuildCallNode(wasm::FunctionSig* sig, Vector<Node*> args, wasm::WasmCodePosition position, Node* instance_node, const Operator* op); // Helper function for {BuildIndirectCall}. 
void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size, Node** ift_sig_ids, Node** ift_targets, Node** ift_instances); - Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args, - Node*** rets, wasm::WasmCodePosition position, + Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, + Vector<Node*> args, Vector<Node*> rets, + wasm::WasmCodePosition position, IsReturnCall continuation); - Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets, - wasm::WasmCodePosition position, Node* instance_node, - UseRetpoline use_retpoline); - Node* BuildWasmReturnCall(wasm::FunctionSig* sig, Node** args, + Node* BuildWasmCall(wasm::FunctionSig* sig, Vector<Node*> args, + Vector<Node*> rets, wasm::WasmCodePosition position, + Node* instance_node, UseRetpoline use_retpoline); + Node* BuildWasmReturnCall(wasm::FunctionSig* sig, Vector<Node*> args, wasm::WasmCodePosition position, Node* instance_node, UseRetpoline use_retpoline); - Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets, - wasm::WasmCodePosition position, int func_index, - IsReturnCall continuation); - Node* BuildImportCall(wasm::FunctionSig* sig, Node** args, Node*** rets, - wasm::WasmCodePosition position, Node* func_index, - IsReturnCall continuation); + Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args, + Vector<Node*> rets, wasm::WasmCodePosition position, + int func_index, IsReturnCall continuation); + Node* BuildImportCall(wasm::FunctionSig* sig, Vector<Node*> args, + Vector<Node*> rets, wasm::WasmCodePosition position, + Node* func_index, IsReturnCall continuation); Node* BuildF32CopySign(Node* left, Node* right); Node* BuildF64CopySign(Node* left, Node* right); @@ -574,11 +562,14 @@ class WasmGraphBuilder { MachineType result_type, wasm::TrapReason trap_zero, wasm::WasmCodePosition position); + Node* BuildTruncateIntPtrToInt32(Node* value); Node* BuildChangeInt32ToIntPtr(Node* value); Node* BuildChangeInt32ToSmi(Node* value); Node* BuildChangeUint31ToSmi(Node* value); Node* BuildSmiShiftBitsConstant(); + Node* BuildSmiShiftBitsConstant32(); Node* BuildChangeSmiToInt32(Node* value); + Node* BuildChangeSmiToIntPtr(Node* value); // generates {index > max ? Smi(max) : Smi(index)} Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval); @@ -599,15 +590,8 @@ class WasmGraphBuilder { Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index); Node* BuildDecodeException64BitValue(Node* values_array, uint32_t* index); - Vector<Node*> Realloc(Node* const* buffer, size_t old_count, - size_t new_count) { - DCHECK_GE(new_count, old_count); // Only support growing. 
- Vector<Node*> buf = Buffer(new_count); - if (buf.begin() != buffer) { - memcpy(buf.begin(), buffer, old_count * sizeof(Node*)); - } - return buf; - } + Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig, + Node* iterable, Node* context); //----------------------------------------------------------------------- // Operations involving the CEntry, a dependency we want to remove diff --git a/chromium/v8/src/compiler/zone-stats.h b/chromium/v8/src/compiler/zone-stats.h index 63d58eb99f2..68036f116da 100644 --- a/chromium/v8/src/compiler/zone-stats.h +++ b/chromium/v8/src/compiler/zone-stats.h @@ -33,6 +33,8 @@ class V8_EXPORT_PRIVATE ZoneStats final { zone_ = nullptr; } + ZoneStats* zone_stats() const { return zone_stats_; } + private: const char* zone_name_; ZoneStats* const zone_stats_; diff --git a/chromium/v8/src/d8/OWNERS b/chromium/v8/src/d8/OWNERS index ff3b6d73724..0f3e3d8e5cd 100644 --- a/chromium/v8/src/d8/OWNERS +++ b/chromium/v8/src/d8/OWNERS @@ -1,5 +1,5 @@ binji@chromium.org bmeurer@chromium.org -clemensh@chromium.org +clemensb@chromium.org verwaest@chromium.org yangguo@chromium.org diff --git a/chromium/v8/src/d8/d8-platforms.cc b/chromium/v8/src/d8/d8-platforms.cc index 42ce14f4f79..8faf893c803 100644 --- a/chromium/v8/src/d8/d8-platforms.cc +++ b/chromium/v8/src/d8/d8-platforms.cc @@ -11,7 +11,6 @@ #include "src/base/platform/mutex.h" #include "src/base/platform/platform.h" #include "src/base/platform/time.h" -#include "src/base/template-utils.h" #include "src/base/utils/random-number-generator.h" #include "src/d8/d8-platforms.h" @@ -94,7 +93,7 @@ class PredictablePlatform : public Platform { std::unique_ptr<Platform> MakePredictablePlatform( std::unique_ptr<Platform> platform) { - return base::make_unique<PredictablePlatform>(std::move(platform)); + return std::make_unique<PredictablePlatform>(std::move(platform)); } class DelayedTasksPlatform : public Platform { @@ -284,14 +283,14 @@ class DelayedTasksPlatform : public Platform { } std::unique_ptr<Task> MakeDelayedTask(std::unique_ptr<Task> task) { - return base::make_unique<DelayedTask>(std::move(task), - GetRandomDelayInMilliseconds()); + return std::make_unique<DelayedTask>(std::move(task), + GetRandomDelayInMilliseconds()); } std::unique_ptr<IdleTask> MakeDelayedIdleTask( std::unique_ptr<IdleTask> task) { - return base::make_unique<DelayedIdleTask>(std::move(task), - GetRandomDelayInMilliseconds()); + return std::make_unique<DelayedIdleTask>(std::move(task), + GetRandomDelayInMilliseconds()); } DISALLOW_COPY_AND_ASSIGN(DelayedTasksPlatform); @@ -300,10 +299,10 @@ class DelayedTasksPlatform : public Platform { std::unique_ptr<Platform> MakeDelayedTasksPlatform( std::unique_ptr<Platform> platform, int64_t random_seed) { if (random_seed) { - return base::make_unique<DelayedTasksPlatform>(std::move(platform), - random_seed); + return std::make_unique<DelayedTasksPlatform>(std::move(platform), + random_seed); } - return base::make_unique<DelayedTasksPlatform>(std::move(platform)); + return std::make_unique<DelayedTasksPlatform>(std::move(platform)); } } // namespace v8 diff --git a/chromium/v8/src/d8/d8.cc b/chromium/v8/src/d8/d8.cc index 13a35b0cd34..33f2b70b142 100644 --- a/chromium/v8/src/d8/d8.cc +++ b/chromium/v8/src/d8/d8.cc @@ -71,6 +71,11 @@ #define CHECK(condition) assert(condition) #endif +#define TRACE_BS(...) 
\ + do { \ + if (i::FLAG_trace_backing_store) PrintF(__VA_ARGS__); \ + } while (false) + namespace v8 { namespace { @@ -213,12 +218,20 @@ static Local<Value> Throw(Isolate* isolate, const char* message) { .ToLocalChecked()); } -static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context, - Local<v8::Object> object, const char* property) { +static MaybeLocal<Value> TryGetValue(v8::Isolate* isolate, + Local<Context> context, + Local<v8::Object> object, + const char* property) { Local<String> v8_str = String::NewFromUtf8(isolate, property, NewStringType::kNormal) - .ToLocalChecked(); - return object->Get(context, v8_str).ToLocalChecked(); + .FromMaybe(Local<String>()); + if (v8_str.IsEmpty()) return Local<Value>(); + return object->Get(context, v8_str); +} + +static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context, + Local<v8::Object> object, const char* property) { + return TryGetValue(isolate, context, object, property).ToLocalChecked(); } Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) { @@ -333,7 +346,6 @@ Global<Function> Shell::stringify_function_; base::LazyMutex Shell::workers_mutex_; bool Shell::allow_new_workers_ = true; std::unordered_set<std::shared_ptr<Worker>> Shell::running_workers_; -std::vector<ExternalizedContents> Shell::externalized_contents_; std::atomic<bool> Shell::script_executed_{false}; base::LazyMutex Shell::isolate_status_lock_; std::map<v8::Isolate*, bool> Shell::isolate_status_; @@ -377,7 +389,7 @@ class BackgroundCompileThread : public base::Thread { BackgroundCompileThread(Isolate* isolate, Local<String> source) : base::Thread(GetThreadOptions("BackgroundCompileThread")), source_(source), - streamed_source_(base::make_unique<DummySourceStream>(source, isolate), + streamed_source_(std::make_unique<DummySourceStream>(source, isolate), v8::ScriptCompiler::StreamedSource::UTF8), task_(v8::ScriptCompiler::StartStreamingScript(isolate, &streamed_source_)) {} @@ -740,8 +752,60 @@ struct DynamicImportData { Global<Promise::Resolver> resolver; }; +struct ModuleResolutionData { + ModuleResolutionData(Isolate* isolate_, Local<Value> module_namespace_, + Local<Promise::Resolver> resolver_) + : isolate(isolate_) { + module_namespace.Reset(isolate, module_namespace_); + resolver.Reset(isolate, resolver_); + } + + Isolate* isolate; + Global<Value> module_namespace; + Global<Promise::Resolver> resolver; +}; + } // namespace +void Shell::ModuleResolutionSuccessCallback( + const FunctionCallbackInfo<Value>& info) { + std::unique_ptr<ModuleResolutionData> module_resolution_data( + static_cast<ModuleResolutionData*>( + info.Data().As<v8::External>()->Value())); + Isolate* isolate(module_resolution_data->isolate); + HandleScope handle_scope(isolate); + + Local<Promise::Resolver> resolver( + module_resolution_data->resolver.Get(isolate)); + Local<Value> module_namespace( + module_resolution_data->module_namespace.Get(isolate)); + + PerIsolateData* data = PerIsolateData::Get(isolate); + Local<Context> realm = data->realms_[data->realm_current_].Get(isolate); + Context::Scope context_scope(realm); + + resolver->Resolve(realm, module_namespace).ToChecked(); +} + +void Shell::ModuleResolutionFailureCallback( + const FunctionCallbackInfo<Value>& info) { + std::unique_ptr<ModuleResolutionData> module_resolution_data( + static_cast<ModuleResolutionData*>( + info.Data().As<v8::External>()->Value())); + Isolate* isolate(module_resolution_data->isolate); + HandleScope handle_scope(isolate); + + Local<Promise::Resolver> resolver( + 
module_resolution_data->resolver.Get(isolate)); + + PerIsolateData* data = PerIsolateData::Get(isolate); + Local<Context> realm = data->realms_[data->realm_current_].Get(isolate); + Context::Scope context_scope(realm); + + DCHECK_EQ(info.Length(), 1); + resolver->Reject(realm, info[0]).ToChecked(); +} + MaybeLocal<Promise> Shell::HostImportModuleDynamically( Local<Context> context, Local<ScriptOrModule> referrer, Local<String> specifier) { @@ -829,19 +893,44 @@ void Shell::DoHostImportModuleDynamically(void* import_data) { if (root_module->InstantiateModule(realm, ResolveModuleCallback) .FromMaybe(false)) { maybe_result = root_module->Evaluate(realm); + CHECK_IMPLIES(i::FLAG_harmony_top_level_await, !maybe_result.IsEmpty()); EmptyMessageQueues(isolate); } - Local<Value> module; - if (!maybe_result.ToLocal(&module)) { + Local<Value> result; + if (!maybe_result.ToLocal(&result)) { DCHECK(try_catch.HasCaught()); resolver->Reject(realm, try_catch.Exception()).ToChecked(); return; } - DCHECK(!try_catch.HasCaught()); Local<Value> module_namespace = root_module->GetModuleNamespace(); - resolver->Resolve(realm, module_namespace).ToChecked(); + if (i::FLAG_harmony_top_level_await) { + Local<Promise> result_promise(Local<Promise>::Cast(result)); + if (result_promise->State() == Promise::kRejected) { + resolver->Reject(realm, result_promise->Result()).ToChecked(); + return; + } + + // Setup callbacks, and then chain them to the result promise. + // ModuleResolutionData will be deleted by the callbacks. + auto module_resolution_data = + new ModuleResolutionData(isolate, module_namespace, resolver); + Local<v8::External> edata = External::New(isolate, module_resolution_data); + Local<Function> callback_success; + CHECK(Function::New(realm, ModuleResolutionSuccessCallback, edata) + .ToLocal(&callback_success)); + Local<Function> callback_failure; + CHECK(Function::New(realm, ModuleResolutionFailureCallback, edata) + .ToLocal(&callback_failure)); + result_promise->Then(realm, callback_success, callback_failure) + .ToLocalChecked(); + } else { + // TODO(joshualitt): Clean up exception handling after introucing new + // API for evaluating async modules. + DCHECK(!try_catch.HasCaught()); + resolver->Resolve(realm, module_namespace).ToChecked(); + } } bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) { @@ -857,7 +946,6 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) { try_catch.SetVerbose(true); Local<Module> root_module; - MaybeLocal<Value> maybe_exception; if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) { CHECK(try_catch.HasCaught()); @@ -869,6 +957,7 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) { if (root_module->InstantiateModule(realm, ResolveModuleCallback) .FromMaybe(false)) { maybe_result = root_module->Evaluate(realm); + CHECK_IMPLIES(i::FLAG_harmony_top_level_await, !maybe_result.IsEmpty()); EmptyMessageQueues(isolate); } Local<Value> result; @@ -878,6 +967,30 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) { ReportException(isolate, &try_catch); return false; } + if (i::FLAG_harmony_top_level_await) { + // Loop until module execution finishes + // TODO(joshualitt): This is a bit wonky. "Real" engines would not be + // able to just busy loop waiting for execution to finish. 
+ Local<Promise> result_promise(Local<Promise>::Cast(result)); + while (result_promise->State() == Promise::kPending) { + isolate->RunMicrotasks(); + } + + if (result_promise->State() == Promise::kRejected) { + // If the exception has been caught by the promise pipeline, we rethrow + // here in order to ReportException. + // TODO(joshualitt): Clean this up after we create a new API for the case + // where TLA is enabled. + if (!try_catch.HasCaught()) { + isolate->ThrowException(result_promise->Result()); + } else { + DCHECK_EQ(try_catch.Exception(), result_promise->Result()); + } + ReportException(isolate, &try_catch); + return false; + } + } + DCHECK(!try_catch.HasCaught()); return true; } @@ -984,6 +1097,27 @@ void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) { } } +// performance.measureMemory() implements JavaScript Memory API proposal. +// See https://github.com/ulan/javascript-agent-memory/blob/master/explainer.md. +void Shell::PerformanceMeasureMemory( + const v8::FunctionCallbackInfo<v8::Value>& args) { + v8::MeasureMemoryMode mode = v8::MeasureMemoryMode::kSummary; + v8::Isolate* isolate = args.GetIsolate(); + Local<Context> context = isolate->GetCurrentContext(); + if (args.Length() >= 1 && args[0]->IsObject()) { + Local<Object> object = args[0].As<Object>(); + Local<Value> value = TryGetValue(isolate, context, object, "detailed") + .FromMaybe(Local<Value>()); + if (!value.IsEmpty() && value->IsBoolean() && + value->BooleanValue(isolate)) { + mode = v8::MeasureMemoryMode::kDetailed; + } + } + v8::MaybeLocal<v8::Promise> result = + args.GetIsolate()->MeasureMemory(context, mode); + args.GetReturnValue().Set(result.FromMaybe(v8::Local<v8::Promise>())); +} + // Realm.current() returns the index of the currently active realm. 
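// A minimal sketch of the call that the performance.measureMemory handler
// above forwards to (assumed embedder-side usage; the helper name
// "MeasureHeapSketch" and its parameters are illustrative only and rely on
// the public declarations from include/v8.h):
v8::MaybeLocal<v8::Promise> MeasureHeapSketch(v8::Isolate* isolate,
                                              v8::Local<v8::Context> context,
                                              bool detailed) {
  v8::MeasureMemoryMode mode = detailed ? v8::MeasureMemoryMode::kDetailed
                                        : v8::MeasureMemoryMode::kSummary;
  // The returned promise resolves asynchronously with the measurement,
  // which is what performance.measureMemory() hands back to script.
  return isolate->MeasureMemory(context, mode);
}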
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) { Isolate* isolate = args.GetIsolate(); @@ -1820,6 +1954,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) { String::NewFromUtf8(isolate, "now", NewStringType::kNormal) .ToLocalChecked(), FunctionTemplate::New(isolate, PerformanceNow)); + performance_template->Set( + String::NewFromUtf8(isolate, "measureMemory", NewStringType::kNormal) + .ToLocalChecked(), + FunctionTemplate::New(isolate, PerformanceMeasureMemory)); global_template->Set( String::NewFromUtf8(isolate, "performance", NewStringType::kNormal) .ToLocalChecked(), @@ -2364,6 +2502,33 @@ class InspectorClient : public v8_inspector::V8InspectorClient { context_.Reset(isolate_, context); } + void runMessageLoopOnPause(int contextGroupId) override { + v8::Isolate::AllowJavascriptExecutionScope allow_script(isolate_); + v8::HandleScope handle_scope(isolate_); + Local<String> callback_name = + v8::String::NewFromUtf8(isolate_, "handleInspectorMessage", + v8::NewStringType::kNormal) + .ToLocalChecked(); + Local<Context> context = context_.Get(isolate_); + Local<Value> callback = + context->Global()->Get(context, callback_name).ToLocalChecked(); + if (!callback->IsFunction()) return; + + v8::TryCatch try_catch(isolate_); + is_paused = true; + + while (is_paused) { + USE(Local<Function>::Cast(callback)->Call(context, Undefined(isolate_), 0, + {})); + if (try_catch.HasCaught()) { + Shell::ReportException(isolate_, &try_catch); + is_paused = false; + } + } + } + + void quitMessageLoopOnPause() override { is_paused = false; } + private: static v8_inspector::V8InspectorSession* GetSession(Local<Context> context) { InspectorClient* inspector_client = static_cast<InspectorClient*>( @@ -2402,6 +2567,7 @@ class InspectorClient : public v8_inspector::V8InspectorClient { std::unique_ptr<v8_inspector::V8Inspector> inspector_; std::unique_ptr<v8_inspector::V8InspectorSession> session_; std::unique_ptr<v8_inspector::V8Inspector::Channel> channel_; + bool is_paused = false; Global<Context> context_; Isolate* isolate_; }; @@ -2548,12 +2714,6 @@ void SourceGroup::JoinThread() { thread_->Join(); } -ExternalizedContents::~ExternalizedContents() { - if (data_ != nullptr) { - deleter_(data_, length_, deleter_data_); - } -} - void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) { base::MutexGuard lock_guard(&mutex_); data_.push_back(std::move(data)); @@ -2841,9 +3001,6 @@ bool Shell::SetOptions(int argc, char* argv[]) { options.icu_locale = argv[i] + 13; argv[i] = nullptr; #ifdef V8_USE_EXTERNAL_STARTUP_DATA - } else if (strncmp(argv[i], "--natives_blob=", 15) == 0) { - options.natives_blob = argv[i] + 15; - argv[i] = nullptr; } else if (strncmp(argv[i], "--snapshot_blob=", 16) == 0) { options.snapshot_blob = argv[i] + 16; argv[i] = nullptr; @@ -3133,11 +3290,10 @@ class Serializer : public ValueSerializer::Delegate { std::unique_ptr<SerializationData> Release() { return std::move(data_); } - void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) { - to->insert(to->end(), - std::make_move_iterator(externalized_contents_.begin()), - std::make_move_iterator(externalized_contents_.end())); - externalized_contents_.clear(); + void AppendBackingStoresTo(std::vector<std::shared_ptr<BackingStore>>* to) { + to->insert(to->end(), std::make_move_iterator(backing_stores_.begin()), + std::make_move_iterator(backing_stores_.end())); + backing_stores_.clear(); } protected: @@ -3157,8 +3313,8 @@ class Serializer : public 
ValueSerializer::Delegate { size_t index = shared_array_buffers_.size(); shared_array_buffers_.emplace_back(isolate_, shared_array_buffer); - data_->shared_array_buffer_contents_.push_back( - MaybeExternalize(shared_array_buffer)); + data_->sab_backing_stores_.push_back( + shared_array_buffer->GetBackingStore()); return Just<uint32_t>(static_cast<uint32_t>(index)); } @@ -3173,7 +3329,7 @@ class Serializer : public ValueSerializer::Delegate { size_t index = wasm_modules_.size(); wasm_modules_.emplace_back(isolate_, module); - data_->transferrable_modules_.push_back(module->GetTransferrableModule()); + data_->compiled_wasm_modules_.push_back(module->GetCompiledModule()); return Just<uint32_t>(static_cast<uint32_t>(index)); } @@ -3229,17 +3385,6 @@ class Serializer : public ValueSerializer::Delegate { } } - template <typename T> - typename T::Contents MaybeExternalize(Local<T> array_buffer) { - if (array_buffer->IsExternal()) { - return array_buffer->GetContents(); - } else { - typename T::Contents contents = array_buffer->Externalize(); - externalized_contents_.emplace_back(contents); - return contents; - } - } - Maybe<bool> FinalizeTransfer() { for (const auto& global_array_buffer : array_buffers_) { Local<ArrayBuffer> array_buffer = @@ -3249,9 +3394,12 @@ class Serializer : public ValueSerializer::Delegate { return Nothing<bool>(); } - ArrayBuffer::Contents contents = MaybeExternalize(array_buffer); + auto backing_store = array_buffer->GetBackingStore(); + if (!array_buffer->IsExternal()) { + array_buffer->Externalize(backing_store); + } + data_->backing_stores_.push_back(std::move(backing_store)); array_buffer->Detach(); - data_->array_buffer_contents_.push_back(contents); } return Just(true); @@ -3263,7 +3411,7 @@ class Serializer : public ValueSerializer::Delegate { std::vector<Global<ArrayBuffer>> array_buffers_; std::vector<Global<SharedArrayBuffer>> shared_array_buffers_; std::vector<Global<WasmModuleObject>> wasm_modules_; - std::vector<ExternalizedContents> externalized_contents_; + std::vector<std::shared_ptr<v8::BackingStore>> backing_stores_; size_t current_memory_usage_; DISALLOW_COPY_AND_ASSIGN(Serializer); @@ -3285,9 +3433,9 @@ class Deserializer : public ValueDeserializer::Delegate { } uint32_t index = 0; - for (const auto& contents : data_->array_buffer_contents()) { + for (const auto& backing_store : data_->backing_stores()) { Local<ArrayBuffer> array_buffer = - ArrayBuffer::New(isolate_, contents.Data(), contents.ByteLength()); + ArrayBuffer::New(isolate_, std::move(backing_store)); deserializer_.TransferArrayBuffer(index++, array_buffer); } @@ -3297,11 +3445,9 @@ class Deserializer : public ValueDeserializer::Delegate { MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId( Isolate* isolate, uint32_t clone_id) override { DCHECK_NOT_NULL(data_); - if (clone_id < data_->shared_array_buffer_contents().size()) { - const SharedArrayBuffer::Contents contents = - data_->shared_array_buffer_contents().at(clone_id); - return SharedArrayBuffer::New(isolate_, contents.Data(), - contents.ByteLength()); + if (clone_id < data_->sab_backing_stores().size()) { + return SharedArrayBuffer::New( + isolate_, std::move(data_->sab_backing_stores().at(clone_id))); } return MaybeLocal<SharedArrayBuffer>(); } @@ -3309,11 +3455,9 @@ class Deserializer : public ValueDeserializer::Delegate { MaybeLocal<WasmModuleObject> GetWasmModuleFromId( Isolate* isolate, uint32_t transfer_id) override { DCHECK_NOT_NULL(data_); - if (transfer_id < data_->transferrable_modules().size()) { - return 
WasmModuleObject::FromTransferrableModule( - isolate_, data_->transferrable_modules().at(transfer_id)); - } - return MaybeLocal<WasmModuleObject>(); + if (transfer_id >= data_->compiled_wasm_modules().size()) return {}; + return WasmModuleObject::FromCompiledModule( + isolate_, data_->compiled_wasm_modules().at(transfer_id)); } private: @@ -3333,9 +3477,6 @@ std::unique_ptr<SerializationData> Shell::SerializeValue( if (serializer.WriteValue(context, value, transfer).To(&ok)) { data = serializer.Release(); } - // Append externalized contents even when WriteValue fails. - base::MutexGuard lock_guard(workers_mutex_.Pointer()); - serializer.AppendExternalizedContentsTo(&externalized_contents_); return data; } @@ -3377,7 +3518,6 @@ void Shell::WaitForRunningWorkers() { base::MutexGuard lock_guard(workers_mutex_.Pointer()); DCHECK(running_workers_.empty()); allow_new_workers_ = true; - externalized_contents_.clear(); } int Shell::Main(int argc, char* argv[]) { @@ -3402,7 +3542,7 @@ int Shell::Main(int argc, char* argv[]) { std::unique_ptr<platform::tracing::TracingController> tracing; std::ofstream trace_file; if (options.trace_enabled && !i::FLAG_verify_predictable) { - tracing = base::make_unique<platform::tracing::TracingController>(); + tracing = std::make_unique<platform::tracing::TracingController>(); trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json"); DCHECK(trace_file.good()); @@ -3447,9 +3587,8 @@ int Shell::Main(int argc, char* argv[]) { } v8::V8::InitializePlatform(g_platform.get()); v8::V8::Initialize(); - if (options.natives_blob || options.snapshot_blob) { - v8::V8::InitializeExternalStartupData(options.natives_blob, - options.snapshot_blob); + if (options.snapshot_blob) { + v8::V8::InitializeExternalStartupDataFromFile(options.snapshot_blob); } else { v8::V8::InitializeExternalStartupData(argv[0]); } @@ -3626,3 +3765,4 @@ int main(int argc, char* argv[]) { return v8::Shell::Main(argc, argv); } #undef CHECK #undef DCHECK +#undef TRACE_BS diff --git a/chromium/v8/src/d8/d8.h b/chromium/v8/src/d8/d8.h index 04fc5f5d341..458bad858ab 100644 --- a/chromium/v8/src/d8/d8.h +++ b/chromium/v8/src/d8/d8.h @@ -111,70 +111,20 @@ class SourceGroup { int end_offset_; }; -// The backing store of an ArrayBuffer or SharedArrayBuffer, after -// Externalize() has been called on it. 
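// Sketch of the backing-store based transfer that replaces the
// ExternalizedContents machinery d8 drops here, mirroring the Serializer /
// Deserializer pair above (simplified; assumes the buffer is detachable,
// omits error handling, and "other_isolate" is a placeholder for the
// receiving worker's isolate):
v8::Local<v8::ArrayBuffer> TransferSketch(v8::Isolate* other_isolate,
                                          v8::Local<v8::ArrayBuffer> buffer) {
  // Sending side: keep the memory alive independently of the JS object,
  // then detach the buffer so the sending realm can no longer touch it.
  std::shared_ptr<v8::BackingStore> store = buffer->GetBackingStore();
  if (!buffer->IsExternal()) buffer->Externalize(store);
  buffer->Detach();
  // Receiving side: re-wrap the same memory without copying it.
  return v8::ArrayBuffer::New(other_isolate, std::move(store));
}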
-class ExternalizedContents { - public: - explicit ExternalizedContents(const ArrayBuffer::Contents& contents) - : data_(contents.Data()), - length_(contents.ByteLength()), - deleter_(contents.Deleter()), - deleter_data_(contents.DeleterData()) {} - explicit ExternalizedContents(const SharedArrayBuffer::Contents& contents) - : data_(contents.Data()), - length_(contents.ByteLength()), - deleter_(contents.Deleter()), - deleter_data_(contents.DeleterData()) {} - ExternalizedContents(ExternalizedContents&& other) V8_NOEXCEPT - : data_(other.data_), - length_(other.length_), - deleter_(other.deleter_), - deleter_data_(other.deleter_data_) { - other.data_ = nullptr; - other.length_ = 0; - other.deleter_ = nullptr; - other.deleter_data_ = nullptr; - } - ExternalizedContents& operator=(ExternalizedContents&& other) V8_NOEXCEPT { - if (this != &other) { - data_ = other.data_; - length_ = other.length_; - deleter_ = other.deleter_; - deleter_data_ = other.deleter_data_; - other.data_ = nullptr; - other.length_ = 0; - other.deleter_ = nullptr; - other.deleter_data_ = nullptr; - } - return *this; - } - ~ExternalizedContents(); - - private: - void* data_; - size_t length_; - ArrayBuffer::Contents::DeleterCallback deleter_; - void* deleter_data_; - - DISALLOW_COPY_AND_ASSIGN(ExternalizedContents); -}; - class SerializationData { public: SerializationData() : size_(0) {} uint8_t* data() { return data_.get(); } size_t size() { return size_; } - const std::vector<ArrayBuffer::Contents>& array_buffer_contents() { - return array_buffer_contents_; + const std::vector<std::shared_ptr<v8::BackingStore>>& backing_stores() { + return backing_stores_; } - const std::vector<SharedArrayBuffer::Contents>& - shared_array_buffer_contents() { - return shared_array_buffer_contents_; + const std::vector<std::shared_ptr<v8::BackingStore>>& sab_backing_stores() { + return sab_backing_stores_; } - const std::vector<WasmModuleObject::TransferrableModule>& - transferrable_modules() { - return transferrable_modules_; + const std::vector<CompiledWasmModule>& compiled_wasm_modules() { + return compiled_wasm_modules_; } private: @@ -184,9 +134,9 @@ class SerializationData { std::unique_ptr<uint8_t, DataDeleter> data_; size_t size_; - std::vector<ArrayBuffer::Contents> array_buffer_contents_; - std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_; - std::vector<WasmModuleObject::TransferrableModule> transferrable_modules_; + std::vector<std::shared_ptr<v8::BackingStore>> backing_stores_; + std::vector<std::shared_ptr<v8::BackingStore>> sab_backing_stores_; + std::vector<CompiledWasmModule> compiled_wasm_modules_; private: friend class Serializer; @@ -334,7 +284,6 @@ class ShellOptions { SourceGroup* isolate_sources = nullptr; const char* icu_data_file = nullptr; const char* icu_locale = nullptr; - const char* natives_blob = nullptr; const char* snapshot_blob = nullptr; bool trace_enabled = false; const char* trace_path = nullptr; @@ -389,6 +338,8 @@ class Shell : public i::AllStatic { static void MapCounters(v8::Isolate* isolate, const char* name); static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args); + static void PerformanceMeasureMemory( + const v8::FunctionCallbackInfo<v8::Value>& args); static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args); static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args); @@ -473,6 +424,10 @@ class Shell : public i::AllStatic { static MaybeLocal<Promise> HostImportModuleDynamically( Local<Context> context, 
Local<ScriptOrModule> referrer, Local<String> specifier); + static void ModuleResolutionSuccessCallback( + const v8::FunctionCallbackInfo<v8::Value>& info); + static void ModuleResolutionFailureCallback( + const v8::FunctionCallbackInfo<v8::Value>& info); static void HostInitializeImportMetaObject(Local<Context> context, Local<Module> module, Local<Object> meta); @@ -519,7 +474,6 @@ class Shell : public i::AllStatic { static base::LazyMutex workers_mutex_; // Guards the following members. static bool allow_new_workers_; static std::unordered_set<std::shared_ptr<Worker>> running_workers_; - static std::vector<ExternalizedContents> externalized_contents_; // Multiple isolates may update this flag concurrently. static std::atomic<bool> script_executed_; diff --git a/chromium/v8/src/debug/debug-coverage.cc b/chromium/v8/src/debug/debug-coverage.cc index cb466ab6ab7..5f368683f25 100644 --- a/chromium/v8/src/debug/debug-coverage.cc +++ b/chromium/v8/src/debug/debug-coverage.cc @@ -577,11 +577,15 @@ struct SharedFunctionInfoAndCount { // Sort by: // - start, ascending. // - end, descending. - // - count, ascending. + // - info.is_toplevel() first + // - count, descending. bool operator<(const SharedFunctionInfoAndCount& that) const { if (this->start != that.start) return this->start < that.start; if (this->end != that.end) return this->end > that.end; - return this->count < that.count; + if (this->info.is_toplevel() != that.info.is_toplevel()) { + return this->info.is_toplevel(); + } + return this->count > that.count; } SharedFunctionInfo info; @@ -653,12 +657,30 @@ std::unique_ptr<Coverage> Coverage::Collect( // Find the correct outer function based on start position. // - // This is not robust when considering two functions with identical source - // ranges. In this case, it is unclear which function is the inner / outer - // function. Above, we ensure that such functions are sorted in ascending - // `count` order, so at least our `parent_is_covered` optimization below - // should be fine. - // TODO(jgruber): Consider removing the optimization. + // This is, in general, not robust when considering two functions with + // identical source ranges; then the notion of inner and outer is unclear. + // Identical source ranges arise when the source range of top-most entity + // (e.g. function) in the script is identical to the whole script, e.g. + // <script>function foo() {}<script>. The script has its own shared + // function info, which has the same source range as the SFI for `foo`. + // Node.js creates an additional wrapper for scripts (again with identical + // source range) and those wrappers will have a call count of zero even if + // the wrapped script was executed (see v8:9212). We mitigate this issue + // by sorting top-level SFIs first among SFIs with the same source range: + // This ensures top-level SFIs are processed first. If a top-level SFI has + // a non-zero call count, it gets recorded due to `function_is_relevant` + // below (e.g. script wrappers), while top-level SFIs with zero call count + // do not get reported (this ensures node's extra wrappers do not get + // reported). If two SFIs with identical source ranges get reported, we + // report them in decreasing order of call count, as in all known cases + // this corresponds to the nesting order. In the case of the script tag + // example above, we report the zero call count of `foo` last. As it turns + // out, embedders started to rely on functions being reported in nesting + // order. 
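  // A small worked example of the resulting order (hypothetical counts):
  // three SFIs sharing the source range [0, 25) --
  //   A = {start 0, end 25, is_toplevel true,  count 1}   (script SFI)
  //   B = {start 0, end 25, is_toplevel false, count 7}
  //   C = {start 0, end 25, is_toplevel false, count 0}
  // sort as A, B, C under the comparator above: the top-level SFI first,
  // then identical ranges by decreasing count, which in practice matches
  // the nesting order embedders expect.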
+ // TODO(jgruber): Investigate whether it is possible to remove node's + // extra top-level wrapper script, or change its source range, or ensure + // that it follows the invariant that nesting order is descending count + // order for SFIs with identical source ranges. while (!nesting.empty() && functions->at(nesting.back()).end <= start) { nesting.pop_back(); } diff --git a/chromium/v8/src/debug/debug-coverage.h b/chromium/v8/src/debug/debug-coverage.h index 9c1f0bcc2c4..81b178181a6 100644 --- a/chromium/v8/src/debug/debug-coverage.h +++ b/chromium/v8/src/debug/debug-coverage.h @@ -5,6 +5,7 @@ #ifndef V8_DEBUG_DEBUG_COVERAGE_H_ #define V8_DEBUG_DEBUG_COVERAGE_H_ +#include <memory> #include <vector> #include "src/debug/debug-interface.h" diff --git a/chromium/v8/src/debug/debug-evaluate.cc b/chromium/v8/src/debug/debug-evaluate.cc index 203885143fa..55658f5a158 100644 --- a/chromium/v8/src/debug/debug-evaluate.cc +++ b/chromium/v8/src/debug/debug-evaluate.cc @@ -23,9 +23,13 @@ namespace internal { MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate, Handle<String> source, - bool throw_on_side_effect) { + debug::EvaluateGlobalMode mode) { // Disable breaks in side-effect free mode. - DisableBreak disable_break_scope(isolate->debug(), throw_on_side_effect); + DisableBreak disable_break_scope( + isolate->debug(), + mode == debug::EvaluateGlobalMode::kDisableBreaks || + mode == + debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect); Handle<Context> context = isolate->native_context(); ScriptOriginOptions origin_options(false, true); @@ -42,11 +46,15 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate, Handle<JSFunction> fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(shared_info, context); - if (throw_on_side_effect) isolate->debug()->StartSideEffectCheckMode(); + if (mode == debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect) { + isolate->debug()->StartSideEffectCheckMode(); + } MaybeHandle<Object> result = Execution::Call( isolate, fun, Handle<JSObject>(context->global_proxy(), isolate), 0, nullptr); - if (throw_on_side_effect) isolate->debug()->StopSideEffectCheckMode(); + if (mode == debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect) { + isolate->debug()->StopSideEffectCheckMode(); + } return result; } @@ -161,7 +169,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate, : isolate_(isolate), frame_inspector_(frame, inlined_jsframe_index, isolate), scope_iterator_(isolate, &frame_inspector_, - ScopeIterator::COLLECT_NON_LOCALS) { + ScopeIterator::ReparseStrategy::kScript) { Handle<Context> outer_context(frame_inspector_.GetFunction()->context(), isolate); evaluation_context_ = outer_context; @@ -174,31 +182,31 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate, // - To make stack-allocated variables visible, we materialize them and // use a debug-evaluate context to wrap both the materialized object and // the original context. - // - We use the original context chain from the function context to the - // native context. + // - We also wrap all contexts on the chain between the original context + // and the function context. // - Between the function scope and the native context, we only resolve - // variable names that the current function already uses. Only for these - // names we can be sure that they will be correctly resolved. For the - // rest, we only resolve to with, script, and native contexts. We use a - // whitelist to implement that. 
+ // variable names that are guaranteed to not be shadowed by stack-allocated + // variables. Contexts between the function context and the original + // context have a blacklist attached to implement that. // Context::Lookup has special handling for debug-evaluate contexts: // - Look up in the materialized stack variables. + // - Check the blacklist to find out whether to abort further lookup. // - Look up in the original context. - // - Check the whitelist to find out whether to skip contexts during lookup. - for (; scope_iterator_.InInnerScope(); scope_iterator_.Next()) { + for (; !scope_iterator_.Done(); scope_iterator_.Next()) { ScopeIterator::ScopeType scope_type = scope_iterator_.Type(); if (scope_type == ScopeIterator::ScopeTypeScript) break; ContextChainElement context_chain_element; - if (scope_type == ScopeIterator::ScopeTypeLocal || - scope_iterator_.DeclaresLocals(ScopeIterator::Mode::STACK)) { + if (scope_iterator_.InInnerScope() && + (scope_type == ScopeIterator::ScopeTypeLocal || + scope_iterator_.DeclaresLocals(ScopeIterator::Mode::STACK))) { context_chain_element.materialized_object = scope_iterator_.ScopeObject(ScopeIterator::Mode::STACK); } if (scope_iterator_.HasContext()) { context_chain_element.wrapped_context = scope_iterator_.CurrentContext(); } - if (scope_type == ScopeIterator::ScopeTypeLocal) { - context_chain_element.whitelist = scope_iterator_.GetNonLocals(); + if (!scope_iterator_.InInnerScope()) { + context_chain_element.blacklist = scope_iterator_.GetLocals(); } context_chain_.push_back(context_chain_element); } @@ -214,7 +222,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate, scope_info->SetIsDebugEvaluateScope(); evaluation_context_ = factory->NewDebugEvaluateContext( evaluation_context_, scope_info, element.materialized_object, - element.wrapped_context, element.whitelist); + element.wrapped_context, element.blacklist); } } diff --git a/chromium/v8/src/debug/debug-evaluate.h b/chromium/v8/src/debug/debug-evaluate.h index 78198920502..b04bd76e22f 100644 --- a/chromium/v8/src/debug/debug-evaluate.h +++ b/chromium/v8/src/debug/debug-evaluate.h @@ -24,7 +24,7 @@ class FrameInspector; class DebugEvaluate : public AllStatic { public: static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source, - bool throw_on_side_effect); + debug::EvaluateGlobalMode mode); // Evaluate a piece of JavaScript in the context of a stack frame for // debugging. Things that need special attention are: @@ -83,7 +83,7 @@ class DebugEvaluate : public AllStatic { struct ContextChainElement { Handle<Context> wrapped_context; Handle<JSObject> materialized_object; - Handle<StringSet> whitelist; + Handle<StringSet> blacklist; }; Handle<Context> evaluation_context_; diff --git a/chromium/v8/src/debug/debug-frames.cc b/chromium/v8/src/debug/debug-frames.cc index 78c4c323fcd..19178d34ce0 100644 --- a/chromium/v8/src/debug/debug-frames.cc +++ b/chromium/v8/src/debug/debug-frames.cc @@ -70,7 +70,7 @@ int FrameInspector::GetParametersCount() { Handle<Object> FrameInspector::GetParameter(int index) { if (is_optimized_) return deoptimized_frame_->GetParameter(index); - // TODO(clemensh): Handle wasm_interpreted_frame_. + // TODO(clemensb): Handle wasm_interpreted_frame_. 
return handle(frame_->GetParameter(index), isolate_); } @@ -93,8 +93,10 @@ bool FrameInspector::ParameterIsShadowedByContextLocal( VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; + IsStaticFlag is_static_flag; return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag, - &maybe_assigned_flag) != -1; + &maybe_assigned_flag, + &is_static_flag) != -1; } RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared, diff --git a/chromium/v8/src/debug/debug-frames.h b/chromium/v8/src/debug/debug-frames.h index 274d10030af..78248614e20 100644 --- a/chromium/v8/src/debug/debug-frames.h +++ b/chromium/v8/src/debug/debug-frames.h @@ -5,6 +5,8 @@ #ifndef V8_DEBUG_DEBUG_FRAMES_H_ #define V8_DEBUG_DEBUG_FRAMES_H_ +#include <memory> + #include "src/deoptimizer/deoptimizer.h" #include "src/execution/isolate.h" #include "src/execution/v8threads.h" diff --git a/chromium/v8/src/debug/debug-interface.h b/chromium/v8/src/debug/debug-interface.h index 59bc6d08632..5f10e2a55a1 100644 --- a/chromium/v8/src/debug/debug-interface.h +++ b/chromium/v8/src/debug/debug-interface.h @@ -5,6 +5,8 @@ #ifndef V8_DEBUG_DEBUG_INTERFACE_H_ #define V8_DEBUG_DEBUG_INTERFACE_H_ +#include <memory> + #include "include/v8-inspector.h" #include "include/v8-util.h" #include "include/v8.h" @@ -157,6 +159,7 @@ class WasmScript : public Script { int NumFunctions() const; int NumImportedFunctions() const; + MemorySpan<const uint8_t> Bytecode() const; std::pair<int, int> GetFunctionRange(int function_index) const; @@ -468,9 +471,15 @@ enum class NativeAccessorType { int64_t GetNextRandomInt64(v8::Isolate* isolate); +enum class EvaluateGlobalMode { + kDefault, + kDisableBreaks, + kDisableBreaksAndThrowOnSideEffect +}; + V8_EXPORT_PRIVATE v8::MaybeLocal<v8::Value> EvaluateGlobal( v8::Isolate* isolate, v8::Local<v8::String> source, - bool throw_on_side_effect); + EvaluateGlobalMode mode); int GetDebuggingId(v8::Local<v8::Function> function); diff --git a/chromium/v8/src/debug/debug-scope-iterator.cc b/chromium/v8/src/debug/debug-scope-iterator.cc index 72e7dc2e452..38b51695e1d 100644 --- a/chromium/v8/src/debug/debug-scope-iterator.cc +++ b/chromium/v8/src/debug/debug-scope-iterator.cc @@ -49,7 +49,9 @@ namespace internal { DebugScopeIterator::DebugScopeIterator(Isolate* isolate, FrameInspector* frame_inspector) - : iterator_(isolate, frame_inspector) { + : iterator_( + isolate, frame_inspector, + ::v8::internal::ScopeIterator::ReparseStrategy::kFunctionLiteral) { if (!Done() && ShouldIgnore()) Advance(); } diff --git a/chromium/v8/src/debug/debug-scopes.cc b/chromium/v8/src/debug/debug-scopes.cc index 4569780d001..ecf5f20aebc 100644 --- a/chromium/v8/src/debug/debug-scopes.cc +++ b/chromium/v8/src/debug/debug-scopes.cc @@ -23,7 +23,7 @@ namespace v8 { namespace internal { ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector, - ScopeIterator::Option option) + ReparseStrategy strategy) : isolate_(isolate), frame_inspector_(frame_inspector), function_(frame_inspector_->GetFunction()), @@ -37,7 +37,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector, // We should not instantiate a ScopeIterator for wasm frames. 
DCHECK_NE(Script::TYPE_WASM, frame_inspector->GetScript()->type()); - TryParseAndRetrieveScopes(option); + TryParseAndRetrieveScopes(strategy); } ScopeIterator::~ScopeIterator() { delete info_; } @@ -72,7 +72,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, context_(generator->context(), isolate), script_(Script::cast(function_->shared().script()), isolate) { CHECK(function_->shared().IsSubjectToDebugging()); - TryParseAndRetrieveScopes(DEFAULT); + TryParseAndRetrieveScopes(ReparseStrategy::kFunctionLiteral); } void ScopeIterator::Restart() { @@ -84,7 +84,118 @@ void ScopeIterator::Restart() { UnwrapEvaluationContext(); } -void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) { +namespace { + +// Takes the scope of a parsed script, a function and a break location +// inside the function. The result is the innermost lexical scope around +// the break point, which serves as the starting point of the ScopeIterator. +// And the scope of the function that was passed in (called closure scope). +// +// The start scope is guaranteed to be either the closure scope itself, +// or a child of the closure scope. +class ScopeChainRetriever { + public: + ScopeChainRetriever(DeclarationScope* scope, Handle<JSFunction> function, + int position) + : scope_(scope), + break_scope_start_(function->shared().StartPosition()), + break_scope_end_(function->shared().EndPosition()), + is_default_constructor_( + IsDefaultConstructor(function->shared().kind())), + position_(position) { + DCHECK_NOT_NULL(scope); + RetrieveScopes(); + } + + DeclarationScope* ClosureScope() { return closure_scope_; } + Scope* StartScope() { return start_scope_; } + + private: + DeclarationScope* scope_; + const int break_scope_start_; + const int break_scope_end_; + const bool is_default_constructor_; + const int position_; + + DeclarationScope* closure_scope_ = nullptr; + Scope* start_scope_ = nullptr; + + void RetrieveScopes() { + if (is_default_constructor_) { + // Even though the DefaultBaseConstructor is a child of a Class scope, the + // source positions are *not* nested. This means the actual scope for the + // DefaultBaseConstructor needs to be found by doing a DFS. 
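  // Concretely: a synthesized default constructor has an empty source range
  // (start_position() == end_position()), so a containment test of the form
  // "start < position && position < end" can never select it; the DFS below
  // instead looks for the scope whose start and end both equal the break
  // position exactly.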
+ RetrieveScopeChainDefaultConstructor(scope_); + } else { + RetrieveScopeChain(); + } + DCHECK_NOT_NULL(closure_scope_); + DCHECK_NOT_NULL(start_scope_); + } + + bool RetrieveScopeChainDefaultConstructor(Scope* scope) { + const int beg_pos = scope->start_position(); + const int end_pos = scope->end_position(); + if (beg_pos == position_ && end_pos == position_) { + DCHECK(scope->is_function_scope()); + DCHECK( + IsDefaultConstructor(scope->AsDeclarationScope()->function_kind())); + start_scope_ = scope; + closure_scope_ = scope->AsDeclarationScope(); + return true; + } + + for (Scope* inner_scope = scope->inner_scope(); inner_scope != nullptr; + inner_scope = inner_scope->sibling()) { + if (RetrieveScopeChainDefaultConstructor(inner_scope)) return true; + } + return false; + } + + void RetrieveScopeChain() { + Scope* parent = nullptr; + Scope* current = scope_; + SetClosureScopeIfFound(current); + + while (parent != current) { + parent = current; + for (Scope* inner_scope = current->inner_scope(); inner_scope != nullptr; + inner_scope = inner_scope->sibling()) { + if (SetClosureScopeIfFound(inner_scope) || + ContainsPosition(inner_scope)) { + current = inner_scope; + break; + } + } + } + start_scope_ = current; + } + + bool SetClosureScopeIfFound(Scope* scope) { + const int start = scope->start_position(); + const int end = scope->end_position(); + if (start == break_scope_start_ && end == break_scope_end_) { + closure_scope_ = scope->AsDeclarationScope(); + return true; + } + return false; + } + + bool ContainsPosition(Scope* scope) { + const int start = scope->start_position(); + const int end = scope->end_position(); + // In case the closure_scope_ hasn't been found yet, we are less strict + // about recursing downwards. This might be the case for nested arrow + // functions that have the same end position. + const bool position_fits_end = + closure_scope_ ? position_ < end : position_ <= end; + return start < position_ && position_fits_end; + } +}; + +} // namespace + +void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) { // Catch the case when the debugger stops in an internal function. Handle<SharedFunctionInfo> shared_info(function_->shared(), isolate_); Handle<ScopeInfo> scope_info(shared_info->scope_info(), isolate_); @@ -105,7 +216,6 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) { return; } - DCHECK_NE(IGNORE_NESTED_SCOPES, option); bool ignore_nested_scopes = false; if (shared_info->HasBreakInfo() && frame_inspector_ != nullptr) { // The source position at return is always the end of the function, @@ -123,44 +233,47 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) { } // Reparse the code and analyze the scopes. - // Check whether we are in global, eval or function code. - if (scope_info->scope_type() == FUNCTION_SCOPE) { - // Inner function. + // Depending on the choosen strategy, the whole script or just + // the closure is re-parsed for function scopes. + Handle<Script> script(Script::cast(shared_info->script()), isolate_); + if (scope_info->scope_type() == FUNCTION_SCOPE && + strategy == ReparseStrategy::kFunctionLiteral) { info_ = new ParseInfo(isolate_, shared_info); } else { - // Global or eval code. 
- Handle<Script> script(Script::cast(shared_info->script()), isolate_); info_ = new ParseInfo(isolate_, script); - if (scope_info->scope_type() == EVAL_SCOPE) { - info_->set_eval(); - if (!context_->IsNativeContext()) { - info_->set_outer_scope_info(handle(context_->scope_info(), isolate_)); - } - // Language mode may be inherited from the eval caller. - // Retrieve it from shared function info. - info_->set_language_mode(shared_info->language_mode()); - } else if (scope_info->scope_type() == MODULE_SCOPE) { - DCHECK(info_->is_module()); - } else { - DCHECK_EQ(SCRIPT_SCOPE, scope_info->scope_type()); + info_->set_eager(); + } + + if (scope_info->scope_type() == EVAL_SCOPE || script->is_wrapped()) { + info_->set_eval(); + if (!context_->IsNativeContext()) { + info_->set_outer_scope_info(handle(context_->scope_info(), isolate_)); } + // Language mode may be inherited from the eval caller. + // Retrieve it from shared function info. + info_->set_language_mode(shared_info->language_mode()); + } else if (scope_info->scope_type() == MODULE_SCOPE) { + DCHECK(info_->is_module()); + } else { + DCHECK(scope_info->scope_type() == SCRIPT_SCOPE || + scope_info->scope_type() == FUNCTION_SCOPE); } if (parsing::ParseAny(info_, shared_info, isolate_) && Rewriter::Rewrite(info_)) { info_->ast_value_factory()->Internalize(isolate_); - closure_scope_ = info_->literal()->scope(); + DeclarationScope* literal_scope = info_->literal()->scope(); - if (option == COLLECT_NON_LOCALS) { - DCHECK(non_locals_.is_null()); - non_locals_ = info_->literal()->scope()->CollectNonLocals( - isolate_, info_, StringSet::New(isolate_)); - if (!closure_scope_->has_this_declaration() && - closure_scope_->HasThisReference()) { - non_locals_ = StringSet::Add(isolate_, non_locals_, - isolate_->factory()->this_string()); - } - } + ScopeChainRetriever scope_chain_retriever(literal_scope, function_, + GetSourcePosition()); + start_scope_ = scope_chain_retriever.StartScope(); + current_scope_ = start_scope_; + + // In case of a FUNCTION_SCOPE, the ScopeIterator expects + // {closure_scope_} to be set to the scope of the function. + closure_scope_ = scope_info->scope_type() == FUNCTION_SCOPE + ? scope_chain_retriever.ClosureScope() + : literal_scope; CHECK(DeclarationScope::Analyze(info_)); if (ignore_nested_scopes) { @@ -169,9 +282,8 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) { if (closure_scope_->NeedsContext()) { context_ = handle(context_->closure_context(), isolate_); } - } else { - RetrieveScopeChain(closure_scope_); } + UnwrapEvaluationContext(); } else { // A failed reparse indicates that the preparser has diverged from the @@ -260,6 +372,38 @@ bool ScopeIterator::HasContext() const { return !InInnerScope() || current_scope_->NeedsContext(); } +void ScopeIterator::AdvanceOneScope() { + if (current_scope_->NeedsContext()) { + DCHECK(!context_->previous().is_null()); + context_ = handle(context_->previous(), isolate_); + } + DCHECK(current_scope_->outer_scope() != nullptr); + current_scope_ = current_scope_->outer_scope(); +} + +void ScopeIterator::AdvanceToNonHiddenScope() { + do { + AdvanceOneScope(); + } while (current_scope_->is_hidden()); +} + +void ScopeIterator::AdvanceContext() { + DCHECK(!context_->IsNativeContext()); + context_ = handle(context_->previous(), isolate_); + + // While advancing one context, we need to advance at least one + // scope, but until we hit the next scope that actually requires + // a context. 
All the locals collected along the way build the + // blacklist for debug-evaluate for this context. + locals_ = StringSet::New(isolate_); + do { + if (!current_scope_ || !current_scope_->outer_scope()) break; + + current_scope_ = current_scope_->outer_scope(); + CollectLocalsFromCurrentScope(); + } while (!current_scope_->NeedsContext()); +} + void ScopeIterator::Next() { DCHECK(!Done()); @@ -283,19 +427,17 @@ void ScopeIterator::Next() { context_ = handle(context_->previous(), isolate_); } } else if (!inner) { - DCHECK(!context_->IsNativeContext()); - context_ = handle(context_->previous(), isolate_); + AdvanceContext(); } else { DCHECK_NOT_NULL(current_scope_); - do { - if (current_scope_->NeedsContext()) { - DCHECK(!context_->previous().is_null()); - context_ = handle(context_->previous(), isolate_); - } - DCHECK_IMPLIES(InInnerScope(), current_scope_->outer_scope() != nullptr); - current_scope_ = current_scope_->outer_scope(); - // Repeat to skip hidden scopes. - } while (current_scope_->is_hidden()); + AdvanceToNonHiddenScope(); + + if (!InInnerScope() && current_scope_ != closure_scope_) { + // Edge case when we just go past {closure_scope_}. This case + // already needs to start collecting locals for the blacklist. + locals_ = StringSet::New(isolate_); + CollectLocalsFromCurrentScope(); + } } UnwrapEvaluationContext(); @@ -453,7 +595,20 @@ bool ScopeIterator::SetVariableValue(Handle<String> name, return false; } -Handle<StringSet> ScopeIterator::GetNonLocals() { return non_locals_; } +bool ScopeIterator::ClosureScopeHasThisReference() const { + return !closure_scope_->has_this_declaration() && + closure_scope_->HasThisReference(); +} + +void ScopeIterator::CollectLocalsFromCurrentScope() { + DCHECK(locals_->IsStringSet()); + for (Variable* var : *current_scope_->locals()) { + if (var->location() == VariableLocation::PARAMETER || + var->location() == VariableLocation::LOCAL) { + locals_ = StringSet::Add(isolate_, locals_, var->name()); + } + } +} #ifdef DEBUG // Debug print of the content of the current scope. 
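// Rough illustration of why these locals are collected (hypothetical
// source): when paused inside g() in
//   function f() { let x = 1; function g() { /* break */ } g(); }
// `x` lives only on f's stack frame (g never references it, so it is not
// context-allocated). Adding "x" to the blacklist attached to the wrapped
// outer context makes a later debug-evaluate lookup of `x` stop there and
// fail, rather than silently resolving to an unrelated `x` in some
// enclosing script or with scope.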
@@ -524,31 +679,6 @@ int ScopeIterator::GetSourcePosition() { } } -void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) { - DCHECK_NOT_NULL(scope); - - const int position = GetSourcePosition(); - - Scope* parent = nullptr; - Scope* current = scope; - while (parent != current) { - parent = current; - for (Scope* inner_scope = current->inner_scope(); inner_scope != nullptr; - inner_scope = inner_scope->sibling()) { - int beg_pos = inner_scope->start_position(); - int end_pos = inner_scope->end_position(); - DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden()); - if (beg_pos < position && position < end_pos) { - current = inner_scope; - break; - } - } - } - - start_scope_ = current; - current_scope_ = current; -} - void ScopeIterator::VisitScriptScope(const Visitor& visitor) const { Handle<JSGlobalObject> global(context_->global_object(), isolate_); Handle<ScriptContextTable> script_contexts( @@ -884,9 +1014,10 @@ bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name, VariableMode mode; InitializationFlag flag; MaybeAssignedFlag maybe_assigned_flag; + IsStaticFlag is_static_flag; int slot_index = ScopeInfo::ContextSlotIndex(context_->scope_info(), *variable_name, &mode, - &flag, &maybe_assigned_flag); + &flag, &maybe_assigned_flag, &is_static_flag); if (slot_index < 0) return false; context_->set(slot_index, *new_value); diff --git a/chromium/v8/src/debug/debug-scopes.h b/chromium/v8/src/debug/debug-scopes.h index 5c3361619a2..f53457ef46e 100644 --- a/chromium/v8/src/debug/debug-scopes.h +++ b/chromium/v8/src/debug/debug-scopes.h @@ -41,10 +41,13 @@ class ScopeIterator { static const int kScopeDetailsFunctionIndex = 5; static const int kScopeDetailsSize = 6; - enum Option { DEFAULT, IGNORE_NESTED_SCOPES, COLLECT_NON_LOCALS }; + enum class ReparseStrategy { + kScript, + kFunctionLiteral, + }; ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector, - Option options = DEFAULT); + ReparseStrategy strategy); ScopeIterator(Isolate* isolate, Handle<JSFunction> function); ScopeIterator(Isolate* isolate, Handle<JSGeneratorObject> generator); @@ -77,8 +80,10 @@ class ScopeIterator { // Set variable value and return true on success. bool SetVariableValue(Handle<String> variable_name, Handle<Object> new_value); + bool ClosureScopeHasThisReference() const; + // Populate the set with collected non-local variable names. - Handle<StringSet> GetNonLocals(); + Handle<StringSet> GetLocals() { return locals_; } // Similar to JSFunction::GetName return the function's name or it's inferred // name. 
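// The two strategies correspond to the two kinds of callers seen in this
// patch (sketch, simplified; "frame_inspector" stands in for an existing
// FrameInspector owned by the caller):
//
//   // debug-evaluate reparses the whole script so the blacklist can be
//   // built while walking past the paused function:
//   ScopeIterator it(isolate, &frame_inspector,
//                    ScopeIterator::ReparseStrategy::kScript);
//
//   // the inspector's scope view only needs the paused function itself:
//   ScopeIterator scopes(isolate, &frame_inspector,
//                        ScopeIterator::ReparseStrategy::kFunctionLiteral);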
@@ -110,7 +115,7 @@ class ScopeIterator { Handle<JSFunction> function_; Handle<Context> context_; Handle<Script> script_; - Handle<StringSet> non_locals_; + Handle<StringSet> locals_; DeclarationScope* closure_scope_ = nullptr; Scope* start_scope_ = nullptr; Scope* current_scope_ = nullptr; @@ -120,11 +125,14 @@ class ScopeIterator { return frame_inspector_->javascript_frame(); } - int GetSourcePosition(); + void AdvanceOneScope(); + void AdvanceToNonHiddenScope(); + void AdvanceContext(); + void CollectLocalsFromCurrentScope(); - void TryParseAndRetrieveScopes(ScopeIterator::Option option); + int GetSourcePosition(); - void RetrieveScopeChain(DeclarationScope* scope); + void TryParseAndRetrieveScopes(ReparseStrategy strategy); void UnwrapEvaluationContext(); diff --git a/chromium/v8/src/debug/debug-stack-trace-iterator.cc b/chromium/v8/src/debug/debug-stack-trace-iterator.cc index 4f691e63a22..f9986885b50 100644 --- a/chromium/v8/src/debug/debug-stack-trace-iterator.cc +++ b/chromium/v8/src/debug/debug-stack-trace-iterator.cc @@ -87,25 +87,28 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const { // Arrow function defined in top level function without references to // variables may have NativeContext as context. if (!context->IsFunctionContext()) return v8::MaybeLocal<v8::Value>(); - ScopeIterator scope_iterator(isolate_, frame_inspector_.get(), - ScopeIterator::COLLECT_NON_LOCALS); + ScopeIterator scope_iterator( + isolate_, frame_inspector_.get(), + ScopeIterator::ReparseStrategy::kFunctionLiteral); // We lookup this variable in function context only when it is used in arrow // function otherwise V8 can optimize it out. - if (!scope_iterator.GetNonLocals()->Has(isolate_, - isolate_->factory()->this_string())) + if (!scope_iterator.ClosureScopeHasThisReference()) { return v8::MaybeLocal<v8::Value>(); + } DisallowHeapAllocation no_gc; VariableMode mode; InitializationFlag flag; MaybeAssignedFlag maybe_assigned_flag; + IsStaticFlag is_static_flag; int slot_index = ScopeInfo::ContextSlotIndex( context->scope_info(), ReadOnlyRoots(isolate_->heap()).this_string(), - &mode, &flag, &maybe_assigned_flag); + &mode, &flag, &maybe_assigned_flag, &is_static_flag); if (slot_index < 0) return v8::MaybeLocal<v8::Value>(); Handle<Object> value = handle(context->get(slot_index), isolate_); if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>(); return Utils::ToLocal(value); } + Handle<Object> value = frame_inspector_->GetReceiver(); if (value.is_null() || (value->IsSmi() || !value->IsTheHole(isolate_))) { return Utils::ToLocal(value); diff --git a/chromium/v8/src/debug/debug-stack-trace-iterator.h b/chromium/v8/src/debug/debug-stack-trace-iterator.h index 15b8a85c5e8..3319bc15f50 100644 --- a/chromium/v8/src/debug/debug-stack-trace-iterator.h +++ b/chromium/v8/src/debug/debug-stack-trace-iterator.h @@ -5,6 +5,8 @@ #ifndef V8_DEBUG_DEBUG_STACK_TRACE_ITERATOR_H_ #define V8_DEBUG_DEBUG_STACK_TRACE_ITERATOR_H_ +#include <memory> + #include "src/debug/debug-frames.h" #include "src/debug/debug-interface.h" #include "src/execution/frames.h" diff --git a/chromium/v8/src/debug/debug-type-profile.h b/chromium/v8/src/debug/debug-type-profile.h index 16f739e4536..f06af0c4713 100644 --- a/chromium/v8/src/debug/debug-type-profile.h +++ b/chromium/v8/src/debug/debug-type-profile.h @@ -5,6 +5,7 @@ #ifndef V8_DEBUG_DEBUG_TYPE_PROFILE_H_ #define V8_DEBUG_DEBUG_TYPE_PROFILE_H_ +#include <memory> #include <vector> #include "src/debug/debug-interface.h" diff --git 
a/chromium/v8/src/debug/debug.cc b/chromium/v8/src/debug/debug.cc index aa308150acb..27f30d8c058 100644 --- a/chromium/v8/src/debug/debug.cc +++ b/chromium/v8/src/debug/debug.cc @@ -622,9 +622,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script, Handle<BreakPoint> break_point = isolate_->factory()->NewBreakPoint(*id, condition); if (script->type() == Script::TYPE_WASM) { - Handle<WasmModuleObject> module_object( - WasmModuleObject::cast(script->wasm_module_object()), isolate_); - return WasmModuleObject::SetBreakPoint(module_object, source_position, + return WasmModuleObject::SetBreakPoint(script, source_position, break_point); } @@ -1039,7 +1037,7 @@ void Debug::PrepareStep(StepAction step_action) { // and deoptimize every frame along the way. bool in_current_frame = true; for (; !frames_it.done(); frames_it.Advance()) { - // TODO(clemensh): Implement stepping out from JS to wasm. + // TODO(clemensb): Implement stepping out from JS to wasm. if (frames_it.frame()->is_wasm()) continue; JavaScriptFrame* frame = JavaScriptFrame::cast(frames_it.frame()); if (last_step_action() == StepIn) { @@ -1069,7 +1067,7 @@ void Debug::PrepareStep(StepAction step_action) { thread_local_.target_frame_count_ = current_frame_count; V8_FALLTHROUGH; case StepIn: - // TODO(clemensh): Implement stepping from JS into wasm. + // TODO(clemensb): Implement stepping from JS into wasm. FloodWithOneShot(shared); break; } @@ -1171,7 +1169,7 @@ void Debug::PrepareFunctionForDebugExecution( if (debug_info->flags() & DebugInfo::kPreparedForDebugExecution) return; // Make a copy of the bytecode array if available. - Handle<Object> maybe_original_bytecode_array = + Handle<HeapObject> maybe_original_bytecode_array = isolate_->factory()->undefined_value(); if (shared->HasBytecodeArray()) { Handle<BytecodeArray> original_bytecode_array = @@ -1250,7 +1248,7 @@ void Debug::InstallDebugBreakTrampoline() { JSObject object = JSObject::cast(obj); DescriptorArray descriptors = object.map().instance_descriptors(); - for (int i = 0; i < object.map().NumberOfOwnDescriptors(); ++i) { + for (InternalIndex i : object.map().IterateOwnDescriptors()) { if (descriptors.GetDetails(i).kind() == PropertyKind::kAccessor) { Object value = descriptors.GetStrongValue(i); if (!value.IsAccessorPair()) continue; @@ -1901,6 +1899,7 @@ bool Debug::CanBreakAtEntry(Handle<SharedFunctionInfo> shared) { bool Debug::SetScriptSource(Handle<Script> script, Handle<String> source, bool preview, debug::LiveEditResult* result) { DebugScope debug_scope(this); + feature_tracker()->Track(DebugFeatureTracker::kLiveEdit); running_live_edit_ = true; LiveEdit::PatchScript(isolate_, script, source, preview, result); running_live_edit_ = false; @@ -1968,11 +1967,11 @@ void Debug::UpdateState() { if (is_active) { // Note that the debug context could have already been loaded to // bootstrap test cases. 
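  // The rename below narrows what gets switched off while the debugger is
  // active: judging by the new names, only the script and eval compilation
  // caches are disabled, while other caches (e.g. for regexps) keep working.
  // The effect relied on here is unchanged: scripts compiled while debugging
  // is active are not served from the compilation cache.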
- isolate_->compilation_cache()->Disable(); + isolate_->compilation_cache()->DisableScriptAndEval(); is_active = true; feature_tracker()->Track(DebugFeatureTracker::kActive); } else { - isolate_->compilation_cache()->Enable(); + isolate_->compilation_cache()->EnableScriptAndEval(); Unload(); } is_active_ = is_active; diff --git a/chromium/v8/src/debug/debug.h b/chromium/v8/src/debug/debug.h index eef89f93725..73bcceb4a23 100644 --- a/chromium/v8/src/debug/debug.h +++ b/chromium/v8/src/debug/debug.h @@ -5,6 +5,7 @@ #ifndef V8_DEBUG_DEBUG_H_ #define V8_DEBUG_DEBUG_H_ +#include <memory> #include <vector> #include "src/codegen/source-position-table.h" diff --git a/chromium/v8/src/debug/interface-types.h b/chromium/v8/src/debug/interface-types.h index 2375827b1bf..a4204bb739c 100644 --- a/chromium/v8/src/debug/interface-types.h +++ b/chromium/v8/src/debug/interface-types.h @@ -129,7 +129,7 @@ class ConsoleCallArguments : private v8::FunctionCallbackInfo<v8::Value> { } explicit ConsoleCallArguments(const v8::FunctionCallbackInfo<v8::Value>&); - explicit ConsoleCallArguments(internal::BuiltinArguments&); + explicit ConsoleCallArguments(const internal::BuiltinArguments&); }; class ConsoleContext { diff --git a/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc index 2befb70264a..45ff06eb701 100644 --- a/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc +++ b/chromium/v8/src/deoptimizer/arm/deoptimizer-arm.cc @@ -123,6 +123,17 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, __ vstr(d0, r1, dst_offset); } + // Mark the stack as not iterable for the CPU profiler which won't be able to + // walk the stack without the return address. + { + UseScratchRegisterScope temps(masm); + Register is_iterable = temps.Acquire(); + Register zero = r4; + __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ mov(zero, Operand(0)); + __ strb(zero, MemOperand(is_iterable)); + } + // Remove the saved registers from the stack. __ add(sp, sp, Operand(kSavedRegistersAreaSize)); @@ -209,6 +220,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, // Restore the registers from the stack. __ ldm(ia_w, sp, restored_regs); // all but pc registers. + { + UseScratchRegisterScope temps(masm); + Register is_iterable = temps.Acquire(); + Register one = r4; + __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ mov(one, Operand(1)); + __ strb(one, MemOperand(is_iterable)); + } + // Remove sp, lr and pc. __ Drop(3); { @@ -218,6 +238,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, __ pop(lr); __ Jump(scratch); } + __ stop(); } diff --git a/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc index 82ae764e506..17091259d6e 100644 --- a/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc +++ b/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc @@ -189,6 +189,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(), saved_double_registers, x2, x3, kDoubleRegistersOffset); + // Mark the stack as not iterable for the CPU profiler which won't be able to + // walk the stack without the return address. 
+ { + UseScratchRegisterScope temps(masm); + Register is_iterable = temps.AcquireX(); + __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ strb(xzr, MemOperand(is_iterable)); + } + // Remove the saved registers from the stack. DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0); __ Drop(kSavedRegistersAreaSize / kXRegSize); @@ -251,6 +260,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, RestoreRegList(masm, saved_double_registers, x1, FrameDescription::double_registers_offset()); + { + UseScratchRegisterScope temps(masm); + Register is_iterable = temps.AcquireX(); + Register one = x4; + __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ Mov(one, Operand(1)); + __ strb(one, MemOperand(is_iterable)); + } + // TODO(all): ARM copies a lot (if not all) of the last output frame onto the // stack, then pops it all into registers. Here, we try to load it directly // into the relevant registers. Is this correct? If so, we should improve the diff --git a/chromium/v8/src/deoptimizer/deoptimize-reason.h b/chromium/v8/src/deoptimizer/deoptimize-reason.h index ac2273460a3..71eaa8b6267 100644 --- a/chromium/v8/src/deoptimizer/deoptimize-reason.h +++ b/chromium/v8/src/deoptimizer/deoptimize-reason.h @@ -48,7 +48,6 @@ namespace internal { V(NotASymbol, "not a Symbol") \ V(OutOfBounds, "out of bounds") \ V(Overflow, "overflow") \ - V(ReceiverNotAGlobalProxy, "receiver was not a global proxy") \ V(Smi, "Smi") \ V(Unknown, "(unknown)") \ V(ValueMismatch, "value mismatch") \ diff --git a/chromium/v8/src/deoptimizer/deoptimizer.cc b/chromium/v8/src/deoptimizer/deoptimizer.cc index 64551c68996..fcb4c27d0b7 100644 --- a/chromium/v8/src/deoptimizer/deoptimizer.cc +++ b/chromium/v8/src/deoptimizer/deoptimizer.cc @@ -357,6 +357,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) { for (Code code : codes) { isolate->heap()->InvalidateCodeDeoptimizationData(code); } + + native_context.GetOSROptimizedCodeCache().EvictMarkedCode( + native_context.GetIsolate()); } void Deoptimizer::DeoptimizeAll(Isolate* isolate) { @@ -375,6 +378,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) { while (!context.IsUndefined(isolate)) { NativeContext native_context = NativeContext::cast(context); MarkAllCodeForContext(native_context); + OSROptimizedCodeCache::Clear(native_context); DeoptimizeMarkedCodeForContext(native_context); context = native_context.next_context_link(); } @@ -432,6 +436,13 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) { code.set_deopt_already_counted(true); } DeoptimizeMarkedCodeForContext(function.context().native_context()); + // TODO(mythria): Ideally EvictMarkCode should compact the cache without + // having to explicitly call this. We don't do this currently because + // compacting causes GC and DeoptimizeMarkedCodeForContext uses raw + // pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove + // this call from here. + OSROptimizedCodeCache::Compact( + Handle<NativeContext>(function.context().native_context(), isolate)); } } @@ -3640,8 +3651,7 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked( // Set markers for the double properties. 
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); - int field_count = map->NumberOfOwnDescriptors(); - for (int i = 0; i < field_count; i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { FieldIndex index = FieldIndex::ForDescriptor(*map, i); if (descriptors->GetDetails(i).representation().IsDouble() && !index.is_inobject()) { @@ -3673,10 +3683,9 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot, Handle<ByteArray> object_storage = AllocateStorageFor(slot); // Now we handle the interesting (JSObject) case. Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate()); - int field_count = map->NumberOfOwnDescriptors(); // Set markers for the double properties. - for (int i = 0; i < field_count; i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { FieldIndex index = FieldIndex::ForDescriptor(*map, i); if (descriptors->GetDetails(i).representation().IsDouble() && index.is_inobject()) { @@ -3712,8 +3721,7 @@ void TranslatedState::InitializeJSObjectAt( CHECK_GE(slot->GetChildrenCount(), 2); // Notify the concurrent marker about the layout change. - isolate()->heap()->NotifyObjectLayoutChange( - *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation); + isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation); // Fill the property array field. { @@ -3772,8 +3780,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt( } // Notify the concurrent marker about the layout change. - isolate()->heap()->NotifyObjectLayoutChange( - *object_storage, slot->GetChildrenCount() * kTaggedSize, no_allocation); + isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_allocation); // Write the fields to the object. for (int i = 1; i < slot->GetChildrenCount(); i++) { diff --git a/chromium/v8/src/deoptimizer/deoptimizer.h b/chromium/v8/src/deoptimizer/deoptimizer.h index 6d0a350aace..beb2a9aa50e 100644 --- a/chromium/v8/src/deoptimizer/deoptimizer.h +++ b/chromium/v8/src/deoptimizer/deoptimizer.h @@ -488,14 +488,14 @@ class Deoptimizer : public Malloced { DeoptimizeKind* type); // Code generation support. 
- static int input_offset() { return OFFSET_OF(Deoptimizer, input_); } + static int input_offset() { return offsetof(Deoptimizer, input_); } static int output_count_offset() { - return OFFSET_OF(Deoptimizer, output_count_); + return offsetof(Deoptimizer, output_count_); } - static int output_offset() { return OFFSET_OF(Deoptimizer, output_); } + static int output_offset() { return offsetof(Deoptimizer, output_); } static int caller_frame_top_offset() { - return OFFSET_OF(Deoptimizer, caller_frame_top_); + return offsetof(Deoptimizer, caller_frame_top_); } V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate); @@ -731,11 +731,11 @@ class FrameDescription { int parameter_count() { return parameter_count_; } static int registers_offset() { - return OFFSET_OF(FrameDescription, register_values_.registers_); + return offsetof(FrameDescription, register_values_.registers_); } static int double_registers_offset() { - return OFFSET_OF(FrameDescription, register_values_.double_registers_); + return offsetof(FrameDescription, register_values_.double_registers_); } static int frame_size_offset() { diff --git a/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc index 864e9dbe368..4036b73443b 100644 --- a/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc +++ b/chromium/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc @@ -113,6 +113,17 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, __ stfd(d0, MemOperand(r4, dst_offset)); } + // Mark the stack as not iterable for the CPU profiler which won't be able to + // walk the stack without the return address. + { + UseScratchRegisterScope temps(masm); + Register is_iterable = temps.Acquire(); + Register zero = r7; + __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ li(zero, Operand(0)); + __ stb(zero, MemOperand(is_iterable)); + } + // Remove the saved registers from the stack. 
__ addi(sp, sp, Operand(kSavedRegistersAreaSize)); @@ -210,12 +221,22 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, { UseScratchRegisterScope temps(masm); + Register is_iterable = temps.Acquire(); + Register one = r7; + __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ li(one, Operand(1)); + __ stb(one, MemOperand(is_iterable)); + } + + { + UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ pop(scratch); // get continuation, leave pc on stack __ pop(r0); __ mtlr(r0); __ Jump(scratch); } + __ stop(); } diff --git a/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc index 616a57ba0e4..7ea6e56b8cf 100644 --- a/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc +++ b/chromium/v8/src/deoptimizer/s390/deoptimizer-s390.cc @@ -40,7 +40,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, } // Push all GPRs onto the stack - __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize)); + __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize)); __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers __ mov(r1, Operand(ExternalReference::Create( @@ -48,7 +48,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, __ StoreP(fp, MemOperand(r1)); const int kSavedRegistersAreaSize = - (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; + (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize; // The bailout id is passed using r10 __ LoadRR(r4, r10); @@ -79,7 +79,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, // r6: Fp-to-sp delta. // Parm6: isolate is passed on the stack. __ mov(r7, Operand(ExternalReference::isolate_address(isolate))); - __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); + __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize)); // Call Deoptimizer::New(). { @@ -94,13 +94,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, // Copy core registers into FrameDescription::registers_[kNumRegisters]. // DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); // __ mvc(MemOperand(r3, FrameDescription::registers_offset()), - // MemOperand(sp), kNumberOfRegisters * kPointerSize); + // MemOperand(sp), kNumberOfRegisters * kSystemPointerSize); // Copy core registers into FrameDescription::registers_[kNumRegisters]. 
// TODO(john.yan): optimize the following code by using mvc instruction DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); for (int i = 0; i < kNumberOfRegisters; i++) { - int offset = (i * kPointerSize) + FrameDescription::registers_offset(); - __ LoadP(r4, MemOperand(sp, i * kPointerSize)); + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); + __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize)); __ StoreP(r4, MemOperand(r3, offset)); } @@ -110,12 +111,24 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { int code = config->GetAllocatableDoubleCode(i); int dst_offset = code * kDoubleSize + double_regs_offset; - int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; + int src_offset = + code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize; // TODO(joransiu): MVC opportunity __ LoadDouble(d0, MemOperand(sp, src_offset)); __ StoreDouble(d0, MemOperand(r3, dst_offset)); } + // Mark the stack as not iterable for the CPU profiler which won't be able to + // walk the stack without the return address. + { + UseScratchRegisterScope temps(masm); + Register is_iterable = temps.Acquire(); + Register zero = r6; + __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ lhi(zero, Operand(0)); + __ StoreByte(zero, MemOperand(is_iterable)); + } + // Remove the saved registers from the stack. __ la(sp, MemOperand(sp, kSavedRegistersAreaSize)); @@ -134,7 +147,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, __ bind(&pop_loop); __ pop(r6); __ StoreP(r6, MemOperand(r5, 0)); - __ la(r5, MemOperand(r5, kPointerSize)); + __ la(r5, MemOperand(r5, kSystemPointerSize)); __ bind(&pop_loop_header); __ CmpP(r4, sp); __ bne(&pop_loop); @@ -158,7 +171,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, // r3 = one past the last FrameDescription**. __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset())); __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_. - __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2)); __ AddP(r3, r6, r3); __ b(&outer_loop_header, Label::kNear); @@ -178,7 +191,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, __ CmpP(r5, Operand::Zero()); __ bne(&inner_push_loop); // test for gt? - __ AddP(r6, r6, Operand(kPointerSize)); + __ AddP(r6, r6, Operand(kSystemPointerSize)); __ bind(&outer_loop_header); __ CmpP(r6, r3); __ blt(&outer_push_loop); @@ -200,15 +213,26 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, // Restore the registers from the last output frame. 
__ LoadRR(r1, r4); for (int i = kNumberOfRegisters - 1; i > 0; i--) { - int offset = (i * kPointerSize) + FrameDescription::registers_offset(); + int offset = + (i * kSystemPointerSize) + FrameDescription::registers_offset(); if ((restored_regs & (1 << i)) != 0) { __ LoadP(ToRegister(i), MemOperand(r1, offset)); } } + { + UseScratchRegisterScope temps(masm); + Register is_iterable = temps.Acquire(); + Register one = r6; + __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate)); + __ lhi(one, Operand(1)); + __ StoreByte(one, MemOperand(is_iterable)); + } + __ pop(ip); // get continuation, leave pc on stack __ pop(r14); __ Jump(ip); + __ stop(); } diff --git a/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc index 29c81f195c1..03d7c759c09 100644 --- a/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc +++ b/chromium/v8/src/deoptimizer/x64/deoptimizer-x64.cc @@ -81,7 +81,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, // On windows put the arguments on the stack (PrepareCallCFunction // has created space for this). On linux pass the arguments in r8 and r9. -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5); __ LoadAddress(arg5, ExternalReference::isolate_address(isolate)); __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5); diff --git a/chromium/v8/src/diagnostics/arm/disasm-arm.cc b/chromium/v8/src/diagnostics/arm/disasm-arm.cc index 51b6594e70e..891ab0662e0 100644 --- a/chromium/v8/src/diagnostics/arm/disasm-arm.cc +++ b/chromium/v8/src/diagnostics/arm/disasm-arm.cc @@ -590,7 +590,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) { } case 't': { // 'target: target of branch instructions DCHECK(STRING_STARTS_WITH(format, "target")); - int off = (instr->SImmed24Value() << 2) + 8; + int off = (static_cast<uint32_t>(instr->SImmed24Value()) << 2) + 8u; out_buffer_pos_ += SNPrintF( out_buffer_ + out_buffer_pos_, "%+d -> %s", off, converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off)); @@ -1890,6 +1890,17 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) { op, size, Vd, Vn, Vm); break; } + case 0x4: { + if (instr->Bit(4) == 0) { + // vshl.s<size> Qd, Qm, Qn. + out_buffer_pos_ += + SNPrintF(out_buffer_ + out_buffer_pos_, + "vshl.s%d q%d, q%d, q%d", size, Vd, Vm, Vn); + } else { + Unknown(instr); + } + break; + } case 0x6: { // vmin/vmax.s<size> Qd, Qm, Qn. const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax"; @@ -2083,6 +2094,17 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) { op, size, Vd, Vn, Vm); break; } + case 0x4: { + if (instr->Bit(4) == 0) { + // vshl.u<size> Qd, Qm, Qn. + out_buffer_pos_ += + SNPrintF(out_buffer_ + out_buffer_pos_, + "vshl.u%d q%d, q%d, q%d", size, Vd, Vm, Vn); + } else { + Unknown(instr); + } + break; + } case 0x6: { // vmin/vmax.u<size> Qd, Qm, Qn. const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax"; diff --git a/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc b/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc index 7141cdf2837..db14689ad1c 100644 --- a/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc +++ b/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc @@ -3840,8 +3840,8 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr, case 'L': { switch (format[2]) { case 'L': { // ILLiteral - Immediate Load Literal. 
- AppendToOutput("pc%+" PRId32, instr->ImmLLiteral() - << kLoadLiteralScaleLog2); + AppendToOutput("pc%+" PRId32, + instr->ImmLLiteral() * kLoadLiteralScale); return 9; } case 'S': { // ILS - Immediate Load/Store. @@ -3960,7 +3960,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr, unsigned rd_index, rn_index; unsigned imm5 = instr->ImmNEON5(); unsigned imm4 = instr->ImmNEON4(); - int tz = CountTrailingZeros(imm5, 32); + int tz = base::bits::CountTrailingZeros(imm5); if (tz <= 3) { // Defined for 0 <= tz <= 3 only. rd_index = imm5 >> (tz + 1); rn_index = imm4 >> tz; @@ -4179,7 +4179,7 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr, default: UNREACHABLE(); } - offset <<= kInstrSizeLog2; + offset *= kInstrSize; char sign = '+'; if (offset < 0) { sign = '-'; diff --git a/chromium/v8/src/diagnostics/basic-block-profiler.h b/chromium/v8/src/diagnostics/basic-block-profiler.h index 960b4b43e11..9639e0b6615 100644 --- a/chromium/v8/src/diagnostics/basic-block-profiler.h +++ b/chromium/v8/src/diagnostics/basic-block-profiler.h @@ -7,6 +7,7 @@ #include <iosfwd> #include <list> +#include <memory> #include <string> #include <vector> diff --git a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc index e8c9588bbe4..ead0a5a7094 100644 --- a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc +++ b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc @@ -1057,6 +1057,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) { AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); break; + case 0x51: + AppendToBuffer("vsqrtps %s,", NameOfXMMRegister(regop)); + current += PrintRightXMMOperand(current); + break; case 0x52: AppendToBuffer("vrsqrtps %s,", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); @@ -1075,6 +1079,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) { NameOfXMMRegister(vvvv)); current += PrintRightXMMOperand(current); break; + case 0x56: + AppendToBuffer("vorps %s,%s,", NameOfXMMRegister(regop), + NameOfXMMRegister(vvvv)); + current += PrintRightXMMOperand(current); + break; case 0x57: AppendToBuffer("vxorps %s,%s,", NameOfXMMRegister(regop), NameOfXMMRegister(vvvv)); @@ -1138,11 +1147,25 @@ int DisassemblerIA32::AVXInstruction(byte* data) { int mod, regop, rm, vvvv = vex_vreg(); get_modrm(*current, &mod, ®op, &rm); switch (opcode) { + case 0x28: + AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop)); + current += PrintRightXMMOperand(current); + break; case 0x54: AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop), NameOfXMMRegister(vvvv)); current += PrintRightXMMOperand(current); break; + case 0x55: + AppendToBuffer("vandnpd %s,%s,", NameOfXMMRegister(regop), + NameOfXMMRegister(vvvv)); + current += PrintRightXMMOperand(current); + break; + case 0x56: + AppendToBuffer("vorpd %s,%s,", NameOfXMMRegister(regop), + NameOfXMMRegister(vvvv)); + current += PrintRightXMMOperand(current); + break; case 0x57: AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop), NameOfXMMRegister(vvvv)); @@ -1200,11 +1223,26 @@ int DisassemblerIA32::AVXInstruction(byte* data) { current++; AppendToBuffer(",%u", *current++); break; + case 0x73: + AppendToBuffer("vps%sq %s,%s", sf_str[regop / 2], + NameOfXMMRegister(vvvv), NameOfXMMRegister(rm)); + current++; + AppendToBuffer(",%u", *current++); + break; case 0x7E: AppendToBuffer("vmovd "); current += PrintRightOperand(current); AppendToBuffer(",%s", NameOfXMMRegister(regop)); break; + case 0xC2: { 
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"}; + AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop), + NameOfXMMRegister(vvvv)); + current += PrintRightXMMOperand(current); + AppendToBuffer(", (%s)", pseudo_op[*current]); + current++; + break; + } case 0xC4: AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop), NameOfXMMRegister(vvvv)); @@ -1212,6 +1250,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) { AppendToBuffer(",%d", Imm8(current)); current++; break; + case 0xC6: + AppendToBuffer("vshufpd %s,%s,", NameOfXMMRegister(regop), + NameOfXMMRegister(vvvv)); + current += PrintRightXMMOperand(current); + AppendToBuffer(",%d", Imm8(current)); + current++; + break; #define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \ case 0x##opcode: { \ AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \ @@ -1763,17 +1808,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, get_modrm(*data, &mod, &regop, &rm); AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop)); data += PrintRightXMMOperand(data); - } else if (f0byte >= 0x52 && f0byte <= 0x5F) { + } else if (f0byte >= 0x51 && f0byte <= 0x5F) { const char* const pseudo_op[] = { - "rsqrtps", "rcpps", "andps", "andnps", "orps", - "xorps", "addps", "mulps", "cvtps2pd", "cvtdq2ps", - "subps", "minps", "divps", "maxps", + "sqrtps", "rsqrtps", "rcpps", "andps", "andnps", + "orps", "xorps", "addps", "mulps", "cvtps2pd", + "cvtdq2ps", "subps", "minps", "divps", "maxps", }; data += 2; int mod, regop, rm; get_modrm(*data, &mod, &regop, &rm); - AppendToBuffer("%s %s,", pseudo_op[f0byte - 0x52], + AppendToBuffer("%s %s,", pseudo_op[f0byte - 0x51], NameOfXMMRegister(regop)); data += PrintRightXMMOperand(data); } else if (f0byte == 0x50) { @@ -2026,7 +2071,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, data += 2; } else if (*data == 0x0F) { data++; - if (*data == 0x38) { + if (*data == 0x28) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + AppendToBuffer("movapd %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (*data == 0x38) { data++; byte op = *data; data++; @@ -2160,27 +2211,31 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, AppendToBuffer("movmskpd %s,%s", NameOfCPURegister(regop), NameOfXMMRegister(rm)); data++; - } else if (*data == 0x54) { - data++; - int mod, regop, rm; - get_modrm(*data, &mod, &regop, &rm); - AppendToBuffer("andpd %s,%s", NameOfXMMRegister(regop), - NameOfXMMRegister(rm)); - data++; - } else if (*data == 0x56) { + } else if (*data >= 0x54 && *data <= 0x59) { + const char* const pseudo_op[] = { + "andpd", "andnpd", "orpd", "xorpd", "addpd", "mulpd", }; + byte op = *data; data++; int mod, regop, rm; get_modrm(*data, &mod, &regop, &rm); - AppendToBuffer("orpd %s,%s", NameOfXMMRegister(regop), - NameOfXMMRegister(rm)); - data++; - } else if (*data == 0x57) { + AppendToBuffer("%s %s,", pseudo_op[op - 0x54], + NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (*data >= 0x5c && *data <= 0x5f) { + const char* const pseudo_op[] = { + "subpd", + "minpd", + "divpd", + "maxpd", + }; + byte op = *data; data++; int mod, regop, rm; get_modrm(*data, &mod, &regop, &rm); - AppendToBuffer("xorpd %s,%s", NameOfXMMRegister(regop), - NameOfXMMRegister(rm)); - data++; + AppendToBuffer("%s %s,", pseudo_op[op - 0x5c], + NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); } else if (*data == 0x6E) { 
data++; int mod, regop, rm; @@ -2257,6 +2312,15 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, AppendToBuffer("movd "); data += PrintRightOperand(data); AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else if (*data == 0xC2) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"}; + AppendToBuffer("cmppd %s, ", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + AppendToBuffer(", (%s)", pseudo_op[*data]); + data++; } else if (*data == 0xC4) { data++; int mod, regop, rm; @@ -2265,6 +2329,15 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, data += PrintRightOperand(data); AppendToBuffer(",%d", Imm8(data)); data++; + } else if (*data == 0xC6) { + // shufpd xmm, xmm/m128, imm8 + data++; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + AppendToBuffer("shufpd %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + AppendToBuffer(",%d", Imm8(data)); + data++; } else if (*data == 0xE7) { data++; int mod, regop, rm; diff --git a/chromium/v8/src/diagnostics/objects-debug.cc b/chromium/v8/src/diagnostics/objects-debug.cc index 6860ead0223..11f8a075e35 100644 --- a/chromium/v8/src/diagnostics/objects-debug.cc +++ b/chromium/v8/src/diagnostics/objects-debug.cc @@ -26,6 +26,7 @@ #include "src/objects/field-type.h" #include "src/objects/foreign-inl.h" #include "src/objects/free-space-inl.h" +#include "src/objects/function-kind.h" #include "src/objects/hash-table-inl.h" #include "src/objects/js-array-inl.h" #include "src/objects/layout-descriptor.h" @@ -258,25 +259,25 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) { case JS_CONTEXT_EXTENSION_OBJECT_TYPE: JSObject::cast(*this).JSObjectVerify(isolate); break; - case WASM_MODULE_TYPE: + case WASM_MODULE_OBJECT_TYPE: WasmModuleObject::cast(*this).WasmModuleObjectVerify(isolate); break; - case WASM_TABLE_TYPE: + case WASM_TABLE_OBJECT_TYPE: WasmTableObject::cast(*this).WasmTableObjectVerify(isolate); break; - case WASM_MEMORY_TYPE: + case WASM_MEMORY_OBJECT_TYPE: WasmMemoryObject::cast(*this).WasmMemoryObjectVerify(isolate); break; - case WASM_GLOBAL_TYPE: + case WASM_GLOBAL_OBJECT_TYPE: WasmGlobalObject::cast(*this).WasmGlobalObjectVerify(isolate); break; - case WASM_EXCEPTION_TYPE: + case WASM_EXCEPTION_OBJECT_TYPE: WasmExceptionObject::cast(*this).WasmExceptionObjectVerify(isolate); break; - case WASM_INSTANCE_TYPE: + case WASM_INSTANCE_OBJECT_TYPE: WasmInstanceObject::cast(*this).WasmInstanceObjectVerify(isolate); break; - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: JSArgumentsObject::cast(*this).JSArgumentsObjectVerify(isolate); break; case JS_GENERATOR_OBJECT_TYPE: @@ -365,10 +366,10 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) { case JS_PROMISE_TYPE: JSPromise::cast(*this).JSPromiseVerify(isolate); break; - case JS_REGEXP_TYPE: + case JS_REG_EXP_TYPE: JSRegExp::cast(*this).JSRegExpVerify(isolate); break; - case JS_REGEXP_STRING_ITERATOR_TYPE: + case JS_REG_EXP_STRING_ITERATOR_TYPE: JSRegExpStringIterator::cast(*this).JSRegExpStringIteratorVerify(isolate); break; case FILLER_TYPE: @@ -425,34 +426,34 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) { CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate); break; #ifdef V8_INTL_SUPPORT - case JS_INTL_V8_BREAK_ITERATOR_TYPE: + case JS_V8_BREAK_ITERATOR_TYPE: JSV8BreakIterator::cast(*this).JSV8BreakIteratorVerify(isolate); break; - case JS_INTL_COLLATOR_TYPE: + case 
JS_COLLATOR_TYPE: JSCollator::cast(*this).JSCollatorVerify(isolate); break; - case JS_INTL_DATE_TIME_FORMAT_TYPE: + case JS_DATE_TIME_FORMAT_TYPE: JSDateTimeFormat::cast(*this).JSDateTimeFormatVerify(isolate); break; - case JS_INTL_LIST_FORMAT_TYPE: + case JS_LIST_FORMAT_TYPE: JSListFormat::cast(*this).JSListFormatVerify(isolate); break; - case JS_INTL_LOCALE_TYPE: + case JS_LOCALE_TYPE: JSLocale::cast(*this).JSLocaleVerify(isolate); break; - case JS_INTL_NUMBER_FORMAT_TYPE: + case JS_NUMBER_FORMAT_TYPE: JSNumberFormat::cast(*this).JSNumberFormatVerify(isolate); break; - case JS_INTL_PLURAL_RULES_TYPE: + case JS_PLURAL_RULES_TYPE: JSPluralRules::cast(*this).JSPluralRulesVerify(isolate); break; - case JS_INTL_RELATIVE_TIME_FORMAT_TYPE: + case JS_RELATIVE_TIME_FORMAT_TYPE: JSRelativeTimeFormat::cast(*this).JSRelativeTimeFormatVerify(isolate); break; - case JS_INTL_SEGMENT_ITERATOR_TYPE: + case JS_SEGMENT_ITERATOR_TYPE: JSSegmentIterator::cast(*this).JSSegmentIteratorVerify(isolate); break; - case JS_INTL_SEGMENTER_TYPE: + case JS_SEGMENTER_TYPE: JSSegmenter::cast(*this).JSSegmenterVerify(isolate); break; #endif // V8_INTL_SUPPORT @@ -512,8 +513,6 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) { USE_TORQUE_VERIFIER(FreeSpace) -USE_TORQUE_VERIFIER(FeedbackCell) - void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) { TorqueGeneratedClassVerifiers::FeedbackVectorVerify(*this, isolate); MaybeObject code = optimized_code_weak_or_smi(); @@ -590,7 +589,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) { bool is_transitionable_fast_elements_kind = IsTransitionableFastElementsKind(map().elements_kind()); - for (int i = 0; i < map().NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : map().IterateOwnDescriptors()) { PropertyDetails details = descriptors.GetDetails(i); if (details.location() == kField) { DCHECK_EQ(kData, details.kind()); @@ -653,8 +652,33 @@ void Map::MapVerify(Isolate* isolate) { CHECK(instance_size() == kVariableSizeSentinel || (kTaggedSize <= instance_size() && static_cast<size_t>(instance_size()) < heap->Capacity())); - CHECK(GetBackPointer().IsUndefined(isolate) || - !Map::cast(GetBackPointer()).is_stable()); + if (GetBackPointer().IsUndefined(isolate)) { + // Root maps must not have descriptors in the descriptor array that do not + // belong to the map. + CHECK_EQ(NumberOfOwnDescriptors(), + instance_descriptors().number_of_descriptors()); + } else { + // If there is a parent map it must be non-stable. + Map parent = Map::cast(GetBackPointer()); + CHECK(!parent.is_stable()); + DescriptorArray descriptors = instance_descriptors(); + if (descriptors == parent.instance_descriptors()) { + if (NumberOfOwnDescriptors() == parent.NumberOfOwnDescriptors() + 1) { + // Descriptors sharing through property transitions takes over + // ownership from the parent map. + CHECK(!parent.owns_descriptors()); + } else { + CHECK_EQ(NumberOfOwnDescriptors(), parent.NumberOfOwnDescriptors()); + // Descriptors sharing through special transitions properly takes over + // ownership from the parent map unless it uses the canonical empty + // descriptor array. 
+ if (descriptors != ReadOnlyRoots(isolate).empty_descriptor_array()) { + CHECK_IMPLIES(owns_descriptors(), !parent.owns_descriptors()); + CHECK_IMPLIES(parent.owns_descriptors(), !owns_descriptors()); + } + } + } + } SLOW_DCHECK(instance_descriptors().IsSortedNoDuplicates()); DisallowHeapAllocation no_gc; SLOW_DCHECK( @@ -668,7 +692,7 @@ void Map::MapVerify(Isolate* isolate) { CHECK(!is_dictionary_map()); CHECK(!is_access_check_needed()); DescriptorArray const descriptors = instance_descriptors(); - for (int i = 0; i < NumberOfOwnDescriptors(); ++i) { + for (InternalIndex i : IterateOwnDescriptors()) { CHECK(!descriptors.GetKey(i).IsInterestingSymbol()); } } @@ -803,9 +827,9 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) { // Check that properties with private symbols names are non-enumerable, and // that fields are in order. int expected_field_index = 0; - for (int descriptor = 0; descriptor < number_of_descriptors(); - descriptor++) { - Object key = *(GetDescriptorSlot(descriptor) + kEntryKeyIndex); + for (InternalIndex descriptor : + InternalIndex::Range(number_of_descriptors())) { + Object key = *(GetDescriptorSlot(descriptor.as_int()) + kEntryKeyIndex); // number_of_descriptors() may be out of sync with the actual descriptors // written during descriptor array construction. if (key.IsUndefined(isolate)) continue; @@ -1050,7 +1074,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) { if (scope_info().length() > 0) { ScopeInfo info = scope_info(); CHECK(kind() == info.function_kind()); - CHECK_EQ(kind() == kModule, info.scope_type() == MODULE_SCOPE); + CHECK_EQ(internal::IsModule(kind()), info.scope_type() == MODULE_SCOPE); } if (IsApiFunction()) { @@ -1449,7 +1473,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) { CHECK(arr.get(JSRegExp::kIrregexpCaptureCountIndex).IsSmi()); CHECK(arr.get(JSRegExp::kIrregexpMaxRegisterCountIndex).IsSmi()); - CHECK(arr.get(JSRegExp::kIrregexpTierUpTicksIndex).IsSmi()); + CHECK(arr.get(JSRegExp::kIrregexpTicksUntilTierUpIndex).IsSmi()); break; } default: @@ -1543,10 +1567,18 @@ void Module::ModuleVerify(Isolate* isolate) { void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) { TorqueGeneratedClassVerifiers::SourceTextModuleVerify(*this, isolate); - CHECK((status() >= kEvaluating && code().IsSourceTextModuleInfo()) || - (status() == kInstantiated && code().IsJSGeneratorObject()) || - (status() == kInstantiating && code().IsJSFunction()) || - (code().IsSharedFunctionInfo())); + if (status() == kErrored) { + CHECK(code().IsSourceTextModuleInfo()); + } else if (status() == kEvaluating || status() == kEvaluated) { + CHECK(code().IsJSGeneratorObject()); + } else { + CHECK((status() == kInstantiated && code().IsJSGeneratorObject()) || + (status() == kInstantiating && code().IsJSFunction()) || + (status() == kPreInstantiating && code().IsSharedFunctionInfo()) || + (status() == kUninstantiated && code().IsSharedFunctionInfo())); + CHECK(top_level_capability().IsUndefined() && !AsyncParentModuleCount() && + !pending_async_dependencies() && !async_evaluating()); + } CHECK_EQ(requested_modules().length(), info().module_requests().length()); } @@ -1679,8 +1711,6 @@ void StoreHandler::StoreHandlerVerify(Isolate* isolate) { USE_TORQUE_VERIFIER(AccessorInfo) -USE_TORQUE_VERIFIER(AccessorPair) - void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) { TorqueGeneratedClassVerifiers::CallHandlerInfoVerify(*this, isolate); CHECK(map() == ReadOnlyRoots(isolate).side_effect_call_handler_info_map() || @@ -1733,8 
+1763,6 @@ void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) { } } -USE_TORQUE_VERIFIER(DebugInfo) - USE_TORQUE_VERIFIER(StackFrameInfo) void PreparseData::PreparseDataVerify(Isolate* isolate) { @@ -1749,19 +1777,6 @@ void PreparseData::PreparseDataVerify(Isolate* isolate) { } } -void UncompiledDataWithPreparseData::UncompiledDataWithPreparseDataVerify( - Isolate* isolate) { - CHECK(IsUncompiledDataWithPreparseData()); - VerifyPointer(isolate, inferred_name()); - VerifyPointer(isolate, preparse_data()); -} - -void UncompiledDataWithoutPreparseData::UncompiledDataWithoutPreparseDataVerify( - Isolate* isolate) { - CHECK(IsUncompiledDataWithoutPreparseData()); - VerifyPointer(isolate, inferred_name()); -} - USE_TORQUE_VERIFIER(InterpreterData) #ifdef V8_INTL_SUPPORT diff --git a/chromium/v8/src/diagnostics/objects-printer.cc b/chromium/v8/src/diagnostics/objects-printer.cc index 39614091c74..20afb9e5204 100644 --- a/chromium/v8/src/diagnostics/objects-printer.cc +++ b/chromium/v8/src/diagnostics/objects-printer.cc @@ -215,25 +215,25 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT case JS_API_OBJECT_TYPE: case JS_SPECIAL_API_OBJECT_TYPE: case JS_CONTEXT_EXTENSION_OBJECT_TYPE: - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: case JS_ERROR_TYPE: // TODO(titzer): debug printing for more wasm objects - case WASM_EXCEPTION_TYPE: + case WASM_EXCEPTION_OBJECT_TYPE: JSObject::cast(*this).JSObjectPrint(os); break; - case WASM_MODULE_TYPE: + case WASM_MODULE_OBJECT_TYPE: WasmModuleObject::cast(*this).WasmModuleObjectPrint(os); break; - case WASM_MEMORY_TYPE: + case WASM_MEMORY_OBJECT_TYPE: WasmMemoryObject::cast(*this).WasmMemoryObjectPrint(os); break; - case WASM_TABLE_TYPE: + case WASM_TABLE_OBJECT_TYPE: WasmTableObject::cast(*this).WasmTableObjectPrint(os); break; - case WASM_GLOBAL_TYPE: + case WASM_GLOBAL_OBJECT_TYPE: WasmGlobalObject::cast(*this).WasmGlobalObjectPrint(os); break; - case WASM_INSTANCE_TYPE: + case WASM_INSTANCE_OBJECT_TYPE: WasmInstanceObject::cast(*this).WasmInstanceObjectPrint(os); break; case JS_ASYNC_FUNCTION_OBJECT_TYPE: @@ -247,10 +247,10 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT case JS_ARRAY_TYPE: JSArray::cast(*this).JSArrayPrint(os); break; - case JS_REGEXP_TYPE: + case JS_REG_EXP_TYPE: JSRegExp::cast(*this).JSRegExpPrint(os); break; - case JS_REGEXP_STRING_ITERATOR_TYPE: + case JS_REG_EXP_STRING_ITERATOR_TYPE: JSRegExpStringIterator::cast(*this).JSRegExpStringIteratorPrint(os); break; case ODDBALL_TYPE: @@ -362,34 +362,34 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT JSDataView::cast(*this).JSDataViewPrint(os); break; #ifdef V8_INTL_SUPPORT - case JS_INTL_V8_BREAK_ITERATOR_TYPE: + case JS_V8_BREAK_ITERATOR_TYPE: JSV8BreakIterator::cast(*this).JSV8BreakIteratorPrint(os); break; - case JS_INTL_COLLATOR_TYPE: + case JS_COLLATOR_TYPE: JSCollator::cast(*this).JSCollatorPrint(os); break; - case JS_INTL_DATE_TIME_FORMAT_TYPE: + case JS_DATE_TIME_FORMAT_TYPE: JSDateTimeFormat::cast(*this).JSDateTimeFormatPrint(os); break; - case JS_INTL_LIST_FORMAT_TYPE: + case JS_LIST_FORMAT_TYPE: JSListFormat::cast(*this).JSListFormatPrint(os); break; - case JS_INTL_LOCALE_TYPE: + case JS_LOCALE_TYPE: JSLocale::cast(*this).JSLocalePrint(os); break; - case JS_INTL_NUMBER_FORMAT_TYPE: + case JS_NUMBER_FORMAT_TYPE: JSNumberFormat::cast(*this).JSNumberFormatPrint(os); break; - case JS_INTL_PLURAL_RULES_TYPE: + case JS_PLURAL_RULES_TYPE: JSPluralRules::cast(*this).JSPluralRulesPrint(os); break; - case 
JS_INTL_RELATIVE_TIME_FORMAT_TYPE: + case JS_RELATIVE_TIME_FORMAT_TYPE: JSRelativeTimeFormat::cast(*this).JSRelativeTimeFormatPrint(os); break; - case JS_INTL_SEGMENT_ITERATOR_TYPE: + case JS_SEGMENT_ITERATOR_TYPE: JSSegmentIterator::cast(*this).JSSegmentIteratorPrint(os); break; - case JS_INTL_SEGMENTER_TYPE: + case JS_SEGMENTER_TYPE: JSSegmenter::cast(*this).JSSegmenterPrint(os); break; #endif // V8_INTL_SUPPORT @@ -477,8 +477,8 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT if (HasFastProperties()) { DescriptorArray descs = map().instance_descriptors(); int nof_inobject_properties = map().GetInObjectProperties(); - int i = 0; - for (; i < map().NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : + InternalIndex::Range(map().NumberOfOwnDescriptors())) { os << "\n "; descs.GetKey(i).NamePrint(os); os << ": "; @@ -506,7 +506,7 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT os << " properties[" << field_index << "]"; } } - return i > 0; + return map().NumberOfOwnDescriptors() > 0; } else if (IsJSGlobalObject()) { JSGlobalObject::cast(*this).global_dictionary().Print(os); } else { @@ -1379,7 +1379,6 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT if (is_detachable()) os << "\n - detachable"; if (was_detached()) os << "\n - detached"; if (is_shared()) os << "\n - shared"; - if (is_wasm_memory()) os << "\n - is_wasm_memory"; JSObjectPrintBody(os, *this, !was_detached()); } @@ -1389,6 +1388,12 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT os << "\n - byte_offset: " << byte_offset(); os << "\n - byte_length: " << byte_length(); os << "\n - length: " << length(); + os << "\n - data_ptr: " << DataPtr(); + Tagged_t base_ptr = static_cast<Tagged_t>(base_pointer().ptr()); + os << "\n - base_pointer: " + << reinterpret_cast<void*>(static_cast<Address>(base_ptr)); + os << "\n - external_pointer: " + << reinterpret_cast<void*>(external_pointer()); if (!buffer().IsJSArrayBuffer()) { os << "\n <invalid buffer>\n"; return; @@ -1627,7 +1632,7 @@ void Code::CodePrint(std::ostream& os) { // NOLINT os << "\n"; #ifdef ENABLE_DISASSEMBLER if (FLAG_use_verbose_printer) { - Disassemble(nullptr, os); + Disassemble(nullptr, os, GetIsolate()); } #endif } @@ -1911,9 +1916,6 @@ void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT if (has_asm_js_offset_table()) { os << "\n - asm_js_offset_table: " << Brief(asm_js_offset_table()); } - if (has_breakpoint_infos()) { - os << "\n - breakpoint_infos: " << Brief(breakpoint_infos()); - } os << "\n"; } @@ -2146,6 +2148,9 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT os << "\n - wrapped arguments: " << Brief(wrapped_arguments()); } os << "\n - eval from position: " << eval_from_position(); + if (has_wasm_breakpoint_infos()) { + os << "\n - wasm_breakpoint_infos: " << Brief(wasm_breakpoint_infos()); + } os << "\n - shared function infos: " << Brief(shared_function_infos()); os << "\n"; } @@ -2280,6 +2285,7 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT os << "\n - receiver: " << ReceiverVariableField::decode(flags); } if (HasClassBrand()) os << "\n - has class brand"; + if (HasSavedClassVariableIndex()) os << "\n - has saved class variable index"; if (HasNewTarget()) os << "\n - needs new target"; if (HasFunctionName()) { os << "\n - function name(" << FunctionVariableField::decode(flags) @@ -2578,9 +2584,9 @@ void Map::MapPrint(std::ostream& os) { // NOLINT } void DescriptorArray::PrintDescriptors(std::ostream& os) { - for (int i = 0; i < 
number_of_descriptors(); i++) { + for (InternalIndex i : InternalIndex::Range(number_of_descriptors())) { Name key = GetKey(i); - os << "\n [" << i << "]: "; + os << "\n [" << i.as_int() << "]: "; #ifdef OBJECT_PRINT key.NamePrint(os); #else @@ -2592,7 +2598,8 @@ void DescriptorArray::PrintDescriptors(std::ostream& os) { os << "\n"; } -void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor, +void DescriptorArray::PrintDescriptorDetails(std::ostream& os, + InternalIndex descriptor, PropertyDetails::PrintMode mode) { PropertyDetails details = GetDetails(descriptor); details.PrintAsFastTo(os, mode); @@ -2655,7 +2662,7 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key, } else { DCHECK(!IsSpecialTransition(roots, key)); os << "(transition to "; - int descriptor = target.LastAdded(); + InternalIndex descriptor = target.LastAdded(); DescriptorArray descriptors = target.instance_descriptors(); descriptors.PrintDescriptorDetails(os, descriptor, PropertyDetails::kForTransitions); @@ -2733,7 +2740,7 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level, os << " "; DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key)); os << "to "; - int descriptor = target.LastAdded(); + InternalIndex descriptor = target.LastAdded(); DescriptorArray descriptors = target.instance_descriptors(); descriptors.PrintDescriptorDetails(os, descriptor, PropertyDetails::kForTransitions); @@ -2816,7 +2823,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) { } #ifdef ENABLE_DISASSEMBLER i::StdoutStream os; - code.Disassemble(nullptr, os, address); + code.Disassemble(nullptr, os, isolate, address); #else // ENABLE_DISASSEMBLER code.Print(); #endif // ENABLE_DISASSEMBLER diff --git a/chromium/v8/src/diagnostics/x64/disasm-x64.cc b/chromium/v8/src/diagnostics/x64/disasm-x64.cc index aada6a43813..2195556af7a 100644 --- a/chromium/v8/src/diagnostics/x64/disasm-x64.cc +++ b/chromium/v8/src/diagnostics/x64/disasm-x64.cc @@ -91,9 +91,9 @@ static const ByteMnemonic zero_operands_instr[] = { {0x61, UNSET_OP_ORDER, "popad"}, {0x9C, UNSET_OP_ORDER, "pushfd"}, {0x9D, UNSET_OP_ORDER, "popfd"}, {0x9E, UNSET_OP_ORDER, "sahf"}, {0x99, UNSET_OP_ORDER, "cdq"}, {0x9B, UNSET_OP_ORDER, "fwait"}, - {0xA4, UNSET_OP_ORDER, "movs"}, {0xA5, UNSET_OP_ORDER, "movs"}, - {0xA6, UNSET_OP_ORDER, "cmps"}, {0xA7, UNSET_OP_ORDER, "cmps"}, - {-1, UNSET_OP_ORDER, ""}}; + {0xAB, UNSET_OP_ORDER, "stos"}, {0xA4, UNSET_OP_ORDER, "movs"}, + {0xA5, UNSET_OP_ORDER, "movs"}, {0xA6, UNSET_OP_ORDER, "cmps"}, + {0xA7, UNSET_OP_ORDER, "cmps"}, {-1, UNSET_OP_ORDER, ""}}; static const ByteMnemonic call_jump_instr[] = {{0xE8, UNSET_OP_ORDER, "call"}, {0xE9, UNSET_OP_ORDER, "jmp"}, @@ -1845,7 +1845,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { current += 1; } else { const char* mnemonic; - if (opcode == 0x54) { + if (opcode == 0x51) { + mnemonic = "sqrtpd"; + } else if (opcode == 0x54) { mnemonic = "andpd"; } else if (opcode == 0x55) { mnemonic = "andnpd"; @@ -2432,13 +2434,13 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, byte_size_operand_ = idesc.byte_size_operation; switch (idesc.type) { case ZERO_OPERANDS_INSTR: - if (current >= 0xA4 && current <= 0xA7) { + if ((current >= 0xA4 && current <= 0xA7) || + (current >= 0xAA && current <= 0xAD)) { // String move or compare operations. if (group_1_prefix_ == REP_PREFIX) { // REP. 
AppendToBuffer("rep "); } - if (rex_w()) AppendToBuffer("REX.W "); AppendToBuffer("%s%c", idesc.mnem, operand_size_code()); } else { AppendToBuffer("%s%c", idesc.mnem, operand_size_code()); diff --git a/chromium/v8/src/execution/arguments-inl.h b/chromium/v8/src/execution/arguments-inl.h index ecdc4ef359a..4565f5d265e 100644 --- a/chromium/v8/src/execution/arguments-inl.h +++ b/chromium/v8/src/execution/arguments-inl.h @@ -14,15 +14,15 @@ namespace v8 { namespace internal { template <class S> -Handle<S> Arguments::at(int index) { +Handle<S> Arguments::at(int index) const { return Handle<S>::cast(at<Object>(index)); } -int Arguments::smi_at(int index) { +int Arguments::smi_at(int index) const { return Smi::ToInt(Object(*address_of_arg_at(index))); } -double Arguments::number_at(int index) { return (*this)[index].Number(); } +double Arguments::number_at(int index) const { return (*this)[index].Number(); } } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/execution/arguments.h b/chromium/v8/src/execution/arguments.h index 8f07dd9db3e..77bbe62dfc6 100644 --- a/chromium/v8/src/execution/arguments.h +++ b/chromium/v8/src/execution/arguments.h @@ -37,24 +37,26 @@ class Arguments { DCHECK_GE(length_, 0); } - Object operator[](int index) { return Object(*address_of_arg_at(index)); } + Object operator[](int index) const { + return Object(*address_of_arg_at(index)); + } template <class S = Object> - inline Handle<S> at(int index); + inline Handle<S> at(int index) const; - inline int smi_at(int index); + inline int smi_at(int index) const; - inline double number_at(int index); + inline double number_at(int index) const; inline void set_at(int index, Object value) { *address_of_arg_at(index) = value.ptr(); } - inline FullObjectSlot slot_at(int index) { + inline FullObjectSlot slot_at(int index) const { return FullObjectSlot(address_of_arg_at(index)); } - inline Address* address_of_arg_at(int index) { + inline Address* address_of_arg_at(int index) const { DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_)); return reinterpret_cast<Address*>(reinterpret_cast<Address>(arguments_) - index * kSystemPointerSize); @@ -64,8 +66,8 @@ class Arguments { int length() const { return static_cast<int>(length_); } // Arguments on the stack are in reverse order (compared to an array). - FullObjectSlot first_slot() { return slot_at(length() - 1); } - FullObjectSlot last_slot() { return slot_at(0); } + FullObjectSlot first_slot() const { return slot_at(length() - 1); } + FullObjectSlot last_slot() const { return slot_at(0); } private: intptr_t length_; @@ -73,7 +75,7 @@ class Arguments { }; template <> -inline Handle<Object> Arguments::at(int index) { +inline Handle<Object> Arguments::at(int index) const { return Handle<Object>(address_of_arg_at(index)); } diff --git a/chromium/v8/src/execution/arm/simulator-arm.cc b/chromium/v8/src/execution/arm/simulator-arm.cc index 26771350961..841ff4bfd4f 100644 --- a/chromium/v8/src/execution/arm/simulator-arm.cc +++ b/chromium/v8/src/execution/arm/simulator-arm.cc @@ -12,6 +12,8 @@ #include "src/base/bits.h" #include "src/base/lazy-instance.h" +#include "src/base/memory.h" +#include "src/base/overflowing-math.h" #include "src/codegen/arm/constants-arm.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler.h" @@ -899,16 +901,14 @@ int Simulator::ReadW(int32_t addr) { // check the alignment here. 
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoad(addr); - intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<intptr_t>(addr); } int Simulator::ReadExW(int32_t addr) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word); GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_); - intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<intptr_t>(addr); } void Simulator::WriteW(int32_t addr, int value) { @@ -917,8 +917,7 @@ void Simulator::WriteW(int32_t addr, int value) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyStore(addr); GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_); - intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); - *ptr = value; + base::WriteUnalignedValue<intptr_t>(addr, value); } int Simulator::WriteExW(int32_t addr, int value) { @@ -926,8 +925,7 @@ int Simulator::WriteExW(int32_t addr, int value) { if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) && GlobalMonitor::Get()->NotifyStoreExcl_Locked( addr, &global_monitor_processor_)) { - intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); - *ptr = value; + base::WriteUnalignedValue<intptr_t>(addr, value); return 0; } else { return 1; @@ -939,8 +937,7 @@ uint16_t Simulator::ReadHU(int32_t addr) { // check the alignment here. base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoad(addr); - uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<uint16_t>(addr); } int16_t Simulator::ReadH(int32_t addr) { @@ -948,16 +945,14 @@ int16_t Simulator::ReadH(int32_t addr) { // check the alignment here. 
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoad(addr); - int16_t* ptr = reinterpret_cast<int16_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<int16_t>(addr); } uint16_t Simulator::ReadExHU(int32_t addr) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord); GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_); - uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<uint16_t>(addr); } void Simulator::WriteH(int32_t addr, uint16_t value) { @@ -966,8 +961,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyStore(addr); GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_); - uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); - *ptr = value; + base::WriteUnalignedValue(addr, value); } void Simulator::WriteH(int32_t addr, int16_t value) { @@ -976,8 +970,7 @@ void Simulator::WriteH(int32_t addr, int16_t value) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyStore(addr); GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_); - int16_t* ptr = reinterpret_cast<int16_t*>(addr); - *ptr = value; + base::WriteUnalignedValue(addr, value); } int Simulator::WriteExH(int32_t addr, uint16_t value) { @@ -985,8 +978,7 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) { if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) && GlobalMonitor::Get()->NotifyStoreExcl_Locked( addr, &global_monitor_processor_)) { - uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); - *ptr = value; + base::WriteUnalignedValue(addr, value); return 0; } else { return 1; @@ -996,39 +988,34 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) { uint8_t Simulator::ReadBU(int32_t addr) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoad(addr); - uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<uint8_t>(addr); } int8_t Simulator::ReadB(int32_t addr) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoad(addr); - int8_t* ptr = reinterpret_cast<int8_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<int8_t>(addr); } uint8_t Simulator::ReadExBU(int32_t addr) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte); GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_); - uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); - return *ptr; + return base::ReadUnalignedValue<uint8_t>(addr); } void Simulator::WriteB(int32_t addr, uint8_t value) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyStore(addr); GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_); - uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); - *ptr = value; + base::WriteUnalignedValue(addr, value); } void Simulator::WriteB(int32_t addr, int8_t value) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyStore(addr); GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_); - int8_t* ptr = reinterpret_cast<int8_t*>(addr); - *ptr = value; + base::WriteUnalignedValue(addr, value); } int Simulator::WriteExB(int32_t addr, uint8_t value) { @@ -1036,8 +1023,7 @@ int Simulator::WriteExB(int32_t addr, uint8_t 
value) { if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) && GlobalMonitor::Get()->NotifyStoreExcl_Locked( addr, &global_monitor_processor_)) { - uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); - *ptr = value; + base::WriteUnalignedValue(addr, value); return 0; } else { return 1; @@ -1049,16 +1035,14 @@ int32_t* Simulator::ReadDW(int32_t addr) { // check the alignment here. base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoad(addr); - int32_t* ptr = reinterpret_cast<int32_t*>(addr); - return ptr; + return reinterpret_cast<int32_t*>(addr); } int32_t* Simulator::ReadExDW(int32_t addr) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyLoadExcl(addr, TransactionSize::DoubleWord); GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_); - int32_t* ptr = reinterpret_cast<int32_t*>(addr); - return ptr; + return reinterpret_cast<int32_t*>(addr); } void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) { @@ -1067,9 +1051,8 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) { base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); local_monitor_.NotifyStore(addr); GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_); - int32_t* ptr = reinterpret_cast<int32_t*>(addr); - *ptr++ = value1; - *ptr = value2; + base::WriteUnalignedValue(addr, value1); + base::WriteUnalignedValue(addr + sizeof(value1), value2); } int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) { @@ -1077,9 +1060,8 @@ int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) { if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::DoubleWord) && GlobalMonitor::Get()->NotifyStoreExcl_Locked( addr, &global_monitor_processor_)) { - intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); - *ptr++ = value1; - *ptr = value2; + base::WriteUnalignedValue(addr, value1); + base::WriteUnalignedValue(addr + sizeof(value1), value2); return 0; } else { return 1; @@ -1291,9 +1273,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { if (shift_amount == 0) { *carry_out = c_flag_; } else { - result <<= (shift_amount - 1); + result = static_cast<uint32_t>(result) << (shift_amount - 1); *carry_out = (result < 0); - result <<= 1; + result = static_cast<uint32_t>(result) << 1; } break; } @@ -1316,9 +1298,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { if (shift_amount == 0) { *carry_out = c_flag_; } else { - uint32_t left = static_cast<uint32_t>(result) >> shift_amount; - uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount); - result = right | left; + result = base::bits::RotateRight32(result, shift_amount); *carry_out = (static_cast<uint32_t>(result) >> 31) != 0; } break; @@ -1358,9 +1338,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { if (shift_amount == 0) { *carry_out = c_flag_; } else if (shift_amount < 32) { - result <<= (shift_amount - 1); + result = static_cast<uint32_t>(result) << (shift_amount - 1); *carry_out = (result < 0); - result <<= 1; + result = static_cast<uint32_t>(result) << 1; } else if (shift_amount == 32) { *carry_out = (result & 1) == 1; result = 0; @@ -1395,9 +1375,8 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { if (shift_amount == 0) { *carry_out = c_flag_; } else { - uint32_t left = static_cast<uint32_t>(result) >> shift_amount; - uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount); - result = right | left; + 
// Avoid undefined behavior. Rotating by multiples of 32 is no-op. + result = base::bits::RotateRight32(result, shift_amount & 31); *carry_out = (static_cast<uint32_t>(result) >> 31) != 0; } break; @@ -1580,6 +1559,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1); using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1, void* arg2); +// Separate for fine-grained UBSan blacklisting. Casting any given C++ +// function to {SimulatorRuntimeCall} is undefined behavior; but since +// the target function can indeed be any function that's exposed via +// the "fast C call" mechanism, we can't reconstruct its signature here. +int64_t UnsafeGenericFunctionCall(intptr_t function, int32_t arg0, int32_t arg1, + int32_t arg2, int32_t arg3, int32_t arg4, + int32_t arg5, int32_t arg6, int32_t arg7, + int32_t arg8, int32_t arg9) { + SimulatorRuntimeCall target = + reinterpret_cast<SimulatorRuntimeCall>(function); + return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); +} +void UnsafeDirectApiCall(intptr_t function, int32_t arg0) { + SimulatorRuntimeDirectApiCall target = + reinterpret_cast<SimulatorRuntimeDirectApiCall>(function); + target(arg0); +} +void UnsafeProfilingApiCall(intptr_t function, int32_t arg0, int32_t arg1) { + SimulatorRuntimeProfilingApiCall target = + reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function); + target(arg0, Redirection::ReverseRedirection(arg1)); +} +void UnsafeDirectGetterCall(intptr_t function, int32_t arg0, int32_t arg1) { + SimulatorRuntimeDirectGetterCall target = + reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function); + target(arg0, arg1); +} + // Software interrupt instructions are used by the simulator to call into the // C-based V8 runtime. void Simulator::SoftwareInterrupt(Instruction* instr) { @@ -1710,9 +1717,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { PrintF("\n"); } CHECK(stack_aligned); - SimulatorRuntimeDirectApiCall target = - reinterpret_cast<SimulatorRuntimeDirectApiCall>(external); - target(arg0); + UnsafeDirectApiCall(external, arg0); } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) { if (::v8::internal::FLAG_trace_sim || !stack_aligned) { PrintF("Call to host function at %p args %08x %08x", @@ -1723,9 +1728,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { PrintF("\n"); } CHECK(stack_aligned); - SimulatorRuntimeProfilingApiCall target = - reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external); - target(arg0, Redirection::ReverseRedirection(arg1)); + UnsafeProfilingApiCall(external, arg0, arg1); } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { if (::v8::internal::FLAG_trace_sim || !stack_aligned) { PrintF("Call to host function at %p args %08x %08x", @@ -1736,9 +1739,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { PrintF("\n"); } CHECK(stack_aligned); - SimulatorRuntimeDirectGetterCall target = - reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external); - target(arg0, arg1); + UnsafeDirectGetterCall(external, arg0, arg1); } else if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) { if (::v8::internal::FLAG_trace_sim || !stack_aligned) { @@ -1757,14 +1758,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { // builtin call. 
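The reinterpret_cast-and-dereference pattern removed throughout the half-word and byte accessors above is undefined behavior when the simulated address is not suitably aligned. Helpers like base::ReadUnalignedValue / base::WriteUnalignedValue are typically built on memcpy; the following stand-alone sketch (illustrative, not V8's actual code) shows the shape and why it is safe:

#include <cstdint>
#include <cstring>

// Copying the bytes with memcpy is well defined for any address, and
// compilers lower it to a single load/store on targets that permit
// unaligned access.
template <typename T>
T ReadUnalignedValueSketch(uintptr_t addr) {
  T result;
  std::memcpy(&result, reinterpret_cast<const void*>(addr), sizeof(result));
  return result;
}

template <typename T>
void WriteUnalignedValueSketch(uintptr_t addr, T value) {
  std::memcpy(reinterpret_cast<void*>(addr), &value, sizeof(value));
}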
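Similarly, the ROR handling in GetShiftRm above now goes through base::bits::RotateRight32 with the shift amount masked to 0..31. The old open-coded form (x >> n) | (x << (32 - n)) shifts by 32 when n is 0, which is undefined behavior. A portable rotate looks roughly like the sketch below (an illustration, not necessarily the helper's exact implementation):

#include <cstdint>

inline uint32_t RotateRight32Sketch(uint32_t value, uint32_t shift) {
  shift &= 31;                  // rotating by a multiple of 32 is a no-op
  if (shift == 0) return value;
  return (value >> shift) | (value << (32 - shift));  // both shifts now in 1..31
}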
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL || redirection->type() == ExternalReference::BUILTIN_CALL_PAIR); - SimulatorRuntimeCall target = - reinterpret_cast<SimulatorRuntimeCall>(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { PrintF( "Call to host function at %p " "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x", - reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, - arg3, arg4, arg5, arg6, arg7, arg8, arg9); + reinterpret_cast<void*>(external), arg0, arg1, arg2, arg3, arg4, + arg5, arg6, arg7, arg8, arg9); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } @@ -1772,7 +1771,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { } CHECK(stack_aligned); int64_t result = - target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + UnsafeGenericFunctionCall(external, arg0, arg1, arg2, arg3, arg4, + arg5, arg6, arg7, arg8, arg9); int32_t lo_res = static_cast<int32_t>(result); int32_t hi_res = static_cast<int32_t>(result >> 32); if (::v8::internal::FLAG_trace_sim) { @@ -1938,7 +1938,7 @@ void Simulator::DecodeType01(Instruction* instr) { // Rn field to encode it. // Format(instr, "mul'cond's 'rn, 'rm, 'rs"); int rd = rn; // Remap the rn field to the Rd register. - int32_t alu_out = rm_val * rs_val; + int32_t alu_out = base::MulWithWraparound(rm_val, rs_val); set_register(rd, alu_out); if (instr->HasS()) { SetNZFlags(alu_out); @@ -1952,13 +1952,13 @@ void Simulator::DecodeType01(Instruction* instr) { // Rn field to encode the Rd register and the Rd field to encode // the Rn register. // Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd"); - int32_t mul_out = rm_val * rs_val; - int32_t result = acc_value + mul_out; + int32_t mul_out = base::MulWithWraparound(rm_val, rs_val); + int32_t result = base::AddWithWraparound(acc_value, mul_out); set_register(rn, result); } else { // Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd"); - int32_t mul_out = rm_val * rs_val; - int32_t result = acc_value - mul_out; + int32_t mul_out = base::MulWithWraparound(rm_val, rs_val); + int32_t result = base::SubWithWraparound(acc_value, mul_out); set_register(rn, result); } } @@ -2096,7 +2096,7 @@ void Simulator::DecodeType01(Instruction* instr) { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm"); DCHECK(!instr->HasW()); addr = rn_val; - rn_val -= rm_val; + rn_val = base::SubWithWraparound(rn_val, rm_val); set_register(rn, rn_val); break; } @@ -2104,13 +2104,13 @@ void Simulator::DecodeType01(Instruction* instr) { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm"); DCHECK(!instr->HasW()); addr = rn_val; - rn_val += rm_val; + rn_val = base::AddWithWraparound(rn_val, rm_val); set_register(rn, rn_val); break; } case db_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w"); - rn_val -= rm_val; + rn_val = base::SubWithWraparound(rn_val, rm_val); addr = rn_val; if (instr->HasW()) { set_register(rn, rn_val); @@ -2119,7 +2119,7 @@ void Simulator::DecodeType01(Instruction* instr) { } case ib_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w"); - rn_val += rm_val; + rn_val = base::AddWithWraparound(rn_val, rm_val); addr = rn_val; if (instr->HasW()) { set_register(rn, rn_val); @@ -2139,7 +2139,7 @@ void Simulator::DecodeType01(Instruction* instr) { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8"); DCHECK(!instr->HasW()); addr = rn_val; - rn_val -= imm_val; + rn_val = base::SubWithWraparound(rn_val, imm_val); set_register(rn, rn_val); break; } @@ -2147,13 +2147,13 @@ void 
Simulator::DecodeType01(Instruction* instr) { // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8"); DCHECK(!instr->HasW()); addr = rn_val; - rn_val += imm_val; + rn_val = base::AddWithWraparound(rn_val, imm_val); set_register(rn, rn_val); break; } case db_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w"); - rn_val -= imm_val; + rn_val = base::SubWithWraparound(rn_val, imm_val); addr = rn_val; if (instr->HasW()) { set_register(rn, rn_val); @@ -2162,7 +2162,7 @@ void Simulator::DecodeType01(Instruction* instr) { } case ib_x: { // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w"); - rn_val += imm_val; + rn_val = base::AddWithWraparound(rn_val, imm_val); addr = rn_val; if (instr->HasW()) { set_register(rn, rn_val); @@ -2328,7 +2328,7 @@ void Simulator::DecodeType01(Instruction* instr) { case SUB: { // Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm"); // Format(instr, "sub'cond's 'rd, 'rn, 'imm"); - alu_out = rn_val - shifter_operand; + alu_out = base::SubWithWraparound(rn_val, shifter_operand); set_register(rd, alu_out); if (instr->HasS()) { SetNZFlags(alu_out); @@ -2341,7 +2341,7 @@ void Simulator::DecodeType01(Instruction* instr) { case RSB: { // Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm"); // Format(instr, "rsb'cond's 'rd, 'rn, 'imm"); - alu_out = shifter_operand - rn_val; + alu_out = base::SubWithWraparound(shifter_operand, rn_val); set_register(rd, alu_out); if (instr->HasS()) { SetNZFlags(alu_out); @@ -2354,7 +2354,7 @@ void Simulator::DecodeType01(Instruction* instr) { case ADD: { // Format(instr, "add'cond's 'rd, 'rn, 'shift_rm"); // Format(instr, "add'cond's 'rd, 'rn, 'imm"); - alu_out = rn_val + shifter_operand; + alu_out = base::AddWithWraparound(rn_val, shifter_operand); set_register(rd, alu_out); if (instr->HasS()) { SetNZFlags(alu_out); @@ -2367,7 +2367,8 @@ void Simulator::DecodeType01(Instruction* instr) { case ADC: { // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm"); // Format(instr, "adc'cond's 'rd, 'rn, 'imm"); - alu_out = rn_val + shifter_operand + GetCarry(); + alu_out = base::AddWithWraparound( + base::AddWithWraparound(rn_val, shifter_operand), GetCarry()); set_register(rd, alu_out); if (instr->HasS()) { SetNZFlags(alu_out); @@ -2380,7 +2381,9 @@ void Simulator::DecodeType01(Instruction* instr) { case SBC: { // Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm"); // Format(instr, "sbc'cond's 'rd, 'rn, 'imm"); - alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1); + alu_out = base::SubWithWraparound( + base::SubWithWraparound(rn_val, shifter_operand), + (GetCarry() ? 
0 : 1)); set_register(rd, alu_out); if (instr->HasS()) { SetNZFlags(alu_out); @@ -2430,7 +2433,7 @@ void Simulator::DecodeType01(Instruction* instr) { if (instr->HasS()) { // Format(instr, "cmp'cond 'rn, 'shift_rm"); // Format(instr, "cmp'cond 'rn, 'imm"); - alu_out = rn_val - shifter_operand; + alu_out = base::SubWithWraparound(rn_val, shifter_operand); SetNZFlags(alu_out); SetCFlag(!BorrowFrom(rn_val, shifter_operand)); SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false)); @@ -2447,7 +2450,7 @@ void Simulator::DecodeType01(Instruction* instr) { if (instr->HasS()) { // Format(instr, "cmn'cond 'rn, 'shift_rm"); // Format(instr, "cmn'cond 'rn, 'imm"); - alu_out = rn_val + shifter_operand; + alu_out = base::AddWithWraparound(rn_val, shifter_operand); SetNZFlags(alu_out); SetCFlag(CarryFrom(rn_val, shifter_operand)); SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true)); @@ -2937,7 +2940,7 @@ void Simulator::DecodeType3(Instruction* instr) { } else { // sbfx - signed bitfield extract. int32_t rm_val = get_register(instr->RmValue()); - int32_t extr_val = rm_val << (31 - msbit); + int32_t extr_val = static_cast<uint32_t>(rm_val) << (31 - msbit); extr_val = extr_val >> (31 - widthminus1); set_register(instr->RdValue(), extr_val); } @@ -2969,7 +2972,7 @@ void Simulator::DecodeType3(Instruction* instr) { return; } else { // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w"); - addr = rn_val + shifter_operand; + addr = base::AddWithWraparound(rn_val, shifter_operand); if (instr->HasW()) { set_register(rn, addr); } @@ -3010,7 +3013,8 @@ void Simulator::DecodeType4(Instruction* instr) { void Simulator::DecodeType5(Instruction* instr) { // Format(instr, "b'l'cond 'target"); - int off = (instr->SImmed24Value() << 2); + int off = + static_cast<int>(static_cast<uint32_t>(instr->SImmed24Value()) << 2); intptr_t pc_address = get_pc(); if (instr->HasLink()) { set_register(lr, pc_address + kInstrSize); @@ -3259,14 +3263,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { if (instr->SzValue() == 0x1) { double dn_value = get_double_from_d_register(vn).get_scalar(); double dm_value = get_double_from_d_register(vm).get_scalar(); - double dd_value = dn_value / dm_value; + double dd_value = base::Divide(dn_value, dm_value); div_zero_vfp_flag_ = (dm_value == 0); dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else { float sn_value = get_float_from_s_register(n).get_scalar(); float sm_value = get_float_from_s_register(m).get_scalar(); - float sd_value = sn_value / sm_value; + float sd_value = base::Divide(sn_value, sm_value); div_zero_vfp_flag_ = (sm_value == 0); sd_value = canonicalizeNaN(sd_value); set_s_register_from_float(d, sd_value); @@ -3594,10 +3598,22 @@ int VFPConversionSaturate(double val, bool unsigned_res) { int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer, VFPRoundingMode mode) { - // TODO(jkummerow): These casts are undefined behavior if the integral - // part of {val} does not fit into the destination type. - int32_t result = - unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val); + int32_t result; + if (unsigned_integer) { + // The FastD2UI helper does not have the rounding behavior we want here + // (it doesn't guarantee any particular rounding, and it doesn't check + // for or handle overflow), so do the conversion by hand. + using limits = std::numeric_limits<uint32_t>; + if (val > limits::max()) { + result = limits::max(); + } else if (!(val >= 0)) { // Negation to catch NaNs. 
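The Add/Sub/MulWithWraparound helpers substituted throughout DecodeType01 exist because signed overflow is undefined behavior in C++, while the simulated ARM instructions need plain two's-complement wraparound. The usual implementation technique (assumed shape, not V8's exact code) is to do the arithmetic on the corresponding unsigned type, which wraps modulo 2^N, and convert the bit pattern back:

#include <cstdint>
#include <type_traits>

template <typename T>
T AddWithWraparoundSketch(T a, T b) {
  static_assert(std::is_signed<T>::value, "intended for signed integers");
  using U = typename std::make_unsigned<T>::type;
  // Converting the unsigned result back is implementation-defined before
  // C++20 but yields the expected two's-complement value on the relevant
  // toolchains (and is fully defined from C++20 on).
  return static_cast<T>(static_cast<U>(a) + static_cast<U>(b));
}

// Example: AddWithWraparoundSketch<int32_t>(INT32_MAX, 1) == INT32_MIN.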
+ result = 0; + } else { + result = static_cast<uint32_t>(val); + } + } else { + result = FastD2IChecked(val); + } inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer); @@ -3617,7 +3633,9 @@ int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer, result += val_sign; } else if (abs_diff == 0.5) { // Round to even if exactly halfway. - result = ((result % 2) == 0) ? result : result + val_sign; + result = ((result % 2) == 0) + ? result + : base::AddWithWraparound(result, val_sign); } break; } @@ -3873,7 +3891,11 @@ void Neg(Simulator* simulator, int Vd, int Vm) { T src[kElems]; simulator->get_neon_register<T, SIZE>(Vm, src); for (int i = 0; i < kElems; i++) { - src[i] = -src[i]; + if (src[i] != std::numeric_limits<T>::min()) { + src[i] = -src[i]; + } else { + // The respective minimum (negative) value maps to itself. + } } simulator->set_neon_register<T, SIZE>(Vd, src); } @@ -3998,6 +4020,17 @@ void Sub(Simulator* simulator, int Vd, int Vm, int Vn) { simulator->set_neon_register<T, SIZE>(Vd, src1); } +namespace { +uint32_t Multiply(uint32_t a, uint32_t b) { return a * b; } +uint8_t Multiply(uint8_t a, uint8_t b) { return a * b; } +// 16-bit integers are special due to C++'s implicit conversion rules. +// See https://bugs.llvm.org/show_bug.cgi?id=25580. +uint16_t Multiply(uint16_t a, uint16_t b) { + uint32_t result = static_cast<uint32_t>(a) * static_cast<uint32_t>(b); + return static_cast<uint16_t>(result); +} +} // namespace + template <typename T, int SIZE> void Mul(Simulator* simulator, int Vd, int Vm, int Vn) { static const int kElems = SIZE / sizeof(T); @@ -4005,7 +4038,7 @@ void Mul(Simulator* simulator, int Vd, int Vm, int Vn) { simulator->get_neon_register<T, SIZE>(Vn, src1); simulator->get_neon_register<T, SIZE>(Vm, src2); for (int i = 0; i < kElems; i++) { - src1[i] *= src2[i]; + src1[i] = Multiply(src1[i], src2[i]); } simulator->set_neon_register<T, SIZE>(Vd, src1); } @@ -4090,7 +4123,8 @@ void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) { if (shift_value >= size) { src[i] = 0; } else { - src[i] <<= shift_value; + using unsignedT = typename std::make_unsigned<T>::type; + src[i] = static_cast<unsignedT>(src[i]) << shift_value; } } else { // If the shift value is greater/equal than size, always end up with -1. @@ -5721,7 +5755,7 @@ void Simulator::Execute() { // should be stopping at a particular executed instruction. while (program_counter != end_sim_pc) { Instruction* instr = reinterpret_cast<Instruction*>(program_counter); - icount_++; + icount_ = base::AddWithWraparound(icount_, 1); InstructionDecode(instr); program_counter = get_pc(); } @@ -5730,7 +5764,7 @@ void Simulator::Execute() { // we reach the particular instruction count. 
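Pulled out of the surrounding simulator state, the unsigned branch of ConvertDoubleToInt introduced above amounts to a saturating double-to-uint32 conversion. A stand-alone version for illustration (the simulator afterwards still applies the VFP rounding mode and inexact/invalid flag handling):

#include <cstdint>
#include <limits>

uint32_t SaturatingDoubleToUint32(double val) {
  using limits = std::numeric_limits<uint32_t>;
  if (val > limits::max()) return limits::max();  // e.g. 1e20 -> 0xFFFFFFFF
  if (!(val >= 0)) return 0;   // the negated comparison also catches NaN
  return static_cast<uint32_t>(val);  // in-range values truncate toward zero
}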
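The dedicated 16-bit Multiply overload above deserves a note: integer promotion turns both uint16_t operands into (signed) int before the multiplication, so a product above INT_MAX is signed overflow and therefore undefined behavior even though the operands are unsigned; that is the LLVM bug referenced in the comment. Widening to uint32_t keeps the arithmetic unsigned. A stand-alone illustration:

#include <cstdint>

uint16_t Mul16(uint16_t a, uint16_t b) {
  // Force the arithmetic to happen in uint32_t, then truncate to 16 bits.
  return static_cast<uint16_t>(static_cast<uint32_t>(a) *
                               static_cast<uint32_t>(b));
}

// Example: Mul16(0xFFFF, 0xFFFF) == 0x0001 (the low 16 bits of 0xFFFE0001);
// the naive `a * b` would evaluate 0xFFFF * 0xFFFF in int and overflow.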
while (program_counter != end_sim_pc) { Instruction* instr = reinterpret_cast<Instruction*>(program_counter); - icount_++; + icount_ = base::AddWithWraparound(icount_, 1); if (icount_ == ::v8::internal::FLAG_stop_sim_at) { ArmDebugger dbg(this); dbg.Debug(); diff --git a/chromium/v8/src/execution/arm64/simulator-arm64.cc b/chromium/v8/src/execution/arm64/simulator-arm64.cc index 71fedd5b2ff..d3a73cbad87 100644 --- a/chromium/v8/src/execution/arm64/simulator-arm64.cc +++ b/chromium/v8/src/execution/arm64/simulator-arm64.cc @@ -12,6 +12,7 @@ #include <type_traits> #include "src/base/lazy-instance.h" +#include "src/base/overflowing-math.h" #include "src/codegen/arm64/decoder-arm64-inl.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler.h" @@ -154,6 +155,22 @@ void Simulator::CallImpl(Address entry, CallArgument* args) { set_sp(original_stack); } +#ifdef DEBUG +namespace { +int PopLowestIndexAsCode(CPURegList* list) { + if (list->IsEmpty()) { + return -1; + } + RegList reg_list = list->list(); + int index = base::bits::CountTrailingZeros(reg_list); + DCHECK((1LL << index) & reg_list); + list->Remove(index); + + return index; +} +} // namespace +#endif + void Simulator::CheckPCSComplianceAndRun() { // Adjust JS-based stack limit to C-based stack limit. isolate_->stack_guard()->AdjustStackLimitForSimulator(); @@ -171,10 +188,10 @@ void Simulator::CheckPCSComplianceAndRun() { for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { // x31 is not a caller saved register, so no need to specify if we want // the stack or zero. - saved_registers[i] = xreg(register_list.PopLowestIndex().code()); + saved_registers[i] = xreg(PopLowestIndexAsCode(®ister_list)); } for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) { - saved_fpregisters[i] = dreg_bits(fpregister_list.PopLowestIndex().code()); + saved_fpregisters[i] = dreg_bits(PopLowestIndexAsCode(&fpregister_list)); } int64_t original_stack = sp(); #endif @@ -186,11 +203,11 @@ void Simulator::CheckPCSComplianceAndRun() { register_list = kCalleeSaved; fpregister_list = kCalleeSavedV; for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { - DCHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code())); + DCHECK_EQ(saved_registers[i], xreg(PopLowestIndexAsCode(®ister_list))); } for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) { DCHECK(saved_fpregisters[i] == - dreg_bits(fpregister_list.PopLowestIndex().code())); + dreg_bits(PopLowestIndexAsCode(&fpregister_list))); } // Corrupt caller saved register minus the return regiters. @@ -217,13 +234,13 @@ void Simulator::CheckPCSComplianceAndRun() { void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) { if (list->type() == CPURegister::kRegister) { while (!list->IsEmpty()) { - unsigned code = list->PopLowestIndex().code(); + unsigned code = PopLowestIndexAsCode(list); set_xreg(code, value | code); } } else { DCHECK_EQ(list->type(), CPURegister::kVRegister); while (!list->IsEmpty()) { - unsigned code = list->PopLowestIndex().code(); + unsigned code = PopLowestIndexAsCode(list); set_dreg_bits(code, value | code); } } @@ -414,6 +431,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1); using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1, void* arg2); +// Separate for fine-grained UBSan blacklisting. 
Casting any given C++ +// function to {SimulatorRuntimeCall} is undefined behavior; but since +// the target function can indeed be any function that's exposed via +// the "fast C call" mechanism, we can't reconstruct its signature here. +ObjectPair UnsafeGenericFunctionCall(int64_t function, int64_t arg0, + int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5, int64_t arg6, + int64_t arg7, int64_t arg8, int64_t arg9) { + SimulatorRuntimeCall target = + reinterpret_cast<SimulatorRuntimeCall>(function); + return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); +} +void UnsafeDirectApiCall(int64_t function, int64_t arg0) { + SimulatorRuntimeDirectApiCall target = + reinterpret_cast<SimulatorRuntimeDirectApiCall>(function); + target(arg0); +} +void UnsafeProfilingApiCall(int64_t function, int64_t arg0, void* arg1) { + SimulatorRuntimeProfilingApiCall target = + reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function); + target(arg0, arg1); +} +void UnsafeDirectGetterCall(int64_t function, int64_t arg0, int64_t arg1) { + SimulatorRuntimeDirectGetterCall target = + reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function); + target(arg0, arg1); +} + void Simulator::DoRuntimeCall(Instruction* instr) { Redirection* redirection = Redirection::FromInstruction(instr); @@ -515,10 +560,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) { ", " "0x%016" PRIx64 ", 0x%016" PRIx64, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); - SimulatorRuntimeCall target = - reinterpret_cast<SimulatorRuntimeCall>(external); - ObjectPair result = - target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + ObjectPair result = UnsafeGenericFunctionCall( + external, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x), reinterpret_cast<void*>(result.y)); #ifdef DEBUG @@ -532,10 +575,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) { case ExternalReference::DIRECT_API_CALL: { // void f(v8::FunctionCallbackInfo&) TraceSim("Type: DIRECT_API_CALL\n"); - SimulatorRuntimeDirectApiCall target = - reinterpret_cast<SimulatorRuntimeDirectApiCall>(external); TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0)); - target(xreg(0)); + UnsafeDirectApiCall(external, xreg(0)); TraceSim("No return value."); #ifdef DEBUG CorruptAllCallerSavedCPURegisters(); @@ -606,11 +647,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) { case ExternalReference::DIRECT_GETTER_CALL: { // void f(Local<String> property, PropertyCallbackInfo& info) TraceSim("Type: DIRECT_GETTER_CALL\n"); - SimulatorRuntimeDirectGetterCall target = - reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external); TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n", xreg(0), xreg(1)); - target(xreg(0), xreg(1)); + UnsafeDirectGetterCall(external, xreg(0), xreg(1)); TraceSim("No return value."); #ifdef DEBUG CorruptAllCallerSavedCPURegisters(); @@ -621,11 +660,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) { case ExternalReference::PROFILING_API_CALL: { // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback) TraceSim("Type: PROFILING_API_CALL\n"); - SimulatorRuntimeProfilingApiCall target = - reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external); void* arg1 = Redirection::ReverseRedirection(xreg(1)); TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1); - target(xreg(0), arg1); + UnsafeProfilingApiCall(external, xreg(0), arg1); TraceSim("No return value."); #ifdef DEBUG 
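Both the ARM and ARM64 simulators now funnel runtime-call dispatch through small Unsafe* wrappers. As the comment explains, the reinterpret_cast of an arbitrary C++ function to a fixed signature is undefined behavior that cannot be avoided here, so it is confined to one tiny, separately named function that can be excluded from UBSan (for example via a function entry in a -fsanitize-blacklist file) without disabling checks for the whole simulator. Reduced to its essentials (the names below are made up for illustration):

#include <cstdint>

using TwoArgCall = int64_t (*)(int64_t, int64_t);

int64_t CallViaErasedPointer(intptr_t function, int64_t arg0, int64_t arg1) {
  // Calling through a pointer type that differs from the callee's real
  // signature is exactly what UBSan's function-type check flags; keeping the
  // cast-and-call here keeps the suppression scope minimal.
  TwoArgCall target = reinterpret_cast<TwoArgCall>(function);
  return target(arg0, arg1);
}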
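The DEBUG-only PopLowestIndexAsCode helper in the ARM64 simulator replaces CPURegList::PopLowestIndex().code() with an explicit bit-twiddling version. The underlying idea is simple enough to show in isolation (sketch over a plain uint64_t bit list; the real helper works on CPURegList and uses base::bits::CountTrailingZeros):

#include <cstdint>

int PopLowestIndexSketch(uint64_t* list) {
  if (*list == 0) return -1;
  int index = __builtin_ctzll(*list);  // lowest set bit index (GCC/Clang builtin)
  *list &= *list - 1;                  // clear that bit, i.e. remove the register
  return index;
}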
CorruptAllCallerSavedCPURegisters(); @@ -849,10 +886,12 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) { if (amount == 0) { return value; } + // Larger shift {amount}s would be undefined behavior in C++. + DCHECK(amount < sizeof(value) * kBitsPerByte); switch (shift_type) { case LSL: - return value << amount; + return static_cast<unsignedT>(value) << amount; case LSR: return static_cast<unsignedT>(value) >> amount; case ASR: @@ -873,6 +912,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) { const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8; const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8; const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8; + using unsignedT = typename std::make_unsigned<T>::type; switch (extend_type) { case UXTB: @@ -885,13 +925,19 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) { value &= kWordMask; break; case SXTB: - value = (value << kSignExtendBShift) >> kSignExtendBShift; + value = + static_cast<T>(static_cast<unsignedT>(value) << kSignExtendBShift) >> + kSignExtendBShift; break; case SXTH: - value = (value << kSignExtendHShift) >> kSignExtendHShift; + value = + static_cast<T>(static_cast<unsignedT>(value) << kSignExtendHShift) >> + kSignExtendHShift; break; case SXTW: - value = (value << kSignExtendWShift) >> kSignExtendWShift; + value = + static_cast<T>(static_cast<unsignedT>(value) << kSignExtendWShift) >> + kSignExtendWShift; break; case UXTX: case SXTX: @@ -899,7 +945,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) { default: UNREACHABLE(); } - return value << left_shift; + return static_cast<T>(static_cast<unsignedT>(value) << left_shift); } template <typename T> @@ -2283,7 +2329,9 @@ void Simulator::VisitConditionalSelect(Instruction* instr) { break; case CSNEG_w: case CSNEG_x: - new_val = (uint64_t)(-(int64_t)new_val); + // Simulate two's complement (instead of casting to signed and negating) + // to avoid undefined behavior on signed overflow. + new_val = (~new_val) + 1; break; default: UNIMPLEMENTED(); @@ -2446,23 +2494,27 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) { switch (instr->Mask(DataProcessing3SourceMask)) { case MADD_w: case MADD_x: - result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm())); + result = base::AddWithWraparound( + xreg(instr->Ra()), + base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm()))); break; case MSUB_w: case MSUB_x: - result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm())); + result = base::SubWithWraparound( + xreg(instr->Ra()), + base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm()))); break; case SMADDL_x: - result = xreg(instr->Ra()) + (rn_s32 * rm_s32); + result = base::AddWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32)); break; case SMSUBL_x: - result = xreg(instr->Ra()) - (rn_s32 * rm_s32); + result = base::SubWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32)); break; case UMADDL_x: - result = xreg(instr->Ra()) + (rn_u32 * rm_u32); + result = static_cast<uint64_t>(xreg(instr->Ra())) + (rn_u32 * rm_u32); break; case UMSUBL_x: - result = xreg(instr->Ra()) - (rn_u32 * rm_u32); + result = static_cast<uint64_t>(xreg(instr->Ra())) - (rn_u32 * rm_u32); break; case SMULH_x: DCHECK_EQ(instr->Ra(), kZeroRegCode); @@ -2488,10 +2540,10 @@ void Simulator::BitfieldHelper(Instruction* instr) { T diff = S - R; T mask; if (diff >= 0) { - mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1 + mask = diff < reg_size - 1 ? 
(static_cast<unsignedT>(1) << (diff + 1)) - 1 : static_cast<T>(-1); } else { - uint64_t umask = ((1LL << (S + 1)) - 1); + uint64_t umask = ((1ULL << (S + 1)) - 1); umask = (umask >> R) | (umask << (reg_size - R)); mask = static_cast<T>(umask); diff += reg_size; @@ -2522,11 +2574,15 @@ void Simulator::BitfieldHelper(Instruction* instr) { T dst = inzero ? 0 : reg<T>(instr->Rd()); T src = reg<T>(instr->Rn()); // Rotate source bitfield into place. - T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R)); + T result = R == 0 ? src + : (static_cast<unsignedT>(src) >> R) | + (static_cast<unsignedT>(src) << (reg_size - R)); // Determine the sign extension. - T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1; - T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0) - << (diff + 1); + T topbits_preshift = (static_cast<unsignedT>(1) << (reg_size - diff - 1)) - 1; + T signbits = + diff >= reg_size - 1 + ? 0 + : ((extend && ((src >> S) & 1) ? topbits_preshift : 0) << (diff + 1)); // Merge sign extension, dest/zero and bitfield. result = signbits | (result & mask) | (dst & ~mask); diff --git a/chromium/v8/src/execution/frames.cc b/chromium/v8/src/execution/frames.cc index 3b334739da3..04768a365c2 100644 --- a/chromium/v8/src/execution/frames.cc +++ b/chromium/v8/src/execution/frames.cc @@ -553,14 +553,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator, return WASM_EXIT; case wasm::WasmCode::kWasmToJsWrapper: return WASM_TO_JS; - case wasm::WasmCode::kRuntimeStub: - // Some stubs, like e.g. {WasmCode::kWasmCompileLazy} build their own - // specialized frame which already carries a type marker. - // TODO(mstarzinger): This is only needed for the case where embedded - // builtins are disabled. It can be removed once all non-embedded - // builtins are gone. - if (StackFrame::IsTypeMarker(marker)) break; - return STUB; case wasm::WasmCode::kInterpreterEntry: return WASM_INTERPRETER_ENTRY; default: @@ -1079,13 +1071,12 @@ Address StubFrame::GetCallerStackPointer() const { return fp() + ExitFrameConstants::kCallerSPOffset; } -int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) { +int StubFrame::LookupExceptionHandlerInTable() { Code code = LookupCode(); DCHECK(code.is_turbofanned()); DCHECK_EQ(code.kind(), Code::BUILTIN); HandlerTable table(code); int pc_offset = static_cast<int>(pc() - code.InstructionStart()); - *stack_slots = code.stack_slots(); return table.LookupReturn(pc_offset); } @@ -1271,6 +1262,7 @@ void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function, if (maybe_script.IsScript()) { Script script = Script::cast(maybe_script); ic_info.line_num = script.GetLineNumber(source_pos) + 1; + ic_info.column_num = script.GetColumnNumber(source_pos); ic_info.script_name = ic_stats->GetOrCacheScriptName(script); } } @@ -1627,7 +1619,7 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const { } int OptimizedFrame::LookupExceptionHandlerInTable( - int* stack_slots, HandlerTable::CatchPrediction* prediction) { + int* data, HandlerTable::CatchPrediction* prediction) { // We cannot perform exception prediction on optimized code. Instead, we need // to use FrameSummary to find the corresponding code offset in unoptimized // code to perform prediction there. 
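The ExtendValue and CSNEG changes in the ARM64 simulator above rely on two related idioms for avoiding undefined behavior on signed integers. Shown stand-alone with simplified 64-bit types (assumed intent, not the exact production code):

#include <cstdint>

// Sign-extend the low byte of a 64-bit value. Left-shifting a negative signed
// value is undefined behavior, so the left shift happens on the unsigned
// type; the arithmetic right shift back on the signed type then replicates
// the sign bit (implementation-defined, but sign-extending on the compilers
// V8 targets).
int64_t SignExtendByte(int64_t value) {
  const unsigned kShift = (sizeof(int64_t) - 1) * 8;  // 56
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kShift) >> kShift;
}

// Two's-complement negation on the unsigned representation; well defined even
// for the bit pattern of the most negative value, where signed negation would
// overflow.
uint64_t NegateBits(uint64_t value) { return ~value + 1; }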
@@ -1635,7 +1627,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable( Code code = LookupCode(); HandlerTable table(code); int pc_offset = static_cast<int>(pc() - code.InstructionStart()); - if (stack_slots) *stack_slots = code.stack_slots(); + DCHECK_NULL(data); // Data is not used and will not return a value. // When the return pc has been replaced by a trampoline there won't be // a handler for this trampoline. Thus we need to use the return pc that @@ -1676,8 +1668,8 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData( Object OptimizedFrame::receiver() const { Code code = LookupCode(); if (code.kind() == Code::BUILTIN) { - Address argc_ptr = fp() + OptimizedBuiltinFrameConstants::kArgCOffset; - intptr_t argc = *reinterpret_cast<intptr_t*>(argc_ptr); + intptr_t argc = static_cast<int>( + Memory<intptr_t>(fp() + OptimizedBuiltinFrameConstants::kArgCOffset)); intptr_t args_size = (StandardFrameConstants::kFixedSlotCountAboveFp + argc) * kSystemPointerSize; @@ -1950,15 +1942,13 @@ bool WasmCompiledFrame::at_to_number_conversion() const { return !!pos; } -int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) { - DCHECK_NOT_NULL(stack_slots); +int WasmCompiledFrame::LookupExceptionHandlerInTable() { wasm::WasmCode* code = isolate()->wasm_engine()->code_manager()->LookupCode(pc()); if (!code->IsAnonymous() && code->handler_table_size() > 0) { HandlerTable table(code->handler_table(), code->handler_table_size(), HandlerTable::kReturnAddressBasedEncoding); int pc_offset = static_cast<int>(pc() - code->instruction_start()); - *stack_slots = static_cast<int>(code->stack_slots()); return table.LookupReturn(pc_offset); } return -1; diff --git a/chromium/v8/src/execution/frames.h b/chromium/v8/src/execution/frames.h index d1e7a7890d6..165ff854647 100644 --- a/chromium/v8/src/execution/frames.h +++ b/chromium/v8/src/execution/frames.h @@ -145,7 +145,12 @@ class StackFrame { intptr_t type = marker >> kSmiTagSize; // TODO(petermarshall): There is a bug in the arm simulators that causes // invalid frame markers. -#if !(defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)) +#if defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM) + if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) { + // Appease UBSan. + return Type::NUMBER_OF_TYPES; + } +#else DCHECK_LT(static_cast<uintptr_t>(type), Type::NUMBER_OF_TYPES); #endif return static_cast<Type>(type); @@ -733,7 +738,7 @@ class JavaScriptFrame : public StandardFrame { // Lookup exception handler for current {pc}, returns -1 if none found. Also // returns data associated with the handler site specific to the frame type: - // - OptimizedFrame : Data is the stack slot count of the entire frame. + // - OptimizedFrame : Data is not used and will not return a value. // - InterpretedFrame: Data is the register index holding the context. virtual int LookupExceptionHandlerInTable( int* data, HandlerTable::CatchPrediction* prediction); @@ -783,10 +788,8 @@ class StubFrame : public StandardFrame { Code unchecked_code() const override; // Lookup exception handler for current {pc}, returns -1 if none found. Only - // TurboFan stub frames are supported. Also returns data associated with the - // handler site: - // - TurboFan stub: Data is the stack slot count of the entire frame. - int LookupExceptionHandlerInTable(int* data); + // TurboFan stub frames are supported. 
+ int LookupExceptionHandlerInTable(); protected: inline explicit StubFrame(StackFrameIteratorBase* iterator); @@ -938,9 +941,8 @@ class WasmCompiledFrame : public StandardFrame { void Print(StringStream* accumulator, PrintMode mode, int index) const override; - // Lookup exception handler for current {pc}, returns -1 if none found. Also - // returns the stack slot count of the entire frame. - int LookupExceptionHandlerInTable(int* data); + // Lookup exception handler for current {pc}, returns -1 if none found. + int LookupExceptionHandlerInTable(); // Determine the code for the frame. Code unchecked_code() const override; diff --git a/chromium/v8/src/execution/isolate-inl.h b/chromium/v8/src/execution/isolate-inl.h index e1b021b921d..091b185a302 100644 --- a/chromium/v8/src/execution/isolate-inl.h +++ b/chromium/v8/src/execution/isolate-inl.h @@ -113,61 +113,6 @@ Isolate::ExceptionScope::~ExceptionScope() { NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR) #undef NATIVE_CONTEXT_FIELD_ACCESSOR -bool Isolate::IsArrayConstructorIntact() { - Cell array_constructor_cell = - Cell::cast(root(RootIndex::kArrayConstructorProtector)); - return array_constructor_cell.value() == Smi::FromInt(kProtectorValid); -} - -bool Isolate::IsTypedArraySpeciesLookupChainIntact() { - PropertyCell species_cell = - PropertyCell::cast(root(RootIndex::kTypedArraySpeciesProtector)); - return species_cell.value().IsSmi() && - Smi::ToInt(species_cell.value()) == kProtectorValid; -} - -bool Isolate::IsPromiseSpeciesLookupChainIntact() { - PropertyCell species_cell = - PropertyCell::cast(root(RootIndex::kPromiseSpeciesProtector)); - return species_cell.value().IsSmi() && - Smi::ToInt(species_cell.value()) == kProtectorValid; -} - -bool Isolate::IsStringLengthOverflowIntact() { - Cell string_length_cell = Cell::cast(root(RootIndex::kStringLengthProtector)); - return string_length_cell.value() == Smi::FromInt(kProtectorValid); -} - -bool Isolate::IsArrayBufferDetachingIntact() { - PropertyCell buffer_detaching = - PropertyCell::cast(root(RootIndex::kArrayBufferDetachingProtector)); - return buffer_detaching.value() == Smi::FromInt(kProtectorValid); -} - -bool Isolate::IsArrayIteratorLookupChainIntact() { - PropertyCell array_iterator_cell = - PropertyCell::cast(root(RootIndex::kArrayIteratorProtector)); - return array_iterator_cell.value() == Smi::FromInt(kProtectorValid); -} - -bool Isolate::IsMapIteratorLookupChainIntact() { - PropertyCell map_iterator_cell = - PropertyCell::cast(root(RootIndex::kMapIteratorProtector)); - return map_iterator_cell.value() == Smi::FromInt(kProtectorValid); -} - -bool Isolate::IsSetIteratorLookupChainIntact() { - PropertyCell set_iterator_cell = - PropertyCell::cast(root(RootIndex::kSetIteratorProtector)); - return set_iterator_cell.value() == Smi::FromInt(kProtectorValid); -} - -bool Isolate::IsStringIteratorLookupChainIntact() { - PropertyCell string_iterator_cell = - PropertyCell::cast(root(RootIndex::kStringIteratorProtector)); - return string_iterator_cell.value() == Smi::FromInt(kProtectorValid); -} - } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/execution/isolate.cc b/chromium/v8/src/execution/isolate.cc index ad54ae0a7cb..e2d5ce8a40a 100644 --- a/chromium/v8/src/execution/isolate.cc +++ b/chromium/v8/src/execution/isolate.cc @@ -15,7 +15,6 @@ #include "src/api/api-inl.h" #include "src/ast/ast-value-factory.h" #include "src/ast/scopes.h" -#include "src/base/adapters.h" #include "src/base/hashmap.h" #include "src/base/platform/platform.h" #include 
"src/base/sys-info.h" @@ -36,6 +35,7 @@ #include "src/execution/isolate-inl.h" #include "src/execution/messages.h" #include "src/execution/microtask-queue.h" +#include "src/execution/protectors-inl.h" #include "src/execution/runtime-profiler.h" #include "src/execution/simulator.h" #include "src/execution/v8threads.h" @@ -51,6 +51,7 @@ #include "src/logging/counters.h" #include "src/logging/log.h" #include "src/numbers/hash-seed-inl.h" +#include "src/objects/backing-store.h" #include "src/objects/elements.h" #include "src/objects/frame-array-inl.h" #include "src/objects/hash-table-inl.h" @@ -320,7 +321,9 @@ Isolate::FindOrAllocatePerThreadDataForThisThread() { base::MutexGuard lock_guard(&thread_data_table_mutex_); per_thread = thread_data_table_.Lookup(thread_id); if (per_thread == nullptr) { - base::OS::AdjustSchedulingParams(); + if (FLAG_adjust_os_scheduling_parameters) { + base::OS::AdjustSchedulingParams(); + } per_thread = new PerIsolateThreadData(this, thread_id); thread_data_table_.Insert(per_thread); } @@ -1703,22 +1706,20 @@ Object Isolate::UnwindAndFindHandler() { // currently being executed. wasm::WasmCodeRefScope code_ref_scope; WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame); - int stack_slots = 0; // Will contain stack slot count of frame. - int offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots); + wasm::WasmCode* wasm_code = + wasm_engine()->code_manager()->LookupCode(frame->pc()); + int offset = wasm_frame->LookupExceptionHandlerInTable(); if (offset < 0) break; // Compute the stack pointer from the frame pointer. This ensures that // argument slots on the stack are dropped as returning would. Address return_sp = frame->fp() + StandardFrameConstants::kFixedFrameSizeAboveFp - - stack_slots * kSystemPointerSize; + wasm_code->stack_slots() * kSystemPointerSize; // This is going to be handled by Wasm, so we need to set the TLS flag // again. It was cleared above assuming the frame would be unwound. trap_handler::SetThreadInWasm(); - // Gather information from the frame. - wasm::WasmCode* wasm_code = - wasm_engine()->code_manager()->LookupCode(frame->pc()); return FoundHandler(Context(), wasm_code->instruction_start(), offset, wasm_code->constant_pool(), return_sp, frame->fp()); } @@ -1737,18 +1738,14 @@ Object Isolate::UnwindAndFindHandler() { // For optimized frames we perform a lookup in the handler table. if (!catchable_by_js) break; OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame); - int stack_slots = 0; // Will contain stack slot count of frame. - int offset = - js_frame->LookupExceptionHandlerInTable(&stack_slots, nullptr); + Code code = frame->LookupCode(); + int offset = js_frame->LookupExceptionHandlerInTable(nullptr, nullptr); if (offset < 0) break; // Compute the stack pointer from the frame pointer. This ensures // that argument slots on the stack are dropped as returning would. Address return_sp = frame->fp() + StandardFrameConstants::kFixedFrameSizeAboveFp - - stack_slots * kSystemPointerSize; - - // Gather information from the frame. - Code code = frame->LookupCode(); + code.stack_slots() * kSystemPointerSize; // TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED, // but do not have a code kind of OPTIMIZED_FUNCTION. @@ -1769,31 +1766,24 @@ Object Isolate::UnwindAndFindHandler() { // Some stubs are able to handle exceptions. 
if (!catchable_by_js) break; StubFrame* stub_frame = static_cast<StubFrame*>(frame); +#ifdef DEBUG wasm::WasmCodeRefScope code_ref_scope; - wasm::WasmCode* wasm_code = - wasm_engine()->code_manager()->LookupCode(frame->pc()); - if (wasm_code != nullptr) { - // It is safe to skip Wasm runtime stubs as none of them contain local - // exception handlers. - CHECK_EQ(wasm::WasmCode::kRuntimeStub, wasm_code->kind()); - CHECK_EQ(0, wasm_code->handler_table_size()); - break; - } + DCHECK_NULL(wasm_engine()->code_manager()->LookupCode(frame->pc())); +#endif // DEBUG Code code = stub_frame->LookupCode(); if (!code.IsCode() || code.kind() != Code::BUILTIN || !code.has_handler_table() || !code.is_turbofanned()) { break; } - int stack_slots = 0; // Will contain stack slot count of frame. - int offset = stub_frame->LookupExceptionHandlerInTable(&stack_slots); + int offset = stub_frame->LookupExceptionHandlerInTable(); if (offset < 0) break; // Compute the stack pointer from the frame pointer. This ensures // that argument slots on the stack are dropped as returning would. Address return_sp = frame->fp() + StandardFrameConstants::kFixedFrameSizeAboveFp - - stack_slots * kSystemPointerSize; + code.stack_slots() * kSystemPointerSize; return FoundHandler(Context(), code.InstructionStart(), offset, code.constant_pool(), return_sp, frame->fp()); @@ -2065,7 +2055,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) { for (int i = 0; i < frames->length(); ++i) { Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this); - SerializeStackTraceFrame(this, frame, builder); + SerializeStackTraceFrame(this, frame, &builder); } Handle<String> stack_trace = builder.Finish().ToHandleChecked(); @@ -2823,7 +2813,7 @@ Isolate* Isolate::New(IsolateAllocationMode mode) { // IsolateAllocator allocates the memory for the Isolate object according to // the given allocation mode. std::unique_ptr<IsolateAllocator> isolate_allocator = - base::make_unique<IsolateAllocator>(mode); + std::make_unique<IsolateAllocator>(mode); // Construct Isolate object in the allocated memory. 
void* isolate_ptr = isolate_allocator->isolate_memory(); Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator)); @@ -2988,7 +2978,7 @@ void Isolate::Deinit() { optimizing_compile_dispatcher_ = nullptr; } - wasm_engine()->memory_tracker()->DeleteSharedMemoryObjectsOnIsolate(this); + BackingStore::RemoveSharedWasmMemoryObjects(this); heap_.mark_compact_collector()->EnsureSweepingCompleted(); heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted(); @@ -3807,308 +3797,12 @@ bool Isolate::IsInAnyContext(Object object, uint32_t index) { return false; } -bool Isolate::IsNoElementsProtectorIntact(Context context) { - PropertyCell no_elements_cell = heap()->no_elements_protector(); - bool cell_reports_intact = - no_elements_cell.value().IsSmi() && - Smi::ToInt(no_elements_cell.value()) == kProtectorValid; - -#ifdef DEBUG - Context native_context = context.native_context(); - - Map root_array_map = - native_context.GetInitialJSArrayMap(GetInitialFastElementsKind()); - JSObject initial_array_proto = JSObject::cast( - native_context.get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)); - JSObject initial_object_proto = JSObject::cast( - native_context.get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX)); - JSObject initial_string_proto = JSObject::cast( - native_context.get(Context::INITIAL_STRING_PROTOTYPE_INDEX)); - - if (root_array_map.is_null() || initial_array_proto == initial_object_proto) { - // We are in the bootstrapping process, and the entire check sequence - // shouldn't be performed. - return cell_reports_intact; - } - - // Check that the array prototype hasn't been altered WRT empty elements. - if (root_array_map.prototype() != initial_array_proto) { - DCHECK_EQ(false, cell_reports_intact); - return cell_reports_intact; - } - - FixedArrayBase elements = initial_array_proto.elements(); - ReadOnlyRoots roots(heap()); - if (elements != roots.empty_fixed_array() && - elements != roots.empty_slow_element_dictionary()) { - DCHECK_EQ(false, cell_reports_intact); - return cell_reports_intact; - } - - // Check that the Object.prototype hasn't been altered WRT empty elements. - elements = initial_object_proto.elements(); - if (elements != roots.empty_fixed_array() && - elements != roots.empty_slow_element_dictionary()) { - DCHECK_EQ(false, cell_reports_intact); - return cell_reports_intact; - } - - // Check that the Array.prototype has the Object.prototype as its - // [[Prototype]] and that the Object.prototype has a null [[Prototype]]. - PrototypeIterator iter(this, initial_array_proto); - if (iter.IsAtEnd() || iter.GetCurrent() != initial_object_proto) { - DCHECK_EQ(false, cell_reports_intact); - DCHECK(!has_pending_exception()); - return cell_reports_intact; - } - iter.Advance(); - if (!iter.IsAtEnd()) { - DCHECK_EQ(false, cell_reports_intact); - DCHECK(!has_pending_exception()); - return cell_reports_intact; - } - DCHECK(!has_pending_exception()); - - // Check that the String.prototype hasn't been altered WRT empty elements. - elements = initial_string_proto.elements(); - if (elements != roots.empty_fixed_array() && - elements != roots.empty_slow_element_dictionary()) { - DCHECK_EQ(false, cell_reports_intact); - return cell_reports_intact; - } - - // Check that the String.prototype has the Object.prototype - // as its [[Prototype]] still. 
- if (initial_string_proto.map().prototype() != initial_object_proto) { - DCHECK_EQ(false, cell_reports_intact); - return cell_reports_intact; - } -#endif - - return cell_reports_intact; -} - -bool Isolate::IsNoElementsProtectorIntact() { - return Isolate::IsNoElementsProtectorIntact(context()); -} - -bool Isolate::IsIsConcatSpreadableLookupChainIntact() { - Cell is_concat_spreadable_cell = heap()->is_concat_spreadable_protector(); - bool is_is_concat_spreadable_set = - Smi::ToInt(is_concat_spreadable_cell.value()) == kProtectorInvalid; -#ifdef DEBUG - Map root_array_map = - raw_native_context().GetInitialJSArrayMap(GetInitialFastElementsKind()); - if (root_array_map.is_null()) { - // Ignore the value of is_concat_spreadable during bootstrap. - return !is_is_concat_spreadable_set; - } - Handle<Object> array_prototype(array_function()->prototype(), this); - Handle<Symbol> key = factory()->is_concat_spreadable_symbol(); - Handle<Object> value; - LookupIterator it(this, array_prototype, key); - if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined(this)) { - // TODO(cbruni): Currently we do not revert if we unset the - // @@isConcatSpreadable property on Array.prototype or Object.prototype - // hence the reverse implication doesn't hold. - DCHECK(is_is_concat_spreadable_set); - return false; - } -#endif // DEBUG - - return !is_is_concat_spreadable_set; -} - -bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver) { - if (!IsIsConcatSpreadableLookupChainIntact()) return false; - return !receiver.HasProxyInPrototype(this); -} - -bool Isolate::IsPromiseHookProtectorIntact() { - PropertyCell promise_hook_cell = heap()->promise_hook_protector(); - bool is_promise_hook_protector_intact = - Smi::ToInt(promise_hook_cell.value()) == kProtectorValid; - DCHECK_IMPLIES(is_promise_hook_protector_intact, - !promise_hook_or_async_event_delegate_); - DCHECK_IMPLIES(is_promise_hook_protector_intact, - !promise_hook_or_debug_is_active_or_async_event_delegate_); - return is_promise_hook_protector_intact; -} - -bool Isolate::IsPromiseResolveLookupChainIntact() { - Cell promise_resolve_cell = heap()->promise_resolve_protector(); - bool is_promise_resolve_protector_intact = - Smi::ToInt(promise_resolve_cell.value()) == kProtectorValid; - return is_promise_resolve_protector_intact; -} - -bool Isolate::IsPromiseThenLookupChainIntact() { - PropertyCell promise_then_cell = heap()->promise_then_protector(); - bool is_promise_then_protector_intact = - Smi::ToInt(promise_then_cell.value()) == kProtectorValid; - return is_promise_then_protector_intact; -} - -bool Isolate::IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver) { - DisallowHeapAllocation no_gc; - if (!receiver->IsJSPromise()) return false; - if (!IsInAnyContext(receiver->map().prototype(), - Context::PROMISE_PROTOTYPE_INDEX)) { - return false; - } - return IsPromiseThenLookupChainIntact(); -} - void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) { DisallowHeapAllocation no_gc; if (!object->map().is_prototype_map()) return; - if (!IsNoElementsProtectorIntact()) return; + if (!Protectors::IsNoElementsIntact(this)) return; if (!IsArrayOrObjectOrStringPrototype(*object)) return; - PropertyCell::SetValueWithInvalidation( - this, "no_elements_protector", factory()->no_elements_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); -} - -void Isolate::TraceProtectorInvalidation(const char* protector_name) { - static constexpr char kInvalidateProtectorTracingCategory[] = - "V8.InvalidateProtector"; 
- static constexpr char kInvalidateProtectorTracingArg[] = "protector-name"; - - DCHECK(FLAG_trace_protector_invalidation); - - // TODO(jgruber): Remove the PrintF once tracing can output to stdout. - i::PrintF("Invalidating protector cell %s in isolate %p\n", protector_name, - this); - TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory, - TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg, - protector_name); -} - -void Isolate::InvalidateIsConcatSpreadableProtector() { - DCHECK(factory()->is_concat_spreadable_protector()->value().IsSmi()); - DCHECK(IsIsConcatSpreadableLookupChainIntact()); - if (FLAG_trace_protector_invalidation) { - TraceProtectorInvalidation("is_concat_spreadable_protector"); - } - factory()->is_concat_spreadable_protector()->set_value( - Smi::FromInt(kProtectorInvalid)); - DCHECK(!IsIsConcatSpreadableLookupChainIntact()); -} - -void Isolate::InvalidateArrayConstructorProtector() { - DCHECK(factory()->array_constructor_protector()->value().IsSmi()); - DCHECK(IsArrayConstructorIntact()); - if (FLAG_trace_protector_invalidation) { - TraceProtectorInvalidation("array_constructor_protector"); - } - factory()->array_constructor_protector()->set_value( - Smi::FromInt(kProtectorInvalid)); - DCHECK(!IsArrayConstructorIntact()); -} - -void Isolate::InvalidateTypedArraySpeciesProtector() { - DCHECK(factory()->typed_array_species_protector()->value().IsSmi()); - DCHECK(IsTypedArraySpeciesLookupChainIntact()); - PropertyCell::SetValueWithInvalidation( - this, "typed_array_species_protector", - factory()->typed_array_species_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsTypedArraySpeciesLookupChainIntact()); -} - -void Isolate::InvalidatePromiseSpeciesProtector() { - DCHECK(factory()->promise_species_protector()->value().IsSmi()); - DCHECK(IsPromiseSpeciesLookupChainIntact()); - PropertyCell::SetValueWithInvalidation( - this, "promise_species_protector", factory()->promise_species_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsPromiseSpeciesLookupChainIntact()); -} - -void Isolate::InvalidateStringLengthOverflowProtector() { - DCHECK(factory()->string_length_protector()->value().IsSmi()); - DCHECK(IsStringLengthOverflowIntact()); - if (FLAG_trace_protector_invalidation) { - TraceProtectorInvalidation("string_length_protector"); - } - factory()->string_length_protector()->set_value( - Smi::FromInt(kProtectorInvalid)); - DCHECK(!IsStringLengthOverflowIntact()); -} - -void Isolate::InvalidateArrayIteratorProtector() { - DCHECK(factory()->array_iterator_protector()->value().IsSmi()); - DCHECK(IsArrayIteratorLookupChainIntact()); - PropertyCell::SetValueWithInvalidation( - this, "array_iterator_protector", factory()->array_iterator_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsArrayIteratorLookupChainIntact()); -} - -void Isolate::InvalidateMapIteratorProtector() { - DCHECK(factory()->map_iterator_protector()->value().IsSmi()); - DCHECK(IsMapIteratorLookupChainIntact()); - PropertyCell::SetValueWithInvalidation( - this, "map_iterator_protector", factory()->map_iterator_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsMapIteratorLookupChainIntact()); -} - -void Isolate::InvalidateSetIteratorProtector() { - DCHECK(factory()->set_iterator_protector()->value().IsSmi()); - DCHECK(IsSetIteratorLookupChainIntact()); - PropertyCell::SetValueWithInvalidation( - this, "set_iterator_protector", factory()->set_iterator_protector(), - 
handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsSetIteratorLookupChainIntact()); -} - -void Isolate::InvalidateStringIteratorProtector() { - DCHECK(factory()->string_iterator_protector()->value().IsSmi()); - DCHECK(IsStringIteratorLookupChainIntact()); - PropertyCell::SetValueWithInvalidation( - this, "string_iterator_protector", factory()->string_iterator_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsStringIteratorLookupChainIntact()); -} - -void Isolate::InvalidateArrayBufferDetachingProtector() { - DCHECK(factory()->array_buffer_detaching_protector()->value().IsSmi()); - DCHECK(IsArrayBufferDetachingIntact()); - PropertyCell::SetValueWithInvalidation( - this, "array_buffer_detaching_protector", - factory()->array_buffer_detaching_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsArrayBufferDetachingIntact()); -} - -void Isolate::InvalidatePromiseHookProtector() { - DCHECK(factory()->promise_hook_protector()->value().IsSmi()); - DCHECK(IsPromiseHookProtectorIntact()); - PropertyCell::SetValueWithInvalidation( - this, "promise_hook_protector", factory()->promise_hook_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsPromiseHookProtectorIntact()); -} - -void Isolate::InvalidatePromiseResolveProtector() { - DCHECK(factory()->promise_resolve_protector()->value().IsSmi()); - DCHECK(IsPromiseResolveLookupChainIntact()); - if (FLAG_trace_protector_invalidation) { - TraceProtectorInvalidation("promise_resolve_protector"); - } - factory()->promise_resolve_protector()->set_value( - Smi::FromInt(kProtectorInvalid)); - DCHECK(!IsPromiseResolveLookupChainIntact()); -} - -void Isolate::InvalidatePromiseThenProtector() { - DCHECK(factory()->promise_then_protector()->value().IsSmi()); - DCHECK(IsPromiseThenLookupChainIntact()); - PropertyCell::SetValueWithInvalidation( - this, "promise_then_protector", factory()->promise_then_protector(), - handle(Smi::FromInt(kProtectorInvalid), this)); - DCHECK(!IsPromiseThenLookupChainIntact()); + Protectors::InvalidateNoElements(this); } bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) { @@ -4258,9 +3952,9 @@ void Isolate::PromiseHookStateUpdated() { bool promise_hook_or_debug_is_active_or_async_event_delegate = promise_hook_or_async_event_delegate || debug()->is_active(); if (promise_hook_or_debug_is_active_or_async_event_delegate && - IsPromiseHookProtectorIntact()) { + Protectors::IsPromiseHookIntact(this)) { HandleScope scope(this); - InvalidatePromiseHookProtector(); + Protectors::InvalidatePromiseHook(this); } promise_hook_or_async_event_delegate_ = promise_hook_or_async_event_delegate; promise_hook_or_debug_is_active_or_async_event_delegate_ = @@ -4580,12 +4274,20 @@ void Isolate::AddDetachedContext(Handle<Context> context) { HandleScope scope(this); Handle<WeakArrayList> detached_contexts = factory()->detached_contexts(); detached_contexts = WeakArrayList::AddToEnd( - this, detached_contexts, MaybeObjectHandle(Smi::kZero, this)); - detached_contexts = WeakArrayList::AddToEnd(this, detached_contexts, - MaybeObjectHandle::Weak(context)); + this, detached_contexts, MaybeObjectHandle(Smi::kZero, this), + MaybeObjectHandle::Weak(context)); heap()->set_detached_contexts(*detached_contexts); } +void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) { + HandleScope scope(this); + Handle<WeakArrayList> shared_wasm_memories = + factory()->shared_wasm_memories(); + shared_wasm_memories = WeakArrayList::AddToEnd( + this, shared_wasm_memories, 
MaybeObjectHandle::Weak(memory_object)); + heap()->set_shared_wasm_memories(*shared_wasm_memories); +} + void Isolate::CheckDetachedContextsAfterGC() { HandleScope scope(this); Handle<WeakArrayList> detached_contexts = factory()->detached_contexts(); diff --git a/chromium/v8/src/execution/isolate.h b/chromium/v8/src/execution/isolate.h index 4eadb42438f..20aea6066c9 100644 --- a/chromium/v8/src/execution/isolate.h +++ b/chromium/v8/src/execution/isolate.h @@ -1163,87 +1163,8 @@ class Isolate final : private HiddenFactory { #endif // V8_INTL_SUPPORT - static const int kProtectorValid = 1; - static const int kProtectorInvalid = 0; - - inline bool IsArrayConstructorIntact(); - - // The version with an explicit context parameter can be used when - // Isolate::context is not set up, e.g. when calling directly into C++ from - // CSA. - bool IsNoElementsProtectorIntact(Context context); - V8_EXPORT_PRIVATE bool IsNoElementsProtectorIntact(); - bool IsArrayOrObjectOrStringPrototype(Object object); - inline bool IsTypedArraySpeciesLookupChainIntact(); - - // Check that the @@species protector is intact, which guards the lookup of - // "constructor" on JSPromise instances, whose [[Prototype]] is the initial - // %PromisePrototype%, and the Symbol.species lookup on the - // %PromisePrototype%. - inline bool IsPromiseSpeciesLookupChainIntact(); - - bool IsIsConcatSpreadableLookupChainIntact(); - bool IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver); - inline bool IsStringLengthOverflowIntact(); - inline bool IsArrayIteratorLookupChainIntact(); - - // The MapIterator protector protects the original iteration behaviors of - // Map.prototype.keys(), Map.prototype.values(), and Set.prototype.entries(). - // It does not protect the original iteration behavior of - // Map.prototype[Symbol.iterator](). The protector is invalidated when: - // * The 'next' property is set on an object where the property holder is the - // %MapIteratorPrototype% (e.g. because the object is that very prototype). - // * The 'Symbol.iterator' property is set on an object where the property - // holder is the %IteratorPrototype%. Note that this also invalidates the - // SetIterator protector (see below). - inline bool IsMapIteratorLookupChainIntact(); - - // The SetIterator protector protects the original iteration behavior of - // Set.prototype.keys(), Set.prototype.values(), Set.prototype.entries(), - // and Set.prototype[Symbol.iterator](). The protector is invalidated when: - // * The 'next' property is set on an object where the property holder is the - // %SetIteratorPrototype% (e.g. because the object is that very prototype). - // * The 'Symbol.iterator' property is set on an object where the property - // holder is the %SetPrototype% OR %IteratorPrototype%. This means that - // setting Symbol.iterator on a MapIterator object can also invalidate the - // SetIterator protector, and vice versa, setting Symbol.iterator on a - // SetIterator object can also invalidate the MapIterator. This is an over- - // approximation for the sake of simplicity. - inline bool IsSetIteratorLookupChainIntact(); - - // The StringIteratorProtector protects the original string iteration behavior - // for primitive strings. As long as the StringIteratorProtector is valid, - // iterating over a primitive string is guaranteed to be unobservable from - // user code and can thus be cut short. 
More specifically, the protector gets - invalidated as soon as either String.prototype[Symbol.iterator] or - String.prototype[Symbol.iterator]().next is modified. This guarantee does - not apply to string objects (as opposed to primitives), since they could - define their own Symbol.iterator. - String.prototype itself does not need to be protected, since it is - non-configurable and non-writable. - inline bool IsStringIteratorLookupChainIntact(); - - // Make sure we do check for detached array buffers. - inline bool IsArrayBufferDetachingIntact(); - - // Disable promise optimizations if promise (debug) hooks have ever been - // active, because those can observe promises. - bool IsPromiseHookProtectorIntact(); - - // Make sure a lookup of "resolve" on the %Promise% intrinsic object - // yields the initial Promise.resolve method. - bool IsPromiseResolveLookupChainIntact(); - - // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the - // initial %PromisePrototype% yields the initial method. In addition this - // protector also guards the negative lookup of "then" on the intrinsic - // %ObjectPrototype%, meaning that such lookups are guaranteed to yield - // undefined without triggering any side-effects. - bool IsPromiseThenLookupChainIntact(); - bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver); - // On intent to set an element in object, make sure that appropriate // notifications occur if the set is on the elements of the array or // object prototype. Also ensure that changes to prototype chain between @@ -1259,24 +1180,6 @@ class Isolate final : private HiddenFactory { UpdateNoElementsProtectorOnSetElement(object); } - // The `protector_name` C string must be statically allocated. - void TraceProtectorInvalidation(const char* protector_name); - - void InvalidateArrayConstructorProtector(); - void InvalidateTypedArraySpeciesProtector(); - void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context); - void InvalidatePromiseSpeciesProtector(); - void InvalidateIsConcatSpreadableProtector(); - void InvalidateStringLengthOverflowProtector(); - void InvalidateArrayIteratorProtector(); - void InvalidateMapIteratorProtector(); - void InvalidateSetIteratorProtector(); - void InvalidateStringIteratorProtector(); - void InvalidateArrayBufferDetachingProtector(); - V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector(); - void InvalidatePromiseResolveProtector(); - void InvalidatePromiseThenProtector(); - // Returns true if array is the initial array prototype in any native context. 
bool IsAnyInitialArrayPrototype(Handle<JSArray> array); @@ -1406,6 +1309,8 @@ class Isolate final : private HiddenFactory { void AddDetachedContext(Handle<Context> context); void CheckDetachedContextsAfterGC(); + void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object); + std::vector<Object>* partial_snapshot_cache() { return &partial_snapshot_cache_; } @@ -1513,6 +1418,11 @@ class Isolate final : private HiddenFactory { bool HasPrepareStackTraceCallback() const; void SetAddCrashKeyCallback(AddCrashKeyCallback callback); + void AddCrashKey(CrashKeyId id, const std::string& value) { + if (add_crash_key_callback_) { + add_crash_key_callback_(id, value); + } + } void SetRAILMode(RAILMode rail_mode); diff --git a/chromium/v8/src/execution/messages.cc b/chromium/v8/src/execution/messages.cc index 63d1e2be1ff..96fb94cd4e6 100644 --- a/chromium/v8/src/execution/messages.cc +++ b/chromium/v8/src/execution/messages.cc @@ -603,7 +603,7 @@ int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); } int WasmStackFrame::GetModuleOffset() const { const int function_offset = - wasm_instance_->module_object().GetFunctionOffset(wasm_func_index_); + GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_); return function_offset + GetPosition(); } @@ -631,7 +631,7 @@ Handle<Object> AsmJsWasmStackFrame::GetReceiver() const { } Handle<Object> AsmJsWasmStackFrame::GetFunction() const { - // TODO(clemensh): Return lazily created JSFunction. + // TODO(clemensb): Return lazily created JSFunction. return Null(); } @@ -894,7 +894,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate, Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)), isolate); - SerializeStackTraceFrame(isolate, frame, builder); + SerializeStackTraceFrame(isolate, frame, &builder); if (isolate->has_pending_exception()) { // CallSite.toString threw. Parts of the current frame might have been diff --git a/chromium/v8/src/execution/protectors.cc b/chromium/v8/src/execution/protectors.cc index 3ac07eede38..b5b4c47a1bb 100644 --- a/chromium/v8/src/execution/protectors.cc +++ b/chromium/v8/src/execution/protectors.cc @@ -16,12 +16,32 @@ namespace v8 { namespace internal { +namespace { +void TraceProtectorInvalidation(const char* protector_name) { + DCHECK(FLAG_trace_protector_invalidation); + static constexpr char kInvalidateProtectorTracingCategory[] = + "V8.InvalidateProtector"; + static constexpr char kInvalidateProtectorTracingArg[] = "protector-name"; + + DCHECK(FLAG_trace_protector_invalidation); + + // TODO(jgruber): Remove the PrintF once tracing can output to stdout. 
+ i::PrintF("Invalidating protector cell %s", protector_name); + TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory, + TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg, + protector_name); +} +} // namespace + #define INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION(name, cell) \ void Protectors::Invalidate##name(Isolate* isolate, \ Handle<NativeContext> native_context) { \ DCHECK_EQ(*native_context, isolate->raw_native_context()); \ DCHECK(native_context->cell().value().IsSmi()); \ DCHECK(Is##name##Intact(native_context)); \ + if (FLAG_trace_protector_invalidation) { \ + TraceProtectorInvalidation(#name); \ + } \ Handle<PropertyCell> species_cell(native_context->cell(), isolate); \ PropertyCell::SetValueWithInvalidation( \ isolate, #cell, species_cell, \ @@ -36,6 +56,9 @@ DECLARED_PROTECTORS_ON_NATIVE_CONTEXT( void Protectors::Invalidate##name(Isolate* isolate) { \ DCHECK(isolate->factory()->cell()->value().IsSmi()); \ DCHECK(Is##name##Intact(isolate)); \ + if (FLAG_trace_protector_invalidation) { \ + TraceProtectorInvalidation(#name); \ + } \ PropertyCell::SetValueWithInvalidation( \ isolate, #cell, isolate->factory()->cell(), \ handle(Smi::FromInt(kProtectorInvalid), isolate)); \ diff --git a/chromium/v8/src/execution/protectors.h b/chromium/v8/src/execution/protectors.h index 5c54613bb19..4601f16cf01 100644 --- a/chromium/v8/src/execution/protectors.h +++ b/chromium/v8/src/execution/protectors.h @@ -18,19 +18,82 @@ class Protectors : public AllStatic { #define DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(V) \ V(RegExpSpeciesLookupChainProtector, regexp_species_protector) -#define DECLARED_PROTECTORS_ON_ISOLATE(V) \ - V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector) +#define DECLARED_PROTECTORS_ON_ISOLATE(V) \ + V(ArrayBufferDetaching, ArrayBufferDetachingProtector, \ + array_buffer_detaching_protector) \ + V(ArrayConstructor, ArrayConstructorProtector, array_constructor_protector) \ + V(ArrayIteratorLookupChain, ArrayIteratorProtector, \ + array_iterator_protector) \ + V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector) \ + V(IsConcatSpreadableLookupChain, IsConcatSpreadableProtector, \ + is_concat_spreadable_protector) \ + V(NoElements, NoElementsProtector, no_elements_protector) \ + \ + /* The MapIterator protector protects the original iteration behaviors */ \ + /* of Map.prototype.keys(), Map.prototype.values(), and */ \ + /* Set.prototype.entries(). It does not protect the original iteration */ \ + /* behavior of Map.prototype[Symbol.iterator](). */ \ + /* The protector is invalidated when: */ \ + /* * The 'next' property is set on an object where the property holder */ \ + /* is the %MapIteratorPrototype% (e.g. because the object is that very */ \ + /* prototype). */ \ + /* * The 'Symbol.iterator' property is set on an object where the */ \ + /* property holder is the %IteratorPrototype%. Note that this also */ \ + /* invalidates the SetIterator protector (see below). 
*/ \ + V(MapIteratorLookupChain, MapIteratorProtector, map_iterator_protector) \ + V(PromiseHook, PromiseHookProtector, promise_hook_protector) \ + V(PromiseThenLookupChain, PromiseThenProtector, promise_then_protector) \ + V(PromiseResolveLookupChain, PromiseResolveProtector, \ + promise_resolve_protector) \ + V(PromiseSpeciesLookupChain, PromiseSpeciesProtector, \ + promise_species_protector) \ + \ + /* The SetIterator protector protects the original iteration behavior of */ \ + /* Set.prototype.keys(), Set.prototype.values(), */ \ + /* Set.prototype.entries(), and Set.prototype[Symbol.iterator](). The */ \ + /* protector is invalidated when: */ \ + /* * The 'next' property is set on an object where the property holder */ \ + /* is the %SetIteratorPrototype% (e.g. because the object is that very */ \ + /* prototype). */ \ + /* * The 'Symbol.iterator' property is set on an object where the */ \ + /* property holder is the %SetPrototype% OR %IteratorPrototype%. This */ \ + /* means that setting Symbol.iterator on a MapIterator object can also */ \ + /* invalidate the SetIterator protector, and vice versa, setting */ \ + /* Symbol.iterator on a SetIterator object can also invalidate the */ \ + /* MapIterator. This is an over-approximation for the sake of */ \ + /* simplicity. */ \ + V(SetIteratorLookupChain, SetIteratorProtector, set_iterator_protector) \ + \ + /* The StringIteratorProtector protects the original string iteration */ \ + /* behavior for primitive strings. As long as the */ \ + /* StringIteratorProtector is valid, iterating over a primitive string */ \ + /* is guaranteed to be unobservable from user code and can thus be cut */ \ + /* short. More specifically, the protector gets invalidated as soon as */ \ + /* either String.prototype[Symbol.iterator] or */ \ + /* String.prototype[Symbol.iterator]().next is modified. This guarantee */ \ + /* does not apply to string objects (as opposed to primitives), since */ \ + /* they could define their own Symbol.iterator. */ \ + /* String.prototype itself does not need to be protected, since it is */ \ + /* non-configurable and non-writable. 
*/ \ + V(StringIteratorLookupChain, StringIteratorProtector, \ + string_iterator_protector) \ + V(StringLengthOverflowLookupChain, StringLengthProtector, \ + string_length_protector) \ + V(TypedArraySpeciesLookupChain, TypedArraySpeciesProtector, \ + typed_array_species_protector) + +#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \ + V8_EXPORT_PRIVATE static inline bool Is##name##Intact( \ + Handle<NativeContext> native_context); \ + V8_EXPORT_PRIVATE static void Invalidate##name( \ + Isolate* isolate, Handle<NativeContext> native_context); -#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \ - static inline bool Is##name##Intact(Handle<NativeContext> native_context); \ - static void Invalidate##name(Isolate* isolate, \ - Handle<NativeContext> native_context); DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DECLARE_PROTECTOR_ON_NATIVE_CONTEXT) #undef DECLARE_PROTECTOR_ON_NATIVE_CONTEXT #define DECLARE_PROTECTOR_ON_ISOLATE(name, unused_root_index, unused_cell) \ - static inline bool Is##name##Intact(Isolate* isolate); \ - static void Invalidate##name(Isolate* isolate); + V8_EXPORT_PRIVATE static inline bool Is##name##Intact(Isolate* isolate); \ + V8_EXPORT_PRIVATE static void Invalidate##name(Isolate* isolate); DECLARED_PROTECTORS_ON_ISOLATE(DECLARE_PROTECTOR_ON_ISOLATE) #undef DECLARE_PROTECTOR_ON_ISOLATE diff --git a/chromium/v8/src/execution/s390/frame-constants-s390.h b/chromium/v8/src/execution/s390/frame-constants-s390.h index a48a78fd42d..34ae136aada 100644 --- a/chromium/v8/src/execution/s390/frame-constants-s390.h +++ b/chromium/v8/src/execution/s390/frame-constants-s390.h @@ -14,7 +14,7 @@ namespace internal { class EntryFrameConstants : public AllStatic { public: static constexpr int kCallerFPOffset = - -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); + -(StandardFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize); // Stack offsets for arguments passed to JSEntry. static constexpr int kArgvOffset = 20 * kSystemPointerSize; }; @@ -25,13 +25,13 @@ class ExitFrameConstants : public TypedFrameConstants { DEFINE_TYPED_FRAME_SIZES(1); // The caller fields are below the frame pointer on the stack. - static constexpr int kCallerFPOffset = 0 * kPointerSize; + static constexpr int kCallerFPOffset = 0 * kSystemPointerSize; // The calling JS function is below FP. - static constexpr int kCallerPCOffset = 1 * kPointerSize; + static constexpr int kCallerPCOffset = 1 * kSystemPointerSize; // FP-relative displacement of the caller's SP. It points just // below the saved PC. - static constexpr int kCallerSPDisplacement = 2 * kPointerSize; + static constexpr int kCallerSPDisplacement = 2 * kSystemPointerSize; }; class WasmCompileLazyFrameConstants : public TypedFrameConstants { @@ -47,7 +47,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants { static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0); static constexpr int kFixedFrameSizeFromFp = TypedFrameConstants::kFixedFrameSizeFromFp + - kNumberOfSavedGpParamRegs * kPointerSize + + kNumberOfSavedGpParamRegs * kSystemPointerSize + kNumberOfSavedFpParamRegs * kDoubleSize; }; @@ -56,13 +56,13 @@ class JavaScriptFrameConstants : public AllStatic { // FP-relative. 
static constexpr int kLocal0Offset = StandardFrameConstants::kExpressionsOffset; - static constexpr int kLastParameterOffset = +2 * kPointerSize; + static constexpr int kLastParameterOffset = +2 * kSystemPointerSize; static constexpr int kFunctionOffset = StandardFrameConstants::kFunctionOffset; // Caller SP-relative. - static constexpr int kParam0Offset = -2 * kPointerSize; - static constexpr int kReceiverOffset = -1 * kPointerSize; + static constexpr int kParam0Offset = -2 * kSystemPointerSize; + static constexpr int kReceiverOffset = -1 * kSystemPointerSize; }; } // namespace internal diff --git a/chromium/v8/src/execution/simulator.h b/chromium/v8/src/execution/simulator.h index 9f98f2039bc..9c5cae7e971 100644 --- a/chromium/v8/src/execution/simulator.h +++ b/chromium/v8/src/execution/simulator.h @@ -115,15 +115,26 @@ class GeneratedCode { #ifdef USE_SIMULATOR // Defined in simulator-base.h. Return Call(Args... args) { +#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) + FATAL("Generated code execution not possible during cross-compilation."); +#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) return Simulator::current(isolate_)->template Call<Return>( reinterpret_cast<Address>(fn_ptr_), args...); } - DISABLE_CFI_ICALL Return CallIrregexp(Args... args) { return Call(args...); } + DISABLE_CFI_ICALL Return CallIrregexp(Args... args) { +#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) + FATAL("Generated code execution not possible during cross-compilation."); +#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) + return Call(args...); + } #else DISABLE_CFI_ICALL Return Call(Args... args) { // When running without a simulator we call the entry directly. +#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) + FATAL("Generated code execution not possible during cross-compilation."); +#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) #if V8_OS_AIX // AIX ABI requires function descriptors (FD). Artificially create a pseudo // FD to ensure correct dispatch to generated code. The 'volatile' @@ -141,6 +152,9 @@ class GeneratedCode { DISABLE_CFI_ICALL Return CallIrregexp(Args... args) { // When running without a simulator we call the entry directly. +#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) + FATAL("Generated code execution not possible during cross-compilation."); +#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) return fn_ptr_(args...); } #endif // USE_SIMULATOR diff --git a/chromium/v8/src/execution/stack-guard.cc b/chromium/v8/src/execution/stack-guard.cc index 1cf4c4605a6..d37327f1c3d 100644 --- a/chromium/v8/src/execution/stack-guard.cc +++ b/chromium/v8/src/execution/stack-guard.cc @@ -10,6 +10,7 @@ #include "src/execution/runtime-profiler.h" #include "src/execution/simulator.h" #include "src/logging/counters.h" +#include "src/objects/backing-store.h" #include "src/roots/roots-inl.h" #include "src/utils/memcopy.h" #include "src/wasm/wasm-engine.h" @@ -86,6 +87,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) { current->intercepted_flags_ &= ~scope->intercept_mask_; } thread_local_.interrupt_flags_ |= restored_flags; + + if (has_pending_interrupts(access)) set_interrupt_limits(access); } if (!has_pending_interrupts(access)) reset_limits(access); // Add scope to the chain. 
@@ -271,8 +274,7 @@ Object StackGuard::HandleInterrupts() { if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "V8.WasmGrowSharedMemory"); - isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances( - isolate_); + BackingStore::UpdateSharedWasmMemoryObjects(isolate_); } if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) { @@ -305,8 +307,6 @@ Object StackGuard::HandleInterrupts() { } isolate_->counters()->stack_interrupts()->Increment(); - isolate_->counters()->runtime_profiler_ticks()->Increment(); - isolate_->runtime_profiler()->MarkCandidatesForOptimization(); return ReadOnlyRoots(isolate_).undefined_value(); } diff --git a/chromium/v8/src/execution/x64/frame-constants-x64.h b/chromium/v8/src/execution/x64/frame-constants-x64.h index 5af35b1b3bf..49d69829f03 100644 --- a/chromium/v8/src/execution/x64/frame-constants-x64.h +++ b/chromium/v8/src/execution/x64/frame-constants-x64.h @@ -13,7 +13,7 @@ namespace internal { class EntryFrameConstants : public AllStatic { public: -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN static constexpr int kCalleeSaveXMMRegisters = 10; static constexpr int kXMMRegisterSize = 16; static constexpr int kXMMRegistersBlockSize = diff --git a/chromium/v8/src/extensions/free-buffer-extension.cc b/chromium/v8/src/extensions/free-buffer-extension.cc index 975e9543c82..9fdfe920eb8 100644 --- a/chromium/v8/src/extensions/free-buffer-extension.cc +++ b/chromium/v8/src/extensions/free-buffer-extension.cc @@ -21,9 +21,8 @@ void FreeBufferExtension::FreeBuffer( const v8::FunctionCallbackInfo<v8::Value>& args) { v8::Local<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>(); v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize(); - Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate()); - isolate->array_buffer_allocator()->Free(contents.Data(), - contents.ByteLength()); + contents.Deleter()(contents.Data(), contents.ByteLength(), + contents.DeleterData()); } } // namespace internal diff --git a/chromium/v8/src/extensions/gc-extension.cc b/chromium/v8/src/extensions/gc-extension.cc index 4f446627fd0..fddd40b3525 100644 --- a/chromium/v8/src/extensions/gc-extension.cc +++ b/chromium/v8/src/extensions/gc-extension.cc @@ -4,23 +4,160 @@ #include "src/extensions/gc-extension.h" +#include "include/v8.h" #include "src/base/platform/platform.h" +#include "src/execution/isolate.h" +#include "src/heap/heap.h" +#include "src/tasks/cancelable-task.h" namespace v8 { namespace internal { +namespace { + +enum class ExecutionType { kAsync, kSync }; + +struct GCOptions { + v8::Isolate::GarbageCollectionType type; + ExecutionType execution; +}; + +Maybe<bool> IsProperty(v8::Isolate* isolate, v8::Local<v8::Context> ctx, + v8::Local<v8::Object> object, const char* key, + const char* value) { + auto k = v8::String::NewFromUtf8(isolate, key).ToLocalChecked(); + // Get will return undefined for non-existing keys which will make + // StrictEquals fail. + auto maybe_property = object->Get(ctx, k); + if (maybe_property.IsEmpty()) return Nothing<bool>(); + return Just<bool>(maybe_property.ToLocalChecked()->StrictEquals( + v8::String::NewFromUtf8(isolate, value).ToLocalChecked())); +} + +Maybe<GCOptions> Parse(v8::Isolate* isolate, + const v8::FunctionCallbackInfo<v8::Value>& args) { + // Default values. 
+ auto options = + GCOptions{v8::Isolate::GarbageCollectionType::kFullGarbageCollection, + ExecutionType::kSync}; + bool found_options_object = false; + + if (args.Length() > 0 && args[0]->IsObject()) { + v8::HandleScope scope(isolate); + auto ctx = isolate->GetCurrentContext(); + auto param = v8::Local<v8::Object>::Cast(args[0]); + auto maybe_type = IsProperty(isolate, ctx, param, "type", "minor"); + if (maybe_type.IsNothing()) return Nothing<GCOptions>(); + if (maybe_type.ToChecked()) { + found_options_object = true; + options.type = + v8::Isolate::GarbageCollectionType::kMinorGarbageCollection; + } + auto maybe_execution = + IsProperty(isolate, ctx, param, "execution", "async"); + if (maybe_execution.IsNothing()) return Nothing<GCOptions>(); + if (maybe_execution.ToChecked()) { + found_options_object = true; + options.execution = ExecutionType::kAsync; + } + } + + // If no options object is present default to legacy behavior. + if (!found_options_object) { + options.type = + args[0]->BooleanValue(isolate) + ? v8::Isolate::GarbageCollectionType::kMinorGarbageCollection + : v8::Isolate::GarbageCollectionType::kFullGarbageCollection; + } + + return Just<GCOptions>(options); +} + +void InvokeGC(v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type, + v8::EmbedderHeapTracer::EmbedderStackState embedder_stack_state) { + Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap(); + switch (type) { + case v8::Isolate::GarbageCollectionType::kMinorGarbageCollection: + heap->CollectGarbage(i::NEW_SPACE, i::GarbageCollectionReason::kTesting, + kGCCallbackFlagForced); + break; + case v8::Isolate::GarbageCollectionType::kFullGarbageCollection: + heap->SetEmbedderStackStateForNextFinalizaton(embedder_stack_state); + heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags, + i::GarbageCollectionReason::kTesting, + kGCCallbackFlagForced); + break; + } +} + +class AsyncGC final : public CancelableTask { + public: + ~AsyncGC() final = default; + + AsyncGC(v8::Isolate* isolate, v8::Local<v8::Promise::Resolver> resolver, + v8::Isolate::GarbageCollectionType type) + : CancelableTask(reinterpret_cast<Isolate*>(isolate)), + isolate_(isolate), + ctx_(isolate, isolate->GetCurrentContext()), + resolver_(isolate, resolver), + type_(type) {} + + void RunInternal() final { + v8::HandleScope scope(isolate_); + InvokeGC(isolate_, type_, + v8::EmbedderHeapTracer::EmbedderStackState::kEmpty); + auto resolver = v8::Local<v8::Promise::Resolver>::New(isolate_, resolver_); + auto ctx = Local<v8::Context>::New(isolate_, ctx_); + resolver->Resolve(ctx, v8::Undefined(isolate_)).ToChecked(); + } + + private: + v8::Isolate* isolate_; + v8::Persistent<v8::Context> ctx_; + v8::Persistent<v8::Promise::Resolver> resolver_; + v8::Isolate::GarbageCollectionType type_; + + DISALLOW_COPY_AND_ASSIGN(AsyncGC); +}; + +} // namespace v8::Local<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate( v8::Isolate* isolate, v8::Local<v8::String> str) { return v8::FunctionTemplate::New(isolate, GCExtension::GC); } - void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) { - args.GetIsolate()->RequestGarbageCollectionForTesting( - args[0]->BooleanValue(args.GetIsolate()) - ? v8::Isolate::kMinorGarbageCollection - : v8::Isolate::kFullGarbageCollection); + v8::Isolate* isolate = args.GetIsolate(); + + // Immediate bailout if no arguments are provided. 
+ if (args.Length() == 0) { + InvokeGC(isolate, + v8::Isolate::GarbageCollectionType::kFullGarbageCollection, + v8::EmbedderHeapTracer::EmbedderStackState::kUnknown); + return; + } + + auto maybe_options = Parse(isolate, args); + if (maybe_options.IsNothing()) return; + GCOptions options = maybe_options.ToChecked(); + switch (options.execution) { + case ExecutionType::kSync: + InvokeGC(isolate, options.type, + v8::EmbedderHeapTracer::EmbedderStackState::kUnknown); + break; + case ExecutionType::kAsync: { + v8::HandleScope scope(isolate); + auto resolver = v8::Promise::Resolver::New(isolate->GetCurrentContext()) + .ToLocalChecked(); + args.GetReturnValue().Set(resolver->GetPromise()); + auto task_runner = + V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate); + CHECK(task_runner->NonNestableTasksEnabled()); + task_runner->PostNonNestableTask( + std::make_unique<AsyncGC>(isolate, resolver, options.type)); + } break; + } } } // namespace internal diff --git a/chromium/v8/src/extensions/gc-extension.h b/chromium/v8/src/extensions/gc-extension.h index 7b517637f7a..061bb725a31 100644 --- a/chromium/v8/src/extensions/gc-extension.h +++ b/chromium/v8/src/extensions/gc-extension.h @@ -11,6 +11,21 @@ namespace v8 { namespace internal { +// Provides garbage collection on invoking |fun_name|(options), where +// - options is a dictionary like object. See supported properties below. +// - no parameter refers to options: +// {type: 'major', execution: 'sync'}. +// - truthy parameter that is not setting any options: +// {type: 'minor', execution: 'sync'}. +// +// Supported options: +// - type: 'major' or 'minor' for full GC and Scavenge, respectively. +// - execution: 'sync' or 'async' for synchronous and asynchronous execution, +// respectively. +// - Defaults to {type: 'major', execution: 'sync'}. +// +// Returns a Promise that resolves when GC is done when asynchronous execution +// is requested, and undefined otherwise. class GCExtension : public v8::Extension { public: explicit GCExtension(const char* fun_name) diff --git a/chromium/v8/src/flags/flag-definitions.h b/chromium/v8/src/flags/flag-definitions.h index c7c07e6dc65..b05c36ccdd1 100644 --- a/chromium/v8/src/flags/flag-definitions.h +++ b/chromium/v8/src/flags/flag-definitions.h @@ -204,32 +204,33 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import) // Features that are still work in progress (behind individual flags). #define HARMONY_INPROGRESS_BASE(V) \ - V(harmony_private_methods, "harmony private methods in class literals") \ V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \ V(harmony_weak_refs, "harmony weak references") \ - V(harmony_optional_chaining, "harmony optional chaining syntax") \ - V(harmony_nullish, "harmony nullish operator") + V(harmony_regexp_match_indices, "harmony regexp match indices") \ + V(harmony_top_level_await, "harmony top level await") #ifdef V8_INTL_SUPPORT -#define HARMONY_INPROGRESS(V) \ - HARMONY_INPROGRESS_BASE(V) \ - V(harmony_intl_dateformat_quarter, "Add quarter option to DateTimeFormat") +#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V) #else #define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V) #endif // Features that are complete (but still behind --harmony/es-staging flag). 
-#define HARMONY_STAGED_BASE(V) +#define HARMONY_STAGED_BASE(V) \ + V(harmony_optional_chaining, "harmony optional chaining syntax") \ + V(harmony_nullish, "harmony nullish operator") \ + V(harmony_private_methods, "harmony private methods in class literals") #ifdef V8_INTL_SUPPORT -#define HARMONY_STAGED(V) \ - HARMONY_STAGED_BASE(V) \ - V(harmony_intl_add_calendar_numbering_system, \ - "Add calendar and numberingSystem to DateTimeFormat") \ - V(harmony_intl_dateformat_day_period, \ - "Add dayPeriod option to DateTimeFormat") \ - V(harmony_intl_dateformat_fractional_second_digits, \ - "Add fractionalSecondDigits option to DateTimeFormat") \ +#define HARMONY_STAGED(V) \ + HARMONY_STAGED_BASE(V) \ + V(harmony_intl_add_calendar_numbering_system, \ + "Add calendar and numberingSystem to DateTimeFormat") \ + V(harmony_intl_dateformat_day_period, \ + "Add dayPeriod option to DateTimeFormat") \ + V(harmony_intl_dateformat_fractional_second_digits, \ + "Add fractionalSecondDigits option to DateTimeFormat") \ + V(harmony_intl_other_calendars, "DateTimeFormat other calendars") \ V(harmony_intl_segmenter, "Intl.Segmenter") #else #define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V) @@ -245,12 +246,7 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import) V(harmony_promise_all_settled, "harmony Promise.allSettled") #ifdef V8_INTL_SUPPORT -#define HARMONY_SHIPPING(V) \ - HARMONY_SHIPPING_BASE(V) \ - V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \ - V(harmony_intl_date_format_range, "DateTimeFormat formatRange") \ - V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat") \ - V(harmony_intl_numberformat_unified, "Unified Intl.NumberFormat Features") +#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V) #else #define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V) #endif @@ -390,7 +386,7 @@ DEFINE_BOOL(enable_one_shot_optimization, true, "only be executed once") // Flag for sealed, frozen elements kind instead of dictionary elements kind -DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, false, +DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true, "Enable sealed, frozen elements kind") // Flags for data representation optimizations @@ -469,6 +465,12 @@ DEFINE_BOOL(trace_track_allocation_sites, false, DEFINE_BOOL(trace_migration, false, "trace object migration") DEFINE_BOOL(trace_generalization, false, "trace map generalization") +// Flags for TurboProp. +DEFINE_BOOL(turboprop, false, + "enable experimental turboprop mid-tier compiler.") +DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining) +DEFINE_NEG_IMPLICATION(turboprop, inline_accessors) + // Flags for concurrent recompilation. DEFINE_BOOL(concurrent_recompilation, true, "optimizing hot functions asynchronously on a separate thread") @@ -485,9 +487,12 @@ DEFINE_BOOL(concurrent_inlining, false, DEFINE_IMPLICATION(future, concurrent_inlining) DEFINE_BOOL(trace_heap_broker_verbose, false, "trace the heap broker verbosely (all reports)") +DEFINE_BOOL(trace_heap_broker_memory, false, + "trace the heap broker memory (refs analysis and zone numbers)") DEFINE_BOOL(trace_heap_broker, false, "trace the heap broker (reports on missing data only)") DEFINE_IMPLICATION(trace_heap_broker_verbose, trace_heap_broker) +DEFINE_IMPLICATION(trace_heap_broker_memory, trace_heap_broker) // Flags for stress-testing the compiler. 
DEFINE_INT(stress_runs, 0, "number of stress runs") @@ -499,7 +504,7 @@ DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points") DEFINE_BOOL(opt, true, "use adaptive optimizations") DEFINE_BOOL(turbo_sp_frame_access, false, "use stack pointer-relative access to frame wherever possible") -DEFINE_BOOL(turbo_control_flow_aware_allocation, false, +DEFINE_BOOL(turbo_control_flow_aware_allocation, true, "consider control flow while allocating registers") DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler") @@ -608,8 +613,6 @@ DEFINE_BOOL(turbo_store_elimination, true, DEFINE_BOOL(trace_store_elimination, false, "trace store elimination") DEFINE_BOOL(turbo_rewrite_far_jumps, true, "rewrite far to near jumps (ia32,x64)") -DEFINE_BOOL(experimental_inline_promise_constructor, true, - "inline the Promise constructor in TurboFan") DEFINE_BOOL( stress_gc_during_compilation, false, "simulate GC/compiler thread race related to https://crbug.com/v8/8520") @@ -730,9 +733,6 @@ DEFINE_BOOL(wasm_math_intrinsics, true, DEFINE_BOOL(wasm_shared_engine, true, "shares one wasm engine between all isolates within a process") DEFINE_IMPLICATION(future, wasm_shared_engine) -DEFINE_BOOL(wasm_shared_code, true, - "shares code underlying a wasm module when it is transferred") -DEFINE_IMPLICATION(future, wasm_shared_code) DEFINE_BOOL(wasm_trap_handler, true, "use signal handlers to catch out of bounds memory access in wasm" " (currently Linux x86_64 only)") @@ -750,7 +750,7 @@ DEFINE_BOOL(wasm_lazy_compilation, false, "enable lazy compilation for all wasm modules") DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false, "trace lazy compilation of wasm functions") -DEFINE_BOOL(wasm_grow_shared_memory, false, +DEFINE_BOOL(wasm_grow_shared_memory, true, "allow growing shared WebAssembly memory objects") DEFINE_BOOL(wasm_lazy_validation, false, "enable lazy validation for lazily compiled wasm functions") @@ -762,6 +762,11 @@ DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code") DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code") DEFINE_BOOL(stress_wasm_code_gc, false, "stress test garbage collection of wasm code") +DEFINE_BOOL(wasm_far_jump_table, true, + "use multiple separate code spaces that might require far jumps " + "between them") +DEFINE_INT(wasm_max_initial_code_space_reservation, 0, + "maximum size of the initial wasm code space reservation (in MB)") // Profiler flags. DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler") @@ -885,6 +890,7 @@ DEFINE_BOOL(trace_gc_object_stats, false, DEFINE_BOOL(trace_zone_stats, false, "trace zone memory usage") DEFINE_BOOL(track_retaining_path, false, "enable support for tracking retaining path") +DEFINE_DEBUG_BOOL(trace_backing_store, false, "trace backing store events") DEFINE_BOOL(concurrent_array_buffer_freeing, true, "free array buffer allocations on a background thread") DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics") @@ -961,6 +967,10 @@ DEFINE_BOOL(gc_experiment_less_compaction, false, DEFINE_BOOL(disable_abortjs, false, "disables AbortJS runtime function") +DEFINE_BOOL(randomize_all_allocations, false, + "randomize virtual memory reservations by ignoring any hints " + "passed when allocating pages") + DEFINE_BOOL(manual_evacuation_candidates_selection, false, "Test mode only flag. 
It allows a unit test to select evacuation " candidate pages (requires --stress_compaction).") @@ -1227,6 +1237,8 @@ DEFINE_BOOL(print_all_exceptions, false, DEFINE_BOOL( detailed_error_stack_trace, false, "includes arguments for each function call in the error stack frames array") +DEFINE_BOOL(adjust_os_scheduling_parameters, true, + "adjust OS specific scheduling params for the isolate") // runtime.cc DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times") @@ -1254,9 +1266,26 @@ DEFINE_UINT(serialization_chunk_size, 4096, DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code") DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.") DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code") -DEFINE_BOOL(regexp_tier_up, false, - "enable regexp interpreter and tier up to the compiler") -DEFINE_NEG_IMPLICATION(regexp_interpret_all, regexp_tier_up) +#ifdef V8_TARGET_BIG_ENDIAN +#define REGEXP_PEEPHOLE_OPTIMIZATION_BOOL false +#else +#define REGEXP_PEEPHOLE_OPTIMIZATION_BOOL true +#endif +DEFINE_BOOL(regexp_tier_up, true, + "enable regexp interpreter and tier up to the compiler after the " + "number of executions set by the tier up ticks flag") +DEFINE_INT(regexp_tier_up_ticks, 1, + "set the number of executions for the regexp interpreter before " + "tiering-up to the compiler") +DEFINE_BOOL(regexp_peephole_optimization, REGEXP_PEEPHOLE_OPTIMIZATION_BOOL, + "enable peephole optimization for regexp bytecode") +DEFINE_BOOL(trace_regexp_peephole_optimization, false, + "trace regexp bytecode peephole optimization") +DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution") +DEFINE_BOOL(trace_regexp_assembler, false, + "trace regexp macro assembler calls.") +DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing") +DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp tiering up execution") // Testing flags test/cctest/test-{flags,api,serialization}.cc DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag") @@ -1391,11 +1420,6 @@ DEFINE_BOOL(trace_isolates, false, "trace isolate state changes") // Regexp DEFINE_BOOL(regexp_possessive_quantifier, false, "enable possessive quantifier syntax for testing") -DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution") -DEFINE_BOOL(trace_regexp_assembler, false, - "trace regexp macro assembler calls.") -DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing") -DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp tiering up execution") // Debugger DEFINE_BOOL(print_break_location, false, "print source location on debug break") @@ -1498,6 +1522,11 @@ DEFINE_BOOL(interpreted_frames_native_stack, false, "profilers).") #endif +// TODO(v8:9206, solanes): remove this when smi-corrupting reducer is fully on. 
+DEFINE_BOOL_READONLY(turbo_decompression_elimination, true, + "enable the decompression elimination system when " + "pointer compression is enabled.") + // // Disassembler only flags // diff --git a/chromium/v8/src/handles/global-handles.cc b/chromium/v8/src/handles/global-handles.cc index aed5b3fa834..23a635c0521 100644 --- a/chromium/v8/src/handles/global-handles.cc +++ b/chromium/v8/src/handles/global-handles.cc @@ -637,6 +637,10 @@ class GlobalHandles::TracedNode final bool has_destructor() const { return HasDestructor::decode(flags_); } void set_has_destructor(bool v) { flags_ = HasDestructor::update(flags_, v); } + bool markbit() const { return Markbit::decode(flags_); } + void clear_markbit() { flags_ = Markbit::update(flags_, false); } + void set_markbit() { flags_ = Markbit::update(flags_, true); } + void SetFinalizationCallback(void* parameter, WeakCallbackInfo<void>::Callback callback) { set_parameter(parameter); @@ -678,14 +682,18 @@ class GlobalHandles::TracedNode final using IsInYoungList = NodeState::Next<bool, 1>; using IsRoot = IsInYoungList::Next<bool, 1>; using HasDestructor = IsRoot::Next<bool, 1>; + using Markbit = HasDestructor::Next<bool, 1>; void ClearImplFields() { set_root(true); + // Nodes are black allocated for simplicity. + set_markbit(); callback_ = nullptr; } void CheckImplFieldsAreCleared() const { DCHECK(is_root()); + DCHECK(markbit()); DCHECK_NULL(callback_); } @@ -793,6 +801,12 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) { } } +void GlobalHandles::MarkTraced(Address* location) { + TracedNode* node = TracedNode::FromLocation(location); + node->set_markbit(); + DCHECK(node->IsInUse()); +} + void GlobalHandles::Destroy(Address* location) { if (location != nullptr) { NodeSpace<Node>::Release(Node::FromLocation(location)); @@ -867,8 +881,26 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles( } } for (TracedNode* node : *traced_nodes_) { - if (node->IsInUse() && - should_reset_handle(isolate()->heap(), node->location())) { + if (!node->IsInUse()) continue; + // Detect unreachable nodes first. + if (!node->markbit() && node->IsPhantomResetHandle() && + !node->has_destructor()) { + // The handle is unreachable and does not have a callback and a + // destructor associated with it. We can clear it even if the target V8 + // object is alive. Note that the destructor and the callback may + // access the handle, that is why we avoid clearing it. + node->ResetPhantomHandle(HandleHolder::kDead); + ++number_of_phantom_handle_resets_; + continue; + } else if (node->markbit()) { + // Clear the markbit for the next GC. + node->clear_markbit(); + } + DCHECK(node->IsInUse()); + // Detect nodes with unreachable target objects. + if (should_reset_handle(isolate()->heap(), node->location())) { + // If the node allows eager resetting, then reset it here. Otherwise, + // collect its callback that will reset it. if (node->IsPhantomResetHandle()) { node->ResetPhantomHandle(node->has_destructor() ? 
HandleHolder::kLive : HandleHolder::kDead); @@ -901,8 +933,13 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects( DCHECK(node->is_root()); if (is_unmodified(node->location())) { v8::Value* value = ToApi<v8::Value>(node->handle()); - node->set_root(tracer->IsRootForNonTracingGC( - *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value))); + if (node->has_destructor()) { + node->set_root(tracer->IsRootForNonTracingGC( + *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value))); + } else { + node->set_root(tracer->IsRootForNonTracingGC( + *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value))); + } } } } @@ -990,7 +1027,7 @@ void GlobalHandles::IterateYoungWeakUnmodifiedRootsForPhantomHandles( } else { v8::Value* value = ToApi<v8::Value>(node->handle()); tracer->ResetHandleInNonTracingGC( - *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)); + *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value)); DCHECK(!node->IsInUse()); } @@ -1271,8 +1308,13 @@ void GlobalHandles::IterateTracedNodes( for (TracedNode* node : *traced_nodes_) { if (node->IsInUse()) { v8::Value* value = ToApi<v8::Value>(node->handle()); - visitor->VisitTracedGlobalHandle( - *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)); + if (node->has_destructor()) { + visitor->VisitTracedGlobalHandle( + *reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)); + } else { + visitor->VisitTracedReference( + *reinterpret_cast<v8::TracedReference<v8::Value>*>(&value)); + } } } } diff --git a/chromium/v8/src/handles/global-handles.h b/chromium/v8/src/handles/global-handles.h index a07f7a772a9..26390b9fe21 100644 --- a/chromium/v8/src/handles/global-handles.h +++ b/chromium/v8/src/handles/global-handles.h @@ -5,6 +5,7 @@ #ifndef V8_HANDLES_GLOBAL_HANDLES_H_ #define V8_HANDLES_GLOBAL_HANDLES_H_ +#include <memory> #include <type_traits> #include <utility> #include <vector> @@ -86,6 +87,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final { static void SetFinalizationCallbackForTraced( Address* location, void* parameter, WeakCallbackInfo<void>::Callback callback); + static void MarkTraced(Address* location); explicit GlobalHandles(Isolate* isolate); ~GlobalHandles(); diff --git a/chromium/v8/src/handles/handles.cc b/chromium/v8/src/handles/handles.cc index 7f320a271c3..87c435061e5 100644 --- a/chromium/v8/src/handles/handles.cc +++ b/chromium/v8/src/handles/handles.cc @@ -28,7 +28,7 @@ ASSERT_TRIVIALLY_COPYABLE(Handle<Object>); ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>); #ifdef DEBUG -bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const { +bool HandleBase::IsDereferenceAllowed() const { DCHECK_NOT_NULL(location_); Object object(*location_); if (object.IsSmi()) return true; @@ -40,16 +40,7 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const { RootsTable::IsImmortalImmovable(root_index)) { return true; } - if (!AllowHandleDereference::IsAllowed()) return false; - if (mode == INCLUDE_DEFERRED_CHECK && - !AllowDeferredHandleDereference::IsAllowed()) { - // Accessing cells, maps and internalized strings is safe. 
- if (heap_object.IsCell()) return true; - if (heap_object.IsMap()) return true; - if (heap_object.IsInternalizedString()) return true; - return !isolate->IsDeferredHandle(location_); - } - return true; + return AllowHandleDereference::IsAllowed(); } #endif @@ -188,13 +179,13 @@ DeferredHandleScope::DeferredHandleScope(Isolate* isolate) } DeferredHandleScope::~DeferredHandleScope() { - impl_->isolate()->handle_scope_data()->level--; DCHECK(handles_detached_); - DCHECK(impl_->isolate()->handle_scope_data()->level == prev_level_); + impl_->isolate()->handle_scope_data()->level--; + DCHECK_EQ(impl_->isolate()->handle_scope_data()->level, prev_level_); } -DeferredHandles* DeferredHandleScope::Detach() { - DeferredHandles* deferred = impl_->Detach(prev_limit_); +std::unique_ptr<DeferredHandles> DeferredHandleScope::Detach() { + std::unique_ptr<DeferredHandles> deferred = impl_->Detach(prev_limit_); HandleScopeData* data = impl_->isolate()->handle_scope_data(); data->next = prev_next_; data->limit = prev_limit_; diff --git a/chromium/v8/src/handles/handles.h b/chromium/v8/src/handles/handles.h index 5f9b170d4b1..2fea55d1a0d 100644 --- a/chromium/v8/src/handles/handles.h +++ b/chromium/v8/src/handles/handles.h @@ -41,11 +41,8 @@ class HandleBase { // Check if this handle refers to the exact same object as the other handle. V8_INLINE bool is_identical_to(const HandleBase that) const { - // Dereferencing deferred handles to check object equality is safe. - SLOW_DCHECK((this->location_ == nullptr || - this->IsDereferenceAllowed(NO_DEFERRED_CHECK)) && - (that.location_ == nullptr || - that.IsDereferenceAllowed(NO_DEFERRED_CHECK))); + SLOW_DCHECK((this->location_ == nullptr || this->IsDereferenceAllowed()) && + (that.location_ == nullptr || that.IsDereferenceAllowed())); if (this->location_ == that.location_) return true; if (this->location_ == nullptr || that.location_ == nullptr) return false; return *this->location_ == *that.location_; @@ -59,20 +56,16 @@ class HandleBase { // Returns the address to where the raw pointer is stored. V8_INLINE Address* location() const { - SLOW_DCHECK(location_ == nullptr || - IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK)); + SLOW_DCHECK(location_ == nullptr || IsDereferenceAllowed()); return location_; } protected: - enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK }; #ifdef DEBUG - bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const; + bool V8_EXPORT_PRIVATE IsDereferenceAllowed() const; #else V8_INLINE - bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const { - return true; - } + bool V8_EXPORT_PRIVATE IsDereferenceAllowed() const { return true; } #endif // DEBUG // This uses type Address* as opposed to a pointer type to a typed @@ -140,7 +133,7 @@ class Handle final : public HandleBase { V8_INLINE T operator*() const { // unchecked_cast because we rather trust Handle<T> to contain a T than // include all the respective -inl.h headers for SLOW_DCHECKs. - SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK)); + SLOW_DCHECK(IsDereferenceAllowed()); return T::unchecked_cast(Object(*location())); } @@ -318,7 +311,7 @@ class V8_EXPORT_PRIVATE DeferredHandleScope final { // The DeferredHandles object returned stores the Handles created // since the creation of this DeferredHandleScope. The Handles are // alive as long as the DeferredHandles object is alive. 
- DeferredHandles* Detach(); + std::unique_ptr<DeferredHandles> Detach(); ~DeferredHandleScope(); private: diff --git a/chromium/v8/src/heap/array-buffer-collector.cc b/chromium/v8/src/heap/array-buffer-collector.cc index b6d7df8191f..672d5e68f05 100644 --- a/chromium/v8/src/heap/array-buffer-collector.cc +++ b/chromium/v8/src/heap/array-buffer-collector.cc @@ -14,33 +14,22 @@ namespace v8 { namespace internal { -namespace { - -void FreeAllocationsHelper( - Heap* heap, const std::vector<JSArrayBuffer::Allocation>& allocations) { - for (JSArrayBuffer::Allocation alloc : allocations) { - JSArrayBuffer::FreeBackingStore(heap->isolate(), alloc); - } -} - -} // namespace - void ArrayBufferCollector::QueueOrFreeGarbageAllocations( - std::vector<JSArrayBuffer::Allocation> allocations) { + std::vector<std::shared_ptr<BackingStore>> backing_stores) { if (heap_->ShouldReduceMemory()) { - FreeAllocationsHelper(heap_, allocations); + // Destruct the vector, which destructs the std::shared_ptrs, freeing + // the backing stores. + backing_stores.clear(); } else { base::MutexGuard guard(&allocations_mutex_); - allocations_.push_back(std::move(allocations)); + allocations_.push_back(std::move(backing_stores)); } } void ArrayBufferCollector::PerformFreeAllocations() { base::MutexGuard guard(&allocations_mutex_); - for (const std::vector<JSArrayBuffer::Allocation>& allocations : - allocations_) { - FreeAllocationsHelper(heap_, allocations); - } + // Destruct the vector, which destructs the vector of std::shared_ptrs, + // freeing the backing stores if their refcount drops to zero. allocations_.clear(); } diff --git a/chromium/v8/src/heap/array-buffer-collector.h b/chromium/v8/src/heap/array-buffer-collector.h index 784092e936d..2d060cc595b 100644 --- a/chromium/v8/src/heap/array-buffer-collector.h +++ b/chromium/v8/src/heap/array-buffer-collector.h @@ -31,7 +31,7 @@ class ArrayBufferCollector { // // FreeAllocations() potentially triggers a background task for processing. void QueueOrFreeGarbageAllocations( - std::vector<JSArrayBuffer::Allocation> allocations); + std::vector<std::shared_ptr<BackingStore>> allocations); // Calls FreeAllocations() on a background thread. void FreeAllocations(); @@ -45,7 +45,7 @@ class ArrayBufferCollector { Heap* const heap_; base::Mutex allocations_mutex_; - std::vector<std::vector<JSArrayBuffer::Allocation>> allocations_; + std::vector<std::vector<std::shared_ptr<BackingStore>>> allocations_; }; } // namespace internal diff --git a/chromium/v8/src/heap/array-buffer-tracker-inl.h b/chromium/v8/src/heap/array-buffer-tracker-inl.h index 763300cffe2..21106cee4b1 100644 --- a/chromium/v8/src/heap/array-buffer-tracker-inl.h +++ b/chromium/v8/src/heap/array-buffer-tracker-inl.h @@ -12,16 +12,31 @@ #include "src/objects/js-array-buffer-inl.h" #include "src/objects/objects.h" +#define TRACE_BS(...) \ + do { \ + if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \ + } while (false) + namespace v8 { namespace internal { -void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) { - if (buffer.backing_store() == nullptr) return; +inline size_t PerIsolateAccountingLength(JSArrayBuffer buffer) { + // TODO(titzer): SharedArrayBuffers and shared WasmMemorys cause problems with + // accounting for per-isolate external memory. In particular, sharing the same + // array buffer or memory multiple times, which happens in stress tests, can + // cause overcounting, leading to GC thrashing. Fix with global accounting? + return buffer.is_shared() ? 
0 : buffer.byte_length(); +} + +void ArrayBufferTracker::RegisterNew( + Heap* heap, JSArrayBuffer buffer, + std::shared_ptr<BackingStore> backing_store) { + if (!backing_store) return; // ArrayBuffer tracking works only for small objects. DCHECK(!heap->IsLargeObject(buffer)); + DCHECK_EQ(backing_store->buffer_start(), buffer.backing_store()); - const size_t length = buffer.byte_length(); Page* page = Page::FromHeapObject(buffer); { base::MutexGuard guard(page->mutex()); @@ -31,44 +46,63 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) { tracker = page->local_tracker(); } DCHECK_NOT_NULL(tracker); - tracker->Add(buffer, length); + TRACE_BS("ABT:reg bs=%p mem=%p (length=%zu) cnt=%ld\n", + backing_store.get(), backing_store->buffer_start(), + backing_store->byte_length(), backing_store.use_count()); + tracker->Add(buffer, std::move(backing_store)); } // TODO(wez): Remove backing-store from external memory accounting. // We may go over the limit of externally allocated memory here. We call the // api function to trigger a GC in this case. + const size_t length = PerIsolateAccountingLength(buffer); reinterpret_cast<v8::Isolate*>(heap->isolate()) ->AdjustAmountOfExternalAllocatedMemory(length); } -void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) { - if (buffer.backing_store() == nullptr) return; +std::shared_ptr<BackingStore> ArrayBufferTracker::Unregister( + Heap* heap, JSArrayBuffer buffer) { + std::shared_ptr<BackingStore> backing_store; + const size_t length = PerIsolateAccountingLength(buffer); Page* page = Page::FromHeapObject(buffer); - const size_t length = buffer.byte_length(); { base::MutexGuard guard(page->mutex()); LocalArrayBufferTracker* tracker = page->local_tracker(); DCHECK_NOT_NULL(tracker); - tracker->Remove(buffer, length); + backing_store = tracker->Remove(buffer); } // TODO(wez): Remove backing-store from external memory accounting. heap->update_external_memory(-static_cast<intptr_t>(length)); + return backing_store; +} + +std::shared_ptr<BackingStore> ArrayBufferTracker::Lookup(Heap* heap, + JSArrayBuffer buffer) { + if (buffer.backing_store() == nullptr) return {}; + + Page* page = Page::FromHeapObject(buffer); + base::MutexGuard guard(page->mutex()); + LocalArrayBufferTracker* tracker = page->local_tracker(); + DCHECK_NOT_NULL(tracker); + return tracker->Lookup(buffer); } template <typename Callback> void LocalArrayBufferTracker::Free(Callback should_free) { size_t freed_memory = 0; - Isolate* isolate = page_->heap()->isolate(); for (TrackingData::iterator it = array_buffers_.begin(); it != array_buffers_.end();) { // Unchecked cast because the map might already be dead at this point. JSArrayBuffer buffer = JSArrayBuffer::unchecked_cast(it->first); - const size_t length = it->second.length; + const size_t length = PerIsolateAccountingLength(buffer); if (should_free(buffer)) { - JSArrayBuffer::FreeBackingStore(isolate, it->second); + // Destroy the shared pointer, (perhaps) freeing the backing store. 
+ TRACE_BS("ABT:die bs=%p mem=%p (length=%zu) cnt=%ld\n", + it->second.get(), it->second->buffer_start(), + it->second->byte_length(), it->second.use_count()); it = array_buffers_.erase(it); freed_memory += length; } else { @@ -97,35 +131,60 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) { } } -void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, size_t length) { +void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, + std::shared_ptr<BackingStore> backing_store) { + auto length = PerIsolateAccountingLength(buffer); page_->IncrementExternalBackingStoreBytes( ExternalBackingStoreType::kArrayBuffer, length); - AddInternal(buffer, length); + AddInternal(buffer, std::move(backing_store)); } -void LocalArrayBufferTracker::AddInternal(JSArrayBuffer buffer, size_t length) { - auto ret = array_buffers_.insert( - {buffer, - {buffer.backing_store(), length, buffer.backing_store(), - buffer.is_wasm_memory()}}); +void LocalArrayBufferTracker::AddInternal( + JSArrayBuffer buffer, std::shared_ptr<BackingStore> backing_store) { + auto ret = array_buffers_.insert({buffer, std::move(backing_store)}); USE(ret); // Check that we indeed inserted a new value and did not overwrite an existing // one (which would be a bug). DCHECK(ret.second); } -void LocalArrayBufferTracker::Remove(JSArrayBuffer buffer, size_t length) { - page_->DecrementExternalBackingStoreBytes( - ExternalBackingStoreType::kArrayBuffer, length); - +std::shared_ptr<BackingStore> LocalArrayBufferTracker::Remove( + JSArrayBuffer buffer) { TrackingData::iterator it = array_buffers_.find(buffer); + // Check that we indeed find a key to remove. DCHECK(it != array_buffers_.end()); - DCHECK_EQ(length, it->second.length); + + // Steal the underlying shared pointer before erasing the entry. + std::shared_ptr<BackingStore> backing_store = std::move(it->second); + + TRACE_BS("ABT:rm bs=%p mem=%p (length=%zu) cnt=%ld\n", backing_store.get(), + backing_store->buffer_start(), backing_store->byte_length(), + backing_store.use_count()); + + // Erase the entry. array_buffers_.erase(it); + + // Update accounting. + auto length = PerIsolateAccountingLength(buffer); + page_->DecrementExternalBackingStoreBytes( + ExternalBackingStoreType::kArrayBuffer, length); + + return backing_store; +} + +std::shared_ptr<BackingStore> LocalArrayBufferTracker::Lookup( + JSArrayBuffer buffer) { + TrackingData::iterator it = array_buffers_.find(buffer); + if (it != array_buffers_.end()) { + return it->second; + } + return {}; } +#undef TRACE_BS + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/heap/array-buffer-tracker.cc b/chromium/v8/src/heap/array-buffer-tracker.cc index fdca6e8df27..b284a65f66a 100644 --- a/chromium/v8/src/heap/array-buffer-tracker.cc +++ b/chromium/v8/src/heap/array-buffer-tracker.cc @@ -11,6 +11,11 @@ #include "src/heap/heap.h" #include "src/heap/spaces.h" +#define TRACE_BS(...) 
\ + do { \ + if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \ + } while (false) + namespace v8 { namespace internal { @@ -20,7 +25,7 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() { template <typename Callback> void LocalArrayBufferTracker::Process(Callback callback) { - std::vector<JSArrayBuffer::Allocation> backing_stores_to_free; + std::vector<std::shared_ptr<BackingStore>> backing_stores_to_free; TrackingData kept_array_buffers; JSArrayBuffer new_buffer; @@ -32,8 +37,9 @@ void LocalArrayBufferTracker::Process(Callback callback) { DCHECK_EQ(page_, Page::FromHeapObject(old_buffer)); const CallbackResult result = callback(old_buffer, &new_buffer); if (result == kKeepEntry) { - kept_array_buffers.insert(*it); + kept_array_buffers.insert(std::move(*it)); } else if (result == kUpdateEntry) { + DCHECK_EQ(old_buffer.byte_length(), new_buffer.byte_length()); DCHECK(!new_buffer.is_null()); Page* target_page = Page::FromHeapObject(new_buffer); { @@ -44,22 +50,28 @@ void LocalArrayBufferTracker::Process(Callback callback) { tracker = target_page->local_tracker(); } DCHECK_NOT_NULL(tracker); - const size_t length = it->second.length; + const size_t length = PerIsolateAccountingLength(old_buffer); // We should decrement before adding to avoid potential overflows in // the external memory counters. - DCHECK_EQ(it->first.is_wasm_memory(), it->second.is_wasm_memory); - tracker->AddInternal(new_buffer, length); + tracker->AddInternal(new_buffer, std::move(it->second)); MemoryChunk::MoveExternalBackingStoreBytes( ExternalBackingStoreType::kArrayBuffer, static_cast<MemoryChunk*>(page_), static_cast<MemoryChunk*>(target_page), length); } } else if (result == kRemoveEntry) { - freed_memory += it->second.length; - // We pass backing_store() and stored length to the collector for freeing - // the backing store. Wasm allocations will go through their own tracker - // based on the backing store. - backing_stores_to_free.push_back(it->second); + freed_memory += PerIsolateAccountingLength(old_buffer); + auto backing_store = std::move(it->second); + TRACE_BS("ABT:queue bs=%p mem=%p (length=%zu) cnt=%ld\n", + backing_store.get(), backing_store->buffer_start(), + backing_store->byte_length(), backing_store.use_count()); + if (!backing_store->is_shared()) { + // Only retain non-shared backing stores. For shared backing stores, + // drop the shared_ptr right away, since this should be cheap, + // as it only updates a refcount, except that last, which will + // destruct it, which is rare. + backing_stores_to_free.push_back(backing_store); + } } else { UNREACHABLE(); } @@ -147,3 +159,4 @@ void ArrayBufferTracker::TearDown(Heap* heap) { } // namespace internal } // namespace v8 +#undef TRACE_BS diff --git a/chromium/v8/src/heap/array-buffer-tracker.h b/chromium/v8/src/heap/array-buffer-tracker.h index b7950c25069..156c2264062 100644 --- a/chromium/v8/src/heap/array-buffer-tracker.h +++ b/chromium/v8/src/heap/array-buffer-tracker.h @@ -9,6 +9,7 @@ #include "src/base/platform/mutex.h" #include "src/common/globals.h" +#include "src/objects/backing-store.h" #include "src/objects/js-array-buffer.h" #include "src/utils/allocation.h" @@ -31,8 +32,12 @@ class ArrayBufferTracker : public AllStatic { // Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all // access to the tracker by taking the page lock for the corresponding page. 
- inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer); - inline static void Unregister(Heap* heap, JSArrayBuffer buffer); + inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer, + std::shared_ptr<BackingStore>); + inline static std::shared_ptr<BackingStore> Unregister(Heap* heap, + JSArrayBuffer buffer); + inline static std::shared_ptr<BackingStore> Lookup(Heap* heap, + JSArrayBuffer buffer); // Identifies all backing store pointers for dead JSArrayBuffers in new space. // Does not take any locks and can only be called during Scavenge. @@ -70,8 +75,10 @@ class LocalArrayBufferTracker { explicit LocalArrayBufferTracker(Page* page) : page_(page) {} ~LocalArrayBufferTracker(); - inline void Add(JSArrayBuffer buffer, size_t length); - inline void Remove(JSArrayBuffer buffer, size_t length); + inline void Add(JSArrayBuffer buffer, + std::shared_ptr<BackingStore> backing_store); + inline std::shared_ptr<BackingStore> Remove(JSArrayBuffer buffer); + inline std::shared_ptr<BackingStore> Lookup(JSArrayBuffer buffer); // Frees up array buffers. // @@ -105,17 +112,13 @@ class LocalArrayBufferTracker { } }; - // Keep track of the backing store and the corresponding length at time of - // registering. The length is accessed from JavaScript and can be a - // HeapNumber. The reason for tracking the length is that in the case of - // length being a HeapNumber, the buffer and its length may be stored on - // different memory pages, making it impossible to guarantee order of freeing. using TrackingData = - std::unordered_map<JSArrayBuffer, JSArrayBuffer::Allocation, Hasher>; + std::unordered_map<JSArrayBuffer, std::shared_ptr<BackingStore>, Hasher>; // Internal version of add that does not update counters. Requires separate // logic for updating external memory counters. 
- inline void AddInternal(JSArrayBuffer buffer, size_t length); + inline void AddInternal(JSArrayBuffer buffer, + std::shared_ptr<BackingStore> backing_store); Page* page_; // The set contains raw heap pointers which are removed by the GC upon diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h index 65fc072bd24..c0d4ade522b 100644 --- a/chromium/v8/src/heap/basic-memory-chunk.h +++ b/chromium/v8/src/heap/basic-memory-chunk.h @@ -10,12 +10,19 @@ #include "src/base/atomic-utils.h" #include "src/common/globals.h" #include "src/heap/marking.h" +#include "src/heap/slot-set.h" namespace v8 { namespace internal { class MemoryChunk; +enum RememberedSetType { + OLD_TO_NEW, + OLD_TO_OLD, + NUMBER_OF_REMEMBERED_SET_TYPES +}; + class BasicMemoryChunk { public: enum Flag { @@ -170,6 +177,11 @@ class BasicMemoryChunk { static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize; static const intptr_t kHeaderSentinelOffset = kHeapOffset + kSystemPointerSize; + static const intptr_t kAreaStartOffset = + kHeaderSentinelOffset + kSystemPointerSize; + static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize; + static const intptr_t kOldToNewSlotSetOffset = + kAreaEndOffset + kSystemPointerSize; static const size_t kHeaderSize = kSizeOffset + kSizetSize // size_t size @@ -178,7 +190,8 @@ class BasicMemoryChunk { + kSystemPointerSize // Heap* heap_ + kSystemPointerSize // Address header_sentinel_ + kSystemPointerSize // Address area_start_ - + kSystemPointerSize; // Address area_end_ + + kSystemPointerSize // Address area_end_ + + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array protected: // Overall size of the chunk, including the header and guards. @@ -204,6 +217,11 @@ class BasicMemoryChunk { Address area_start_; Address area_end_; + // A single slot set for small pages (of size kPageSize) or an array of slot + // set for large pages. In the latter case the number of entries in the array + // is ceil(size() / kPageSize). + SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]; + friend class BasicMemoryChunkValidator; }; @@ -221,6 +239,8 @@ class BasicMemoryChunkValidator { offsetof(BasicMemoryChunk, heap_)); STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset == offsetof(BasicMemoryChunk, header_sentinel_)); + STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset == + offsetof(BasicMemoryChunk, slot_set_)); }; } // namespace internal diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc index 12bb28f1c8e..6a155c78ea8 100644 --- a/chromium/v8/src/heap/concurrent-marking.cc +++ b/chromium/v8/src/heap/concurrent-marking.cc @@ -8,7 +8,6 @@ #include <unordered_map> #include "include/v8config.h" -#include "src/base/template-utils.h" #include "src/execution/isolate.h" #include "src/heap/gc-tracer.h" #include "src/heap/heap-inl.h" @@ -225,6 +224,9 @@ class ConcurrentMarkingVisitor final } if (weak_ref.target().IsHeapObject()) { HeapObject target = HeapObject::cast(weak_ref.target()); +#ifdef THREAD_SANITIZER + MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad(); +#endif if (marking_state_.IsBlackOrGrey(target)) { // Record the slot inside the JSWeakRef, since the // VisitJSObjectSubclass above didn't visit it. 
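A minimal, self-contained sketch (illustrative names only, not V8's actual classes) of the ownership pattern the array-buffer-tracker hunks above introduce: each tracked buffer maps to a std::shared_ptr<BackingStore>, and removal hands the pointer back to the caller instead of freeing a raw (pointer, length) pair.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <utility>

struct BackingStore {        // stand-in for v8::internal::BackingStore
  void* start = nullptr;
  size_t length = 0;
};

class BufferTrackerSketch {
 public:
  void Add(uintptr_t buffer_id, std::shared_ptr<BackingStore> store) {
    bool inserted = buffers_.emplace(buffer_id, std::move(store)).second;
    (void)inserted;  // a real tracker asserts this, as AddInternal does above
  }

  // Steal the shared_ptr before erasing, mirroring LocalArrayBufferTracker::Remove.
  std::shared_ptr<BackingStore> Remove(uintptr_t buffer_id) {
    auto it = buffers_.find(buffer_id);
    if (it == buffers_.end()) return {};
    std::shared_ptr<BackingStore> store = std::move(it->second);
    buffers_.erase(it);
    return store;  // the last owner to drop this pointer frees the memory
  }

 private:
  std::unordered_map<uintptr_t, std::shared_ptr<BackingStore>> buffers_;
};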
@@ -247,6 +249,9 @@ class ConcurrentMarkingVisitor final WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this); if (weak_cell.target().IsHeapObject()) { HeapObject target = HeapObject::cast(weak_cell.target()); +#ifdef THREAD_SANITIZER + MemoryChunk::FromHeapObject(target)->SynchronizedHeapLoad(); +#endif if (marking_state_.IsBlackOrGrey(target)) { // Record the slot inside the WeakCell, since the IterateBody above // didn't visit it. @@ -478,6 +483,9 @@ class ConcurrentMarkingVisitor final ObjectSlot key_slot = table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)); HeapObject key = HeapObject::cast(table.KeyAt(i)); +#ifdef THREAD_SANITIZER + MemoryChunk::FromHeapObject(key)->SynchronizedHeapLoad(); +#endif MarkCompactCollector::RecordSlot(table, key_slot, key); ObjectSlot value_slot = @@ -491,6 +499,9 @@ class ConcurrentMarkingVisitor final if (value_obj.IsHeapObject()) { HeapObject value = HeapObject::cast(value_obj); +#ifdef THREAD_SANITIZER + MemoryChunk::FromHeapObject(value)->SynchronizedHeapLoad(); +#endif MarkCompactCollector::RecordSlot(table, value_slot, value); // Revisit ephemerons with both key and value unreachable at end @@ -864,8 +875,7 @@ void ConcurrentMarking::ScheduleTasks() { DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking); DCHECK(!heap_->IsTearingDown()); base::MutexGuard guard(&pending_lock_); - DCHECK_EQ(0, pending_task_count_); - if (task_count_ == 0) { + if (total_task_count_ == 0) { static const int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1; #if defined(V8_OS_MACOSX) @@ -873,15 +883,18 @@ void ConcurrentMarking::ScheduleTasks() { // marking on competing hyper-threads (regresses Octane/Splay). As such, // only use num_cores/2, leaving one of those for the main thread. // TODO(ulan): Use all cores on Mac 10.12+. - task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1)); + total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1)); #else // defined(OS_MACOSX) // On other platforms use all logical cores, leaving one for the main // thread. - task_count_ = Max(1, Min(kMaxTasks, num_cores - 1)); + total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 1)); #endif // defined(OS_MACOSX) + DCHECK_LE(total_task_count_, kMaxTasks); + // One task is for the main thread. + STATIC_ASSERT(kMaxTasks + 1 <= MarkingWorklist::kMaxNumTasks); } // Task id 0 is for the main thread. - for (int i = 1; i <= task_count_; i++) { + for (int i = 1; i <= total_task_count_; i++) { if (!is_pending_[i]) { if (FLAG_trace_concurrent_marking) { heap_->isolate()->PrintWithTimestamp( @@ -894,12 +907,12 @@ void ConcurrentMarking::ScheduleTasks() { is_pending_[i] = true; ++pending_task_count_; auto task = - base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i); + std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i); cancelable_id_[i] = task->id(); V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task)); } } - DCHECK_EQ(task_count_, pending_task_count_); + DCHECK_EQ(total_task_count_, pending_task_count_); } void ConcurrentMarking::RescheduleTasksIfNeeded() { @@ -907,11 +920,15 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() { if (heap_->IsTearingDown()) return; { base::MutexGuard guard(&pending_lock_); - if (pending_task_count_ > 0) return; + // The total task count is initialized in ScheduleTasks from + // NumberOfWorkerThreads of the platform. 
+ if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) { + return; + } } if (!shared_->IsGlobalPoolEmpty() || - !weak_objects_->current_ephemerons.IsEmpty() || - !weak_objects_->discovered_ephemerons.IsEmpty()) { + !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() || + !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) { ScheduleTasks(); } } @@ -925,7 +942,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) { if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) { CancelableTaskManager* task_manager = heap_->isolate()->cancelable_task_manager(); - for (int i = 1; i <= task_count_; i++) { + for (int i = 1; i <= total_task_count_; i++) { if (is_pending_[i]) { if (task_manager->TryAbort(cancelable_id_[i]) == TryAbortResult::kTaskAborted) { @@ -940,7 +957,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) { while (pending_task_count_ > 0) { pending_condition_.Wait(&pending_lock_); } - for (int i = 1; i <= task_count_; i++) { + for (int i = 1; i <= total_task_count_; i++) { DCHECK(!is_pending_[i]); } return true; @@ -956,7 +973,7 @@ bool ConcurrentMarking::IsStopped() { void ConcurrentMarking::FlushMemoryChunkData( MajorNonAtomicMarkingState* marking_state) { DCHECK_EQ(pending_task_count_, 0); - for (int i = 1; i <= task_count_; i++) { + for (int i = 1; i <= total_task_count_; i++) { MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data; for (auto& pair : memory_chunk_data) { // ClearLiveness sets the live bytes to zero. @@ -978,7 +995,7 @@ void ConcurrentMarking::FlushMemoryChunkData( } void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) { - for (int i = 1; i <= task_count_; i++) { + for (int i = 1; i <= total_task_count_; i++) { auto it = task_state_[i].memory_chunk_data.find(chunk); if (it != task_state_[i].memory_chunk_data.end()) { it->second.live_bytes = 0; @@ -989,7 +1006,7 @@ void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) { size_t ConcurrentMarking::TotalMarkedBytes() { size_t result = 0; - for (int i = 1; i <= task_count_; i++) { + for (int i = 1; i <= total_task_count_; i++) { result += base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes); } diff --git a/chromium/v8/src/heap/concurrent-marking.h b/chromium/v8/src/heap/concurrent-marking.h index be2fc03d462..c08a9c47b05 100644 --- a/chromium/v8/src/heap/concurrent-marking.h +++ b/chromium/v8/src/heap/concurrent-marking.h @@ -5,6 +5,8 @@ #ifndef V8_HEAP_CONCURRENT_MARKING_H_ #define V8_HEAP_CONCURRENT_MARKING_H_ +#include <memory> + #include "include/v8-platform.h" #include "src/base/atomic-utils.h" #include "src/base/platform/condition-variable.h" @@ -86,8 +88,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarking { // scavenge and is going to be re-used. void ClearMemoryChunkData(MemoryChunk* chunk); - int TaskCount() { return task_count_; } - // Checks if all threads are stopped. 
bool IsStopped(); @@ -124,7 +124,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking { int pending_task_count_ = 0; bool is_pending_[kMaxTasks + 1] = {}; CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {}; - int task_count_ = 0; + int total_task_count_ = 0; }; } // namespace internal diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h index 7c67ccfab71..a150f2c26a3 100644 --- a/chromium/v8/src/heap/embedder-tracing.h +++ b/chromium/v8/src/heap/embedder-tracing.h @@ -57,7 +57,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final { bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) { return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle); } - void ResetHandleInNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) { + + bool IsRootForNonTracingGC(const v8::TracedReference<v8::Value>& handle) { + return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle); + } + + void ResetHandleInNonTracingGC(const v8::TracedReference<v8::Value>& handle) { // Resetting is only called when IsRootForNonTracingGC returns false which // can only happen the EmbedderHeapTracer is set on API level. DCHECK(InUse()); diff --git a/chromium/v8/src/heap/factory-inl.h b/chromium/v8/src/heap/factory-inl.h index bcad5d27141..f0f61bbb2c1 100644 --- a/chromium/v8/src/heap/factory-inl.h +++ b/chromium/v8/src/heap/factory-inl.h @@ -43,44 +43,41 @@ Handle<String> Factory::NewSubString(Handle<String> str, int begin, int end) { return NewProperSubString(str, begin, end); } -Handle<Object> Factory::NewNumberFromSize(size_t value, - AllocationType allocation) { +Handle<Object> Factory::NewNumberFromSize(size_t value) { // We can't use Smi::IsValid() here because that operates on a signed // intptr_t, and casting from size_t could create a bogus sign bit. 
if (value <= static_cast<size_t>(Smi::kMaxValue)) { return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)), isolate()); } - return NewNumber(static_cast<double>(value), allocation); + return NewNumber(static_cast<double>(value)); } -Handle<Object> Factory::NewNumberFromInt64(int64_t value, - AllocationType allocation) { +Handle<Object> Factory::NewNumberFromInt64(int64_t value) { if (value <= std::numeric_limits<int32_t>::max() && value >= std::numeric_limits<int32_t>::min() && Smi::IsValid(static_cast<int32_t>(value))) { return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)), isolate()); } - return NewNumber(static_cast<double>(value), allocation); + return NewNumber(static_cast<double>(value)); } -Handle<HeapNumber> Factory::NewHeapNumber(double value, - AllocationType allocation) { - Handle<HeapNumber> heap_number = NewHeapNumber(allocation); +template <AllocationType allocation> +Handle<HeapNumber> Factory::NewHeapNumber(double value) { + Handle<HeapNumber> heap_number = NewHeapNumber<allocation>(); heap_number->set_value(value); return heap_number; } -Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits, - AllocationType allocation) { - Handle<HeapNumber> heap_number = NewHeapNumber(allocation); +template <AllocationType allocation> +Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits) { + Handle<HeapNumber> heap_number = NewHeapNumber<allocation>(); heap_number->set_value_as_bits(bits); return heap_number; } -Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN( - AllocationType allocation) { - return NewHeapNumberFromBits(kHoleNanInt64, allocation); +Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN() { + return NewHeapNumberFromBits(kHoleNanInt64); } Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements, diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc index 9bf46be6e81..d92ee41ae49 100644 --- a/chromium/v8/src/heap/factory.cc +++ b/chromium/v8/src/heap/factory.cc @@ -11,6 +11,7 @@ #include "src/builtins/constants-table-builder.h" #include "src/codegen/compiler.h" #include "src/execution/isolate-inl.h" +#include "src/execution/protectors-inl.h" #include "src/heap/heap-inl.h" #include "src/heap/incremental-marking.h" #include "src/heap/mark-compact-inl.h" @@ -117,11 +118,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal( CodePageCollectionMemoryModificationScope code_allocation(heap); HeapObject result; if (retry_allocation_or_fail) { - result = - heap->AllocateRawWithRetryOrFail(object_size, AllocationType::kCode); + result = heap->AllocateRawWith<Heap::kRetryOrFail>(object_size, + AllocationType::kCode); } else { - result = - heap->AllocateRawWithLightRetry(object_size, AllocationType::kCode); + result = heap->AllocateRawWith<Heap::kLightRetry>(object_size, + AllocationType::kCode); // Return an empty handle if we cannot allocate the code object. 
if (result.is_null()) return MaybeHandle<Code>(); } @@ -209,8 +210,8 @@ HeapObject Factory::AllocateRawWithImmortalMap(int size, AllocationType allocation, Map map, AllocationAlignment alignment) { - HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail( - size, allocation, alignment); + HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>( + size, allocation, AllocationOrigin::kRuntime, alignment); result.set_map_after_allocation(map, SKIP_WRITE_BARRIER); return result; } @@ -222,7 +223,7 @@ HeapObject Factory::AllocateRawWithAllocationSite( int size = map->instance_size(); if (!allocation_site.is_null()) size += AllocationMemento::kSize; HeapObject result = - isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation); + isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation); WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER; @@ -247,7 +248,7 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento, HeapObject Factory::AllocateRawArray(int size, AllocationType allocation) { HeapObject result = - isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation); + isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation); if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) { MemoryChunk* chunk = MemoryChunk::FromHeapObject(result); chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR); @@ -275,7 +276,7 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) { DCHECK(map->instance_type() != MAP_TYPE); int size = map->instance_size(); HeapObject result = - isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation); + isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation); // New space objects are allocated white. WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung ? SKIP_WRITE_BARRIER @@ -289,8 +290,8 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align, AllocationOrigin origin) { AllocationAlignment alignment = double_align ? 
kDoubleAligned : kWordAligned; Heap* heap = isolate()->heap(); - HeapObject result = - heap->AllocateRawWithRetryOrFail(size, allocation, origin, alignment); + HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>( + size, allocation, origin, alignment); heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo); return Handle<HeapObject>(result, isolate()); } @@ -323,17 +324,6 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2, return result; } -Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2, - Handle<Object> value3, - AllocationType allocation) { - Handle<Tuple3> result = - Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE, allocation)); - result->set_value1(*value1); - result->set_value2(*value2); - result->set_value3(*value3); - return result; -} - Handle<ArrayBoilerplateDescription> Factory::NewArrayBoilerplateDescription( ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) { Handle<ArrayBoilerplateDescription> result = @@ -358,24 +348,23 @@ Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription( Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string, Handle<Object> to_number, - const char* type_of, byte kind, - AllocationType allocation) { - Handle<Oddball> oddball(Oddball::cast(New(map, allocation)), isolate()); + const char* type_of, byte kind) { + Handle<Oddball> oddball(Oddball::cast(New(map, AllocationType::kReadOnly)), + isolate()); Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind); return oddball; } -Handle<Oddball> Factory::NewSelfReferenceMarker(AllocationType allocation) { +Handle<Oddball> Factory::NewSelfReferenceMarker() { return NewOddball(self_reference_marker_map(), "self_reference_marker", handle(Smi::FromInt(-1), isolate()), "undefined", - Oddball::kSelfReferenceMarker, allocation); + Oddball::kSelfReferenceMarker); } -Handle<PropertyArray> Factory::NewPropertyArray(int length, - AllocationType allocation) { +Handle<PropertyArray> Factory::NewPropertyArray(int length) { DCHECK_LE(0, length); if (length == 0) return empty_property_array(); - HeapObject result = AllocateRawFixedArray(length, allocation); + HeapObject result = AllocateRawFixedArray(length, AllocationType::kYoung); result.set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER); Handle<PropertyArray> array(PropertyArray::cast(result), isolate()); array->initialize_length(length); @@ -419,7 +408,7 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index, DCHECK_LT(0, length); HeapObject result = - AllocateRawArray(WeakFixedArray::SizeFor(length), allocation); + AllocateRawArray(WeakFixedArray::SizeFor(length), AllocationType::kOld); Map map = Map::cast(isolate()->root(map_root_index)); result.set_map_after_allocation(map, SKIP_WRITE_BARRIER); @@ -485,8 +474,7 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length, *the_hole_value(), allocation); } -Handle<FixedArray> Factory::NewUninitializedFixedArray( - int length, AllocationType allocation) { +Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) { DCHECK_LE(0, length); if (length == 0) return empty_fixed_array(); @@ -494,30 +482,30 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray( // array. After getting canary/performance coverage, either remove the // function or revert to returning uninitilized array. 
return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length, - *undefined_value(), allocation); + *undefined_value(), AllocationType::kYoung); } Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray( - int length, AllocationType allocation) { + int length) { if (length == 0) return empty_closure_feedback_cell_array(); Handle<ClosureFeedbackCellArray> feedback_cell_array = NewFixedArrayWithMap<ClosureFeedbackCellArray>( - RootIndex::kClosureFeedbackCellArrayMap, length, allocation); + RootIndex::kClosureFeedbackCellArrayMap, length, + AllocationType::kYoung); return feedback_cell_array; } Handle<FeedbackVector> Factory::NewFeedbackVector( Handle<SharedFunctionInfo> shared, - Handle<ClosureFeedbackCellArray> closure_feedback_cell_array, - AllocationType allocation) { + Handle<ClosureFeedbackCellArray> closure_feedback_cell_array) { int length = shared->feedback_metadata().slot_count(); DCHECK_LE(0, length); int size = FeedbackVector::SizeFor(length); - HeapObject result = - AllocateRawWithImmortalMap(size, allocation, *feedback_vector_map()); + HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld, + *feedback_vector_map()); Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate()); vector->set_shared_function_info(*shared); vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum( @@ -534,13 +522,12 @@ Handle<FeedbackVector> Factory::NewFeedbackVector( return vector; } -Handle<EmbedderDataArray> Factory::NewEmbedderDataArray( - int length, AllocationType allocation) { +Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) { DCHECK_LE(0, length); int size = EmbedderDataArray::SizeFor(length); - HeapObject result = - AllocateRawWithImmortalMap(size, allocation, *embedder_data_array_map()); + HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung, + *embedder_data_array_map()); Handle<EmbedderDataArray> array(EmbedderDataArray::cast(result), isolate()); array->set_length(length); @@ -589,25 +576,23 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription( return description; } -Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length, - AllocationType allocation) { +Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length) { if (length == 0) return empty_fixed_array(); if (length < 0 || length > FixedDoubleArray::kMaxLength) { isolate()->heap()->FatalProcessOutOfMemory("invalid array length"); } int size = FixedDoubleArray::SizeFor(length); Map map = *fixed_double_array_map(); - HeapObject result = - AllocateRawWithImmortalMap(size, allocation, map, kDoubleAligned); + HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kYoung, + map, kDoubleAligned); Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate()); array->set_length(length); return array; } -Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles( - int length, AllocationType allocation) { +Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(int length) { DCHECK_LE(0, length); - Handle<FixedArrayBase> array = NewFixedDoubleArray(length, allocation); + Handle<FixedArrayBase> array = NewFixedDoubleArray(length); if (length > 0) { Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, length); } @@ -633,11 +618,10 @@ Handle<FeedbackMetadata> Factory::NewFeedbackMetadata( return data; } -Handle<FrameArray> Factory::NewFrameArray(int number_of_frames, - AllocationType allocation) { +Handle<FrameArray> Factory::NewFrameArray(int number_of_frames) { 
DCHECK_LE(0, number_of_frames); - Handle<FixedArray> result = NewFixedArrayWithHoles( - FrameArray::LengthFor(number_of_frames), allocation); + Handle<FixedArray> result = + NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames)); result->set(FrameArray::kFrameCountIndex, Smi::kZero); return Handle<FrameArray>::cast(result); } @@ -1438,7 +1422,7 @@ Handle<Context> Factory::NewContext(RootIndex map_root_index, int size, Map map = Map::cast(isolate()->root(map_root_index)); HeapObject result = AllocateRawWithImmortalMap(size, allocation, map); Handle<Context> context(Context::cast(result), isolate()); - context->set_length(variadic_part_length); + context->initialize_length_and_extension_bit(variadic_part_length); DCHECK_EQ(context->SizeFromMap(map), size); if (size > Context::kTodoHeaderSize) { ObjectSlot start = context->RawField(Context::kTodoHeaderSize); @@ -1461,6 +1445,7 @@ Handle<NativeContext> Factory::NewNativeContext() { context->set_math_random_index(Smi::zero()); context->set_serialized_objects(*empty_fixed_array()); context->set_microtask_queue(nullptr); + context->set_osr_code_cache(*empty_weak_fixed_array()); return context; } @@ -1549,8 +1534,8 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous, Handle<ScopeInfo> scope_info, Handle<JSReceiver> extension, Handle<Context> wrapped, - Handle<StringSet> whitelist) { - STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1); + Handle<StringSet> blacklist) { + STATIC_ASSERT(Context::BLACK_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1); DCHECK(scope_info->IsDebugEvaluateScope()); Handle<HeapObject> ext = extension.is_null() ? Handle<HeapObject>::cast(the_hole_value()) @@ -1565,7 +1550,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous, c->set_native_context(previous->native_context()); c->set_extension(*ext); if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped); - if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist); + if (!blacklist.is_null()) c->set(Context::BLACK_LIST_INDEX, *blacklist); return c; } @@ -1648,20 +1633,16 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() { return info; } -Handle<Script> Factory::NewScript(Handle<String> source, - AllocationType allocation) { - return NewScriptWithId(source, isolate()->heap()->NextScriptId(), allocation); +Handle<Script> Factory::NewScript(Handle<String> source) { + return NewScriptWithId(source, isolate()->heap()->NextScriptId()); } -Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id, - AllocationType allocation) { - DCHECK(allocation == AllocationType::kOld || - allocation == AllocationType::kReadOnly); +Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id) { // Create and initialize script object. Heap* heap = isolate()->heap(); ReadOnlyRoots roots(heap); Handle<Script> script = - Handle<Script>::cast(NewStruct(SCRIPT_TYPE, allocation)); + Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld)); script->set_source(*source); script->set_name(roots.undefined_value()); script->set_id(script_id); @@ -1748,20 +1729,19 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask( return microtask; } -Handle<Foreign> Factory::NewForeign(Address addr, AllocationType allocation) { +Handle<Foreign> Factory::NewForeign(Address addr) { // Statically ensure that it is safe to allocate foreigns in paged spaces. 
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize); Map map = *foreign_map(); - HeapObject result = - AllocateRawWithImmortalMap(map.instance_size(), allocation, map); + HeapObject result = AllocateRawWithImmortalMap(map.instance_size(), + AllocationType::kYoung, map); Handle<Foreign> foreign(Foreign::cast(result), isolate()); foreign->set_foreign_address(addr); return foreign; } Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) { - DCHECK_LE(0, length); - if (length > ByteArray::kMaxLength) { + if (length < 0 || length > ByteArray::kMaxLength) { isolate()->heap()->FatalProcessOutOfMemory("invalid array length"); } int size = ByteArray::SizeFor(length); @@ -1776,8 +1756,7 @@ Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) { Handle<BytecodeArray> Factory::NewBytecodeArray( int length, const byte* raw_bytecodes, int frame_size, int parameter_count, Handle<FixedArray> constant_pool) { - DCHECK_LE(0, length); - if (length > BytecodeArray::kMaxLength) { + if (length < 0 || length > BytecodeArray::kMaxLength) { isolate()->heap()->FatalProcessOutOfMemory("invalid array length"); } // Bytecode array is AllocationType::kOld, so constant pool array should be @@ -1806,7 +1785,6 @@ Handle<BytecodeArray> Factory::NewBytecodeArray( } Handle<Cell> Factory::NewCell(Handle<Object> value) { - AllowDeferredHandleDereference convert_to_cell; STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize); HeapObject result = AllocateRawWithImmortalMap( Cell::kSize, AllocationType::kOld, *cell_map()); @@ -1816,7 +1794,6 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) { } Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) { - AllowDeferredHandleDereference convert_to_cell; HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize, AllocationType::kOld, *no_closures_cell_map()); Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate()); @@ -1827,7 +1804,6 @@ Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) { } Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) { - AllowDeferredHandleDereference convert_to_cell; HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize, AllocationType::kOld, *one_closure_cell_map()); Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate()); @@ -1838,7 +1814,6 @@ Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) { } Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) { - AllowDeferredHandleDereference convert_to_cell; HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize, AllocationType::kOld, *many_closures_cell_map()); Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate()); @@ -1864,15 +1839,13 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name, } Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors, - int slack, - AllocationType allocation) { - DCHECK(Heap::IsRegularObjectAllocation(allocation)); + int slack) { int number_of_all_descriptors = number_of_descriptors + slack; // Zero-length case must be handled outside. 
DCHECK_LT(0, number_of_all_descriptors); int size = DescriptorArray::SizeFor(number_of_all_descriptors); - HeapObject obj = - isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation); + HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>( + size, AllocationType::kYoung); obj.set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER); DescriptorArray array = DescriptorArray::cast(obj); array.Initialize(*empty_enum_cache(), *undefined_value(), @@ -1923,7 +1896,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size, !Map::CanHaveFastTransitionableElementsKind(type), IsDictionaryElementsKind(elements_kind) || IsTerminalElementsKind(elements_kind)); - HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail( + HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>( Map::kSize, AllocationType::kMap); result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER); return handle(InitializeMap(Map::cast(result), type, instance_size, @@ -1985,23 +1958,23 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite( // We can only clone regexps, normal objects, api objects, errors or arrays. // Copying anything else will break invariants. - CHECK(map->instance_type() == JS_REGEXP_TYPE || + CHECK(map->instance_type() == JS_REG_EXP_TYPE || map->instance_type() == JS_OBJECT_TYPE || map->instance_type() == JS_ERROR_TYPE || map->instance_type() == JS_ARRAY_TYPE || map->instance_type() == JS_API_OBJECT_TYPE || - map->instance_type() == WASM_GLOBAL_TYPE || - map->instance_type() == WASM_INSTANCE_TYPE || - map->instance_type() == WASM_MEMORY_TYPE || - map->instance_type() == WASM_MODULE_TYPE || - map->instance_type() == WASM_TABLE_TYPE || + map->instance_type() == WASM_GLOBAL_OBJECT_TYPE || + map->instance_type() == WASM_INSTANCE_OBJECT_TYPE || + map->instance_type() == WASM_MEMORY_OBJECT_TYPE || + map->instance_type() == WASM_MODULE_OBJECT_TYPE || + map->instance_type() == WASM_TABLE_OBJECT_TYPE || map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE); DCHECK(site.is_null() || AllocationSite::CanTrack(map->instance_type())); int object_size = map->instance_size(); int adjusted_object_size = site.is_null() ? 
object_size : object_size + AllocationMemento::kSize; - HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail( + HeapObject raw_clone = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>( adjusted_object_size, AllocationType::kYoung); DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation); @@ -2062,6 +2035,13 @@ void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) { array->initialize_length(length); } +inline void ZeroEmbedderFields(i::Handle<i::JSObject> obj) { + auto count = obj->GetEmbedderFieldCount(); + for (int i = 0; i < count; i++) { + obj->SetEmbedderField(i, Smi::kZero); + } +} + } // namespace template <typename T> @@ -2107,15 +2087,14 @@ Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array, } Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array, - int grow_by, - AllocationType allocation) { - return CopyArrayAndGrow(array, grow_by, allocation); + int grow_by) { + return CopyArrayAndGrow(array, grow_by, AllocationType::kYoung); } Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow( - Handle<WeakFixedArray> src, int grow_by, AllocationType allocation) { + Handle<WeakFixedArray> src, int grow_by) { DCHECK(!src->IsTransitionArray()); // Compacted by GC, this code doesn't work - return CopyArrayAndGrow(src, grow_by, allocation); + return CopyArrayAndGrow(src, grow_by, AllocationType::kOld); } Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow( @@ -2142,8 +2121,8 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow( } Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow( - Handle<PropertyArray> array, int grow_by, AllocationType allocation) { - return CopyArrayAndGrow(array, grow_by, allocation); + Handle<PropertyArray> array, int grow_by) { + return CopyArrayAndGrow(array, grow_by, AllocationType::kYoung); } Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array, @@ -2187,8 +2166,8 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray( Handle<FixedDoubleArray> array) { int len = array->length(); if (len == 0) return array; - Handle<FixedDoubleArray> result = Handle<FixedDoubleArray>::cast( - NewFixedDoubleArray(len, AllocationType::kYoung)); + Handle<FixedDoubleArray> result = + Handle<FixedDoubleArray>::cast(NewFixedDoubleArray(len)); Heap::CopyBlock( result->address() + FixedDoubleArray::kLengthOffset, array->address() + FixedDoubleArray::kLengthOffset, @@ -2196,32 +2175,39 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray( return result; } -Handle<Object> Factory::NewNumber(double value, AllocationType allocation) { +template <AllocationType allocation> +Handle<Object> Factory::NewNumber(double value) { // Materialize as a SMI if possible. int32_t int_value; if (DoubleToSmiInteger(value, &int_value)) { return handle(Smi::FromInt(int_value), isolate()); } - return NewHeapNumber(value, allocation); + return NewHeapNumber<allocation>(value); } -Handle<Object> Factory::NewNumberFromInt(int32_t value, - AllocationType allocation) { +template Handle<Object> V8_EXPORT_PRIVATE +Factory::NewNumber<AllocationType::kYoung>(double); +template Handle<Object> V8_EXPORT_PRIVATE +Factory::NewNumber<AllocationType::kOld>(double); +template Handle<Object> V8_EXPORT_PRIVATE +Factory::NewNumber<AllocationType::kReadOnly>(double); + +Handle<Object> Factory::NewNumberFromInt(int32_t value) { if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate()); // Bypass NewNumber to avoid various redundant checks. 
- return NewHeapNumber(FastI2D(value), allocation); + return NewHeapNumber(FastI2D(value)); } -Handle<Object> Factory::NewNumberFromUint(uint32_t value, - AllocationType allocation) { +Handle<Object> Factory::NewNumberFromUint(uint32_t value) { int32_t int32v = static_cast<int32_t>(value); if (int32v >= 0 && Smi::IsValid(int32v)) { return handle(Smi::FromInt(int32v), isolate()); } - return NewHeapNumber(FastUI2D(value), allocation); + return NewHeapNumber(FastUI2D(value)); } -Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) { +template <AllocationType allocation> +Handle<HeapNumber> Factory::NewHeapNumber() { STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize); Map map = *heap_number_map(); HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, allocation, @@ -2229,10 +2215,17 @@ Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) { return handle(HeapNumber::cast(result), isolate()); } +template Handle<HeapNumber> V8_EXPORT_PRIVATE +Factory::NewHeapNumber<AllocationType::kYoung>(); +template Handle<HeapNumber> V8_EXPORT_PRIVATE +Factory::NewHeapNumber<AllocationType::kOld>(); +template Handle<HeapNumber> V8_EXPORT_PRIVATE +Factory::NewHeapNumber<AllocationType::kReadOnly>(); + Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) { - return NewHeapNumber(value, isolate()->heap()->CanAllocateInReadOnlySpace() - ? AllocationType::kReadOnly - : AllocationType::kOld); + return isolate()->heap()->CanAllocateInReadOnlySpace() + ? NewHeapNumber<AllocationType::kReadOnly>(value) + : NewHeapNumber<AllocationType::kOld>(value); } Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length, @@ -2299,8 +2292,8 @@ Handle<Object> Factory::NewInvalidStringLengthError() { FATAL("Aborting on invalid string length"); } // Invalidate the "string length" protector. - if (isolate()->IsStringLengthOverflowIntact()) { - isolate()->InvalidateStringLengthOverflowProtector(); + if (Protectors::IsStringLengthOverflowLookupChainIntact(isolate())) { + Protectors::InvalidateStringLengthOverflowLookupChain(isolate()); } return NewRangeError(MessageTemplate::kInvalidStringLength); } @@ -2412,7 +2405,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) { case JS_ARRAY_TYPE: elements_kind = PACKED_SMI_ELEMENTS; break; - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: elements_kind = PACKED_ELEMENTS; break; default: @@ -2679,8 +2672,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) { { int obj_size = code->Size(); CodePageCollectionMemoryModificationScope code_allocation(heap); - HeapObject result = - heap->AllocateRawWithRetryOrFail(obj_size, AllocationType::kCode); + HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>( + obj_size, AllocationType::kCode); // Copy code object. Address old_addr = code->address(); @@ -2696,7 +2689,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) { // allocation is on. heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code); // Record all references to embedded objects in the new code object. 
+#ifndef V8_DISABLE_WRITE_BARRIERS WriteBarrierForCode(*new_code); +#endif } #ifdef VERIFY_HEAP @@ -2737,9 +2732,8 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor, return NewJSObjectFromMap(map, allocation); } -Handle<JSObject> Factory::NewJSObjectWithNullProto(AllocationType allocation) { - Handle<JSObject> result = - NewJSObject(isolate()->object_function(), allocation); +Handle<JSObject> Factory::NewJSObjectWithNullProto() { + Handle<JSObject> result = NewJSObject(isolate()->object_function()); Handle<Map> new_map = Map::Copy( isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto"); Map::SetPrototype(isolate(), new_map, null_value()); @@ -2776,7 +2770,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject( // The global object might be created from an object template with accessors. // Fill these accessors into the dictionary. Handle<DescriptorArray> descs(map->instance_descriptors(), isolate()); - for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : InternalIndex::Range(map->NumberOfOwnDescriptors())) { PropertyDetails details = descs->GetDetails(i); // Only accessors are expected. DCHECK_EQ(kAccessor, details.kind()); @@ -2888,13 +2882,14 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap( Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements( Handle<HeapObject> prototype, Handle<NameDictionary> properties, - Handle<FixedArrayBase> elements, AllocationType allocation) { + Handle<FixedArrayBase> elements) { Handle<Map> object_map = isolate()->slow_object_with_object_prototype_map(); if (object_map->prototype() != *prototype) { object_map = Map::TransitionToPrototype(isolate(), object_map, prototype); } DCHECK(object_map->is_dictionary_map()); - Handle<JSObject> object = NewJSObjectFromMap(object_map, allocation); + Handle<JSObject> object = + NewJSObjectFromMap(object_map, AllocationType::kYoung); object->set_raw_properties_or_hash(*properties); if (*elements != ReadOnlyRoots(isolate()).empty_fixed_array()) { DCHECK(elements->IsNumberDictionary()); @@ -3010,7 +3005,7 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() { Handle<JSModuleNamespace> module_namespace( Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map))); FieldIndex index = FieldIndex::ForDescriptor( - *map, JSModuleNamespace::kToStringTagFieldIndex); + *map, InternalIndex(JSModuleNamespace::kToStringTagFieldIndex)); module_namespace->FastPropertyAtPut(index, ReadOnlyRoots(isolate()).Module_string()); return module_namespace; @@ -3042,6 +3037,7 @@ Handle<SourceTextModule> Factory::NewSourceTextModule( Handle<FixedArray> requested_modules = requested_modules_length > 0 ? 
NewFixedArray(requested_modules_length) : empty_fixed_array(); + Handle<ArrayList> async_parent_modules = ArrayList::New(isolate(), 0); ReadOnlyRoots roots(isolate()); Handle<SourceTextModule> module( @@ -3061,6 +3057,12 @@ Handle<SourceTextModule> Factory::NewSourceTextModule( module->set_import_meta(roots.the_hole_value()); module->set_dfs_index(-1); module->set_dfs_ancestor_index(-1); + module->set_top_level_capability(roots.undefined_value()); + module->set_flags(0); + module->set_async(IsAsyncModule(code->kind())); + module->set_async_evaluating(false); + module->set_async_parent_modules(*async_parent_modules); + module->set_pending_async_dependencies(0); return module; } @@ -3086,15 +3088,43 @@ Handle<SyntheticModule> Factory::NewSyntheticModule( return module; } -Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared, - AllocationType allocation) { - Handle<JSFunction> array_buffer_fun( - shared == SharedFlag::kShared - ? isolate()->native_context()->shared_array_buffer_fun() - : isolate()->native_context()->array_buffer_fun(), +Handle<JSArrayBuffer> Factory::NewJSArrayBuffer( + std::shared_ptr<BackingStore> backing_store, AllocationType allocation) { + Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(), + isolate()); + auto result = + Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation)); + result->Setup(SharedFlag::kNotShared, std::move(backing_store)); + return result; +} + +MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore( + size_t byte_length, InitializedFlag initialized, + AllocationType allocation) { + std::unique_ptr<BackingStore> backing_store = nullptr; + + if (byte_length > 0) { + backing_store = BackingStore::Allocate(isolate(), byte_length, + SharedFlag::kNotShared, initialized); + if (!backing_store) return MaybeHandle<JSArrayBuffer>(); + } + Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(), + isolate()); + auto array_buffer = + Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation)); + array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store)); + return array_buffer; +} + +Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer( + std::shared_ptr<BackingStore> backing_store) { + Handle<Map> map( + isolate()->native_context()->shared_array_buffer_fun().initial_map(), isolate()); - Handle<Map> map(array_buffer_fun->initial_map(), isolate()); - return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation)); + auto result = Handle<JSArrayBuffer>::cast( + NewJSObjectFromMap(map, AllocationType::kYoung)); + result->Setup(SharedFlag::kShared, std::move(backing_store)); + return result; } Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value, @@ -3172,20 +3202,17 @@ void ForFixedTypedArray(ExternalArrayType array_type, size_t* element_size, Handle<JSArrayBufferView> Factory::NewJSArrayBufferView( Handle<Map> map, Handle<FixedArrayBase> elements, - Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length, - AllocationType allocation) { + Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length) { CHECK_LE(byte_length, buffer->byte_length()); CHECK_LE(byte_offset, buffer->byte_length()); CHECK_LE(byte_offset + byte_length, buffer->byte_length()); - Handle<JSArrayBufferView> array_buffer_view = - Handle<JSArrayBufferView>::cast(NewJSObjectFromMap(map, allocation)); + Handle<JSArrayBufferView> array_buffer_view = Handle<JSArrayBufferView>::cast( + NewJSObjectFromMap(map, AllocationType::kYoung)); 
array_buffer_view->set_elements(*elements); array_buffer_view->set_buffer(*buffer); array_buffer_view->set_byte_offset(byte_offset); array_buffer_view->set_byte_length(byte_length); - for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) { - array_buffer_view->SetEmbedderField(i, Smi::kZero); - } + ZeroEmbedderFields(array_buffer_view); DCHECK_EQ(array_buffer_view->GetEmbedderFieldCount(), v8::ArrayBufferView::kEmbedderFieldCount); return array_buffer_view; @@ -3193,8 +3220,8 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView( Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type, Handle<JSArrayBuffer> buffer, - size_t byte_offset, size_t length, - AllocationType allocation) { + size_t byte_offset, + size_t length) { size_t element_size; ElementsKind elements_kind; ForFixedTypedArray(type, &element_size, &elements_kind); @@ -3219,24 +3246,21 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type, default: UNREACHABLE(); } - Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast( - NewJSArrayBufferView(map, empty_byte_array(), buffer, byte_offset, - byte_length, allocation)); + Handle<JSTypedArray> typed_array = + Handle<JSTypedArray>::cast(NewJSArrayBufferView( + map, empty_byte_array(), buffer, byte_offset, byte_length)); typed_array->set_length(length); - typed_array->set_external_pointer( - reinterpret_cast<byte*>(buffer->backing_store()) + byte_offset); - typed_array->set_base_pointer(Smi::kZero); + typed_array->SetOffHeapDataPtr(buffer->backing_store(), byte_offset); return typed_array; } Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer, size_t byte_offset, - size_t byte_length, - AllocationType allocation) { + size_t byte_length) { Handle<Map> map(isolate()->native_context()->data_view_fun().initial_map(), isolate()); Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView( - map, empty_fixed_array(), buffer, byte_offset, byte_length, allocation)); + map, empty_fixed_array(), buffer, byte_offset, byte_length)); obj->set_data_pointer(static_cast<uint8_t*>(buffer->backing_store()) + byte_offset); return obj; @@ -3499,11 +3523,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo( share->clear_padding(); } - // Link into the list. - Handle<WeakArrayList> noscript_list = noscript_shared_function_infos(); - noscript_list = WeakArrayList::AddToEnd(isolate(), noscript_list, - MaybeObjectHandle::Weak(share)); - isolate()->heap()->set_noscript_shared_function_infos(*noscript_list); #ifdef VERIFY_HEAP share->SharedFunctionInfoVerify(isolate()); @@ -3894,6 +3913,9 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp, JSRegExp::Flags flags, int capture_count) { Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize); Smi uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue); + Smi ticks_until_tier_up = FLAG_regexp_tier_up + ? 
Smi::FromInt(FLAG_regexp_tier_up_ticks) + : uninitialized; store->set(JSRegExp::kTagIndex, Smi::FromInt(type)); store->set(JSRegExp::kSourceIndex, *source); store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags)); @@ -3904,7 +3926,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp, store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero); store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count)); store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized); - store->set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero); + store->set(JSRegExp::kIrregexpTicksUntilTierUpIndex, ticks_until_tier_up); regexp->set_data(*store); } @@ -4025,6 +4047,7 @@ Handle<Map> Factory::CreateSloppyFunctionMap( map->AppendDescriptor(isolate(), &d); } DCHECK_EQ(inobject_properties_count, field_index); + DCHECK_EQ(0, map->instance_descriptors().number_of_slack_descriptors()); LOG(isolate(), MapDetails(*map)); return map; } @@ -4035,10 +4058,15 @@ Handle<Map> Factory::CreateStrictFunctionMap( int header_size = has_prototype ? JSFunction::kSizeWithPrototype : JSFunction::kSizeWithoutPrototype; int inobject_properties_count = 0; - if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count; + // length and prototype accessors or just length accessor. + int descriptors_count = IsFunctionModeWithPrototype(function_mode) ? 2 : 1; + if (IsFunctionModeWithName(function_mode)) { + ++inobject_properties_count; // name property. + } else { + ++descriptors_count; // name accessor. + } if (IsFunctionModeWithHomeObject(function_mode)) ++inobject_properties_count; - int descriptors_count = (IsFunctionModeWithPrototype(function_mode) ? 3 : 2) + - inobject_properties_count; + descriptors_count += inobject_properties_count; Handle<Map> map = NewMap( JS_FUNCTION_TYPE, header_size + inobject_properties_count * kTaggedSize, @@ -4102,6 +4130,7 @@ Handle<Map> Factory::CreateStrictFunctionMap( map->AppendDescriptor(isolate(), &d); } DCHECK_EQ(inobject_properties_count, field_index); + DCHECK_EQ(0, map->instance_descriptors().number_of_slack_descriptors()); LOG(isolate(), MapDetails(*map)); return map; } @@ -4141,19 +4170,18 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) { return map; } -Handle<JSPromise> Factory::NewJSPromiseWithoutHook(AllocationType allocation) { - Handle<JSPromise> promise = Handle<JSPromise>::cast( - NewJSObject(isolate()->promise_function(), allocation)); +Handle<JSPromise> Factory::NewJSPromiseWithoutHook() { + Handle<JSPromise> promise = + Handle<JSPromise>::cast(NewJSObject(isolate()->promise_function())); promise->set_reactions_or_result(Smi::kZero); promise->set_flags(0); - for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) { - promise->SetEmbedderField(i, Smi::kZero); - } + ZeroEmbedderFields(promise); + DCHECK_EQ(promise->GetEmbedderFieldCount(), v8::Promise::kEmbedderFieldCount); return promise; } -Handle<JSPromise> Factory::NewJSPromise(AllocationType allocation) { - Handle<JSPromise> promise = NewJSPromiseWithoutHook(allocation); +Handle<JSPromise> Factory::NewJSPromise() { + Handle<JSPromise> promise = NewJSPromiseWithoutHook(); isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value()); return promise; } diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h index 1e47926e8e4..35de6425c9a 100644 --- a/chromium/v8/src/heap/factory.h +++ b/chromium/v8/src/heap/factory.h @@ -74,7 +74,8 @@ class WeakCell; struct SourceRange; template <typename T> class ZoneVector; -enum 
class SharedFlag : uint32_t; +enum class SharedFlag : uint8_t; +enum class InitializedFlag : uint8_t; enum FunctionMode { kWithNameBit = 1 << 0, @@ -107,14 +108,12 @@ enum FunctionMode { // Interface for handle based allocation. class V8_EXPORT_PRIVATE Factory { public: - Handle<Oddball> NewOddball( - Handle<Map> map, const char* to_string, Handle<Object> to_number, - const char* type_of, byte kind, - AllocationType allocation = AllocationType::kReadOnly); + Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string, + Handle<Object> to_number, const char* type_of, + byte kind); // Marks self references within code generation. - Handle<Oddball> NewSelfReferenceMarker( - AllocationType allocation = AllocationType::kOld); + Handle<Oddball> NewSelfReferenceMarker(); // Allocates a fixed array-like object with given map and initialized with // undefined values. @@ -140,8 +139,7 @@ class V8_EXPORT_PRIVATE Factory { int length, AllocationType allocation = AllocationType::kYoung); // Allocates a property array initialized with undefined values. - Handle<PropertyArray> NewPropertyArray( - int length, AllocationType allocation = AllocationType::kYoung); + Handle<PropertyArray> NewPropertyArray(int length); // Tries allocating a fixed array initialized with undefined values. // In case of an allocation failure (OOM) an empty handle is returned. // The caller has to manually signal an @@ -156,24 +154,20 @@ class V8_EXPORT_PRIVATE Factory { int length, AllocationType allocation = AllocationType::kYoung); // Allocates an uninitialized fixed array. It must be filled by the caller. - Handle<FixedArray> NewUninitializedFixedArray( - int length, AllocationType allocation = AllocationType::kYoung); + Handle<FixedArray> NewUninitializedFixedArray(int length); // Allocates a closure feedback cell array whose feedback cells are // initialized with undefined values. - Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray( - int num_slots, AllocationType allocation = AllocationType::kYoung); + Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(int num_slots); // Allocates a feedback vector whose slots are initialized with undefined // values. Handle<FeedbackVector> NewFeedbackVector( Handle<SharedFunctionInfo> shared, - Handle<ClosureFeedbackCellArray> closure_feedback_cell_array, - AllocationType allocation = AllocationType::kYoung); + Handle<ClosureFeedbackCellArray> closure_feedback_cell_array); // Allocates a clean embedder data array with given capacity. - Handle<EmbedderDataArray> NewEmbedderDataArray( - int length, AllocationType allocation = AllocationType::kYoung); + Handle<EmbedderDataArray> NewEmbedderDataArray(int length); // Allocates a fixed array for name-value pairs of boilerplate properties and // calculates the number of properties we need to store in the backing store. @@ -183,20 +177,17 @@ class V8_EXPORT_PRIVATE Factory { // Allocate a new uninitialized fixed double array. // The function returns a pre-allocated empty fixed array for length = 0, // so the return type must be the general fixed array class. - Handle<FixedArrayBase> NewFixedDoubleArray( - int length, AllocationType allocation = AllocationType::kYoung); + Handle<FixedArrayBase> NewFixedDoubleArray(int length); // Allocate a new fixed double array with hole values. 
- Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles( - int size, AllocationType allocation = AllocationType::kYoung); + Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(int size); // Allocates a FeedbackMedata object and zeroes the data section. Handle<FeedbackMetadata> NewFeedbackMetadata( int slot_count, int feedback_cell_count, AllocationType allocation = AllocationType::kOld); - Handle<FrameArray> NewFrameArray( - int number_of_frames, AllocationType allocation = AllocationType::kYoung); + Handle<FrameArray> NewFrameArray(int number_of_frames); Handle<OrderedHashSet> NewOrderedHashSet(); Handle<OrderedHashMap> NewOrderedHashMap(); @@ -223,10 +214,6 @@ class V8_EXPORT_PRIVATE Factory { Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2, AllocationType allocation); - // Create a new Tuple3 struct. - Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2, - Handle<Object> value3, AllocationType allocation); - // Create a new ArrayBoilerplateDescription struct. Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription( ElementsKind elements_kind, Handle<FixedArrayBase> constant_values); @@ -451,11 +438,8 @@ class V8_EXPORT_PRIVATE Factory { Handle<AccessorInfo> NewAccessorInfo(); - Handle<Script> NewScript(Handle<String> source, - AllocationType allocation = AllocationType::kOld); - Handle<Script> NewScriptWithId( - Handle<String> source, int script_id, - AllocationType allocation = AllocationType::kOld); + Handle<Script> NewScript(Handle<String> source); + Handle<Script> NewScriptWithId(Handle<String> source, int script_id); Handle<Script> CloneScript(Handle<Script> script); Handle<BreakPointInfo> NewBreakPointInfo(int source_position); @@ -479,8 +463,7 @@ class V8_EXPORT_PRIVATE Factory { Handle<JSReceiver> thenable, Handle<Context> context); // Foreign objects are pretenured when allocated by the bootstrapper. 
- Handle<Foreign> NewForeign( - Address addr, AllocationType allocation = AllocationType::kYoung); + Handle<Foreign> NewForeign(Address addr); Handle<ByteArray> NewByteArray( int length, AllocationType allocation = AllocationType::kYoung); @@ -498,9 +481,8 @@ class V8_EXPORT_PRIVATE Factory { Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value); Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value); - Handle<DescriptorArray> NewDescriptorArray( - int number_of_entries, int slack = 0, - AllocationType allocation = AllocationType::kYoung); + Handle<DescriptorArray> NewDescriptorArray(int number_of_entries, + int slack = 0); Handle<TransitionArray> NewTransitionArray(int number_of_transitions, int slack = 0); @@ -537,21 +519,18 @@ class V8_EXPORT_PRIVATE Factory { Handle<FixedArray> CopyFixedArrayWithMap(Handle<FixedArray> array, Handle<Map> map); - Handle<FixedArray> CopyFixedArrayAndGrow( - Handle<FixedArray> array, int grow_by, - AllocationType allocation = AllocationType::kYoung); + Handle<FixedArray> CopyFixedArrayAndGrow(Handle<FixedArray> array, + int grow_by); - Handle<WeakFixedArray> CopyWeakFixedArrayAndGrow( - Handle<WeakFixedArray> array, int grow_by, - AllocationType allocation = AllocationType::kYoung); + Handle<WeakFixedArray> CopyWeakFixedArrayAndGrow(Handle<WeakFixedArray> array, + int grow_by); Handle<WeakArrayList> CopyWeakArrayListAndGrow( Handle<WeakArrayList> array, int grow_by, AllocationType allocation = AllocationType::kYoung); - Handle<PropertyArray> CopyPropertyArrayAndGrow( - Handle<PropertyArray> array, int grow_by, - AllocationType allocation = AllocationType::kYoung); + Handle<PropertyArray> CopyPropertyArrayAndGrow(Handle<PropertyArray> array, + int grow_by); Handle<FixedArray> CopyFixedArrayUpTo( Handle<FixedArray> array, int new_len, @@ -567,32 +546,28 @@ class V8_EXPORT_PRIVATE Factory { // Numbers (e.g. literals) are pretenured by the parser. // The return value may be a smi or a heap number. - Handle<Object> NewNumber(double value, - AllocationType allocation = AllocationType::kYoung); - - Handle<Object> NewNumberFromInt( - int32_t value, AllocationType allocation = AllocationType::kYoung); - Handle<Object> NewNumberFromUint( - uint32_t value, AllocationType allocation = AllocationType::kYoung); - inline Handle<Object> NewNumberFromSize( - size_t value, AllocationType allocation = AllocationType::kYoung); - inline Handle<Object> NewNumberFromInt64( - int64_t value, AllocationType allocation = AllocationType::kYoung); - inline Handle<HeapNumber> NewHeapNumber( - double value, AllocationType allocation = AllocationType::kYoung); - inline Handle<HeapNumber> NewHeapNumberFromBits( - uint64_t bits, AllocationType allocation = AllocationType::kYoung); + template <AllocationType allocation = AllocationType::kYoung> + EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) + Handle<Object> NewNumber(double value); + Handle<Object> NewNumberFromInt(int32_t value); + Handle<Object> NewNumberFromUint(uint32_t value); + inline Handle<Object> NewNumberFromSize(size_t value); + inline Handle<Object> NewNumberFromInt64(int64_t value); + template <AllocationType allocation = AllocationType::kYoung> + inline Handle<HeapNumber> NewHeapNumber(double value); + template <AllocationType allocation = AllocationType::kYoung> + inline Handle<HeapNumber> NewHeapNumberFromBits(uint64_t bits); // Creates heap number object with not yet set value field. 
- Handle<HeapNumber> NewHeapNumber( - AllocationType allocation = AllocationType::kYoung); + template <AllocationType allocation = AllocationType::kYoung> + EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) + Handle<HeapNumber> NewHeapNumber(); // Creates a new HeapNumber in read-only space if possible otherwise old // space. Handle<HeapNumber> NewHeapNumberForCodeAssembler(double value); - inline Handle<HeapNumber> NewHeapNumberWithHoleNaN( - AllocationType allocation = AllocationType::kYoung); + inline Handle<HeapNumber> NewHeapNumberWithHoleNaN(); // Allocates a new BigInt with {length} digits. Only to be used by // MutableBigInt::New*. @@ -609,8 +584,7 @@ class V8_EXPORT_PRIVATE Factory { Handle<JSFunction> constructor, AllocationType allocation = AllocationType::kYoung); // JSObject without a prototype. - Handle<JSObject> NewJSObjectWithNullProto( - AllocationType allocation = AllocationType::kYoung); + Handle<JSObject> NewJSObjectWithNullProto(); // Global objects are pretenured and initialized based on a constructor. Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor); @@ -644,8 +618,7 @@ class V8_EXPORT_PRIVATE Factory { // object will have dictionary elements. Handle<JSObject> NewSlowJSObjectWithPropertiesAndElements( Handle<HeapObject> prototype, Handle<NameDictionary> properties, - Handle<FixedArrayBase> elements, - AllocationType allocation = AllocationType::kYoung); + Handle<FixedArrayBase> elements); // JS arrays are pretenured when allocated by the parser. @@ -692,20 +665,27 @@ class V8_EXPORT_PRIVATE Factory { v8::Module::SyntheticModuleEvaluationSteps evaluation_steps); Handle<JSArrayBuffer> NewJSArrayBuffer( - SharedFlag shared, AllocationType allocation = AllocationType::kYoung); + std::shared_ptr<BackingStore> backing_store, + AllocationType allocation = AllocationType::kYoung); + + MaybeHandle<JSArrayBuffer> NewJSArrayBufferAndBackingStore( + size_t byte_length, InitializedFlag initialized, + AllocationType allocation = AllocationType::kYoung); + + Handle<JSArrayBuffer> NewJSSharedArrayBuffer( + std::shared_ptr<BackingStore> backing_store); static void TypeAndSizeForElementsKind(ElementsKind kind, ExternalArrayType* array_type, size_t* element_size); // Creates a new JSTypedArray with the specified buffer. - Handle<JSTypedArray> NewJSTypedArray( - ExternalArrayType type, Handle<JSArrayBuffer> buffer, size_t byte_offset, - size_t length, AllocationType allocation = AllocationType::kYoung); + Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type, + Handle<JSArrayBuffer> buffer, + size_t byte_offset, size_t length); - Handle<JSDataView> NewJSDataView( - Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length, - AllocationType allocation = AllocationType::kYoung); + Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer, + size_t byte_offset, size_t byte_length); Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done); Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator( @@ -922,10 +902,8 @@ class V8_EXPORT_PRIVATE Factory { // Converts the given ToPrimitive hint to it's string representation. 
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint); - Handle<JSPromise> NewJSPromiseWithoutHook( - AllocationType allocation = AllocationType::kYoung); - Handle<JSPromise> NewJSPromise( - AllocationType allocation = AllocationType::kYoung); + Handle<JSPromise> NewJSPromiseWithoutHook(); + Handle<JSPromise> NewJSPromise(); Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false); @@ -1034,8 +1012,7 @@ class V8_EXPORT_PRIVATE Factory { Handle<JSArrayBufferView> NewJSArrayBufferView( Handle<Map> map, Handle<FixedArrayBase> elements, - Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length, - AllocationType allocation); + Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length); // Allocate memory for an uninitialized array (e.g., a FixedArray or similar). HeapObject AllocateRawArray(int size, AllocationType allocation); diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h index da803f33395..56f3590b8aa 100644 --- a/chromium/v8/src/heap/heap-inl.h +++ b/chromium/v8/src/heap/heap-inl.h @@ -111,10 +111,6 @@ void Heap::SetRootStringTable(StringTable value) { roots_table()[RootIndex::kStringTable] = value.ptr(); } -void Heap::SetRootNoScriptSharedFunctionInfos(Object value) { - roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value.ptr(); -} - void Heap::SetMessageListeners(TemplateList value) { roots_table()[RootIndex::kMessageListeners] = value.ptr(); } @@ -163,7 +159,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type, AllocationAlignment alignment) { DCHECK(AllowHandleAllocation::IsAllowed()); DCHECK(AllowHeapAllocation::IsAllowed()); - DCHECK(gc_state_ == NOT_IN_GC); + DCHECK_EQ(gc_state_, NOT_IN_GC); #ifdef V8_ENABLE_ALLOCATION_TIMEOUT if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) { if (!always_allocate() && Heap::allocation_timeout_-- <= 0) { @@ -180,8 +176,9 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type, HeapObject object; AllocationResult allocation; - if (FLAG_single_generation && type == AllocationType::kYoung) + if (FLAG_single_generation && type == AllocationType::kYoung) { type = AllocationType::kOld; + } if (AllocationType::kYoung == type) { if (large_object) { @@ -212,9 +209,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type, } else if (AllocationType::kMap == type) { allocation = map_space_->AllocateRawUnaligned(size_in_bytes); } else if (AllocationType::kReadOnly == type) { -#ifdef V8_USE_SNAPSHOT DCHECK(isolate_->serializer_enabled()); -#endif DCHECK(!large_object); DCHECK(CanAllocateInReadOnlySpace()); DCHECK_EQ(AllocationOrigin::kRuntime, origin); @@ -242,6 +237,40 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type, return allocation; } +template <Heap::AllocationRetryMode mode> +HeapObject Heap::AllocateRawWith(int size, AllocationType allocation, + AllocationOrigin origin, + AllocationAlignment alignment) { + DCHECK(AllowHandleAllocation::IsAllowed()); + DCHECK(AllowHeapAllocation::IsAllowed()); + DCHECK_EQ(gc_state_, NOT_IN_GC); + Heap* heap = isolate()->heap(); + Address* top = heap->NewSpaceAllocationTopAddress(); + Address* limit = heap->NewSpaceAllocationLimitAddress(); + if (allocation == AllocationType::kYoung && + alignment == AllocationAlignment::kWordAligned && + size <= kMaxRegularHeapObjectSize && + (*limit - *top >= static_cast<unsigned>(size)) && + V8_LIKELY(!FLAG_single_generation && FLAG_inline_new && + FLAG_gc_interval == 0)) { + 
DCHECK(IsAligned(size, kTaggedSize)); + HeapObject obj = HeapObject::FromAddress(*top); + *top += size; + heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo); + MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size); + return obj; + } + switch (mode) { + case kLightRetry: + return AllocateRawWithLightRetrySlowPath(size, allocation, origin, + alignment); + case kRetryOrFail: + return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin, + alignment); + } + UNREACHABLE(); +} + void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) { for (auto& tracker : allocation_trackers_) { tracker->AllocationEvent(object.address(), size_in_bytes); diff --git a/chromium/v8/src/heap/heap-write-barrier-inl.h b/chromium/v8/src/heap/heap-write-barrier-inl.h index 5687284b1e8..a0d9902006f 100644 --- a/chromium/v8/src/heap/heap-write-barrier-inl.h +++ b/chromium/v8/src/heap/heap-write-barrier-inl.h @@ -212,6 +212,7 @@ inline void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host, inline WriteBarrierMode GetWriteBarrierModeForObject( HeapObject object, const DisallowHeapAllocation* promise) { + if (FLAG_disable_write_barriers) return SKIP_WRITE_BARRIER; DCHECK(Heap_PageFlagsAreConsistent(object)); heap_internals::MemoryChunk* chunk = heap_internals::MemoryChunk::FromHeapObject(object); @@ -221,6 +222,9 @@ inline WriteBarrierMode GetWriteBarrierModeForObject( } inline bool ObjectInYoungGeneration(Object object) { + // TODO(rong): Fix caller of this function when we deploy + // v8_use_third_party_heap. + if (FLAG_single_generation) return false; if (object.IsSmi()) return false; return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object)) ->InYoungGeneration(); diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc index ff3b34cfb4f..a80186504d7 100644 --- a/chromium/v8/src/heap/heap.cc +++ b/chromium/v8/src/heap/heap.cc @@ -39,6 +39,7 @@ #include "src/heap/incremental-marking.h" #include "src/heap/mark-compact-inl.h" #include "src/heap/mark-compact.h" +#include "src/heap/memory-measurement.h" #include "src/heap/memory-reducer.h" #include "src/heap/object-stats.h" #include "src/heap/objects-visiting-inl.h" @@ -47,7 +48,6 @@ #include "src/heap/remembered-set.h" #include "src/heap/scavenge-job.h" #include "src/heap/scavenger-inl.h" -#include "src/heap/store-buffer.h" #include "src/heap/stress-marking-observer.h" #include "src/heap/stress-scavenge-observer.h" #include "src/heap/sweeper.h" @@ -913,23 +913,6 @@ void Heap::RemoveAllocationObserversFromAllSpaces( } } -class Heap::SkipStoreBufferScope { - public: - explicit SkipStoreBufferScope(StoreBuffer* store_buffer) - : store_buffer_(store_buffer) { - store_buffer_->MoveAllEntriesToRememberedSet(); - store_buffer_->SetMode(StoreBuffer::IN_GC); - } - - ~SkipStoreBufferScope() { - DCHECK(store_buffer_->Empty()); - store_buffer_->SetMode(StoreBuffer::NOT_IN_GC); - } - - private: - StoreBuffer* store_buffer_; -}; - namespace { inline bool MakePretenureDecision( AllocationSite site, AllocationSite::PretenureDecision current_decision, @@ -1965,44 +1948,40 @@ bool Heap::PerformGarbageCollection( size_t start_young_generation_size = Heap::new_space()->Size() + new_lo_space()->SizeOfObjects(); - { - Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get()); - - switch (collector) { - case MARK_COMPACTOR: - UpdateOldGenerationAllocationCounter(); - // Perform mark-sweep with optional compaction. 
- MarkCompact(); - old_generation_size_configured_ = true; - // This should be updated before PostGarbageCollectionProcessing, which - // can cause another GC. Take into account the objects promoted during - // GC. - old_generation_allocation_counter_at_last_gc_ += - static_cast<size_t>(promoted_objects_size_); - old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects(); - break; - case MINOR_MARK_COMPACTOR: - MinorMarkCompact(); - break; - case SCAVENGER: - if ((fast_promotion_mode_ && - CanExpandOldGeneration(new_space()->Size() + - new_lo_space()->Size()))) { - tracer()->NotifyYoungGenerationHandling( - YoungGenerationHandling::kFastPromotionDuringScavenge); - EvacuateYoungGeneration(); - } else { - tracer()->NotifyYoungGenerationHandling( - YoungGenerationHandling::kRegularScavenge); - - Scavenge(); - } - break; - } + switch (collector) { + case MARK_COMPACTOR: + UpdateOldGenerationAllocationCounter(); + // Perform mark-sweep with optional compaction. + MarkCompact(); + old_generation_size_configured_ = true; + // This should be updated before PostGarbageCollectionProcessing, which + // can cause another GC. Take into account the objects promoted during + // GC. + old_generation_allocation_counter_at_last_gc_ += + static_cast<size_t>(promoted_objects_size_); + old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects(); + break; + case MINOR_MARK_COMPACTOR: + MinorMarkCompact(); + break; + case SCAVENGER: + if ((fast_promotion_mode_ && + CanExpandOldGeneration(new_space()->Size() + + new_lo_space()->Size()))) { + tracer()->NotifyYoungGenerationHandling( + YoungGenerationHandling::kFastPromotionDuringScavenge); + EvacuateYoungGeneration(); + } else { + tracer()->NotifyYoungGenerationHandling( + YoungGenerationHandling::kRegularScavenge); - ProcessPretenuringFeedback(); + Scavenge(); + } + break; } + ProcessPretenuringFeedback(); + UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size)); ConfigureInitialOldGenerationSize(); @@ -2780,12 +2759,34 @@ HeapObject Heap::AlignWithFiller(HeapObject object, int object_size, return object; } -void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) { - ArrayBufferTracker::RegisterNew(this, buffer); +void* Heap::AllocateExternalBackingStore( + const std::function<void*(size_t)>& allocate, size_t byte_length) { + // TODO(ulan): Perform GCs proactively based on the byte_length and + // the current external backing store counters. 
+ void* result = allocate(byte_length); + if (result) return result; + for (int i = 0; i < 2; i++) { + CollectGarbage(OLD_SPACE, GarbageCollectionReason::kExternalMemoryPressure); + result = allocate(byte_length); + if (result) return result; + } + isolate()->counters()->gc_last_resort_from_handles()->Increment(); + CollectAllAvailableGarbage(GarbageCollectionReason::kExternalMemoryPressure); + return allocate(byte_length); +} + +void Heap::RegisterBackingStore(JSArrayBuffer buffer, + std::shared_ptr<BackingStore> backing_store) { + ArrayBufferTracker::RegisterNew(this, buffer, std::move(backing_store)); } -void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) { - ArrayBufferTracker::Unregister(this, buffer); +std::shared_ptr<BackingStore> Heap::UnregisterBackingStore( + JSArrayBuffer buffer) { + return ArrayBufferTracker::Unregister(this, buffer); +} + +std::shared_ptr<BackingStore> Heap::LookupBackingStore(JSArrayBuffer buffer) { + return ArrayBufferTracker::Lookup(this, buffer); } void Heap::ConfigureInitialOldGenerationSize() { @@ -3387,16 +3388,23 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation( } } -void Heap::NotifyObjectLayoutChange(HeapObject object, int size, - const DisallowHeapAllocation&) { +void Heap::NotifyObjectLayoutChange( + HeapObject object, const DisallowHeapAllocation&, + InvalidateRecordedSlots invalidate_recorded_slots) { if (incremental_marking()->IsMarking()) { incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object); if (incremental_marking()->IsCompacting() && + invalidate_recorded_slots == InvalidateRecordedSlots::kYes && MayContainRecordedSlots(object)) { MemoryChunk::FromHeapObject(object) - ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size); + ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object); } } + if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes && + MayContainRecordedSlots(object)) { + MemoryChunk::FromHeapObject(object) + ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object); + } #ifdef VERIFY_HEAP if (FLAG_verify_heap) { DCHECK(pending_layout_change_object_.is_null()); @@ -3684,8 +3692,7 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level, isolate()->stack_guard()->RequestGC(); auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner( reinterpret_cast<v8::Isolate*>(isolate())); - taskrunner->PostTask( - base::make_unique<MemoryPressureInterruptTask>(this)); + taskrunner->PostTask(std::make_unique<MemoryPressureInterruptTask>(this)); } } } @@ -3748,6 +3755,11 @@ bool Heap::InvokeNearHeapLimitCallback() { return false; } +Handle<JSPromise> Heap::MeasureMemory(Handle<NativeContext> context, + v8::MeasureMemoryMode mode) { + return memory_measurement_->EnqueueRequest(context, mode); +} + void Heap::CollectCodeStatistics() { TRACE_EVENT0("v8", "Heap::CollectCodeStatistics"); CodeStatistics::ResetCodeAndMetadataStatistics(isolate()); @@ -4096,7 +4108,19 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end, } return KEEP_SLOT; }, - SlotSet::PREFREE_EMPTY_BUCKETS); + SlotSet::FREE_EMPTY_BUCKETS); + if (direction == OLD_TO_NEW) { + CHECK(chunk->SweepingDone()); + RememberedSetSweeping::Iterate( + chunk, + [start, end, untyped](MaybeObjectSlot slot) { + if (start <= slot.address() && slot.address() < end) { + untyped->insert(slot.address()); + } + return KEEP_SLOT; + }, + SlotSet::FREE_EMPTY_BUCKETS); + } RememberedSet<direction>::IterateTyped( chunk, [=](SlotType type, Address slot) { if (start <= slot && slot < end) { @@ -4117,7 +4141,6 @@ void 
Heap::VerifyRememberedSetFor(HeapObject object) { std::set<Address> old_to_new; std::set<std::pair<SlotType, Address> > typed_old_to_new; if (!InYoungGeneration(object)) { - store_buffer()->MoveAllEntriesToRememberedSet(); CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new); OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new, &this->ephemeron_remembered_set_); @@ -4288,6 +4311,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) { FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this); isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor); isolate_->handle_scope_implementer()->Iterate(v); + isolate_->IterateDeferredHandles(&left_trim_visitor); isolate_->IterateDeferredHandles(v); v->Synchronize(VisitorSynchronization::kHandleScope); @@ -4879,9 +4903,9 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) { return heap_object; } -HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation, - AllocationOrigin origin, - AllocationAlignment alignment) { +HeapObject Heap::AllocateRawWithLightRetrySlowPath( + int size, AllocationType allocation, AllocationOrigin origin, + AllocationAlignment alignment) { HeapObject result; AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment); if (alloc.To(&result)) { @@ -4901,12 +4925,12 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation, return HeapObject(); } -HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation, - AllocationOrigin origin, - AllocationAlignment alignment) { +HeapObject Heap::AllocateRawWithRetryOrFailSlowPath( + int size, AllocationType allocation, AllocationOrigin origin, + AllocationAlignment alignment) { AllocationResult alloc; HeapObject result = - AllocateRawWithLightRetry(size, allocation, origin, alignment); + AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment); if (!result.is_null()) return result; isolate()->counters()->gc_last_resort_from_handles()->Increment(); @@ -4979,8 +5003,6 @@ void Heap::SetUp() { memory_allocator_.reset( new MemoryAllocator(isolate_, MaxReserved(), code_range_size_)); - store_buffer_.reset(new StoreBuffer(this)); - mark_compact_collector_.reset(new MarkCompactCollector(this)); scavenger_collector_.reset(new ScavengerCollector(this)); @@ -5039,6 +5061,7 @@ void Heap::SetUpSpaces() { #endif // ENABLE_MINOR_MC array_buffer_collector_.reset(new ArrayBufferCollector(this)); gc_idle_time_handler_.reset(new GCIdleTimeHandler()); + memory_measurement_.reset(new MemoryMeasurement(isolate())); memory_reducer_.reset(new MemoryReducer(this)); if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) { live_object_stats_.reset(new ObjectStats(this)); @@ -5049,8 +5072,6 @@ void Heap::SetUpSpaces() { LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); LOG(isolate_, IntPtrTEvent("heap-available", Available())); - store_buffer()->SetUp(); - mark_compact_collector()->SetUp(); #ifdef ENABLE_MINOR_MC if (minor_mark_compact_collector() != nullptr) { @@ -5179,10 +5200,13 @@ EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const { } void Heap::RegisterExternallyReferencedObject(Address* location) { - // The embedder is not aware of whether numbers are materialized as heap - // objects are just passed around as Smis. 
+ GlobalHandles::MarkTraced(location); Object object(*location); - if (!object.IsHeapObject()) return; + if (!object.IsHeapObject()) { + // The embedder is not aware of whether numbers are materialized as heap + // objects are just passed around as Smis. + return; + } HeapObject heap_object = HeapObject::cast(object); DCHECK(IsValidHeapObject(this, heap_object)); if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) { @@ -5282,8 +5306,6 @@ void Heap::TearDown() { space_[i] = nullptr; } - store_buffer()->TearDown(); - memory_allocator()->TearDown(); StrongRootsList* next = nullptr; @@ -5293,7 +5315,6 @@ void Heap::TearDown() { } strong_roots_list_ = nullptr; - store_buffer_.reset(); memory_allocator_.reset(); } @@ -5404,13 +5425,6 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) { DCHECK_IMPLIES(allocation == AllocationType::kOld, InOldSpace(*scripts)); scripts = CompactWeakArrayList(this, scripts, allocation); set_script_list(*scripts); - - Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(), - isolate()); - DCHECK_IMPLIES(allocation == AllocationType::kOld, - InOldSpace(*no_script_list)); - no_script_list = CompactWeakArrayList(this, no_script_list, allocation); - set_noscript_shared_function_infos(*no_script_list); } void Heap::AddRetainedMap(Handle<Map> map) { @@ -5511,53 +5525,55 @@ void Heap::CheckHandleCount() { isolate_->handle_scope_implementer()->Iterate(&v); } -Address* Heap::store_buffer_top_address() { - return store_buffer()->top_address(); -} - -// static -intptr_t Heap::store_buffer_mask_constant() { - return StoreBuffer::kStoreBufferMask; -} - -// static -Address Heap::store_buffer_overflow_function_address() { - return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow); -} - void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) { +#ifndef V8_DISABLE_WRITE_BARRIERS DCHECK(!IsLargeObject(object)); Page* page = Page::FromAddress(slot.address()); if (!page->InYoungGeneration()) { DCHECK_EQ(page->owner_identity(), OLD_SPACE); - store_buffer()->MoveAllEntriesToRememberedSet(); - RememberedSet<OLD_TO_NEW>::Remove(page, slot.address()); + + if (!page->SweepingDone()) { + RememberedSet<OLD_TO_NEW>::Remove(page, slot.address()); + } } +#endif +} + +// static +int Heap::InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot) { + RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot); + return 0; } #ifdef DEBUG void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) { +#ifndef V8_DISABLE_WRITE_BARRIERS DCHECK(!IsLargeObject(object)); if (InYoungGeneration(object)) return; Page* page = Page::FromAddress(slot.address()); DCHECK_EQ(page->owner_identity(), OLD_SPACE); - store_buffer()->MoveAllEntriesToRememberedSet(); - CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address())); - // Old to old slots are filtered with invalidated slots. + // Slots are filtered with invalidated slots. 
+ CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()), + page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object)); CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()), page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object)); +#endif } #endif void Heap::ClearRecordedSlotRange(Address start, Address end) { +#ifndef V8_DISABLE_WRITE_BARRIERS Page* page = Page::FromAddress(start); DCHECK(!page->IsLargePage()); if (!page->InYoungGeneration()) { DCHECK_EQ(page->owner_identity(), OLD_SPACE); - store_buffer()->MoveAllEntriesToRememberedSet(); - RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end, - SlotSet::KEEP_EMPTY_BUCKETS); + + if (!page->SweepingDone()) { + RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end, + SlotSet::KEEP_EMPTY_BUCKETS); + } } +#endif } PagedSpace* PagedSpaceIterator::Next() { @@ -6164,8 +6180,8 @@ void Heap::WriteBarrierForCodeSlow(Code code) { void Heap::GenerationalBarrierSlow(HeapObject object, Address slot, HeapObject value) { - Heap* heap = Heap::FromWritableHeapObject(object); - heap->store_buffer()->InsertEntry(slot); + MemoryChunk* chunk = MemoryChunk::FromHeapObject(object); + RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot); } void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) { @@ -6207,7 +6223,6 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object, STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) || (kModeMask & kDoMarking)); - StoreBuffer* store_buffer = this->store_buffer(); IncrementalMarking* incremental_marking = this->incremental_marking(); MarkCompactCollector* collector = this->mark_compact_collector(); @@ -6218,7 +6233,8 @@ void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object, if ((kModeMask & kDoGenerational) && Heap::InYoungGeneration(value_heap_object)) { - store_buffer->InsertEntry(slot.address()); + RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(source_page, + slot.address()); } if ((kModeMask & kDoMarking) && diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h index 2b8b963a798..182096f29c7 100644 --- a/chromium/v8/src/heap/heap.h +++ b/chromium/v8/src/heap/heap.h @@ -7,6 +7,7 @@ #include <cmath> #include <map> +#include <memory> #include <unordered_map> #include <unordered_set> #include <vector> @@ -45,7 +46,11 @@ class TestMemoryAllocatorScope; } // namespace heap class IncrementalMarking; +class BackingStore; class JSArrayBuffer; +class JSPromise; +class NativeContext; + using v8::MemoryPressureLevel; class AllocationObserver; @@ -62,6 +67,7 @@ class Isolate; class JSFinalizationGroup; class LocalEmbedderHeapTracer; class MemoryAllocator; +class MemoryMeasurement; class MemoryReducer; class MinorMarkCompactCollector; class ObjectIterator; @@ -74,7 +80,6 @@ class ScavengeJob; class Scavenger; class ScavengerCollector; class Space; -class StoreBuffer; class StressScavengeObserver; class TimedHistogram; class WeakObjectRetainer; @@ -86,6 +91,8 @@ enum ArrayStorageAllocationMode { enum class ClearRecordedSlots { kYes, kNo }; +enum class InvalidateRecordedSlots { kYes, kNo }; + enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory }; enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes }; @@ -560,6 +567,9 @@ class Heap { void RecordStats(HeapStats* stats, bool take_snapshot = false); + Handle<JSPromise> MeasureMemory(Handle<NativeContext> context, + v8::MeasureMemoryMode mode); + // Check new space expansion 
criteria and expand semispaces if it was hit. void CheckNewSpaceExpansionCriteria(); @@ -839,12 +849,13 @@ class Heap { void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; } - Address* store_buffer_top_address(); + V8_EXPORT_PRIVATE Address* store_buffer_top_address(); static intptr_t store_buffer_mask_constant(); static Address store_buffer_overflow_function_address(); void ClearRecordedSlot(HeapObject object, ObjectSlot slot); void ClearRecordedSlotRange(Address start, Address end); + static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot); #ifdef DEBUG void VerifyClearedSlot(HeapObject object, ObjectSlot slot); @@ -896,8 +907,13 @@ class Heap { // The runtime uses this function to notify potentially unsafe object layout // changes that require special synchronization with the concurrent marker. // The old size is the size of the object before layout change. - void NotifyObjectLayoutChange(HeapObject object, int old_size, - const DisallowHeapAllocation&); + // By default recorded slots in the object are invalidated. Pass + // InvalidateRecordedSlots::kNo if this is not necessary or to perform this + // manually. + void NotifyObjectLayoutChange( + HeapObject object, const DisallowHeapAllocation&, + InvalidateRecordedSlots invalidate_recorded_slots = + InvalidateRecordedSlots::kYes); #ifdef VERIFY_HEAP // This function checks that either @@ -1214,16 +1230,24 @@ class Heap { AlignWithFiller(HeapObject object, int object_size, int allocation_size, AllocationAlignment alignment); + // Allocate an external backing store with the given allocation callback. + // If the callback fails (indicated by a nullptr result) then this function + // will re-try the allocation after performing GCs. This is useful for + // external backing stores that may be retained by (unreachable) V8 objects + // such as ArrayBuffers, ExternalStrings, etc. + // + // The function may also proactively trigger GCs even if the allocation + // callback does not fail to keep the memory usage low. + V8_EXPORT_PRIVATE void* AllocateExternalBackingStore( + const std::function<void*(size_t)>& allocate, size_t byte_length); + // =========================================================================== // ArrayBuffer tracking. ===================================================== // =========================================================================== - - // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external - // in the registration/unregistration APIs. Consider dropping the "New" from - // "RegisterNewArrayBuffer" because one can re-register a previously - // unregistered buffer, too, and the name is confusing. - void RegisterNewArrayBuffer(JSArrayBuffer buffer); - void UnregisterArrayBuffer(JSArrayBuffer buffer); + void RegisterBackingStore(JSArrayBuffer buffer, + std::shared_ptr<BackingStore> backing_store); + std::shared_ptr<BackingStore> UnregisterBackingStore(JSArrayBuffer buffer); + std::shared_ptr<BackingStore> LookupBackingStore(JSArrayBuffer buffer); // =========================================================================== // Allocation site tracking. ================================================= @@ -1332,9 +1356,7 @@ class Heap { // per call to mmap(). The page is only reclaimed when the process is // killed. Confine the hint to a 32-bit section of the virtual address // space. See crbug.com/700928. 
- uintptr_t offset = - reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) & - kMmapRegionMask; + uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask; result = reinterpret_cast<void*>(mmap_region_base_ + offset); #endif // V8_OS_MACOSX #endif // V8_TARGET_ARCH_X64 @@ -1348,8 +1370,6 @@ class Heap { inline int MaxNumberToStringCacheSize() const; private: - class SkipStoreBufferScope; - using ExternalStringTableUpdaterCallback = String (*)(Heap* heap, FullObjectSlot pointer); @@ -1462,11 +1482,7 @@ class Heap { ROOT_LIST(ROOT_ACCESSOR) #undef ROOT_ACCESSOR - StoreBuffer* store_buffer() { return store_buffer_.get(); } - - void set_current_gc_flags(int flags) { - current_gc_flags_ = flags; - } + void set_current_gc_flags(int flags) { current_gc_flags_ = flags; } inline bool ShouldReduceMemory() const { return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0; @@ -1732,20 +1748,23 @@ class Heap { AllocationOrigin origin = AllocationOrigin::kRuntime, AllocationAlignment alignment = kWordAligned); + // This method will try to allocate objects quickly (AllocationType::kYoung) + // otherwise it falls back to a slower path indicated by the mode. + enum AllocationRetryMode { kLightRetry, kRetryOrFail }; + template <AllocationRetryMode mode> + V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith( + int size, AllocationType allocation, + AllocationOrigin origin = AllocationOrigin::kRuntime, + AllocationAlignment alignment = kWordAligned); + // This method will try to perform an allocation of a given size of a given // AllocationType. If the allocation fails, a regular full garbage collection // is triggered and the allocation is retried. This is performed multiple // times. If after that retry procedure the allocation still fails nullptr is // returned. - HeapObject AllocateRawWithLightRetry( + V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath( int size, AllocationType allocation, AllocationOrigin origin, AllocationAlignment alignment = kWordAligned); - HeapObject AllocateRawWithLightRetry( - int size, AllocationType allocation, - AllocationAlignment alignment = kWordAligned) { - return AllocateRawWithLightRetry(size, allocation, - AllocationOrigin::kRuntime, alignment); - } // This method will try to perform an allocation of a given size of a given // AllocationType. If the allocation fails, a regular full garbage collection @@ -1753,17 +1772,11 @@ class Heap { // times. If after that retry procedure the allocation still fails a "hammer" // garbage collection is triggered which tries to significantly reduce memory. // If the allocation still fails after that a fatal error is thrown. - HeapObject AllocateRawWithRetryOrFail( + V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath( int size, AllocationType allocation, AllocationOrigin origin, AllocationAlignment alignment = kWordAligned); - HeapObject AllocateRawWithRetryOrFail( - int size, AllocationType allocation, - AllocationAlignment alignment = kWordAligned) { - return AllocateRawWithRetryOrFail(size, allocation, - AllocationOrigin::kRuntime, alignment); - } - HeapObject AllocateRawCodeInLargeObjectSpace(int size); + V8_WARN_UNUSED_RESULT HeapObject AllocateRawCodeInLargeObjectSpace(int size); // Allocates a heap object based on the map. 
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map, @@ -1980,10 +1993,10 @@ class Heap { std::unique_ptr<ScavengerCollector> scavenger_collector_; std::unique_ptr<ArrayBufferCollector> array_buffer_collector_; std::unique_ptr<MemoryAllocator> memory_allocator_; - std::unique_ptr<StoreBuffer> store_buffer_; std::unique_ptr<IncrementalMarking> incremental_marking_; std::unique_ptr<ConcurrentMarking> concurrent_marking_; std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_; + std::unique_ptr<MemoryMeasurement> memory_measurement_; std::unique_ptr<MemoryReducer> memory_reducer_; std::unique_ptr<ObjectStats> live_object_stats_; std::unique_ptr<ObjectStats> dead_object_stats_; @@ -2101,7 +2114,6 @@ class Heap { friend class Scavenger; friend class ScavengerCollector; friend class Space; - friend class StoreBuffer; friend class Sweeper; friend class heap::TestMemoryAllocatorScope; @@ -2152,7 +2164,6 @@ class HeapStats { intptr_t* end_marker; // 27 }; - class AlwaysAllocateScope { public: explicit inline AlwaysAllocateScope(Heap* heap); @@ -2232,7 +2243,6 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor { Heap* heap_; }; - // Verify that all objects are Smis. class VerifySmisVisitor : public RootVisitor { public: @@ -2263,7 +2273,7 @@ class V8_EXPORT_PRIVATE SpaceIterator : public Malloced { private: Heap* heap_; - int current_space_; // from enum AllocationSpace. + int current_space_; // from enum AllocationSpace. }; // A HeapObjectIterator provides iteration over the entire non-read-only heap. diff --git a/chromium/v8/src/heap/incremental-marking-job.cc b/chromium/v8/src/heap/incremental-marking-job.cc index c6e607c3ead..1f924ff1391 100644 --- a/chromium/v8/src/heap/incremental-marking-job.cc +++ b/chromium/v8/src/heap/incremental-marking-job.cc @@ -54,24 +54,24 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) { V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate); if (task_type == TaskType::kNormal) { if (taskrunner->NonNestableTasksEnabled()) { - taskrunner->PostNonNestableTask(base::make_unique<Task>( + taskrunner->PostNonNestableTask(std::make_unique<Task>( heap->isolate(), this, EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type)); } else { - taskrunner->PostTask(base::make_unique<Task>( + taskrunner->PostTask(std::make_unique<Task>( heap->isolate(), this, EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type)); } } else { if (taskrunner->NonNestableDelayedTasksEnabled()) { taskrunner->PostNonNestableDelayedTask( - base::make_unique<Task>( + std::make_unique<Task>( heap->isolate(), this, EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type), kDelayInSeconds); } else { taskrunner->PostDelayedTask( - base::make_unique<Task>( + std::make_unique<Task>( heap->isolate(), this, EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type), kDelayInSeconds); diff --git a/chromium/v8/src/heap/invalidated-slots-inl.h b/chromium/v8/src/heap/invalidated-slots-inl.h index 35a08108f63..546667b2b25 100644 --- a/chromium/v8/src/heap/invalidated-slots-inl.h +++ b/chromium/v8/src/heap/invalidated-slots-inl.h @@ -24,42 +24,40 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) { DCHECK_LE(last_slot_, slot); last_slot_ = slot; #endif - while (slot >= invalidated_end_) { - ++iterator_; - if (iterator_ != iterator_end_) { - // Invalidated ranges must not overlap. 
- DCHECK_LE(invalidated_end_, iterator_->first.address()); - invalidated_start_ = iterator_->first.address(); - invalidated_end_ = invalidated_start_ + iterator_->second; - invalidated_object_ = HeapObject(); - invalidated_object_size_ = 0; - } else { - invalidated_start_ = sentinel_; - invalidated_end_ = sentinel_; - } - } - // Now the invalidated region ends after the slot. if (slot < invalidated_start_) { - // The invalidated region starts after the slot. return true; } - // The invalidated region includes the slot. - // Ask the object if the slot is valid. - if (invalidated_object_.is_null()) { - invalidated_object_ = HeapObject::FromAddress(invalidated_start_); - DCHECK(!invalidated_object_.IsFiller()); - invalidated_object_size_ = - invalidated_object_.SizeFromMap(invalidated_object_.map()); + + while (slot >= next_invalidated_start_) { + NextInvalidatedObject(); + } + + HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_); + + if (invalidated_size_ == 0) { + DCHECK(invalidated_object.map().IsMap()); + invalidated_size_ = invalidated_object.Size(); } + int offset = static_cast<int>(slot - invalidated_start_); DCHECK_GT(offset, 0); - DCHECK_LE(invalidated_object_size_, - static_cast<int>(invalidated_end_ - invalidated_start_)); + if (offset < invalidated_size_) + return invalidated_object.IsValidSlot(invalidated_object.map(), offset); + + NextInvalidatedObject(); + return true; +} + +void InvalidatedSlotsFilter::NextInvalidatedObject() { + invalidated_start_ = next_invalidated_start_; + invalidated_size_ = 0; - if (offset >= invalidated_object_size_) { - return slots_in_free_space_are_valid_; + if (iterator_ == iterator_end_) { + next_invalidated_start_ = sentinel_; + } else { + next_invalidated_start_ = iterator_->address(); + iterator_++; } - return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset); } void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) { @@ -72,35 +70,25 @@ void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) { if (iterator_ == iterator_end_) return; - // Ignore invalidated objects before free region - while (free_start >= invalidated_end_) { + // Ignore invalidated objects that start before free region + while (invalidated_start_ < free_start) { ++iterator_; NextInvalidatedObject(); } - // Loop here: Free region might contain multiple invalidated objects - while (free_end > invalidated_start_) { - // Case: Free region starts before current invalidated object - if (free_start <= invalidated_start_) { - iterator_ = invalidated_slots_->erase(iterator_); - - } else { - // Case: Free region starts within current invalidated object - // (Can happen for right-trimmed objects) - iterator_++; - } - + // Remove all invalidated objects that start within + // free region. + while (invalidated_start_ < free_end) { + iterator_ = invalidated_slots_->erase(iterator_); NextInvalidatedObject(); } } void InvalidatedSlotsCleanup::NextInvalidatedObject() { if (iterator_ != iterator_end_) { - invalidated_start_ = iterator_->first.address(); - invalidated_end_ = invalidated_start_ + iterator_->second; + invalidated_start_ = iterator_->address(); } else { invalidated_start_ = sentinel_; - invalidated_end_ = sentinel_; } } diff --git a/chromium/v8/src/heap/invalidated-slots.cc b/chromium/v8/src/heap/invalidated-slots.cc index 8fa1518d683..9f29af218bc 100644 --- a/chromium/v8/src/heap/invalidated-slots.cc +++ b/chromium/v8/src/heap/invalidated-slots.cc @@ -3,52 +3,35 @@ // found in the LICENSE file. 
#include "src/heap/invalidated-slots.h" +#include "src/heap/invalidated-slots-inl.h" #include "src/heap/spaces.h" +#include "src/objects/objects-inl.h" namespace v8 { namespace internal { InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) { - // The sweeper removes invalid slots and makes free space available for - // allocation. Slots for new objects can be recorded in the free space. - // Note that we cannot simply check for SweepingDone because pages in large - // object space are not swept but have SweepingDone() == true. - bool slots_in_free_space_are_valid = - chunk->SweepingDone() && chunk->InOldSpace(); - return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(), - slots_in_free_space_are_valid); + return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>()); } InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) { - // Always treat these slots as valid for old-to-new for now. Invalid - // old-to-new slots are always cleared. - bool slots_in_free_space_are_valid = true; - return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(), - slots_in_free_space_are_valid); + return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>()); } InvalidatedSlotsFilter::InvalidatedSlotsFilter( - MemoryChunk* chunk, InvalidatedSlots* invalidated_slots, - bool slots_in_free_space_are_valid) { - // Adjust slots_in_free_space_are_valid_ if more spaces are added. - DCHECK_IMPLIES(invalidated_slots != nullptr, - chunk->InOldSpace() || chunk->InLargeObjectSpace()); - - slots_in_free_space_are_valid_ = slots_in_free_space_are_valid; + MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) { invalidated_slots = invalidated_slots ? invalidated_slots : &empty_; iterator_ = invalidated_slots->begin(); iterator_end_ = invalidated_slots->end(); sentinel_ = chunk->area_end(); - if (iterator_ != iterator_end_) { - invalidated_start_ = iterator_->first.address(); - invalidated_end_ = invalidated_start_ + iterator_->second; - } else { - invalidated_start_ = sentinel_; - invalidated_end_ = sentinel_; - } - // These values will be lazily set when needed. - invalidated_object_size_ = 0; + + // Invoke NextInvalidatedObject twice, to initialize + // invalidated_start_ to the first invalidated object and + // next_invalidated_object_ to the second one. + NextInvalidatedObject(); + NextInvalidatedObject(); + #ifdef DEBUG last_slot_ = chunk->area_start(); #endif @@ -69,13 +52,7 @@ InvalidatedSlotsCleanup::InvalidatedSlotsCleanup( iterator_end_ = invalidated_slots_->end(); sentinel_ = chunk->area_end(); - if (iterator_ != iterator_end_) { - invalidated_start_ = iterator_->first.address(); - invalidated_end_ = invalidated_start_ + iterator_->second; - } else { - invalidated_start_ = sentinel_; - invalidated_end_ = sentinel_; - } + NextInvalidatedObject(); #ifdef DEBUG last_free_ = chunk->area_start(); diff --git a/chromium/v8/src/heap/invalidated-slots.h b/chromium/v8/src/heap/invalidated-slots.h index 4a722719106..15be3ce44cd 100644 --- a/chromium/v8/src/heap/invalidated-slots.h +++ b/chromium/v8/src/heap/invalidated-slots.h @@ -5,7 +5,7 @@ #ifndef V8_HEAP_INVALIDATED_SLOTS_H_ #define V8_HEAP_INVALIDATED_SLOTS_H_ -#include <map> +#include <set> #include <stack> #include "src/base/atomic-utils.h" @@ -20,7 +20,7 @@ namespace internal { // that potentially invalidates slots recorded concurrently. The second part // of each element is the size of the corresponding object before the layout // change. 
-using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>; +using InvalidatedSlots = std::set<HeapObject, Object::Comparer>; // This class provides IsValid predicate that takes into account the set // of invalidated objects in the given memory chunk. @@ -34,8 +34,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter { static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk); explicit InvalidatedSlotsFilter(MemoryChunk* chunk, - InvalidatedSlots* invalidated_slots, - bool slots_in_free_space_are_valid); + InvalidatedSlots* invalidated_slots); inline bool IsValid(Address slot); private: @@ -43,14 +42,15 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter { InvalidatedSlots::const_iterator iterator_end_; Address sentinel_; Address invalidated_start_; - Address invalidated_end_; - HeapObject invalidated_object_; - int invalidated_object_size_; - bool slots_in_free_space_are_valid_; + Address next_invalidated_start_; + int invalidated_size_; InvalidatedSlots empty_; #ifdef DEBUG Address last_slot_; #endif + + private: + inline void NextInvalidatedObject(); }; class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup { @@ -71,7 +71,6 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup { Address sentinel_; Address invalidated_start_; - Address invalidated_end_; inline void NextInvalidatedObject(); #ifdef DEBUG diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h index cf6d96cef81..ed7e251f44f 100644 --- a/chromium/v8/src/heap/mark-compact-inl.h +++ b/chromium/v8/src/heap/mark-compact-inl.h @@ -485,7 +485,8 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot, MemoryChunk* source_page = MemoryChunk::FromHeapObject(object); if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() && !source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) { - RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address()); + RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page, + slot.address()); } } @@ -493,7 +494,8 @@ void MarkCompactCollector::RecordSlot(MemoryChunk* source_page, HeapObjectSlot slot, HeapObject target) { MemoryChunk* target_page = MemoryChunk::FromHeapObject(target); if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) { - RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address()); + RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page, + slot.address()); } } diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc index f7067a60ea2..c18b2652d7a 100644 --- a/chromium/v8/src/heap/mark-compact.cc +++ b/chromium/v8/src/heap/mark-compact.cc @@ -2080,12 +2080,13 @@ void MarkCompactCollector::FlushBytecodeFromSFI( MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start); // Clear any recorded slots for the compiled data as being invalid. + DCHECK_NULL(chunk->sweeping_slot_set()); RememberedSet<OLD_TO_NEW>::RemoveRange( chunk, compiled_data_start, compiled_data_start + compiled_data_size, - SlotSet::PREFREE_EMPTY_BUCKETS); + SlotSet::FREE_EMPTY_BUCKETS); RememberedSet<OLD_TO_OLD>::RemoveRange( chunk, compiled_data_start, compiled_data_start + compiled_data_size, - SlotSet::PREFREE_EMPTY_BUCKETS); + SlotSet::FREE_EMPTY_BUCKETS); // Swap the map, using set_map_after_allocation to avoid verify heap checks // which are not necessary since we are doing this during the GC atomic pause. 
@@ -2233,12 +2234,12 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array, DCHECK_LE(0, new_nof_all_descriptors); Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address(); Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address(); - RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array), - start, end, - SlotSet::PREFREE_EMPTY_BUCKETS); - RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array), - start, end, - SlotSet::PREFREE_EMPTY_BUCKETS); + MemoryChunk* chunk = MemoryChunk::FromHeapObject(array); + DCHECK_NULL(chunk->sweeping_slot_set()); + RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end, + SlotSet::FREE_EMPTY_BUCKETS); + RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end, + SlotSet::FREE_EMPTY_BUCKETS); heap()->CreateFillerObjectAt(start, static_cast<int>(end - start), ClearRecordedSlots::kNo); array.set_number_of_all_descriptors(new_nof_all_descriptors); @@ -3411,15 +3412,32 @@ class RememberedSetUpdatingItem : public UpdatingItem { void UpdateUntypedPointers() { if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) { + InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_); RememberedSet<OLD_TO_NEW>::Iterate( chunk_, - [this](MaybeObjectSlot slot) { + [this, &filter](MaybeObjectSlot slot) { + if (!filter.IsValid(slot.address())) return REMOVE_SLOT; return CheckAndUpdateOldToNewSlot(slot); }, - SlotSet::PREFREE_EMPTY_BUCKETS); + SlotSet::FREE_EMPTY_BUCKETS); } - DCHECK_NULL(chunk_->invalidated_slots<OLD_TO_NEW>()); + if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) { + InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_); + RememberedSetSweeping::Iterate( + chunk_, + [this, &filter](MaybeObjectSlot slot) { + if (!filter.IsValid(slot.address())) return REMOVE_SLOT; + return CheckAndUpdateOldToNewSlot(slot); + }, + SlotSet::FREE_EMPTY_BUCKETS); + } + + if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) { + // The invalidated slots are not needed after old-to-new slots were + // processed. + chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>(); + } if ((updating_mode_ == RememberedSetUpdatingMode::ALL) && (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) { @@ -3430,17 +3448,11 @@ class RememberedSetUpdatingItem : public UpdatingItem { if (!filter.IsValid(slot.address())) return REMOVE_SLOT; return UpdateSlot<AccessMode::NON_ATOMIC>(slot); }, - SlotSet::PREFREE_EMPTY_BUCKETS); + SlotSet::FREE_EMPTY_BUCKETS); + chunk_->ReleaseSlotSet<OLD_TO_OLD>(); } if ((updating_mode_ == RememberedSetUpdatingMode::ALL) && chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) { -#ifdef DEBUG - for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) { - HeapObject object = object_size.first; - int size = object_size.second; - DCHECK_LE(object.SizeFromMap(object.map()), size); - } -#endif // The invalidated slots are not needed after old-to-old slots were // processsed. 
chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>(); @@ -3557,15 +3569,18 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems( const bool contains_old_to_new_slots = chunk->slot_set<OLD_TO_NEW>() != nullptr || chunk->typed_slot_set<OLD_TO_NEW>() != nullptr; + const bool contains_old_to_new_sweeping_slots = + chunk->sweeping_slot_set() != nullptr; const bool contains_old_to_old_invalidated_slots = chunk->invalidated_slots<OLD_TO_OLD>() != nullptr; const bool contains_old_to_new_invalidated_slots = chunk->invalidated_slots<OLD_TO_NEW>() != nullptr; - if (!contains_old_to_new_slots && !contains_old_to_old_slots && - !contains_old_to_old_invalidated_slots && + if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots && + !contains_old_to_old_slots && !contains_old_to_old_invalidated_slots && !contains_old_to_new_invalidated_slots) continue; if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots || + contains_old_to_new_sweeping_slots || contains_old_to_old_invalidated_slots || contains_old_to_new_invalidated_slots) { job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode)); @@ -3773,11 +3788,22 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() { // might not have recorded them in first place. // Remove outdated slots. + RememberedSetSweeping::RemoveRange(page, page->address(), + failed_object.address(), + SlotSet::FREE_EMPTY_BUCKETS); RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_object.address(), - SlotSet::PREFREE_EMPTY_BUCKETS); + SlotSet::FREE_EMPTY_BUCKETS); RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(), failed_object.address()); + + // Remove invalidated slots. + if (failed_object.address() > page->area_start()) { + InvalidatedSlotsCleanup old_to_new_cleanup = + InvalidatedSlotsCleanup::OldToNew(page); + old_to_new_cleanup.Free(page->area_start(), failed_object.address()); + } + // Recompute live bytes. LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state()); // Re-record slots. @@ -4350,11 +4376,7 @@ void MinorMarkCompactCollector::CollectGarbage() { RememberedSet<OLD_TO_NEW>::IterateMemoryChunks( heap(), [](MemoryChunk* chunk) { - if (chunk->SweepingDone()) { - RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk); - } else { - RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk); - } + RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk); }); heap()->account_external_memory_concurrently_freed(); @@ -4651,7 +4673,15 @@ class PageMarkingItem : public MarkingItem { if (!filter.IsValid(slot.address())) return REMOVE_SLOT; return CheckAndMarkObject(task, slot); }, - SlotSet::PREFREE_EMPTY_BUCKETS); + SlotSet::FREE_EMPTY_BUCKETS); + filter = InvalidatedSlotsFilter::OldToNew(chunk_); + RememberedSetSweeping::Iterate( + chunk_, + [this, task, &filter](MaybeObjectSlot slot) { + if (!filter.IsValid(slot.address())) return REMOVE_SLOT; + return CheckAndMarkObject(task, slot); + }, + SlotSet::FREE_EMPTY_BUCKETS); } void MarkTypedPointers(YoungGenerationMarkingTask* task) { diff --git a/chromium/v8/src/heap/memory-measurement.cc b/chromium/v8/src/heap/memory-measurement.cc new file mode 100644 index 00000000000..62cd5dadb94 --- /dev/null +++ b/chromium/v8/src/heap/memory-measurement.cc @@ -0,0 +1,80 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/heap/memory-measurement.h" + +#include "src/execution/isolate-inl.h" +#include "src/execution/isolate.h" +#include "src/heap/factory-inl.h" +#include "src/heap/factory.h" +#include "src/objects/js-promise.h" + +namespace v8 { +namespace internal { + +MemoryMeasurement::MemoryMeasurement(Isolate* isolate) : isolate_(isolate) {} + +namespace { + +class MemoryMeasurementResultBuilder { + public: + MemoryMeasurementResultBuilder(Isolate* isolate, Factory* factory) + : isolate_(isolate), factory_(factory) { + result_ = NewJSObject(); + } + + void AddTotals(size_t estimate, size_t lower_bound, size_t upper_bound) { + Handle<JSObject> total = NewJSObject(); + Handle<Object> estimate_obj = NewNumber(estimate); + AddProperty(total, factory_->jsMemoryEstimate_string(), estimate_obj); + Handle<Object> range = NewRange(lower_bound, upper_bound); + AddProperty(total, factory_->jsMemoryRange_string(), range); + AddProperty(result_, factory_->total_string(), total); + } + + Handle<JSObject> Build() { return result_; } + + private: + Handle<Object> NewNumber(size_t value) { + return factory_->NewNumberFromSize(value); + } + + Handle<JSObject> NewJSObject() { + return factory_->NewJSObject(isolate_->object_function()); + } + + Handle<JSArray> NewRange(size_t lower_bound, size_t upper_bound) { + Handle<Object> lower = NewNumber(lower_bound); + Handle<Object> upper = NewNumber(upper_bound); + Handle<FixedArray> elements = factory_->NewFixedArray(2); + elements->set(0, *lower); + elements->set(1, *upper); + return factory_->NewJSArrayWithElements(elements); + } + + void AddProperty(Handle<JSObject> object, Handle<String> name, + Handle<Object> value) { + JSObject::AddProperty(isolate_, object, name, value, NONE); + } + + Isolate* isolate_; + Factory* factory_; + Handle<JSObject> result_; +}; + +} // anonymous namespace + +Handle<JSPromise> MemoryMeasurement::EnqueueRequest( + Handle<NativeContext> context, v8::MeasureMemoryMode mode) { + Handle<JSPromise> promise = isolate_->factory()->NewJSPromise(); + MemoryMeasurementResultBuilder result_builder(isolate_, isolate_->factory()); + result_builder.AddTotals(isolate_->heap()->SizeOfObjects(), 0, + isolate_->heap()->SizeOfObjects()); + Handle<JSObject> result = result_builder.Build(); + JSPromise::Resolve(promise, result).ToHandleChecked(); + return promise; +} + +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/heap/memory-measurement.h b/chromium/v8/src/heap/memory-measurement.h new file mode 100644 index 00000000000..6de7c8c970c --- /dev/null +++ b/chromium/v8/src/heap/memory-measurement.h @@ -0,0 +1,29 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_HEAP_MEMORY_MEASUREMENT_H_ +#define V8_HEAP_MEMORY_MEASUREMENT_H_ + +#include "src/common/globals.h" +#include "src/objects/objects.h" + +namespace v8 { +namespace internal { + +class Heap; + +class V8_EXPORT_PRIVATE MemoryMeasurement { + public: + explicit MemoryMeasurement(Isolate* isolate); + Handle<JSPromise> EnqueueRequest(Handle<NativeContext> context, + v8::MeasureMemoryMode mode); + + private: + Isolate* isolate_; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_HEAP_MEMORY_MEASUREMENT_H_ diff --git a/chromium/v8/src/heap/memory-reducer.cc b/chromium/v8/src/heap/memory-reducer.cc index 704e6567962..37dca5b99c4 100644 --- a/chromium/v8/src/heap/memory-reducer.cc +++ b/chromium/v8/src/heap/memory-reducer.cc @@ -214,9 +214,8 @@ void MemoryReducer::ScheduleTimer(double delay_ms) { if (heap()->IsTearingDown()) return; // Leave some room for precision error in task scheduler. const double kSlackMs = 100; - taskrunner_->PostDelayedTask( - base::make_unique<MemoryReducer::TimerTask>(this), - (delay_ms + kSlackMs) / 1000.0); + taskrunner_->PostDelayedTask(std::make_unique<MemoryReducer::TimerTask>(this), + (delay_ms + kSlackMs) / 1000.0); } void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); } diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc index 2ee88361c96..44798a39282 100644 --- a/chromium/v8/src/heap/object-stats.cc +++ b/chromium/v8/src/heap/object-stats.cc @@ -150,9 +150,8 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) { JSObjectFieldStats stats; stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map); if (!map.is_dictionary_map()) { - int nof = map.NumberOfOwnDescriptors(); DescriptorArray descriptors = map.instance_descriptors(); - for (int descriptor = 0; descriptor < nof; descriptor++) { + for (InternalIndex descriptor : map.IterateOwnDescriptors()) { PropertyDetails details = descriptors.GetDetails(descriptor); if (details.location() == kField) { FieldIndex index = FieldIndex::ForDescriptor(map, descriptor); @@ -658,8 +657,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType( Object obj = maybe_obj->GetHeapObjectOrSmi(); switch (kind) { case FeedbackSlotKind::kCall: - if (obj == *isolate->factory()->uninitialized_symbol() || - obj == *isolate->factory()->premonomorphic_symbol()) { + if (obj == *isolate->factory()->uninitialized_symbol()) { return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE; } return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_TYPE; @@ -669,8 +667,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType( case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: case FeedbackSlotKind::kLoadKeyed: case FeedbackSlotKind::kHasKeyed: - if (obj == *isolate->factory()->uninitialized_symbol() || - obj == *isolate->factory()->premonomorphic_symbol()) { + if (obj == *isolate->factory()->uninitialized_symbol()) { return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE; } return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE; @@ -682,8 +679,7 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType( case FeedbackSlotKind::kStoreGlobalStrict: case FeedbackSlotKind::kStoreKeyedSloppy: case FeedbackSlotKind::kStoreKeyedStrict: - if (obj == *isolate->factory()->uninitialized_symbol() || - obj == *isolate->factory()->premonomorphic_symbol()) { + if (obj == *isolate->factory()->uninitialized_symbol()) { return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE; } return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_TYPE; @@ -829,10 +825,6 @@ void 
ObjectStatsCollectorImpl::CollectGlobalStatistics() { ObjectStats::RETAINED_MAPS_TYPE); // WeakArrayList. - RecordSimpleVirtualObjectStats( - HeapObject(), - WeakArrayList::cast(heap_->noscript_shared_function_infos()), - ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE); RecordSimpleVirtualObjectStats(HeapObject(), WeakArrayList::cast(heap_->script_list()), ObjectStats::SCRIPT_LIST_TYPE); diff --git a/chromium/v8/src/heap/object-stats.h b/chromium/v8/src/heap/object-stats.h index 2a9b9675ef2..28ef967c5ca 100644 --- a/chromium/v8/src/heap/object-stats.h +++ b/chromium/v8/src/heap/object-stats.h @@ -54,7 +54,6 @@ V(MAP_PROTOTYPE_DICTIONARY_TYPE) \ V(MAP_PROTOTYPE_TYPE) \ V(MAP_STABLE_TYPE) \ - V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \ V(NUMBER_STRING_CACHE_TYPE) \ V(OBJECT_DICTIONARY_ELEMENTS_TYPE) \ V(OBJECT_ELEMENTS_TYPE) \ diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h index ba0bfa2415b..d4d6d9375cd 100644 --- a/chromium/v8/src/heap/objects-visiting-inl.h +++ b/chromium/v8/src/heap/objects-visiting-inl.h @@ -38,7 +38,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map, HeapObject object) { ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); switch (map.visitor_id()) { -#define CASE(TypeName, Type) \ +#define CASE(TypeName) \ case kVisit##TypeName: \ return visitor->Visit##TypeName( \ map, ConcreteVisitor::template Cast<TypeName>(object)); @@ -77,10 +77,10 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer( static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot()); } -#define VISIT(TypeName, Type) \ +#define VISIT(TypeName) \ template <typename ResultType, typename ConcreteVisitor> \ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##TypeName( \ - Map map, Type object) { \ + Map map, TypeName object) { \ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \ if (!visitor->ShouldVisit(object)) return ResultType(); \ if (!visitor->AllowDefaultJSObjectVisit()) { \ diff --git a/chromium/v8/src/heap/objects-visiting.h b/chromium/v8/src/heap/objects-visiting.h index a5c291458f5..0f972737d25 100644 --- a/chromium/v8/src/heap/objects-visiting.h +++ b/chromium/v8/src/heap/objects-visiting.h @@ -13,59 +13,58 @@ namespace v8 { namespace internal { -// TODO(jkummerow): Drop the duplication: V(x, x) -> V(x). 
-#define TYPED_VISITOR_ID_LIST(V) \ - V(AllocationSite, AllocationSite) \ - V(BigInt, BigInt) \ - V(ByteArray, ByteArray) \ - V(BytecodeArray, BytecodeArray) \ - V(Cell, Cell) \ - V(Code, Code) \ - V(CodeDataContainer, CodeDataContainer) \ - V(ConsString, ConsString) \ - V(Context, Context) \ - V(DataHandler, DataHandler) \ - V(DescriptorArray, DescriptorArray) \ - V(EmbedderDataArray, EmbedderDataArray) \ - V(EphemeronHashTable, EphemeronHashTable) \ - V(FeedbackCell, FeedbackCell) \ - V(FeedbackVector, FeedbackVector) \ - V(FixedArray, FixedArray) \ - V(FixedDoubleArray, FixedDoubleArray) \ - V(JSArrayBuffer, JSArrayBuffer) \ - V(JSDataView, JSDataView) \ - V(JSFunction, JSFunction) \ - V(JSObject, JSObject) \ - V(JSTypedArray, JSTypedArray) \ - V(WeakCell, WeakCell) \ - V(JSWeakCollection, JSWeakCollection) \ - V(JSWeakRef, JSWeakRef) \ - V(Map, Map) \ - V(NativeContext, NativeContext) \ - V(Oddball, Oddball) \ - V(PreparseData, PreparseData) \ - V(PropertyArray, PropertyArray) \ - V(PropertyCell, PropertyCell) \ - V(PrototypeInfo, PrototypeInfo) \ - V(SeqOneByteString, SeqOneByteString) \ - V(SeqTwoByteString, SeqTwoByteString) \ - V(SharedFunctionInfo, SharedFunctionInfo) \ - V(SlicedString, SlicedString) \ - V(SmallOrderedHashMap, SmallOrderedHashMap) \ - V(SmallOrderedHashSet, SmallOrderedHashSet) \ - V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \ - V(SourceTextModule, SourceTextModule) \ - V(Symbol, Symbol) \ - V(SyntheticModule, SyntheticModule) \ - V(ThinString, ThinString) \ - V(TransitionArray, TransitionArray) \ - V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \ - V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \ - V(WasmCapiFunctionData, WasmCapiFunctionData) \ - V(WasmIndirectFunctionTable, WasmIndirectFunctionTable) \ - V(WasmInstanceObject, WasmInstanceObject) - -#define FORWARD_DECLARE(TypeName, Type) class Type; +#define TYPED_VISITOR_ID_LIST(V) \ + V(AllocationSite) \ + V(BigInt) \ + V(ByteArray) \ + V(BytecodeArray) \ + V(Cell) \ + V(Code) \ + V(CodeDataContainer) \ + V(ConsString) \ + V(Context) \ + V(DataHandler) \ + V(DescriptorArray) \ + V(EmbedderDataArray) \ + V(EphemeronHashTable) \ + V(FeedbackCell) \ + V(FeedbackVector) \ + V(FixedArray) \ + V(FixedDoubleArray) \ + V(JSArrayBuffer) \ + V(JSDataView) \ + V(JSFunction) \ + V(JSObject) \ + V(JSTypedArray) \ + V(WeakCell) \ + V(JSWeakCollection) \ + V(JSWeakRef) \ + V(Map) \ + V(NativeContext) \ + V(Oddball) \ + V(PreparseData) \ + V(PropertyArray) \ + V(PropertyCell) \ + V(PrototypeInfo) \ + V(SeqOneByteString) \ + V(SeqTwoByteString) \ + V(SharedFunctionInfo) \ + V(SlicedString) \ + V(SmallOrderedHashMap) \ + V(SmallOrderedHashSet) \ + V(SmallOrderedNameDictionary) \ + V(SourceTextModule) \ + V(Symbol) \ + V(SyntheticModule) \ + V(ThinString) \ + V(TransitionArray) \ + V(UncompiledDataWithoutPreparseData) \ + V(UncompiledDataWithPreparseData) \ + V(WasmCapiFunctionData) \ + V(WasmIndirectFunctionTable) \ + V(WasmInstanceObject) + +#define FORWARD_DECLARE(TypeName) class TypeName; TYPED_VISITOR_ID_LIST(FORWARD_DECLARE) #undef FORWARD_DECLARE @@ -99,8 +98,8 @@ class HeapVisitor : public ObjectVisitor { // in default Visit implemention for subclasses of JSObject. 
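The single-argument TYPED_VISITOR_ID_LIST above resolves the old TODO by dropping the duplicated V(x, x) form: each entry names the type exactly once, and every expansion site (forward declarations, visitor ids, Visit methods) supplies its own macro body. A self-contained sketch of the X-macro pattern with invented entry names:

#include <iostream>

// Illustrative X-macro with one argument per entry, as in the new
// TYPED_VISITOR_ID_LIST (entries here are made up).
#define DEMO_TYPE_LIST(V) \
  V(Alpha)                \
  V(Beta)                 \
  V(Gamma)

// One expansion forward-declares every listed type...
#define FORWARD_DECLARE(TypeName) class TypeName;
DEMO_TYPE_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE

// ...and another generates one visitor id per type.
enum VisitorId {
#define ENUM_ENTRY(TypeName) kVisit##TypeName,
  DEMO_TYPE_LIST(ENUM_ENTRY)
#undef ENUM_ENTRY
  kVisitorIdCount
};

int main() {
  std::cout << kVisitAlpha << " " << kVisitGamma << " "
            << kVisitorIdCount << "\n";  // 0 2 3
}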
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; } -#define VISIT(TypeName, Type) \ - V8_INLINE ResultType Visit##TypeName(Map map, Type object); +#define VISIT(TypeName) \ + V8_INLINE ResultType Visit##TypeName(Map map, TypeName object); TYPED_VISITOR_ID_LIST(VISIT) #undef VISIT V8_INLINE ResultType VisitShortcutCandidate(Map map, ConsString object); diff --git a/chromium/v8/src/heap/remembered-set.h b/chromium/v8/src/heap/remembered-set.h index eefc565e008..3c8984c83a6 100644 --- a/chromium/v8/src/heap/remembered-set.h +++ b/chromium/v8/src/heap/remembered-set.h @@ -5,6 +5,8 @@ #ifndef V8_HEAP_REMEMBERED_SET_H_ #define V8_HEAP_REMEMBERED_SET_H_ +#include <memory> + #include "src/base/memory.h" #include "src/codegen/reloc-info.h" #include "src/heap/heap.h" @@ -16,54 +18,39 @@ namespace internal { enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED }; -// TODO(ulan): Investigate performance of de-templatizing this class. -template <RememberedSetType type> -class RememberedSet : public AllStatic { +class RememberedSetOperations { public: // Given a page and a slot in that page, this function adds the slot to the // remembered set. - template <AccessMode access_mode = AccessMode::ATOMIC> - static void Insert(MemoryChunk* chunk, Address slot_addr) { + template <AccessMode access_mode> + static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) { DCHECK(chunk->Contains(slot_addr)); - SlotSet* slot_set = chunk->slot_set<type, access_mode>(); - if (slot_set == nullptr) { - slot_set = chunk->AllocateSlotSet<type>(); - } uintptr_t offset = slot_addr - chunk->address(); slot_set[offset / Page::kPageSize].Insert<access_mode>(offset % Page::kPageSize); } - // Given a page and a slot in that page, this function returns true if - // the remembered set contains the slot. - static bool Contains(MemoryChunk* chunk, Address slot_addr) { - DCHECK(chunk->Contains(slot_addr)); - SlotSet* slot_set = chunk->slot_set<type>(); - if (slot_set == nullptr) { - return false; + template <typename Callback> + static void Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback, + SlotSet::EmptyBucketMode mode) { + if (slots != nullptr) { + size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; + for (size_t page = 0; page < pages; page++) { + slots[page].Iterate(chunk->address() + page * Page::kPageSize, callback, + mode); + } } - uintptr_t offset = slot_addr - chunk->address(); - return slot_set[offset / Page::kPageSize].Contains(offset % - Page::kPageSize); } - // Given a page and a slot in that page, this function removes the slot from - // the remembered set. - // If the slot was never added, then the function does nothing. - static void Remove(MemoryChunk* chunk, Address slot_addr) { - DCHECK(chunk->Contains(slot_addr)); - SlotSet* slot_set = chunk->slot_set<type>(); + static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) { if (slot_set != nullptr) { uintptr_t offset = slot_addr - chunk->address(); slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize); } } - // Given a page and a range of slots in that page, this function removes the - // slots from the remembered set. 
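The RememberedSetOperations class above factors the common slot-set plumbing out of the templated RememberedSet; its central step is mapping a raw slot address onto the per-page SlotSet array of a (possibly large) chunk via a divide and a modulo by the page size. A standalone sketch of that indexing, with an illustrative page size constant:

#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative only: how a slot address is split into an index into the
// chunk's SlotSet array plus an in-page offset, as in
// RememberedSetOperations::Insert/Remove above. kPageSize is a stand-in.
constexpr uintptr_t kPageSize = 256 * 1024;

struct SlotIndex {
  size_t page;        // index into the SlotSet array of the chunk
  uintptr_t in_page;  // offset handed to SlotSet::Insert/Remove
};

SlotIndex IndexForSlot(uintptr_t chunk_base, uintptr_t slot_addr) {
  assert(slot_addr >= chunk_base);
  uintptr_t offset = slot_addr - chunk_base;
  return {static_cast<size_t>(offset / kPageSize), offset % kPageSize};
}

int main() {
  SlotIndex idx = IndexForSlot(0x100000, 0x100000 + kPageSize + 64);
  std::cout << "page=" << idx.page << " in_page=" << idx.in_page << "\n";
  // Prints: page=1 in_page=64
}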
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end, - SlotSet::EmptyBucketMode mode) { - SlotSet* slot_set = chunk->slot_set<type>(); + static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start, + Address end, SlotSet::EmptyBucketMode mode) { if (slot_set != nullptr) { uintptr_t start_offset = start - chunk->address(); uintptr_t end_offset = end - chunk->address(); @@ -99,6 +86,53 @@ class RememberedSet : public AllStatic { } } } +}; + +// TODO(ulan): Investigate performance of de-templatizing this class. +template <RememberedSetType type> +class RememberedSet : public AllStatic { + public: + // Given a page and a slot in that page, this function adds the slot to the + // remembered set. + template <AccessMode access_mode> + static void Insert(MemoryChunk* chunk, Address slot_addr) { + DCHECK(chunk->Contains(slot_addr)); + SlotSet* slot_set = chunk->slot_set<type, access_mode>(); + if (slot_set == nullptr) { + slot_set = chunk->AllocateSlotSet<type>(); + } + RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr); + } + + // Given a page and a slot in that page, this function returns true if + // the remembered set contains the slot. + static bool Contains(MemoryChunk* chunk, Address slot_addr) { + DCHECK(chunk->Contains(slot_addr)); + SlotSet* slot_set = chunk->slot_set<type>(); + if (slot_set == nullptr) { + return false; + } + uintptr_t offset = slot_addr - chunk->address(); + return slot_set[offset / Page::kPageSize].Contains(offset % + Page::kPageSize); + } + + // Given a page and a slot in that page, this function removes the slot from + // the remembered set. + // If the slot was never added, then the function does nothing. + static void Remove(MemoryChunk* chunk, Address slot_addr) { + DCHECK(chunk->Contains(slot_addr)); + SlotSet* slot_set = chunk->slot_set<type>(); + RememberedSetOperations::Remove(slot_set, chunk, slot_addr); + } + + // Given a page and a range of slots in that page, this function removes the + // slots from the remembered set. + static void RemoveRange(MemoryChunk* chunk, Address start, Address end, + SlotSet::EmptyBucketMode mode) { + SlotSet* slot_set = chunk->slot_set<type>(); + RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode); + } // Iterates and filters the remembered set with the given callback. // The callback should take (Address slot) and return SlotCallbackResult. @@ -120,8 +154,11 @@ class RememberedSet : public AllStatic { MemoryChunk* chunk; while ((chunk = it.next()) != nullptr) { SlotSet* slots = chunk->slot_set<type>(); + SlotSet* sweeping_slots = + type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr; TypedSlotSet* typed_slots = chunk->typed_slot_set<type>(); - if (slots != nullptr || typed_slots != nullptr || + if (slots != nullptr || sweeping_slots != nullptr || + typed_slots != nullptr || chunk->invalidated_slots<type>() != nullptr) { callback(chunk); } @@ -138,42 +175,7 @@ class RememberedSet : public AllStatic { static void Iterate(MemoryChunk* chunk, Callback callback, SlotSet::EmptyBucketMode mode) { SlotSet* slots = chunk->slot_set<type>(); - if (slots != nullptr) { - size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; - int new_count = 0; - for (size_t page = 0; page < pages; page++) { - new_count += slots[page].Iterate(callback, mode); - } - // Only old-to-old slot sets are released eagerly. Old-new-slot sets are - // released by the sweeper threads. 
- if (type == OLD_TO_OLD && new_count == 0) { - chunk->ReleaseSlotSet<OLD_TO_OLD>(); - } - } - } - - static int NumberOfPreFreedEmptyBuckets(MemoryChunk* chunk) { - DCHECK(type == OLD_TO_NEW); - int result = 0; - SlotSet* slots = chunk->slot_set<type>(); - if (slots != nullptr) { - size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; - for (size_t page = 0; page < pages; page++) { - result += slots[page].NumberOfPreFreedEmptyBuckets(); - } - } - return result; - } - - static void PreFreeEmptyBuckets(MemoryChunk* chunk) { - DCHECK(type == OLD_TO_NEW); - SlotSet* slots = chunk->slot_set<type>(); - if (slots != nullptr) { - size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; - for (size_t page = 0; page < pages; page++) { - slots[page].PreFreeEmptyBuckets(); - } - } + RememberedSetOperations::Iterate(slots, chunk, callback, mode); } static void FreeEmptyBuckets(MemoryChunk* chunk) { @@ -183,7 +185,6 @@ class RememberedSet : public AllStatic { size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; for (size_t page = 0; page < pages; page++) { slots[page].FreeEmptyBuckets(); - slots[page].FreeToBeFreedBuckets(); } } } @@ -217,7 +218,7 @@ class RememberedSet : public AllStatic { return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT; }, - TypedSlotSet::PREFREE_EMPTY_CHUNKS); + TypedSlotSet::FREE_EMPTY_CHUNKS); } } @@ -234,9 +235,9 @@ class RememberedSet : public AllStatic { }); } - // Iterates and filters typed old to old pointers in the given memory chunk - // with the given callback. The callback should take (SlotType slot_type, - // Address addr) and return SlotCallbackResult. + // Iterates and filters typed pointers in the given memory chunk with the + // given callback. The callback should take (SlotType slot_type, Address addr) + // and return SlotCallbackResult. template <typename Callback> static void IterateTyped(MemoryChunk* chunk, Callback callback) { TypedSlotSet* slots = chunk->typed_slot_set<type>(); @@ -259,9 +260,6 @@ class RememberedSet : public AllStatic { chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>(); } } - - private: - static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, ObjectSlot slot); }; class UpdateTypedSlotHelper { @@ -347,6 +345,46 @@ class UpdateTypedSlotHelper { } }; +class RememberedSetSweeping { + public: + template <AccessMode access_mode> + static void Insert(MemoryChunk* chunk, Address slot_addr) { + DCHECK(chunk->Contains(slot_addr)); + SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>(); + if (slot_set == nullptr) { + slot_set = chunk->AllocateSweepingSlotSet(); + } + RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr); + } + + static void Remove(MemoryChunk* chunk, Address slot_addr) { + DCHECK(chunk->Contains(slot_addr)); + SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>(); + RememberedSetOperations::Remove(slot_set, chunk, slot_addr); + } + + // Given a page and a range of slots in that page, this function removes the + // slots from the remembered set. + static void RemoveRange(MemoryChunk* chunk, Address start, Address end, + SlotSet::EmptyBucketMode mode) { + SlotSet* slot_set = chunk->sweeping_slot_set(); + RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode); + } + + // Iterates and filters the remembered set in the given memory chunk with + // the given callback. The callback should take (Address slot) and return + // SlotCallbackResult. 
+ // + // Notice that |mode| can only be of FREE* or PREFREE* if there are no other + // threads concurrently inserting slots. + template <typename Callback> + static void Iterate(MemoryChunk* chunk, Callback callback, + SlotSet::EmptyBucketMode mode) { + SlotSet* slots = chunk->sweeping_slot_set(); + RememberedSetOperations::Iterate(slots, chunk, callback, mode); + } +}; + inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) { if (RelocInfo::IsCodeTargetMode(rmode)) { return CODE_TARGET_SLOT; diff --git a/chromium/v8/src/heap/scavenge-job.cc b/chromium/v8/src/heap/scavenge-job.cc index 273866d5e4d..3730bfeecb6 100644 --- a/chromium/v8/src/heap/scavenge-job.cc +++ b/chromium/v8/src/heap/scavenge-job.cc @@ -108,7 +108,7 @@ void ScavengeJob::ScheduleIdleTask(Heap* heap) { v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate()); if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) { idle_task_pending_ = true; - auto task = base::make_unique<IdleTask>(heap->isolate(), this); + auto task = std::make_unique<IdleTask>(heap->isolate(), this); V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate)->PostIdleTask( std::move(task)); } diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc index 7d56882953e..47c19d4fcc1 100644 --- a/chromium/v8/src/heap/scavenger.cc +++ b/chromium/v8/src/heap/scavenger.cc @@ -153,8 +153,17 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor { if (result == KEEP_SLOT) { SLOW_DCHECK(target.IsHeapObject()); - RememberedSet<OLD_TO_NEW>::Insert(MemoryChunk::FromHeapObject(host), - slot.address()); + MemoryChunk* chunk = MemoryChunk::FromHeapObject(host); + + // Sweeper is stopped during scavenge, so we can directly + // insert into its remembered set here. + if (chunk->sweeping_slot_set()) { + RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk, + slot.address()); + } else { + RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk, + slot.address()); + } } SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate( HeapObject::cast(target))); @@ -165,8 +174,8 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor { // We cannot call MarkCompactCollector::RecordSlot because that checks // that the host page is not in young generation, which does not hold // for pending large pages. - RememberedSet<OLD_TO_OLD>::Insert(MemoryChunk::FromHeapObject(host), - slot.address()); + RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>( + MemoryChunk::FromHeapObject(host), slot.address()); } } @@ -239,8 +248,10 @@ void ScavengerCollector::CollectGarbage() { // access to the slots of a page and can completely avoid any locks on // the page itself. 
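The new RememberedSetSweeping class above mirrors the RememberedSet interface for a second old-to-new slot set that the sweeper owns; the scavenger routes promoted-object slots into it whenever a page still has a sweeping slot set (the sweeper is stopped during scavenge, so this is safe) and into the regular OLD_TO_NEW set otherwise. A toy model of that routing, using std::set purely to keep the sketch self-contained (real V8 uses bitmap SlotSets):

#include <cstdint>
#include <iostream>
#include <set>

// Toy page with a regular OLD_TO_NEW set plus an optional sweeping set that
// exists only while the page has not been swept yet.
struct PageSlotSets {
  std::set<uintptr_t> old_to_new;
  std::set<uintptr_t>* sweeping = nullptr;
};

// Mirrors the IterateAndScavengePromotedObjectsVisitor change above:
// prefer the sweeping set while it exists.
void RecordOldToNew(PageSlotSets& page, uintptr_t slot) {
  if (page.sweeping != nullptr) {
    page.sweeping->insert(slot);
  } else {
    page.old_to_new.insert(slot);
  }
}

int main() {
  std::set<uintptr_t> sweeping;
  PageSlotSets unswept{{}, &sweeping};
  PageSlotSets swept;
  RecordOldToNew(unswept, 0x40);
  RecordOldToNew(swept, 0x80);
  std::cout << sweeping.size() << " " << swept.old_to_new.size() << "\n";  // 1 1
}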
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope); - filter_scope.FilterOldSpaceSweepingPages( - [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); }); + filter_scope.FilterOldSpaceSweepingPages([](Page* page) { + return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set(); + }); + RememberedSet<OLD_TO_NEW>::IterateMemoryChunks( heap_, [&job](MemoryChunk* chunk) { job.AddItem(new PageScavengingItem(chunk)); @@ -335,11 +346,7 @@ void ScavengerCollector::CollectGarbage() { heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; }); RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) { - if (chunk->SweepingDone()) { - RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk); - } else { - RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk); - } + RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk); }); // Update how much has survived scavenge. @@ -430,16 +437,45 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) { } } +// Remove this crashkey after chromium:1010312 is fixed. +class ScopedFullHeapCrashKey { + public: + explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) { + isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap"); + } + ~ScopedFullHeapCrashKey() { + isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, ""); + } + + private: + Isolate* isolate_ = nullptr; +}; + void Scavenger::ScavengePage(MemoryChunk* page) { + ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(heap_->isolate()); CodePageMemoryModificationScope memory_modification_scope(page); + InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page); RememberedSet<OLD_TO_NEW>::Iterate( page, - [this](MaybeObjectSlot addr) { - return CheckAndScavengeObject(heap_, addr); + [this, &filter](MaybeObjectSlot slot) { + if (!filter.IsValid(slot.address())) return REMOVE_SLOT; + return CheckAndScavengeObject(heap_, slot); + }, + SlotSet::KEEP_EMPTY_BUCKETS); + filter = InvalidatedSlotsFilter::OldToNew(page); + RememberedSetSweeping::Iterate( + page, + [this, &filter](MaybeObjectSlot slot) { + if (!filter.IsValid(slot.address())) return REMOVE_SLOT; + return CheckAndScavengeObject(heap_, slot); }, SlotSet::KEEP_EMPTY_BUCKETS); - DCHECK_NULL(page->invalidated_slots<OLD_TO_NEW>()); + if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) { + // The invalidated slots are not needed after old-to-new slots were + // processed. + page->ReleaseInvalidatedSlots<OLD_TO_NEW>(); + } RememberedSet<OLD_TO_NEW>::IterateTyped( page, [=](SlotType type, Address addr) { diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc index 15ca6d79303..9f94029af37 100644 --- a/chromium/v8/src/heap/setup-heap-internal.cc +++ b/chromium/v8/src/heap/setup-heap-internal.cc @@ -7,6 +7,7 @@ #include "src/builtins/accessors.h" #include "src/codegen/compilation-cache.h" #include "src/execution/isolate.h" +#include "src/execution/protectors.h" #include "src/heap/factory.h" #include "src/heap/heap-inl.h" #include "src/ic/handler-configuration.h" @@ -616,17 +617,17 @@ void Heap::CreateInitialObjects() { // The -0 value must be set before NewNumber works. 
set_minus_zero_value( - *factory->NewHeapNumber(-0.0, AllocationType::kReadOnly)); + *factory->NewHeapNumber<AllocationType::kReadOnly>(-0.0)); DCHECK(std::signbit(roots.minus_zero_value().Number())); - set_nan_value(*factory->NewHeapNumber( - std::numeric_limits<double>::quiet_NaN(), AllocationType::kReadOnly)); - set_hole_nan_value(*factory->NewHeapNumberFromBits( - kHoleNanInt64, AllocationType::kReadOnly)); + set_nan_value(*factory->NewHeapNumber<AllocationType::kReadOnly>( + std::numeric_limits<double>::quiet_NaN())); + set_hole_nan_value(*factory->NewHeapNumberFromBits<AllocationType::kReadOnly>( + kHoleNanInt64)); set_infinity_value( - *factory->NewHeapNumber(V8_INFINITY, AllocationType::kReadOnly)); + *factory->NewHeapNumber<AllocationType::kReadOnly>(V8_INFINITY)); set_minus_infinity_value( - *factory->NewHeapNumber(-V8_INFINITY, AllocationType::kReadOnly)); + *factory->NewHeapNumber<AllocationType::kReadOnly>(-V8_INFINITY)); set_hash_seed(*factory->NewByteArray(kInt64Size, AllocationType::kReadOnly)); InitializeHashSeed(); @@ -704,8 +705,7 @@ void Heap::CreateInitialObjects() { Oddball::kStaleRegister)); // Initialize the self-reference marker. - set_self_reference_marker( - *factory->NewSelfReferenceMarker(AllocationType::kReadOnly)); + set_self_reference_marker(*factory->NewSelfReferenceMarker()); set_interpreter_entry_trampoline_for_profiling(roots.undefined_value()); @@ -781,13 +781,13 @@ void Heap::CreateInitialObjects() { set_feedback_vectors_for_profiling_tools(roots.undefined_value()); set_pending_optimize_for_test_bytecode(roots.undefined_value()); + set_shared_wasm_memories(roots.empty_weak_array_list()); set_script_list(roots.empty_weak_array_list()); Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New( isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY); DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1)); - slow_element_dictionary->set_requires_slow_elements(); set_empty_slow_element_dictionary(*slow_element_dictionary); set_materialized_objects(*factory->NewFixedArray(0, AllocationType::kOld)); @@ -839,76 +839,122 @@ void Heap::CreateInitialObjects() { script->set_origin_options(ScriptOriginOptions(true, false)); set_empty_script(*script); - Handle<Cell> array_constructor_cell = factory->NewCell( - handle(Smi::FromInt(Isolate::kProtectorValid), isolate())); - set_array_constructor_protector(*array_constructor_cell); + { + Handle<PropertyCell> cell = factory->NewPropertyCell( + factory->empty_string(), AllocationType::kReadOnly); + cell->set_value(roots.the_hole_value()); + set_empty_property_cell(*cell); + } - Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_no_elements_protector(*cell); + // Protectors + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_array_constructor_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string(), - AllocationType::kReadOnly); - cell->set_value(roots.the_hole_value()); - set_empty_property_cell(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_no_elements_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_array_iterator_protector(*cell); + { + 
Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_array_iterator_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_map_iterator_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_map_iterator_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_set_iterator_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_set_iterator_protector(*cell); + } - Handle<Cell> is_concat_spreadable_cell = factory->NewCell( - handle(Smi::FromInt(Isolate::kProtectorValid), isolate())); - set_is_concat_spreadable_protector(*is_concat_spreadable_cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_is_concat_spreadable_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_array_species_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_array_species_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_typed_array_species_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_typed_array_species_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_promise_species_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_promise_species_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_string_iterator_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_string_iterator_protector(*cell); + } - Handle<Cell> string_length_overflow_cell = factory->NewCell( - handle(Smi::FromInt(Isolate::kProtectorValid), isolate())); - set_string_length_protector(*string_length_overflow_cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_string_length_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_array_buffer_detaching_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_array_buffer_detaching_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - 
set_promise_hook_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_promise_hook_protector(*cell); + } - Handle<Cell> promise_resolve_cell = factory->NewCell( - handle(Smi::FromInt(Isolate::kProtectorValid), isolate())); - set_promise_resolve_protector(*promise_resolve_cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_promise_resolve_protector(*cell); + } - cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); - set_promise_then_protector(*cell); + { + Handle<PropertyCell> cell = + factory->NewPropertyCell(factory->empty_string()); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); + set_promise_then_protector(*cell); + } set_serialized_objects(roots.empty_fixed_array()); set_serialized_global_proxy_sizes(roots.empty_fixed_array()); - set_noscript_shared_function_infos(roots.empty_weak_array_list()); - /* Canonical off-heap trampoline data */ set_off_heap_trampoline_relocation_info( *Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_)); diff --git a/chromium/v8/src/heap/slot-set.cc b/chromium/v8/src/heap/slot-set.cc index 12cf6bab5af..92540574a0b 100644 --- a/chromium/v8/src/heap/slot-set.cc +++ b/chromium/v8/src/heap/slot-set.cc @@ -11,7 +11,6 @@ TypedSlots::~TypedSlots() { Chunk* chunk = head_; while (chunk != nullptr) { Chunk* next = chunk->next; - delete[] chunk->buffer; delete chunk; chunk = next; } @@ -22,9 +21,8 @@ TypedSlots::~TypedSlots() { void TypedSlots::Insert(SlotType type, uint32_t offset) { TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset)}; Chunk* chunk = EnsureChunk(); - DCHECK_LT(chunk->count, chunk->capacity); - chunk->buffer[chunk->count] = slot; - ++chunk->count; + DCHECK_LT(chunk->buffer.size(), chunk->buffer.capacity()); + chunk->buffer.push_back(slot); } void TypedSlots::Merge(TypedSlots* other) { @@ -46,37 +44,25 @@ TypedSlots::Chunk* TypedSlots::EnsureChunk() { if (!head_) { head_ = tail_ = NewChunk(nullptr, kInitialBufferSize); } - if (head_->count == head_->capacity) { - head_ = NewChunk(head_, NextCapacity(head_->capacity)); + if (head_->buffer.size() == head_->buffer.capacity()) { + head_ = NewChunk(head_, NextCapacity(head_->buffer.capacity())); } return head_; } -TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, int capacity) { +TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, size_t capacity) { Chunk* chunk = new Chunk; chunk->next = next; - chunk->buffer = new TypedSlot[capacity]; - chunk->capacity = capacity; - chunk->count = 0; + chunk->buffer.reserve(capacity); + DCHECK_EQ(chunk->buffer.capacity(), capacity); return chunk; } -TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); } - -void TypedSlotSet::FreeToBeFreedChunks() { - base::MutexGuard guard(&to_be_freed_chunks_mutex_); - std::stack<std::unique_ptr<Chunk>> empty; - to_be_freed_chunks_.swap(empty); -} - void TypedSlotSet::ClearInvalidSlots( const std::map<uint32_t, uint32_t>& invalid_ranges) { Chunk* chunk = LoadHead(); while (chunk != nullptr) { - TypedSlot* buffer = chunk->buffer; - int count = chunk->count; - for (int i = 0; i < count; i++) { - TypedSlot slot = LoadTypedSlot(buffer + i); + for (TypedSlot& slot : chunk->buffer) { SlotType type = TypeField::decode(slot.type_and_offset); if (type == CLEARED_SLOT) continue; uint32_t offset = 
OffsetField::decode(slot.type_and_offset); @@ -88,7 +74,7 @@ void TypedSlotSet::ClearInvalidSlots( upper_bound--; DCHECK_LE(upper_bound->first, offset); if (upper_bound->second > offset) { - ClearTypedSlot(buffer + i); + slot = ClearedTypedSlot(); } } chunk = LoadNext(chunk); diff --git a/chromium/v8/src/heap/slot-set.h b/chromium/v8/src/heap/slot-set.h index c71192bfdce..b1321b6fcaa 100644 --- a/chromium/v8/src/heap/slot-set.h +++ b/chromium/v8/src/heap/slot-set.h @@ -6,6 +6,7 @@ #define V8_HEAP_SLOT_SET_H_ #include <map> +#include <memory> #include <stack> #include "src/base/atomic-utils.h" @@ -21,19 +22,15 @@ namespace internal { enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT }; // Data structure for maintaining a set of slots in a standard (non-large) -// page. The base address of the page must be set with SetPageStart before any -// operation. +// page. // The data structure assumes that the slots are pointer size aligned and // splits the valid slot offset range into kBuckets buckets. // Each bucket is a bitmap with a bit corresponding to a single slot offset. class SlotSet : public Malloced { public: enum EmptyBucketMode { - FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately. - PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot - // set, but deallocated on demand by a sweeper - // thread. - KEEP_EMPTY_BUCKETS // An empty bucket will be kept. + FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately. + KEEP_EMPTY_BUCKETS // An empty bucket will be kept. }; SlotSet() { @@ -46,15 +43,12 @@ class SlotSet : public Malloced { for (int i = 0; i < kBuckets; i++) { ReleaseBucket(i); } - FreeToBeFreedBuckets(); } - void SetPageStart(Address page_start) { page_start_ = page_start; } - // The slot offset specifies a slot at address page_start_ + slot_offset. // AccessMode defines whether there can be concurrent access on the buckets // or not. - template <AccessMode access_mode = AccessMode::ATOMIC> + template <AccessMode access_mode> void Insert(int slot_offset) { int bucket_index, cell_index, bit_index; SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); @@ -138,9 +132,7 @@ class SlotSet : public Malloced { DCHECK(current_bucket == end_bucket || (current_bucket < end_bucket && current_cell == 0)); while (current_bucket < end_bucket) { - if (mode == PREFREE_EMPTY_BUCKETS) { - PreFreeEmptyBucket(current_bucket); - } else if (mode == FREE_EMPTY_BUCKETS) { + if (mode == FREE_EMPTY_BUCKETS) { ReleaseBucket(current_bucket); } else { DCHECK(mode == KEEP_EMPTY_BUCKETS); @@ -152,11 +144,11 @@ class SlotSet : public Malloced { current_bucket++; } // All buckets between start_bucket and end_bucket are cleared. 
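A SlotSet, as used above, is an array of buckets, each bucket an array of 32-bit cells, with each bit standing for one tagged-size-aligned slot; Insert, RemoveRange and Iterate all start by decomposing a slot offset into bucket, cell and bit indices. A standalone sketch of that decomposition using the same cell and bucket sizes (illustrative, not the V8 code itself; kTaggedSizeLog2 is taken as 3 here, i.e. 8-byte slots):

#include <cstdint>
#include <iostream>

// Illustrative bucket/cell/bit decomposition for a SlotSet-style bitmap.
constexpr int kTaggedSizeLog2 = 3;   // 8-byte slots, for the example
constexpr int kBitsPerCellLog2 = 5;  // 32 bits per cell
constexpr int kCellsPerBucketLog2 = 5;  // 32 cells per bucket

void SlotToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
  int slot = slot_offset >> kTaggedSizeLog2;  // slot number within the page
  *bucket = slot >> (kBitsPerCellLog2 + kCellsPerBucketLog2);
  *cell = (slot >> kBitsPerCellLog2) & ((1 << kCellsPerBucketLog2) - 1);
  *bit = slot & ((1 << kBitsPerCellLog2) - 1);
}

int main() {
  int bucket, cell, bit;
  SlotToIndices(4096, &bucket, &cell, &bit);
  std::cout << bucket << " " << cell << " " << bit << "\n";  // 0 16 0
}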
+ DCHECK(current_bucket == end_bucket); + if (current_bucket == kBuckets) return; bucket = LoadBucket(&buckets_[current_bucket]); - DCHECK(current_bucket == end_bucket && current_cell <= end_cell); - if (current_bucket == kBuckets || bucket == nullptr) { - return; - } + DCHECK(current_cell <= end_cell); + if (bucket == nullptr) return; while (current_cell < end_cell) { StoreCell(&bucket[current_cell], 0); current_cell++; @@ -189,7 +181,7 @@ class SlotSet : public Malloced { // else return REMOVE_SLOT; // }); template <typename Callback> - int Iterate(Callback callback, EmptyBucketMode mode) { + int Iterate(Address page_start, Callback callback, EmptyBucketMode mode) { int new_count = 0; for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { Bucket bucket = LoadBucket(&buckets_[bucket_index]); @@ -205,7 +197,7 @@ class SlotSet : public Malloced { int bit_offset = base::bits::CountTrailingZeros(cell); uint32_t bit_mask = 1u << bit_offset; uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2; - if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) { + if (callback(MaybeObjectSlot(page_start + slot)) == KEEP_SLOT) { ++in_bucket_count; } else { mask |= bit_mask; @@ -218,31 +210,12 @@ class SlotSet : public Malloced { } } } - if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) { - PreFreeEmptyBucket(bucket_index); - } new_count += in_bucket_count; } } return new_count; } - int NumberOfPreFreedEmptyBuckets() { - base::MutexGuard guard(&to_be_freed_buckets_mutex_); - return static_cast<int>(to_be_freed_buckets_.size()); - } - - void PreFreeEmptyBuckets() { - for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { - Bucket bucket = LoadBucket(&buckets_[bucket_index]); - if (bucket != nullptr) { - if (IsEmptyBucket(bucket)) { - PreFreeEmptyBucket(bucket_index); - } - } - } - } - void FreeEmptyBuckets() { for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { Bucket bucket = LoadBucket(&buckets_[bucket_index]); @@ -254,27 +227,22 @@ class SlotSet : public Malloced { } } - void FreeToBeFreedBuckets() { - base::MutexGuard guard(&to_be_freed_buckets_mutex_); - while (!to_be_freed_buckets_.empty()) { - Bucket top = to_be_freed_buckets_.top(); - to_be_freed_buckets_.pop(); - DeleteArray<uint32_t>(top); - } - DCHECK_EQ(0u, to_be_freed_buckets_.size()); - } - - private: - using Bucket = uint32_t*; static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize; static const int kCellsPerBucket = 32; static const int kCellsPerBucketLog2 = 5; + static const int kCellSizeBytesLog2 = 2; + static const int kCellSizeBytes = 1 << kCellSizeBytesLog2; static const int kBitsPerCell = 32; static const int kBitsPerCellLog2 = 5; static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell; static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2; static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell; + static const int kSize = kBuckets * kSystemPointerSize; + + using Bucket = uint32_t*; + + private: Bucket AllocateBucket() { Bucket result = NewArray<uint32_t>(kCellsPerBucket); for (int i = 0; i < kCellsPerBucket; i++) { @@ -293,15 +261,6 @@ class SlotSet : public Malloced { } } - void PreFreeEmptyBucket(int bucket_index) { - Bucket bucket = LoadBucket(&buckets_[bucket_index]); - if (bucket != nullptr) { - base::MutexGuard guard(&to_be_freed_buckets_mutex_); - to_be_freed_buckets_.push(bucket); - StoreBucket(&buckets_[bucket_index], nullptr); - } - } - void ReleaseBucket(int bucket_index) { Bucket bucket = 
LoadBucket(&buckets_[bucket_index]); StoreBucket(&buckets_[bucket_index], nullptr); @@ -381,11 +340,11 @@ class SlotSet : public Malloced { } Bucket buckets_[kBuckets]; - Address page_start_; - base::Mutex to_be_freed_buckets_mutex_; - std::stack<uint32_t*> to_be_freed_buckets_; }; +STATIC_ASSERT(std::is_standard_layout<SlotSet>::value); +STATIC_ASSERT(sizeof(SlotSet) == SlotSet::kSize); + enum SlotType { FULL_EMBEDDED_OBJECT_SLOT, COMPRESSED_EMBEDDED_OBJECT_SLOT, @@ -396,9 +355,9 @@ enum SlotType { }; // Data structure for maintaining a list of typed slots in a page. -// Typed slots can only appear in Code and JSFunction objects, so +// Typed slots can only appear in Code objects, so // the maximum possible offset is limited by the LargePage::kMaxCodePageSize. -// The implementation is a chain of chunks, where each chunks is an array of +// The implementation is a chain of chunks, where each chunk is an array of // encoded (slot type, slot offset) pairs. // There is no duplicate detection and we do not expect many duplicates because // typed slots contain V8 internal pointers that are not directly exposed to JS. @@ -418,17 +377,15 @@ class V8_EXPORT_PRIVATE TypedSlots { }; struct Chunk { Chunk* next; - TypedSlot* buffer; - int32_t capacity; - int32_t count; + std::vector<TypedSlot> buffer; }; - static const int kInitialBufferSize = 100; - static const int kMaxBufferSize = 16 * KB; - static int NextCapacity(int capacity) { + static const size_t kInitialBufferSize = 100; + static const size_t kMaxBufferSize = 16 * KB; + static size_t NextCapacity(size_t capacity) { return Min(kMaxBufferSize, capacity * 2); } Chunk* EnsureChunk(); - Chunk* NewChunk(Chunk* next, int capacity); + Chunk* NewChunk(Chunk* next, size_t capacity); Chunk* head_ = nullptr; Chunk* tail_ = nullptr; }; @@ -437,15 +394,10 @@ class V8_EXPORT_PRIVATE TypedSlots { // clearing of invalid slots. class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots { public: - // The PREFREE_EMPTY_CHUNKS indicates that chunks detected as empty - // during the iteration are queued in to_be_freed_chunks_, which are - // then freed in FreeToBeFreedChunks. - enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS }; + enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS }; explicit TypedSlotSet(Address page_start) : page_start_(page_start) {} - ~TypedSlotSet() override; - // Iterate over all slots in the set and for each slot invoke the callback. // If the callback returns REMOVE_SLOT then the slot is removed from the set. // Returns the new number of slots. @@ -463,11 +415,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots { Chunk* previous = nullptr; int new_count = 0; while (chunk != nullptr) { - TypedSlot* buffer = chunk->buffer; - int count = chunk->count; bool empty = true; - for (int i = 0; i < count; i++) { - TypedSlot slot = LoadTypedSlot(buffer + i); + for (TypedSlot& slot : chunk->buffer) { SlotType type = TypeField::decode(slot.type_and_offset); if (type != CLEARED_SLOT) { uint32_t offset = OffsetField::decode(slot.type_and_offset); @@ -476,12 +425,12 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots { new_count++; empty = false; } else { - ClearTypedSlot(buffer + i); + slot = ClearedTypedSlot(); } } } Chunk* next = chunk->next; - if (mode == PREFREE_EMPTY_CHUNKS && empty) { + if (mode == FREE_EMPTY_CHUNKS && empty) { // We remove the chunk from the list but let it still point its next // chunk to allow concurrent iteration. 
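The TypedSlots change above replaces the hand-rolled buffer/capacity/count triple in each chunk with a std::vector reserved to a fixed capacity, which lets Insert simply push_back and lets the iteration and clearing code use range-for with plain assignment. A standalone sketch of the chunk chain and its capacity growth (sizes follow slot-set.h; the slot payload and list management are simplified):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Simplified chunk chain: newest chunk at the head, capacity doubling up to
// a maximum, as in TypedSlots::EnsureChunk/NewChunk above.
struct TypedSlot { uint32_t type_and_offset; };

struct Chunk {
  Chunk* next = nullptr;
  std::vector<TypedSlot> buffer;
};

constexpr size_t kInitialBufferSize = 100;
constexpr size_t kMaxBufferSize = 16 * 1024;

size_t NextCapacity(size_t capacity) {
  return std::min(kMaxBufferSize, capacity * 2);
}

Chunk* NewChunk(Chunk* next, size_t capacity) {
  Chunk* chunk = new Chunk;
  chunk->next = next;
  chunk->buffer.reserve(capacity);  // room is reserved up front
  return chunk;
}

void Insert(Chunk*& head, TypedSlot slot) {
  if (head == nullptr) head = NewChunk(nullptr, kInitialBufferSize);
  if (head->buffer.size() == head->buffer.capacity())
    head = NewChunk(head, NextCapacity(head->buffer.capacity()));
  head->buffer.push_back(slot);
}

int main() {
  Chunk* head = nullptr;
  for (uint32_t i = 0; i < 250; i++) Insert(head, TypedSlot{i});
  size_t chunks = 0, slots = 0;
  for (Chunk* c = head; c != nullptr; c = c->next) {
    chunks++;
    slots += c->buffer.size();
  }
  std::cout << chunks << " chunks, " << slots << " slots\n";
  // Typically prints: 2 chunks, 250 slots
  while (head != nullptr) { Chunk* next = head->next; delete head; head = next; }
}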
if (previous) { @@ -489,8 +438,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots { } else { StoreHead(next); } - base::MutexGuard guard(&to_be_freed_chunks_mutex_); - to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk)); + + delete chunk; } else { previous = chunk; } @@ -518,19 +467,11 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots { void StoreHead(Chunk* chunk) { base::AsAtomicPointer::Relaxed_Store(&head_, chunk); } - TypedSlot LoadTypedSlot(TypedSlot* slot) { - return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)}; - } - void ClearTypedSlot(TypedSlot* slot) { - // Order is important here and should match that of LoadTypedSlot. - base::AsAtomic32::Relaxed_Store( - &slot->type_and_offset, - TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)); + static TypedSlot ClearedTypedSlot() { + return TypedSlot{TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)}; } Address page_start_; - base::Mutex to_be_freed_chunks_mutex_; - std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_; }; } // namespace internal diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc index dd8ba301018..2c5d5c298da 100644 --- a/chromium/v8/src/heap/spaces.cc +++ b/chromium/v8/src/heap/spaces.cc @@ -11,14 +11,14 @@ #include "src/base/lsan.h" #include "src/base/macros.h" #include "src/base/platform/semaphore.h" -#include "src/base/template-utils.h" #include "src/execution/vm-state-inl.h" -#include "src/heap/array-buffer-tracker.h" +#include "src/heap/array-buffer-tracker-inl.h" #include "src/heap/combined-heap.h" #include "src/heap/concurrent-marking.h" #include "src/heap/gc-tracer.h" #include "src/heap/heap-controller.h" #include "src/heap/incremental-marking-inl.h" +#include "src/heap/invalidated-slots-inl.h" #include "src/heap/mark-compact.h" #include "src/heap/read-only-heap.h" #include "src/heap/remembered-set.h" @@ -220,7 +220,7 @@ void MemoryAllocator::InitializeCodePageAllocator( requested)); heap_reservation_ = std::move(reservation); - code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>( + code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>( page_allocator, aligned_base, size, static_cast<size_t>(MemoryChunk::kAlignment)); code_page_allocator_ = code_page_allocator_instance_.get(); @@ -286,7 +286,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() { } return; } - auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this); + auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this); if (FLAG_trace_unmapper) { PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n", @@ -699,6 +699,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, chunk->InitializeReservedMemory(); base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr); base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr); + base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr); base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW], nullptr); base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD], @@ -856,6 +857,33 @@ Page* Page::ConvertNewToOld(Page* old_page) { return new_page; } +void Page::MoveOldToNewRememberedSetForSweeping() { + CHECK_NULL(sweeping_slot_set_); + sweeping_slot_set_ = slot_set_[OLD_TO_NEW]; + slot_set_[OLD_TO_NEW] = nullptr; +} + +void Page::MergeOldToNewRememberedSets() { + if (sweeping_slot_set_ == nullptr) return; 
+ + RememberedSet<OLD_TO_NEW>::Iterate( + this, + [this](MaybeObjectSlot slot) { + Address address = slot.address(); + RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address); + return KEEP_SLOT; + }, + SlotSet::KEEP_EMPTY_BUCKETS); + + if (slot_set_[OLD_TO_NEW]) { + ReleaseSlotSet<OLD_TO_NEW>(); + } + + CHECK_NULL(slot_set_[OLD_TO_NEW]); + slot_set_[OLD_TO_NEW] = sweeping_slot_set_; + sweeping_slot_set_ = nullptr; +} + size_t MemoryChunk::CommittedPhysicalMemory() { if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE) return size(); @@ -1376,6 +1404,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() { } ReleaseSlotSet<OLD_TO_NEW>(); + ReleaseSlotSet(&sweeping_slot_set_); ReleaseSlotSet<OLD_TO_OLD>(); ReleaseTypedSlotSet<OLD_TO_NEW>(); ReleaseTypedSlotSet<OLD_TO_OLD>(); @@ -1399,11 +1428,7 @@ void MemoryChunk::ReleaseAllAllocatedMemory() { static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) { size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize; DCHECK_LT(0, pages); - SlotSet* slot_set = new SlotSet[pages]; - for (size_t i = 0; i < pages; i++) { - slot_set[i].SetPageStart(page_start + i * Page::kPageSize); - } - return slot_set; + return new SlotSet[pages]; } template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>(); @@ -1411,15 +1436,23 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>(); template <RememberedSetType type> SlotSet* MemoryChunk::AllocateSlotSet() { - SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address()); + return AllocateSlotSet(&slot_set_[type]); +} + +SlotSet* MemoryChunk::AllocateSweepingSlotSet() { + return AllocateSlotSet(&sweeping_slot_set_); +} + +SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) { + SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address()); SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap( - &slot_set_[type], nullptr, slot_set); + slot_set, nullptr, new_slot_set); if (old_slot_set != nullptr) { - delete[] slot_set; - slot_set = old_slot_set; + delete[] new_slot_set; + new_slot_set = old_slot_set; } - DCHECK(slot_set); - return slot_set; + DCHECK(new_slot_set); + return new_slot_set; } template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>(); @@ -1427,10 +1460,13 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>(); template <RememberedSetType type> void MemoryChunk::ReleaseSlotSet() { - SlotSet* slot_set = slot_set_[type]; - if (slot_set) { - slot_set_[type] = nullptr; - delete[] slot_set; + ReleaseSlotSet(&slot_set_[type]); +} + +void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) { + if (*slot_set) { + delete[] * slot_set; + *slot_set = nullptr; } } @@ -1484,15 +1520,12 @@ void MemoryChunk::ReleaseInvalidatedSlots() { } template V8_EXPORT_PRIVATE void -MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object, - int size); +MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object); template V8_EXPORT_PRIVATE void -MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object, - int size); +MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object); template <RememberedSetType type> -void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object, - int size) { +void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) { bool skip_slot_recording; if (type == OLD_TO_NEW) { @@ -1509,27 +1542,17 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject 
object, AllocateInvalidatedSlots<type>(); } - InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>(); - InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object); + invalidated_slots<type>()->insert(object); +} - if (it != invalidated_slots->end() && it->first == object) { - // object was already inserted - CHECK_LE(size, it->second); - return; +void MemoryChunk::InvalidateRecordedSlots(HeapObject object) { + if (heap()->incremental_marking()->IsCompacting()) { + // We cannot check slot_set_[OLD_TO_OLD] here, since the + // concurrent markers might insert slots concurrently. + RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object); } - it = invalidated_slots->insert(it, std::make_pair(object, size)); - - // prevent overlapping invalidated objects for old-to-new. - if (type == OLD_TO_NEW && it != invalidated_slots->begin()) { - HeapObject pred = (--it)->first; - int pred_size = it->second; - DCHECK_LT(pred.address(), object.address()); - - if (pred.address() + pred_size > object.address()) { - it->second = static_cast<int>(object.address() - pred.address()); - } - } + RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object); } template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>( @@ -1546,27 +1569,6 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) { invalidated_slots<type>()->end(); } -template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>( - HeapObject old_start, HeapObject new_start); - -template <RememberedSetType type> -void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start, - HeapObject new_start) { - DCHECK_LT(old_start, new_start); - DCHECK_EQ(MemoryChunk::FromHeapObject(old_start), - MemoryChunk::FromHeapObject(new_start)); - static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots"); - if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) { - auto it = invalidated_slots<type>()->find(old_start); - if (it != invalidated_slots<type>()->end()) { - int old_size = it->second; - int delta = static_cast<int>(new_start.address() - old_start.address()); - invalidated_slots<type>()->erase(it); - (*invalidated_slots<type>())[new_start] = old_size - delta; - } - } -} - void MemoryChunk::ReleaseLocalTracker() { DCHECK_NOT_NULL(local_tracker_); delete local_tracker_; @@ -1657,6 +1659,7 @@ void PagedSpace::RefillFreeList() { DCHECK(!IsDetached()); MarkCompactCollector* collector = heap()->mark_compact_collector(); size_t added = 0; + { Page* p = nullptr; while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) { @@ -1667,6 +1670,15 @@ void PagedSpace::RefillFreeList() { category->Reset(free_list()); }); } + + // Also merge old-to-new remembered sets outside of collections. + // Do not do this during GC, because of races during scavenges. + // One thread might iterate remembered set, while another thread merges + // them. + if (!is_local()) { + p->MergeOldToNewRememberedSets(); + } + // Only during compaction pages can actually change ownership. This is // safe because there exists no other competing action on the page links // during compaction. @@ -1709,6 +1721,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { // Move over pages. for (auto it = other->begin(); it != other->end();) { Page* p = *(it++); + + p->MergeOldToNewRememberedSets(); + // Relinking requires the category to be unlinked. 
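Page::MoveOldToNewRememberedSetForSweeping and MergeOldToNewRememberedSets above implement the hand-off that the rest of this patch relies on: the sweeper takes ownership of the pre-sweeping OLD_TO_NEW slots, new slots keep accumulating in a fresh set, and RefillFreeList or MergeCompactionSpace later folds the two back into one. A toy model of that hand-off, again with std::set standing in for the bitmap SlotSet:

#include <cstdint>
#include <iostream>
#include <set>

// Toy model: pre-sweeping slots move to 'sweeping', slots recorded later
// land in a fresh 'old_to_new', and the merge folds them back together.
struct PageRememberedSets {
  std::set<uintptr_t>* old_to_new = nullptr;
  std::set<uintptr_t>* sweeping = nullptr;
};

void MoveOldToNewForSweeping(PageRememberedSets& p) {
  p.sweeping = p.old_to_new;  // sweeper now owns the existing slots
  p.old_to_new = nullptr;     // new slots go into a fresh set
}

void MergeOldToNew(PageRememberedSets& p) {
  if (p.sweeping == nullptr) return;
  if (p.old_to_new != nullptr) {
    p.sweeping->insert(p.old_to_new->begin(), p.old_to_new->end());
    delete p.old_to_new;
  }
  p.old_to_new = p.sweeping;
  p.sweeping = nullptr;
}

int main() {
  PageRememberedSets p;
  p.old_to_new = new std::set<uintptr_t>{0x10, 0x20};
  MoveOldToNewForSweeping(p);
  p.old_to_new = new std::set<uintptr_t>{0x30};  // recorded while unswept
  MergeOldToNew(p);
  std::cout << p.old_to_new->size() << "\n";  // 3
  delete p.old_to_new;
}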
other->RemovePage(p); AddPage(p); @@ -1883,19 +1898,8 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end, // Generated code may allocate inline from the linear allocation area for. // To make sure we can observe these allocations, we use a lower limit. size_t step = GetNextInlineAllocationStepSize(); - - // TODO(ofrobots): there is subtle difference between old space and new - // space here. Any way to avoid it? `step - 1` makes more sense as we would - // like to sample the object that straddles the `start + step` boundary. - // Rounding down further would introduce a small statistical error in - // sampling. However, presently PagedSpace requires limit to be aligned. - size_t rounded_step; - if (identity() == NEW_SPACE) { - DCHECK_GE(step, 1); - rounded_step = step - 1; - } else { - rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step)); - } + size_t rounded_step = + RoundSizeDownToObjectAlignment(static_cast<int>(step - 1)); return Min(static_cast<Address>(start + min_size + rounded_step), end); } else { // The entire node can be used as the linear allocation area. @@ -2139,7 +2143,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) { } else if (object.IsJSArrayBuffer()) { JSArrayBuffer array_buffer = JSArrayBuffer::cast(object); if (ArrayBufferTracker::IsTracked(array_buffer)) { - size_t size = array_buffer.byte_length(); + size_t size = PerIsolateAccountingLength(array_buffer); external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size; } } @@ -2628,7 +2632,7 @@ void NewSpace::Verify(Isolate* isolate) { } else if (object.IsJSArrayBuffer()) { JSArrayBuffer array_buffer = JSArrayBuffer::cast(object); if (ArrayBufferTracker::IsTracked(array_buffer)) { - size_t size = array_buffer.byte_length(); + size_t size = PerIsolateAccountingLength(array_buffer); external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size; } } @@ -3942,6 +3946,7 @@ Address LargePage::GetAddressToShrink(Address object_address, } void LargePage::ClearOutOfLiveRangeSlots(Address free_start) { + DCHECK_NULL(this->sweeping_slot_set()); RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(), SlotSet::FREE_EMPTY_BUCKETS); RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(), diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h index ebb6876cbe1..5652042d20c 100644 --- a/chromium/v8/src/heap/spaces.h +++ b/chromium/v8/src/heap/spaces.h @@ -130,12 +130,6 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory }; enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted }; -enum RememberedSetType { - OLD_TO_NEW, - OLD_TO_OLD, - NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1 -}; - // A free list category maintains a linked list of free memory blocks. 
class FreeListCategory { public: @@ -606,7 +600,7 @@ class MemoryChunk : public BasicMemoryChunk { + kSystemPointerSize // Address owner_ + kSizetSize // size_t progress_bar_ + kIntptrSize // intptr_t live_byte_count_ - + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array + + kSystemPointerSize // SlotSet* sweeping_slot_set_ + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array + kSystemPointerSize * @@ -706,6 +700,13 @@ class MemoryChunk : public BasicMemoryChunk { return slot_set_[type]; } + template <AccessMode access_mode = AccessMode::ATOMIC> + SlotSet* sweeping_slot_set() { + if (access_mode == AccessMode::ATOMIC) + return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_); + return sweeping_slot_set_; + } + template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC> TypedSlotSet* typed_slot_set() { if (access_mode == AccessMode::ATOMIC) @@ -715,9 +716,13 @@ class MemoryChunk : public BasicMemoryChunk { template <RememberedSetType type> V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet(); + SlotSet* AllocateSweepingSlotSet(); + SlotSet* AllocateSlotSet(SlotSet** slot_set); + // Not safe to be called concurrently. template <RememberedSetType type> void ReleaseSlotSet(); + void ReleaseSlotSet(SlotSet** slot_set); template <RememberedSetType type> TypedSlotSet* AllocateTypedSlotSet(); // Not safe to be called concurrently. @@ -729,12 +734,8 @@ class MemoryChunk : public BasicMemoryChunk { template <RememberedSetType type> void ReleaseInvalidatedSlots(); template <RememberedSetType type> - V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object, - int size); - // Updates invalidated_slots after array left-trimming. - template <RememberedSetType type> - void MoveObjectWithInvalidatedSlots(HeapObject old_start, - HeapObject new_start); + V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object); + void InvalidateRecordedSlots(HeapObject object); template <RememberedSetType type> bool RegisteredObjectWithInvalidatedSlots(HeapObject object); template <RememberedSetType type> @@ -914,7 +915,7 @@ class MemoryChunk : public BasicMemoryChunk { // A single slot set for small pages (of size kPageSize) or an array of slot // set for large pages. In the latter case the number of entries in the array // is ceil(size() / kPageSize). - SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]; + SlotSet* sweeping_slot_set_; TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]; InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES]; @@ -1097,6 +1098,9 @@ class Page : public MemoryChunk { void AllocateFreeListCategories(); void ReleaseFreeListCategories(); + void MoveOldToNewRememberedSetForSweeping(); + void MergeOldToNewRememberedSets(); + #ifdef DEBUG void Print(); #endif // DEBUG diff --git a/chromium/v8/src/heap/store-buffer-inl.h b/chromium/v8/src/heap/store-buffer-inl.h deleted file mode 100644 index b43098bf57d..00000000000 --- a/chromium/v8/src/heap/store-buffer-inl.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_HEAP_STORE_BUFFER_INL_H_ -#define V8_HEAP_STORE_BUFFER_INL_H_ - -#include "src/heap/store-buffer.h" - -#include "src/heap/heap-inl.h" - -namespace v8 { -namespace internal { - -void StoreBuffer::InsertIntoStoreBuffer(Address slot) { - if (top_ + sizeof(Address) > limit_[current_]) { - StoreBufferOverflow(heap_->isolate()); - } - *top_ = slot; - top_++; -} - -} // namespace internal -} // namespace v8 - -#endif // V8_HEAP_STORE_BUFFER_INL_H_ diff --git a/chromium/v8/src/heap/store-buffer.cc b/chromium/v8/src/heap/store-buffer.cc deleted file mode 100644 index 349e7877409..00000000000 --- a/chromium/v8/src/heap/store-buffer.cc +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/heap/store-buffer.h" - -#include <algorithm> - -#include "src/base/bits.h" -#include "src/base/macros.h" -#include "src/base/template-utils.h" -#include "src/execution/isolate.h" -#include "src/heap/incremental-marking.h" -#include "src/heap/store-buffer-inl.h" -#include "src/init/v8.h" -#include "src/logging/counters.h" -#include "src/objects/objects-inl.h" - -namespace v8 { -namespace internal { - -StoreBuffer::StoreBuffer(Heap* heap) - : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) { - for (int i = 0; i < kStoreBuffers; i++) { - start_[i] = nullptr; - limit_[i] = nullptr; - lazy_top_[i] = nullptr; - } - task_running_ = false; - insertion_callback = &InsertDuringRuntime; -} - -void StoreBuffer::SetUp() { - v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); - // Round up the requested size in order to fulfill the VirtualMemory's - // requrements on the requested size alignment. This may cause a bit of - // memory wastage if the actual CommitPageSize() will be bigger than the - // kMinExpectedOSPageSize value but this is a trade-off for keeping the - // store buffer overflow check in write barriers cheap. - const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers, - page_allocator->CommitPageSize()); - // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us - // use a bit test to detect the ends of the buffers. - STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize)); - const size_t alignment = - std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize()); - void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment); - VirtualMemory reservation(page_allocator, requested_size, hint, alignment); - if (!reservation.IsReserved()) { - heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp"); - } - - Address start = reservation.address(); - const size_t allocated_size = reservation.size(); - - start_[0] = reinterpret_cast<Address*>(start); - limit_[0] = start_[0] + (kStoreBufferSize / kSystemPointerSize); - start_[1] = limit_[0]; - limit_[1] = start_[1] + (kStoreBufferSize / kSystemPointerSize); - - // Sanity check the buffers. - Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size); - USE(vm_limit); - for (int i = 0; i < kStoreBuffers; i++) { - DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address()); - DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address()); - DCHECK(start_[i] <= vm_limit); - DCHECK(limit_[i] <= vm_limit); - DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask); - } - - // Set RW permissions only on the pages we use. 
- const size_t used_size = RoundUp(requested_size, CommitPageSize()); - if (!reservation.SetPermissions(start, used_size, - PageAllocator::kReadWrite)) { - heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp"); - } - current_ = 0; - top_ = start_[current_]; - virtual_memory_ = std::move(reservation); -} - -void StoreBuffer::TearDown() { - if (virtual_memory_.IsReserved()) virtual_memory_.Free(); - top_ = nullptr; - for (int i = 0; i < kStoreBuffers; i++) { - start_[i] = nullptr; - limit_[i] = nullptr; - lazy_top_[i] = nullptr; - } -} - -void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) { - DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC); - store_buffer->InsertIntoStoreBuffer(slot); -} - -void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer, - Address slot) { - DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC); - RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); -} - -void StoreBuffer::SetMode(StoreBufferMode mode) { - mode_ = mode; - if (mode == NOT_IN_GC) { - insertion_callback = &InsertDuringRuntime; - } else { - insertion_callback = &InsertDuringGarbageCollection; - } -} - -int StoreBuffer::StoreBufferOverflow(Isolate* isolate) { - isolate->heap()->store_buffer()->FlipStoreBuffers(); - isolate->counters()->store_buffer_overflows()->Increment(); - // Called by RecordWriteCodeStubAssembler, which doesnt accept void type - return 0; -} - -void StoreBuffer::FlipStoreBuffers() { - base::MutexGuard guard(&mutex_); - int other = (current_ + 1) % kStoreBuffers; - MoveEntriesToRememberedSet(other); - lazy_top_[current_] = top_; - current_ = other; - top_ = start_[current_]; - - if (!task_running_ && FLAG_concurrent_store_buffer) { - task_running_ = true; - V8::GetCurrentPlatform()->CallOnWorkerThread( - base::make_unique<Task>(heap_->isolate(), this)); - } -} - -void StoreBuffer::MoveEntriesToRememberedSet(int index) { - if (!lazy_top_[index]) return; - DCHECK_GE(index, 0); - DCHECK_LT(index, kStoreBuffers); - Address last_inserted_addr = kNullAddress; - MemoryChunk* chunk = nullptr; - - for (Address* current = start_[index]; current < lazy_top_[index]; - current++) { - Address addr = *current; - if (chunk == nullptr || - MemoryChunk::BaseAddress(addr) != chunk->address()) { - chunk = MemoryChunk::FromAnyPointerAddress(addr); - } - if (addr != last_inserted_addr) { - RememberedSet<OLD_TO_NEW>::Insert(chunk, addr); - last_inserted_addr = addr; - } - } - lazy_top_[index] = nullptr; -} - -void StoreBuffer::MoveAllEntriesToRememberedSet() { - base::MutexGuard guard(&mutex_); - int other = (current_ + 1) % kStoreBuffers; - MoveEntriesToRememberedSet(other); - lazy_top_[current_] = top_; - MoveEntriesToRememberedSet(current_); - top_ = start_[current_]; -} - -void StoreBuffer::ConcurrentlyProcessStoreBuffer() { - base::MutexGuard guard(&mutex_); - int other = (current_ + 1) % kStoreBuffers; - MoveEntriesToRememberedSet(other); - task_running_ = false; -} - -} // namespace internal -} // namespace v8 diff --git a/chromium/v8/src/heap/store-buffer.h b/chromium/v8/src/heap/store-buffer.h deleted file mode 100644 index 025bb6a060b..00000000000 --- a/chromium/v8/src/heap/store-buffer.h +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_HEAP_STORE_BUFFER_H_ -#define V8_HEAP_STORE_BUFFER_H_ - -#include "src/base/logging.h" -#include "src/base/platform/platform.h" -#include "src/common/globals.h" -#include "src/heap/gc-tracer.h" -#include "src/heap/remembered-set.h" -#include "src/heap/slot-set.h" -#include "src/tasks/cancelable-task.h" -#include "src/utils/allocation.h" - -namespace v8 { -namespace internal { - -// Intermediate buffer that accumulates old-to-new stores from the generated -// code. Moreover, it stores invalid old-to-new slots with two entries. -// The first is a tagged address of the start of the invalid range, the second -// one is the end address of the invalid range or null if there is just one slot -// that needs to be removed from the remembered set. On buffer overflow the -// slots are moved to the remembered set. -// Store buffer entries are always full pointers. -class StoreBuffer { - public: - enum StoreBufferMode { IN_GC, NOT_IN_GC }; - - static const int kStoreBuffers = 2; - static const int kStoreBufferSize = - Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers), - 1 << (11 + kSystemPointerSizeLog2)); - static const int kStoreBufferMask = kStoreBufferSize - 1; - - V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate); - - static void InsertDuringGarbageCollection(StoreBuffer* store_buffer, - Address slot); - static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot); - - explicit StoreBuffer(Heap* heap); - void SetUp(); - void TearDown(); - - // Used to add entries from generated code. - inline Address* top_address() { return reinterpret_cast<Address*>(&top_); } - - // Moves entries from a specific store buffer to the remembered set. This - // method takes a lock. - void MoveEntriesToRememberedSet(int index); - - // This method ensures that all used store buffer entries are transferred to - // the remembered set. - void MoveAllEntriesToRememberedSet(); - - inline void InsertIntoStoreBuffer(Address slot); - - void InsertEntry(Address slot) { - // Insertions coming from the GC are directly inserted into the remembered - // set. Insertions coming from the runtime are added to the store buffer to - // allow concurrent processing. - insertion_callback(this, slot); - } - - void SetMode(StoreBufferMode mode); - - // Used by the concurrent processing thread to transfer entries from the - // store buffer to the remembered set. - void ConcurrentlyProcessStoreBuffer(); - - bool Empty() { - for (int i = 0; i < kStoreBuffers; i++) { - if (lazy_top_[i]) { - return false; - } - } - return top_ == start_[current_]; - } - - Heap* heap() { return heap_; } - - private: - // There are two store buffers. If one store buffer fills up, the main thread - // publishes the top pointer of the store buffer that needs processing in its - // global lazy_top_ field. After that it start the concurrent processing - // thread. The concurrent processing thread uses the pointer in lazy_top_. - // It will grab the given mutex and transfer its entries to the remembered - // set. If the concurrent thread does not make progress, the main thread will - // perform the work. - // Important: there is an ordering constrained. The store buffer with the - // older entries has to be processed first. 
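The removed comment above describes the double-buffered store-buffer scheme this patch retires (slots are now inserted directly into the OLD_TO_NEW remembered set). As a rough, standalone sketch of that flip-and-drain idea only — SimpleStoreBuffer, DrainPublished and kLimit are hypothetical names, not V8 APIs:

#include <cstdint>
#include <mutex>
#include <vector>

class SimpleStoreBuffer {
 public:
  // Main thread: record an old-to-new slot; flip when the active buffer fills.
  void Insert(uintptr_t slot) {
    buffers_[current_].push_back(slot);
    if (buffers_[current_].size() >= kLimit) Flip();
  }

  // Background task: drain the buffer published by the last Flip() into the
  // remembered set. The older (published) buffer is always processed first.
  void DrainPublished(std::vector<uintptr_t>* remembered_set) {
    std::lock_guard<std::mutex> guard(mutex_);
    std::vector<uintptr_t>& full = buffers_[1 - current_];
    remembered_set->insert(remembered_set->end(), full.begin(), full.end());
    full.clear();
  }

 private:
  // Publish the full buffer and switch the main thread to the empty one.
  void Flip() {
    std::lock_guard<std::mutex> guard(mutex_);
    current_ = 1 - current_;
  }

  static constexpr size_t kLimit = 1024;
  std::vector<uintptr_t> buffers_[2];
  std::mutex mutex_;
  int current_ = 0;
};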
- class Task : public CancelableTask { - public: - Task(Isolate* isolate, StoreBuffer* store_buffer) - : CancelableTask(isolate), - store_buffer_(store_buffer), - tracer_(isolate->heap()->tracer()) {} - ~Task() override = default; - - private: - void RunInternal() override { - TRACE_BACKGROUND_GC(tracer_, - GCTracer::BackgroundScope::BACKGROUND_STORE_BUFFER); - store_buffer_->ConcurrentlyProcessStoreBuffer(); - } - StoreBuffer* store_buffer_; - GCTracer* tracer_; - DISALLOW_COPY_AND_ASSIGN(Task); - }; - - StoreBufferMode mode() const { return mode_; } - - void FlipStoreBuffers(); - - Heap* heap_; - - Address* top_; - - // The start and the limit of the buffer that contains store slots - // added from the generated code. We have two chunks of store buffers. - // Whenever one fills up, we notify a concurrent processing thread and - // use the other empty one in the meantime. - Address* start_[kStoreBuffers]; - Address* limit_[kStoreBuffers]; - - // At most one lazy_top_ pointer is set at any time. - Address* lazy_top_[kStoreBuffers]; - base::Mutex mutex_; - - // We only want to have at most one concurrent processing tas running. - bool task_running_; - - // Points to the current buffer in use. - int current_; - - // During GC, entries are directly added to the remembered set without - // going through the store buffer. This is signaled by a special - // IN_GC mode. - StoreBufferMode mode_; - - VirtualMemory virtual_memory_; - - // Callbacks are more efficient than reading out the gc state for every - // store buffer operation. - void (*insertion_callback)(StoreBuffer*, Address); -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_HEAP_STORE_BUFFER_H_ diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc index c3c6b58835c..11be7754856 100644 --- a/chromium/v8/src/heap/sweeper.cc +++ b/chromium/v8/src/heap/sweeper.cc @@ -4,7 +4,6 @@ #include "src/heap/sweeper.h" -#include "src/base/template-utils.h" #include "src/execution/vm-state-inl.h" #include "src/heap/array-buffer-tracker-inl.h" #include "src/heap/gc-tracer.h" @@ -181,7 +180,7 @@ void Sweeper::StartSweeperTasks() { ForAllSweepingSpaces([this](AllocationSpace space) { DCHECK(IsValidSweepingSpace(space)); num_sweeping_tasks_++; - auto task = base::make_unique<SweeperTask>( + auto task = std::make_unique<SweeperTask>( heap_->isolate(), this, &pending_sweeper_tasks_semaphore_, &num_sweeping_tasks_, space); DCHECK_LT(num_tasks_, kMaxSweeperTasks); @@ -321,8 +320,8 @@ int Sweeper::RawSweep( ClearFreedMemoryMode::kClearFreedMemory); } if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size); - RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end, - SlotSet::KEEP_EMPTY_BUCKETS); + RememberedSetSweeping::RemoveRange(p, free_start, free_end, + SlotSet::KEEP_EMPTY_BUCKETS); RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end, SlotSet::KEEP_EMPTY_BUCKETS); if (non_empty_typed_slots) { @@ -355,8 +354,8 @@ int Sweeper::RawSweep( ClearFreedMemoryMode::kClearFreedMemory); } if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size); - RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(), - SlotSet::KEEP_EMPTY_BUCKETS); + RememberedSetSweeping::RemoveRange(p, free_start, p->area_end(), + SlotSet::KEEP_EMPTY_BUCKETS); RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(), SlotSet::KEEP_EMPTY_BUCKETS); if (non_empty_typed_slots) { @@ -404,6 +403,10 @@ void Sweeper::SweepSpaceFromTask(AllocationSpace identity) { Page* page = nullptr; while 
(!stop_sweeper_tasks_ && ((page = GetSweepingPageSafe(identity)) != nullptr)) { + // Typed slot sets are only recorded on code pages. Code pages + // are not swept concurrently to the application to ensure W^X. + DCHECK(!page->typed_slot_set<OLD_TO_NEW>() && + !page->typed_slot_set<OLD_TO_OLD>()); ParallelSweepPage(page, identity); } } @@ -462,16 +465,6 @@ int Sweeper::ParallelSweepPage( max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode, invalidated_slots_in_free_space); DCHECK(page->SweepingDone()); - - // After finishing sweeping of a page we clean up its remembered set. - TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>(); - if (typed_slot_set) { - typed_slot_set->FreeToBeFreedChunks(); - } - SlotSet* slot_set = page->slot_set<OLD_TO_NEW>(); - if (slot_set) { - slot_set->FreeToBeFreedBuckets(); - } } { @@ -488,7 +481,7 @@ void Sweeper::ScheduleIncrementalSweepingTask() { auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate); taskrunner->PostTask( - base::make_unique<IncrementalSweeperTask>(heap_->isolate(), this)); + std::make_unique<IncrementalSweeperTask>(heap_->isolate(), this)); } } @@ -517,6 +510,7 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) { DCHECK(!category->is_linked(page->owner()->free_list())); }); #endif // DEBUG + page->MoveOldToNewRememberedSetForSweeping(); page->set_concurrent_sweeping_state(Page::kSweepingPending); heap_->paged_space(space)->IncreaseAllocatedBytes( marking_state_->live_bytes(page), page); @@ -596,8 +590,8 @@ void Sweeper::StartIterabilityTasks() { DCHECK(!iterability_task_started_); if (FLAG_concurrent_sweeping && !iterability_list_.empty()) { - auto task = base::make_unique<IterabilityTask>( - heap_->isolate(), this, &iterability_task_semaphore_); + auto task = std::make_unique<IterabilityTask>(heap_->isolate(), this, + &iterability_task_semaphore_); iterability_task_id_ = task->id(); iterability_task_started_ = true; V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task)); diff --git a/chromium/v8/src/ic/accessor-assembler.cc b/chromium/v8/src/ic/accessor-assembler.cc index f9efcba05f7..99cbd3c3c89 100644 --- a/chromium/v8/src/ic/accessor-assembler.cc +++ b/chromium/v8/src/ic/accessor-assembler.cc @@ -5,6 +5,7 @@ #include "src/ic/accessor-assembler.h" #include "src/ast/ast.h" +#include "src/base/optional.h" #include "src/codegen/code-factory.h" #include "src/ic/handler-configuration.h" #include "src/ic/ic.h" @@ -16,6 +17,7 @@ #include "src/objects/heap-number.h" #include "src/objects/module.h" #include "src/objects/objects-inl.h" +#include "src/objects/property-details.h" #include "src/objects/smi.h" namespace v8 { @@ -23,10 +25,6 @@ namespace internal { using compiler::CodeAssemblerState; using compiler::Node; -template <typename T> -using TNode = compiler::TNode<T>; -template <typename T> -using SloppyTNode = compiler::SloppyTNode<T>; //////////////////// Private helpers. @@ -66,27 +64,25 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField( } TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase( - Node* slot, Node* vector, Node* receiver_map, Label* if_handler, - TVariable<MaybeObject>* var_handler, Label* if_miss) { + TNode<Smi> slot, TNode<FeedbackVector> vector, TNode<Map> receiver_map, + Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss) { Comment("TryMonomorphicCase"); DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep()); // TODO(ishell): add helper class that hides offset computations for a series // of loads. 
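For readers unfamiliar with feedback vectors, a rough sketch of the check TryMonomorphicCase performs below: a feedback slot caches a weak reference to the expected receiver map, and the following slot caches the handler to dispatch to while that map still matches. The types here are simplified stand-ins for illustration, not V8's representation:

#include <cstddef>

// Hypothetical, simplified stand-ins for illustration only.
struct Map {};
struct Handler {};
struct FeedbackSlot {
  const Map* weak_expected_map;  // nullptr once the weak reference is cleared
  const Handler* handler;        // stored in the next physical slot in V8
};

// Returns the cached handler on a monomorphic hit, or nullptr on a miss.
inline const Handler* TryMonomorphic(const FeedbackSlot& slot,
                                     const Map* receiver_map) {
  if (slot.weak_expected_map == nullptr) return nullptr;        // cleared
  if (slot.weak_expected_map != receiver_map) return nullptr;   // wrong map
  return slot.handler;
}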
- CSA_ASSERT(this, IsFeedbackVector(vector), vector); int32_t header_size = FeedbackVector::kFeedbackSlotsOffset - kHeapObjectTag; // Adding |header_size| with a separate IntPtrAdd rather than passing it // into ElementOffsetFromIndex() allows it to be folded into a single // [base, index, offset] indirect memory access on x64. - TNode<IntPtrT> offset = - ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, SMI_PARAMETERS); + TNode<IntPtrT> offset = ElementOffsetFromIndex(slot, HOLEY_ELEMENTS); TNode<MaybeObject> feedback = ReinterpretCast<MaybeObject>( Load(MachineType::AnyTagged(), vector, IntPtrAdd(offset, IntPtrConstant(header_size)))); // Try to quickly handle the monomorphic case without knowing for sure // if we have a weak reference in feedback. - GotoIf(IsNotWeakReferenceTo(feedback, CAST(receiver_map)), if_miss); + GotoIfNot(IsWeakReferenceTo(feedback, receiver_map), if_miss); TNode<MaybeObject> handler = UncheckedCast<MaybeObject>( Load(MachineType::AnyTagged(), vector, @@ -98,7 +94,7 @@ TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase( } void AccessorAssembler::HandlePolymorphicCase( - Node* receiver_map, TNode<WeakFixedArray> feedback, Label* if_handler, + TNode<Map> receiver_map, TNode<WeakFixedArray> feedback, Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss) { Comment("HandlePolymorphicCase"); DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep()); @@ -121,8 +117,7 @@ void AccessorAssembler::HandlePolymorphicCase( TNode<MaybeObject> maybe_cached_map = LoadWeakFixedArrayElement(feedback, var_index.value()); CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map)); - GotoIf(IsNotWeakReferenceTo(maybe_cached_map, CAST(receiver_map)), - &loop_next); + GotoIfNot(IsWeakReferenceTo(maybe_cached_map, receiver_map), &loop_next); // Found, now call handler. TNode<MaybeObject> handler = @@ -157,7 +152,7 @@ void AccessorAssembler::HandleLoadICHandlerCase( BIND(&try_proto_handler); { GotoIf(IsCodeMap(LoadMap(CAST(handler))), &call_handler); - HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler, + HandleLoadICProtoHandler(p, CAST(handler), &var_holder, &var_smi_handler, &if_smi_handler, miss, exit_point, ic_mode, access_mode); } @@ -167,8 +162,8 @@ void AccessorAssembler::HandleLoadICHandlerCase( BIND(&if_smi_handler); { HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(), - handler, miss, exit_point, on_nonexistent, - support_elements, access_mode); + handler, miss, exit_point, ic_mode, + on_nonexistent, support_elements, access_mode); } BIND(&call_handler); @@ -237,9 +232,10 @@ void AccessorAssembler::HandleLoadAccessor( api_holder.value(), p->receiver())); } -void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word, +void AccessorAssembler::HandleLoadField(SloppyTNode<JSObject> holder, + TNode<WordT> handler_word, Variable* var_double_value, - Label* rebox_double, + Label* rebox_double, Label* miss, ExitPoint* exit_point) { Comment("field_load"); TNode<IntPtrT> index = @@ -261,8 +257,13 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word, var_double_value->Bind( LoadObjectField(holder, offset, MachineType::Float64())); } else { - TNode<HeapNumber> heap_number = CAST(LoadObjectField(holder, offset)); - var_double_value->Bind(LoadHeapNumberValue(heap_number)); + TNode<Object> heap_number = LoadObjectField(holder, offset); + // This is not an "old" Smi value from before a Smi->Double transition. 
+ // Rather, it's possible that since the last update of this IC, the Double + // field transitioned to a Tagged field, and was then assigned a Smi. + GotoIf(TaggedIsSmi(heap_number), miss); + GotoIfNot(IsHeapNumber(CAST(heap_number)), miss); + var_double_value->Bind(LoadHeapNumberValue(CAST(heap_number))); } Goto(rebox_double); } @@ -276,6 +277,13 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word, exit_point->Return(value); BIND(&is_double); + if (!FLAG_unbox_double_fields) { + // This is not an "old" Smi value from before a Smi->Double transition. + // Rather, it's possible that since the last update of this IC, the Double + // field transitioned to a Tagged field, and was then assigned a Smi. + GotoIf(TaggedIsSmi(value), miss); + GotoIfNot(IsHeapNumber(CAST(value)), miss); + } var_double_value->Bind(LoadHeapNumberValue(CAST(value))); Goto(rebox_double); } @@ -293,10 +301,10 @@ TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType( } void AccessorAssembler::HandleLoadICSmiHandlerCase( - const LazyLoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler, - SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point, - OnNonExistent on_nonexistent, ElementSupport support_elements, - LoadAccessMode access_mode) { + const LazyLoadICParameters* p, SloppyTNode<HeapObject> holder, + SloppyTNode<Smi> smi_handler, SloppyTNode<Object> handler, Label* miss, + ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent, + ElementSupport support_elements, LoadAccessMode access_mode) { VARIABLE(var_double_value, MachineRepresentation::kFloat64); Label rebox_double(this, &var_double_value); @@ -388,10 +396,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase( Label if_oob(this, Label::kDeferred); Comment("indexed string"); + TNode<String> string_holder = CAST(holder); TNode<IntPtrT> intptr_index = TryToIntptr(p->name(), miss); - TNode<IntPtrT> length = LoadStringLengthAsWord(holder); + TNode<IntPtrT> length = LoadStringLengthAsWord(string_holder); GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob); - TNode<Int32T> code = StringCharCodeAt(holder, intptr_index); + TNode<Int32T> code = StringCharCodeAt(string_holder, intptr_index); TNode<String> result = StringFromSingleCharCode(code); Return(result); @@ -410,23 +419,25 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase( if (access_mode == LoadAccessMode::kHas) { HandleLoadICSmiHandlerHasNamedCase(p, holder, handler_kind, miss, - exit_point); + exit_point, ic_mode); } else { HandleLoadICSmiHandlerLoadNamedCase( p, holder, handler_kind, handler_word, &rebox_double, &var_double_value, - handler, miss, exit_point, on_nonexistent, support_elements); + handler, miss, exit_point, ic_mode, on_nonexistent, support_elements); } } void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase( - const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind, - TNode<WordT> handler_word, Label* rebox_double, Variable* var_double_value, - SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point, - OnNonExistent on_nonexistent, ElementSupport support_elements) { + const LazyLoadICParameters* p, TNode<HeapObject> holder, + TNode<IntPtrT> handler_kind, TNode<WordT> handler_word, Label* rebox_double, + Variable* var_double_value, SloppyTNode<Object> handler, Label* miss, + ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent, + ElementSupport support_elements) { Label constant(this), field(this), normal(this, Label::kDeferred), - interceptor(this, 
Label::kDeferred), nonexistent(this), - accessor(this, Label::kDeferred), global(this, Label::kDeferred), - module_export(this, Label::kDeferred), proxy(this, Label::kDeferred), + slow(this, Label::kDeferred), interceptor(this, Label::kDeferred), + nonexistent(this), accessor(this, Label::kDeferred), + global(this, Label::kDeferred), module_export(this, Label::kDeferred), + proxy(this, Label::kDeferred), native_data_property(this, Label::kDeferred), api_getter(this, Label::kDeferred); @@ -459,14 +470,16 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase( GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)), &global); + GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow); + GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kProxy)), &proxy); Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kModuleExport)), &module_export, &interceptor); BIND(&field); - HandleLoadField(holder, handler_word, var_double_value, rebox_double, - exit_point); + HandleLoadField(CAST(holder), handler_word, var_double_value, rebox_double, + miss, exit_point); BIND(&nonexistent); // This is a handler for a load of a non-existent value. @@ -487,7 +500,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase( BIND(&normal); { Comment("load_normal"); - TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder)); + TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(holder))); TVARIABLE(IntPtrT, var_name_index); Label found(this, &var_name_index); NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found, @@ -529,8 +542,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase( BIND(&proxy); { - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_index(this), if_unique_name(this), to_name_failed(this, Label::kDeferred); @@ -586,20 +599,31 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase( p->context(), p->name(), p->receiver(), holder, p->slot(), p->vector()); } + BIND(&slow); + { + Comment("load_slow"); + if (ic_mode == ICMode::kGlobalIC) { + exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Slow, p->context(), + p->name(), p->slot(), p->vector()); + + } else { + exit_point->ReturnCallRuntime(Runtime::kGetProperty, p->context(), + p->receiver(), p->name()); + } + } BIND(&module_export); { Comment("module export"); TNode<UintPtrT> index = DecodeWord<LoadHandler::ExportsIndexBits>(handler_word); - Node* module = - LoadObjectField(p->receiver(), JSModuleNamespace::kModuleOffset, - MachineType::TaggedPointer()); - TNode<ObjectHashTable> exports = CAST(LoadObjectField( - module, Module::kExportsOffset, MachineType::TaggedPointer())); + TNode<Module> module = + CAST(LoadObjectField(p->receiver(), JSModuleNamespace::kModuleOffset)); + TNode<ObjectHashTable> exports = + LoadObjectField<ObjectHashTable>(module, Module::kExportsOffset); TNode<Cell> cell = CAST(LoadFixedArrayElement(exports, index)); // The handler is only installed for exports that exist. 
- Node* value = LoadCellValue(cell); + TNode<Object> value = LoadCellValue(cell); Label is_the_hole(this, Label::kDeferred); GotoIf(IsTheHole(value), &is_the_hole); exit_point->Return(value); @@ -617,10 +641,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase( } void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase( - const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind, - Label* miss, ExitPoint* exit_point) { + const LazyLoadICParameters* p, TNode<HeapObject> holder, + TNode<IntPtrT> handler_kind, Label* miss, ExitPoint* exit_point, + ICMode ic_mode) { Label return_true(this), return_false(this), return_lookup(this), - normal(this), global(this); + normal(this), global(this), slow(this); GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &return_true); @@ -649,6 +674,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase( IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)), &return_true); + GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow); + Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)), &global, &return_lookup); @@ -676,7 +703,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase( BIND(&normal); { Comment("has_normal"); - TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder)); + TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(holder))); TVARIABLE(IntPtrT, var_name_index); Label found(this); NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found, @@ -695,6 +722,18 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase( exit_point->Return(TrueConstant()); } + + BIND(&slow); + { + Comment("load_slow"); + if (ic_mode == ICMode::kGlobalIC) { + exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Slow, p->context(), + p->name(), p->slot(), p->vector()); + } else { + exit_point->ReturnCallRuntime(Runtime::kHasProperty, p->context(), + p->receiver(), p->name()); + } + } } // Performs actions common to both load and store handlers: @@ -715,8 +754,9 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase( // TODO(ishell): Remove templatezation once we move common bits from // Load/StoreHandler to the base class. 
template <typename ICHandler, typename ICParameters> -Node* AccessorAssembler::HandleProtoHandler( - const ICParameters* p, Node* handler, const OnCodeHandler& on_code_handler, +TNode<Object> AccessorAssembler::HandleProtoHandler( + const ICParameters* p, TNode<DataHandler> handler, + const OnCodeHandler& on_code_handler, const OnFoundOnReceiver& on_found_on_receiver, Label* miss, ICMode ic_mode) { // @@ -738,8 +778,7 @@ Node* AccessorAssembler::HandleProtoHandler( Label if_smi_handler(this); GotoIf(TaggedIsSmi(smi_or_code_handler), &if_smi_handler); - CSA_ASSERT(this, IsCodeMap(LoadMap(CAST(smi_or_code_handler)))); - on_code_handler(smi_or_code_handler); + on_code_handler(CAST(smi_or_code_handler)); BIND(&if_smi_handler); } @@ -771,8 +810,8 @@ Node* AccessorAssembler::HandleProtoHandler( CSA_ASSERT(this, IsWeakOrCleared(data2)); TNode<Context> expected_native_context = CAST(GetHeapObjectAssumeWeak(data2, miss)); - EmitAccessCheck(expected_native_context, p->context(), p->receiver(), - &done, miss); + EmitAccessCheck(expected_native_context, p->context(), + CAST(p->receiver()), &done, miss); } // Dictionary lookup on receiver is not necessary for Load/StoreGlobalIC @@ -807,18 +846,19 @@ Node* AccessorAssembler::HandleProtoHandler( } void AccessorAssembler::HandleLoadICProtoHandler( - const LazyLoadICParameters* p, Node* handler, Variable* var_holder, - Variable* var_smi_handler, Label* if_smi_handler, Label* miss, - ExitPoint* exit_point, ICMode ic_mode, LoadAccessMode access_mode) { + const LazyLoadICParameters* p, TNode<DataHandler> handler, + Variable* var_holder, Variable* var_smi_handler, Label* if_smi_handler, + Label* miss, ExitPoint* exit_point, ICMode ic_mode, + LoadAccessMode access_mode) { DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep()); DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep()); - Node* smi_handler = HandleProtoHandler<LoadHandler>( + TNode<Smi> smi_handler = CAST(HandleProtoHandler<LoadHandler>( p, handler, // Code sub-handlers are not expected in LoadICs, so no |on_code_handler|. nullptr, // on_found_on_receiver - [=](Node* properties, Node* name_index) { + [=](TNode<NameDictionary> properties, TNode<IntPtrT> name_index) { if (access_mode == LoadAccessMode::kHas) { exit_point->Return(TrueConstant()); } else { @@ -832,7 +872,7 @@ void AccessorAssembler::HandleLoadICProtoHandler( exit_point->Return(value); } }, - miss, ic_mode); + miss, ic_mode)); TNode<MaybeObject> maybe_holder_or_constant = LoadHandlerDataField(handler, 1); @@ -840,7 +880,7 @@ void AccessorAssembler::HandleLoadICProtoHandler( Label load_from_cached_holder(this), is_smi(this), done(this); GotoIf(TaggedIsSmi(maybe_holder_or_constant), &is_smi); - Branch(IsStrongReferenceTo(maybe_holder_or_constant, NullConstant()), &done, + Branch(TaggedEqual(maybe_holder_or_constant, NullConstant()), &done, &load_from_cached_holder); BIND(&is_smi); @@ -878,14 +918,15 @@ void AccessorAssembler::HandleLoadICProtoHandler( } void AccessorAssembler::EmitAccessCheck(TNode<Context> expected_native_context, - TNode<Context> context, Node* receiver, + TNode<Context> context, + TNode<Object> receiver, Label* can_access, Label* miss) { CSA_ASSERT(this, IsNativeContext(expected_native_context)); - TNode<Context> native_context = LoadNativeContext(context); + TNode<NativeContext> native_context = LoadNativeContext(context); GotoIf(TaggedEqual(expected_native_context, native_context), can_access); // If the receiver is not a JSGlobalProxy then we miss. 
- GotoIfNot(IsJSGlobalProxy(receiver), miss); + GotoIfNot(IsJSGlobalProxy(CAST(receiver)), miss); // For JSGlobalProxy receiver try to compare security tokens of current // and expected native contexts. TNode<Object> expected_token = LoadContextElement( @@ -895,8 +936,8 @@ void AccessorAssembler::EmitAccessCheck(TNode<Context> expected_native_context, Branch(TaggedEqual(expected_token, current_token), can_access, miss); } -void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable, - Label* readonly) { +void AccessorAssembler::JumpIfDataProperty(TNode<Uint32T> details, + Label* writable, Label* readonly) { if (readonly) { // Accessor properties never have the READ_ONLY attribute set. GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), @@ -911,10 +952,11 @@ void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable, } void AccessorAssembler::HandleStoreICNativeDataProperty( - const StoreICParameters* p, Node* holder, Node* handler_word) { + const StoreICParameters* p, SloppyTNode<HeapObject> holder, + TNode<Word32T> handler_word) { Comment("native_data_property_store"); TNode<IntPtrT> descriptor = - Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word)); + Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word)); TNode<AccessorInfo> accessor_info = CAST(LoadDescriptorValue(LoadMap(holder), descriptor)); @@ -936,23 +978,30 @@ void AccessorAssembler::HandleStoreICHandlerCase( BIND(&if_smi_handler); { Node* holder = p->receiver(); - TNode<IntPtrT> handler_word = SmiUntag(CAST(handler)); + TNode<Int32T> handler_word = SmiToInt32(CAST(handler)); - Label if_fast_smi(this), if_proxy(this); + Label if_fast_smi(this), if_proxy(this), if_interceptor(this), + if_slow(this); STATIC_ASSERT(StoreHandler::kGlobalProxy + 1 == StoreHandler::kNormal); - STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kProxy); + STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kInterceptor); + STATIC_ASSERT(StoreHandler::kInterceptor + 1 == StoreHandler::kSlow); + STATIC_ASSERT(StoreHandler::kSlow + 1 == StoreHandler::kProxy); STATIC_ASSERT(StoreHandler::kProxy + 1 == StoreHandler::kKindsNumber); - TNode<UintPtrT> handler_kind = - DecodeWord<StoreHandler::KindBits>(handler_word); - GotoIf(IntPtrLessThan(handler_kind, - IntPtrConstant(StoreHandler::kGlobalProxy)), - &if_fast_smi); - GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)), + TNode<Uint32T> handler_kind = + DecodeWord32<StoreHandler::KindBits>(handler_word); + GotoIf( + Int32LessThan(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)), + &if_fast_smi); + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy)), &if_proxy); + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kInterceptor)), + &if_interceptor); + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)), + &if_slow); CSA_ASSERT(this, - WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal))); + Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal))); TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder)); TVARIABLE(IntPtrT, var_name_index); @@ -976,14 +1025,14 @@ void AccessorAssembler::HandleStoreICHandlerCase( BIND(&if_fast_smi); { - TNode<UintPtrT> handler_kind = - DecodeWord<StoreHandler::KindBits>(handler_word); + TNode<Uint32T> handler_kind = + DecodeWord32<StoreHandler::KindBits>(handler_word); Label data(this), accessor(this), native_data_property(this); - GotoIf(WordEqual(handler_kind, 
IntPtrConstant(StoreHandler::kAccessor)), + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)), &accessor); - Branch(WordEqual(handler_kind, - IntPtrConstant(StoreHandler::kNativeDataProperty)), + Branch(Word32Equal(handler_kind, + Int32Constant(StoreHandler::kNativeDataProperty)), &native_data_property, &data); BIND(&accessor); @@ -999,6 +1048,29 @@ void AccessorAssembler::HandleStoreICHandlerCase( BIND(&if_proxy); HandleStoreToProxy(p, holder, miss, support_elements); + + BIND(&if_interceptor); + { + Comment("store_interceptor"); + TailCallRuntime(Runtime::kStorePropertyWithInterceptor, p->context(), + p->value(), p->slot(), p->vector(), p->receiver(), + p->name()); + } + + BIND(&if_slow); + { + Comment("store_slow"); + // The slow case calls into the runtime to complete the store without + // causing an IC miss that would otherwise cause a transition to the + // generic stub. + if (ic_mode == ICMode::kGlobalIC) { + TailCallRuntime(Runtime::kStoreGlobalIC_Slow, p->context(), p->value(), + p->slot(), p->vector(), p->receiver(), p->name()); + } else { + TailCallRuntime(Runtime::kKeyedStoreIC_Slow, p->context(), p->value(), + p->receiver(), p->name()); + } + } } BIND(&if_nonsmi_handler); @@ -1111,7 +1183,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase( } void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors, - Node* name_index, + TNode<IntPtrT> name_index, TNode<Word32T> representation, Node* value, Label* bailout) { Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this); @@ -1143,20 +1215,20 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors, BIND(&r_heapobject); { GotoIf(TaggedIsSmi(value), bailout); - TNode<MaybeObject> field_type = LoadFieldTypeByKeyIndex( - descriptors, UncheckedCast<IntPtrT>(name_index)); + TNode<MaybeObject> field_type = + LoadFieldTypeByKeyIndex(descriptors, name_index); const Address kNoneType = FieldType::None().ptr(); const Address kAnyType = FieldType::Any().ptr(); DCHECK_NE(static_cast<uint32_t>(kNoneType), kClearedWeakHeapObjectLower32); DCHECK_NE(static_cast<uint32_t>(kAnyType), kClearedWeakHeapObjectLower32); // FieldType::None can't hold any value. - GotoIf(WordEqual(BitcastMaybeObjectToWord(field_type), - IntPtrConstant(kNoneType)), - bailout); + GotoIf( + TaggedEqual(field_type, BitcastWordToTagged(IntPtrConstant(kNoneType))), + bailout); // FieldType::Any can hold any value. - GotoIf(WordEqual(BitcastMaybeObjectToWord(field_type), - IntPtrConstant(kAnyType)), - &all_fine); + GotoIf( + TaggedEqual(field_type, BitcastWordToTagged(IntPtrConstant(kAnyType))), + &all_fine); // Cleared weak references count as FieldType::None, which can't hold any // value. 
TNode<Map> field_type_map = @@ -1168,15 +1240,16 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors, BIND(&all_fine); } -TNode<BoolT> AccessorAssembler::IsPropertyDetailsConst(Node* details) { +TNode<BoolT> AccessorAssembler::IsPropertyDetailsConst(TNode<Uint32T> details) { return Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details), Int32Constant(static_cast<int32_t>(VariableMode::kConst))); } void AccessorAssembler::OverwriteExistingFastDataProperty( - Node* object, Node* object_map, Node* descriptors, - Node* descriptor_name_index, Node* details, TNode<Object> value, - Label* slow, bool do_transitioning_store) { + SloppyTNode<HeapObject> object, TNode<Map> object_map, + TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor_name_index, + TNode<Uint32T> details, TNode<Object> value, Label* slow, + bool do_transitioning_store) { Label done(this), if_field(this), if_descriptor(this); CSA_ASSERT(this, @@ -1192,8 +1265,8 @@ void AccessorAssembler::OverwriteExistingFastDataProperty( TNode<Uint32T> representation = DecodeWord32<PropertyDetails::RepresentationField>(details); - CheckFieldType(CAST(descriptors), descriptor_name_index, representation, - value, slow); + CheckFieldType(descriptors, descriptor_name_index, representation, value, + slow); TNode<UintPtrT> field_index = DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details); @@ -1224,7 +1297,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty( Label if_mutable(this); GotoIfNot(IsPropertyDetailsConst(details), &if_mutable); TNode<Float64T> current_value = - LoadObjectField<Float64T>(CAST(object), field_offset); + LoadObjectField<Float64T>(object, field_offset); BranchIfSameNumberValue(current_value, double_value, &done, slow); BIND(&if_mutable); } @@ -1257,8 +1330,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty( } else { Label if_mutable(this); GotoIfNot(IsPropertyDetailsConst(details), &if_mutable); - TNode<Object> current_value = - LoadObjectField(CAST(object), field_offset); + TNode<Object> current_value = LoadObjectField(object, field_offset); BranchIfSameValue(current_value, value, &done, slow, SameValueMode::kNumbersOnly); BIND(&if_mutable); @@ -1302,7 +1374,8 @@ void AccessorAssembler::OverwriteExistingFastDataProperty( } else { Label tagged_rep(this), double_rep(this); - TNode<PropertyArray> properties = CAST(LoadFastProperties(object)); + TNode<PropertyArray> properties = + CAST(LoadFastProperties(CAST(object))); Branch( Word32Equal(representation, Int32Constant(Representation::kDouble)), &double_rep, &tagged_rep); @@ -1342,7 +1415,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty( { // Check that constant matches value. 
TNode<Object> constant = LoadValueByKeyIndex( - CAST(descriptors), UncheckedCast<IntPtrT>(descriptor_name_index)); + descriptors, UncheckedCast<IntPtrT>(descriptor_name_index)); GotoIf(TaggedNotEqual(value, constant), slow); if (do_transitioning_store) { @@ -1370,10 +1443,11 @@ void AccessorAssembler::CheckPrototypeValidityCell( } void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p, - Node* holder, Node* handler_word) { + SloppyTNode<HeapObject> holder, + TNode<Word32T> handler_word) { Comment("accessor_store"); TNode<IntPtrT> descriptor = - Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word)); + Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word)); TNode<HeapObject> accessor_pair = CAST(LoadDescriptorValue(LoadMap(holder), descriptor)); CSA_ASSERT(this, IsAccessorPair(accessor_pair)); @@ -1393,7 +1467,7 @@ void AccessorAssembler::HandleStoreICProtoHandler( OnCodeHandler on_code_handler; if (support_elements == kSupportElements) { // Code sub-handlers are expected only in KeyedStoreICs. - on_code_handler = [=](Node* code_handler) { + on_code_handler = [=](TNode<Code> code_handler) { // This is either element store or transitioning element store. Label if_element_store(this), if_transitioning_element_store(this); Branch(IsStoreHandler0Map(LoadMap(handler)), &if_element_store, @@ -1421,10 +1495,10 @@ void AccessorAssembler::HandleStoreICProtoHandler( }; } - Node* smi_handler = HandleProtoHandler<StoreHandler>( + TNode<Object> smi_handler = HandleProtoHandler<StoreHandler>( p, handler, on_code_handler, // on_found_on_receiver - [=](Node* properties, Node* name_index) { + [=](TNode<NameDictionary> properties, TNode<IntPtrT> name_index) { TNode<Uint32T> details = LoadDetailsByKeyIndex<NameDictionary>(properties, name_index); // Check that the property is a writable data property (no accessor). 
@@ -1434,49 +1508,80 @@ void AccessorAssembler::HandleStoreICProtoHandler( STATIC_ASSERT(kData == 0); GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss); - StoreValueByKeyIndex<NameDictionary>( - CAST(properties), UncheckedCast<IntPtrT>(name_index), p->value()); + StoreValueByKeyIndex<NameDictionary>(properties, name_index, + p->value()); Return(p->value()); }, miss, ic_mode); { Label if_add_normal(this), if_store_global_proxy(this), if_api_setter(this), - if_accessor(this), if_native_data_property(this); + if_accessor(this), if_native_data_property(this), if_slow(this), + if_interceptor(this); CSA_ASSERT(this, TaggedIsSmi(smi_handler)); - TNode<IntPtrT> handler_word = SmiUntag(smi_handler); + TNode<Int32T> handler_word = SmiToInt32(CAST(smi_handler)); - TNode<UintPtrT> handler_kind = - DecodeWord<StoreHandler::KindBits>(handler_word); - GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)), + TNode<Uint32T> handler_kind = + DecodeWord32<StoreHandler::KindBits>(handler_word); + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)), &if_add_normal); TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1); CSA_ASSERT(this, IsWeakOrCleared(maybe_holder)); TNode<HeapObject> holder = GetHeapObjectAssumeWeak(maybe_holder, miss); - GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)), + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)), &if_store_global_proxy); - GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kAccessor)), + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)), &if_accessor); - GotoIf(WordEqual(handler_kind, - IntPtrConstant(StoreHandler::kNativeDataProperty)), + GotoIf(Word32Equal(handler_kind, + Int32Constant(StoreHandler::kNativeDataProperty)), &if_native_data_property); - GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)), + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)), &if_api_setter); - GotoIf(WordEqual(handler_kind, - IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype)), - &if_api_setter); + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)), + &if_slow); + + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kInterceptor)), + &if_interceptor); + + GotoIf( + Word32Equal(handler_kind, + Int32Constant(StoreHandler::kApiSetterHolderIsPrototype)), + &if_api_setter); CSA_ASSERT(this, - WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy))); + Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy))); HandleStoreToProxy(p, holder, miss, support_elements); + BIND(&if_slow); + { + Comment("store_slow"); + // The slow case calls into the runtime to complete the store without + // causing an IC miss that would otherwise cause a transition to the + // generic stub. 
+ if (ic_mode == ICMode::kGlobalIC) { + TailCallRuntime(Runtime::kStoreGlobalIC_Slow, p->context(), p->value(), + p->slot(), p->vector(), p->receiver(), p->name()); + } else { + TailCallRuntime(Runtime::kKeyedStoreIC_Slow, p->context(), p->value(), + p->receiver(), p->name()); + } + } + + BIND(&if_interceptor); + { + Comment("store_interceptor"); + TailCallRuntime(Runtime::kStorePropertyWithInterceptor, p->context(), + p->value(), p->slot(), p->vector(), p->receiver(), + p->name()); + } + BIND(&if_add_normal); { // This is a case of "transitioning store" to a dictionary mode object @@ -1512,7 +1617,7 @@ void AccessorAssembler::HandleStoreICProtoHandler( // Context is stored either in data2 or data3 field depending on whether // the access check is enabled for this handler or not. TNode<MaybeObject> maybe_context = Select<MaybeObject>( - IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word), + IsSetWord32<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word), [=] { return LoadHandlerDataField(handler, 3); }, [=] { return LoadHandlerDataField(handler, 2); }); @@ -1530,13 +1635,13 @@ void AccessorAssembler::HandleStoreICProtoHandler( VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver()); Label store(this); - GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)), + GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)), &store); - CSA_ASSERT( - this, - WordEqual(handler_kind, - IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype))); + CSA_ASSERT(this, + Word32Equal( + handler_kind, + Int32Constant(StoreHandler::kApiSetterHolderIsPrototype))); api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver()))); Goto(&store); @@ -1559,8 +1664,8 @@ void AccessorAssembler::HandleStoreICProtoHandler( void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p, Node* proxy, Label* miss, ElementSupport support_elements) { - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_index(this), if_unique_name(this), to_name_failed(this, Label::kDeferred); @@ -1591,128 +1696,200 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p, } } -void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word, - Node* holder, Node* value, - Label* miss) { +void AccessorAssembler::HandleStoreICSmiHandlerCase( + SloppyTNode<Word32T> handler_word, SloppyTNode<JSObject> holder, + SloppyTNode<Object> value, Label* miss) { Comment("field store"); #ifdef DEBUG - TNode<UintPtrT> handler_kind = - DecodeWord<StoreHandler::KindBits>(handler_word); + TNode<Uint32T> handler_kind = + DecodeWord32<StoreHandler::KindBits>(handler_word); CSA_ASSERT( this, Word32Or( - WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)), - WordEqual(handler_kind, IntPtrConstant(StoreHandler::kConstField)))); + Word32Equal(handler_kind, Int32Constant(StoreHandler::kField)), + Word32Equal(handler_kind, Int32Constant(StoreHandler::kConstField)))); #endif - TNode<UintPtrT> field_representation = - DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word); + TNode<Uint32T> field_representation = + DecodeWord32<StoreHandler::RepresentationBits>(handler_word); Label if_smi_field(this), if_double_field(this), if_heap_object_field(this), if_tagged_field(this); - GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)), - &if_tagged_field); - GotoIf(WordEqual(field_representation, - 
IntPtrConstant(StoreHandler::kHeapObject)), - &if_heap_object_field); - GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)), - &if_double_field); - CSA_ASSERT(this, WordEqual(field_representation, - IntPtrConstant(StoreHandler::kSmi))); - Goto(&if_smi_field); + int32_t case_values[] = {Representation::kTagged, Representation::kHeapObject, + Representation::kSmi}; + Label* case_labels[] = {&if_tagged_field, &if_heap_object_field, + &if_smi_field}; + + Switch(field_representation, &if_double_field, case_values, case_labels, 3); BIND(&if_tagged_field); { Comment("store tagged field"); - HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(), - value, miss); - } - - BIND(&if_double_field); - { - Comment("store double field"); - HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(), - value, miss); + HandleStoreFieldAndReturn(handler_word, holder, value, base::nullopt, + Representation::Tagged(), miss); } BIND(&if_heap_object_field); { + Comment("heap object field checks"); + CheckHeapObjectTypeMatchesDescriptor(handler_word, holder, value, miss); + Comment("store heap object field"); - HandleStoreFieldAndReturn(handler_word, holder, - Representation::HeapObject(), value, miss); + HandleStoreFieldAndReturn(handler_word, holder, value, base::nullopt, + Representation::HeapObject(), miss); } BIND(&if_smi_field); { + Comment("smi field checks"); + GotoIfNot(TaggedIsSmi(value), miss); + Comment("store smi field"); - HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(), - value, miss); + HandleStoreFieldAndReturn(handler_word, holder, value, base::nullopt, + Representation::Smi(), miss); + } + + BIND(&if_double_field); + { + CSA_ASSERT(this, Word32Equal(field_representation, + Int32Constant(Representation::kDouble))); + Comment("double field checks"); + TNode<Float64T> double_value = TryTaggedToFloat64(value, miss); + CheckDescriptorConsidersNumbersMutable(handler_word, holder, miss); + + Comment("store double field"); + HandleStoreFieldAndReturn(handler_word, holder, value, double_value, + Representation::Double(), miss); } } -void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word, - Node* holder, - Representation representation, - Node* value, Label* miss) { - Node* prepared_value = - PrepareValueForStore(handler_word, holder, representation, value, miss); +void AccessorAssembler::CheckHeapObjectTypeMatchesDescriptor( + TNode<Word32T> handler_word, TNode<JSObject> holder, TNode<Object> value, + Label* bailout) { + GotoIf(TaggedIsSmi(value), bailout); - Label if_inobject(this), if_out_of_object(this); - Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject, - &if_out_of_object); + Label done(this); + // Skip field type check in favor of constant value check when storing + // to constant field. + GotoIf(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word), + Int32Constant(StoreHandler::kConstField)), + &done); + TNode<IntPtrT> descriptor = + Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word)); + TNode<MaybeObject> maybe_field_type = + LoadDescriptorValueOrFieldType(LoadMap(holder), descriptor); - BIND(&if_inobject); + GotoIf(TaggedIsSmi(maybe_field_type), &done); + // Check that value type matches the field type. 
{ - StoreNamedField(handler_word, holder, true, representation, prepared_value, - miss); - Return(value); + TNode<HeapObject> field_type = + GetHeapObjectAssumeWeak(maybe_field_type, bailout); + Branch(TaggedEqual(LoadMap(CAST(value)), field_type), &done, bailout); } + BIND(&done); +} - BIND(&if_out_of_object); - { - StoreNamedField(handler_word, holder, false, representation, prepared_value, - miss); - Return(value); - } +void AccessorAssembler::CheckDescriptorConsidersNumbersMutable( + TNode<Word32T> handler_word, TNode<JSObject> holder, Label* bailout) { + // We have to check that the representation is Double. Checking the value + // (either in the field or being assigned) is not enough, as we could have + // transitioned to Tagged but still be holding a HeapNumber, which would no + // longer be allowed to be mutable. + + // TODO(leszeks): We could skip the representation check in favor of a + // constant value check in HandleStoreFieldAndReturn here, but then + // HandleStoreFieldAndReturn would need an IsHeapNumber check in case both the + // representation changed and the value is no longer a HeapNumber. + TNode<IntPtrT> descriptor_entry = + Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word)); + TNode<DescriptorArray> descriptors = LoadMapDescriptors(LoadMap(holder)); + TNode<Uint32T> details = + LoadDetailsByDescriptorEntry(descriptors, descriptor_entry); + + GotoIfNot(IsEqualInWord32<PropertyDetails::RepresentationField>( + details, Representation::kDouble), + bailout); } -Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder, - Representation representation, - Node* value, Label* bailout) { - if (representation.IsDouble()) { - value = TryTaggedToFloat64(value, bailout); +void AccessorAssembler::HandleStoreFieldAndReturn( + TNode<Word32T> handler_word, TNode<JSObject> holder, TNode<Object> value, + base::Optional<TNode<Float64T>> double_value, Representation representation, + Label* miss) { + Label done(this); - } else if (representation.IsHeapObject()) { - GotoIf(TaggedIsSmi(value), bailout); + bool store_value_as_double = representation.IsDouble(); - Label done(this); - // Skip field type check in favor of constant value check when storing - // to constant field. - GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word), - IntPtrConstant(StoreHandler::kConstField)), - &done); - TNode<IntPtrT> descriptor = - Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word)); - TNode<MaybeObject> maybe_field_type = - LoadDescriptorValueOrFieldType(LoadMap(holder), descriptor); + TNode<BoolT> is_inobject = + IsSetWord32<StoreHandler::IsInobjectBits>(handler_word); + TNode<HeapObject> property_storage = Select<HeapObject>( + is_inobject, [&]() { return holder; }, + [&]() { return LoadFastProperties(holder); }); - GotoIf(TaggedIsSmi(maybe_field_type), &done); - // Check that value type matches the field type. - { - TNode<HeapObject> field_type = - GetHeapObjectAssumeWeak(maybe_field_type, bailout); - Branch(TaggedEqual(LoadMap(CAST(value)), field_type), &done, bailout); + TNode<UintPtrT> index = + DecodeWordFromWord32<StoreHandler::FieldIndexBits>(handler_word); + TNode<IntPtrT> offset = Signed(TimesTaggedSize(index)); + + // For Double fields, we want to mutate the current double-value + // field rather than changing it to point at a new HeapNumber. 
+ if (store_value_as_double) { + TVARIABLE(HeapObject, actual_property_storage, property_storage); + TVARIABLE(IntPtrT, actual_offset, offset); + + Label property_and_offset_ready(this); + + // If we are unboxing double fields, and this is an in-object field, the + // property_storage and offset are already pointing to the double-valued + // field. + if (FLAG_unbox_double_fields) { + GotoIf(is_inobject, &property_and_offset_ready); } - BIND(&done); - } else if (representation.IsSmi()) { - GotoIfNot(TaggedIsSmi(value), bailout); + // Store the double value directly into the mutable HeapNumber. + TNode<Object> field = LoadObjectField(property_storage, offset); + CSA_ASSERT(this, IsHeapNumber(CAST(field))); + actual_property_storage = CAST(field); + actual_offset = IntPtrConstant(HeapNumber::kValueOffset); + Goto(&property_and_offset_ready); + + BIND(&property_and_offset_ready); + property_storage = actual_property_storage.value(); + offset = actual_offset.value(); + } + + // Do constant value check if necessary. + Label do_store(this); + GotoIfNot(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word), + Int32Constant(StoreHandler::kConstField)), + &do_store); + { + if (store_value_as_double) { + Label done(this); + TNode<Float64T> current_value = + LoadObjectField<Float64T>(property_storage, offset); + BranchIfSameNumberValue(current_value, *double_value, &done, miss); + BIND(&done); + Return(value); + } else { + TNode<Object> current_value = LoadObjectField(property_storage, offset); + GotoIfNot(TaggedEqual(current_value, value), miss); + Return(value); + } + } + BIND(&do_store); + // Do the store. + if (store_value_as_double) { + StoreObjectFieldNoWriteBarrier(property_storage, offset, *double_value, + MachineRepresentation::kFloat64); + } else if (representation.IsSmi()) { + TNode<Smi> value_smi = CAST(value); + StoreObjectFieldNoWriteBarrier(property_storage, offset, value_smi); } else { - DCHECK(representation.IsTagged()); + StoreObjectField(property_storage, offset, value); } - return value; + + Return(value); } Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object, @@ -1737,7 +1914,7 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object, BIND(&if_smi_hash); { TNode<Int32T> hash = SmiToInt32(CAST(properties)); - TNode<Word32T> encoded_hash = + TNode<Int32T> encoded_hash = Word32Shl(hash, Int32Constant(PropertyArray::HashField::kShift)); var_encoded_hash.Bind(encoded_hash); var_length.Bind(IntPtrOrSmiConstant(0, mode)); @@ -1813,59 +1990,6 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object, } } -void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object, - bool is_inobject, - Representation representation, - Node* value, Label* bailout) { - bool store_value_as_double = representation.IsDouble(); - Node* property_storage = object; - if (!is_inobject) { - property_storage = LoadFastProperties(object); - } - - TNode<UintPtrT> index = - DecodeWord<StoreHandler::FieldIndexBits>(handler_word); - TNode<IntPtrT> offset = Signed(TimesTaggedSize(index)); - if (representation.IsDouble()) { - if (!FLAG_unbox_double_fields || !is_inobject) { - // Load the mutable heap number. - property_storage = LoadObjectField(property_storage, offset); - // Store the double value into it. - offset = IntPtrConstant(HeapNumber::kValueOffset); - } - } - - // Do constant value check if necessary. 
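The HandleStoreFieldAndReturn code above stores a double-represented field by writing into the existing heap-number box (or, with unboxed double fields, directly into the object) instead of repointing the field at a newly allocated number. A minimal standalone sketch of that idea, using hypothetical plain-C++ types rather than V8's HeapNumber or CSA calls:

#include <cassert>
#include <memory>

// Stand-in for a mutable heap-number box referenced by a tagged field.
struct HeapNumberBox {
  double value;  // the payload that in-place stores overwrite
};

struct Field {
  bool is_double;                      // representation recorded in the map
  std::shared_ptr<HeapNumberBox> box;  // used when is_double is true
};

// Store into a double field: mutate the existing box, no new allocation and
// no update of the field pointer itself.
void StoreDoubleField(Field* field, double new_value) {
  assert(field->is_double && field->box != nullptr);
  field->box->value = new_value;
}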
- Label const_checked(this); - GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word), - IntPtrConstant(StoreHandler::kConstField)), - &const_checked); - { - if (store_value_as_double) { - TNode<Float64T> current_value = - LoadObjectField<Float64T>(CAST(property_storage), offset); - BranchIfSameNumberValue(current_value, UncheckedCast<Float64T>(value), - &const_checked, bailout); - } else { - TNode<Object> current_value = LoadObjectField(property_storage, offset); - Branch(TaggedEqual(current_value, UncheckedCast<Object>(value)), - &const_checked, bailout); - } - } - - BIND(&const_checked); - // Do the store. - if (store_value_as_double) { - StoreObjectFieldNoWriteBarrier(property_storage, offset, value, - MachineRepresentation::kFloat64); - } else if (representation.IsSmi()) { - TNode<Smi> value_smi = CAST(value); - StoreObjectFieldNoWriteBarrier(property_storage, offset, value_smi); - } else { - StoreObjectField(property_storage, offset, value); - } -} - void AccessorAssembler::EmitFastElementsBoundsCheck(Node* object, Node* elements, Node* intptr_index, @@ -2012,8 +2136,7 @@ void AccessorAssembler::EmitElementLoad( if (access_mode == LoadAccessMode::kHas) { exit_point->Return(TrueConstant()); } else { - TNode<RawPtrT> backing_store = - LoadJSTypedArrayBackingStore(CAST(object)); + TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(object)); Label uint8_elements(this), int8_elements(this), uint16_elements(this), int16_elements(this), uint32_elements(this), int32_elements(this), @@ -2039,50 +2162,48 @@ void AccessorAssembler::EmitElementLoad( BIND(&uint8_elements); { Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too. - Node* element = - Load(MachineType::Uint8(), backing_store, intptr_index); + Node* element = Load(MachineType::Uint8(), data_ptr, intptr_index); exit_point->Return(SmiFromInt32(element)); } BIND(&int8_elements); { Comment("INT8_ELEMENTS"); - Node* element = - Load(MachineType::Int8(), backing_store, intptr_index); + Node* element = Load(MachineType::Int8(), data_ptr, intptr_index); exit_point->Return(SmiFromInt32(element)); } BIND(&uint16_elements); { Comment("UINT16_ELEMENTS"); TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1)); - Node* element = Load(MachineType::Uint16(), backing_store, index); + Node* element = Load(MachineType::Uint16(), data_ptr, index); exit_point->Return(SmiFromInt32(element)); } BIND(&int16_elements); { Comment("INT16_ELEMENTS"); TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1)); - Node* element = Load(MachineType::Int16(), backing_store, index); + Node* element = Load(MachineType::Int16(), data_ptr, index); exit_point->Return(SmiFromInt32(element)); } BIND(&uint32_elements); { Comment("UINT32_ELEMENTS"); TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2)); - Node* element = Load(MachineType::Uint32(), backing_store, index); + Node* element = Load(MachineType::Uint32(), data_ptr, index); exit_point->Return(ChangeUint32ToTagged(element)); } BIND(&int32_elements); { Comment("INT32_ELEMENTS"); TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2)); - Node* element = Load(MachineType::Int32(), backing_store, index); + Node* element = Load(MachineType::Int32(), data_ptr, index); exit_point->Return(ChangeInt32ToTagged(element)); } BIND(&float32_elements); { Comment("FLOAT32_ELEMENTS"); TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2)); - Node* element = Load(MachineType::Float32(), backing_store, index); + Node* element = Load(MachineType::Float32(), data_ptr, 
index); var_double_value->Bind(ChangeFloat32ToFloat64(element)); Goto(rebox_double); } @@ -2090,7 +2211,7 @@ void AccessorAssembler::EmitElementLoad( { Comment("FLOAT64_ELEMENTS"); TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(3)); - Node* element = Load(MachineType::Float64(), backing_store, index); + Node* element = Load(MachineType::Float64(), data_ptr, index); var_double_value->Bind(element); Goto(rebox_double); } @@ -2098,15 +2219,13 @@ void AccessorAssembler::EmitElementLoad( { Comment("BIGINT64_ELEMENTS"); exit_point->Return(LoadFixedTypedArrayElementAsTagged( - backing_store, intptr_index, BIGINT64_ELEMENTS, - INTPTR_PARAMETERS)); + data_ptr, intptr_index, BIGINT64_ELEMENTS, INTPTR_PARAMETERS)); } BIND(&biguint64_elements); { Comment("BIGUINT64_ELEMENTS"); exit_point->Return(LoadFixedTypedArrayElementAsTagged( - backing_store, intptr_index, BIGUINT64_ELEMENTS, - INTPTR_PARAMETERS)); + data_ptr, intptr_index, BIGUINT64_ELEMENTS, INTPTR_PARAMETERS)); } } } @@ -2152,7 +2271,8 @@ void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map, BIND(&cont); } -void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map, +void AccessorAssembler::GenericElementLoad(Node* receiver, + TNode<Map> receiver_map, SloppyTNode<Int32T> instance_type, Node* index, Label* slow) { Comment("integer index"); @@ -2213,11 +2333,9 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map, } } -void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map, - SloppyTNode<Int32T> instance_type, - const LoadICParameters* p, - Label* slow, - UseStubCache use_stub_cache) { +void AccessorAssembler::GenericPropertyLoad( + Node* receiver, TNode<Map> receiver_map, SloppyTNode<Int32T> instance_type, + const LoadICParameters* p, Label* slow, UseStubCache use_stub_cache) { ExitPoint direct_exit(this); Comment("key is unique name"); @@ -2317,13 +2435,13 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map, BIND(&lookup_prototype_chain); { - VARIABLE(var_holder_map, MachineRepresentation::kTagged); + TVARIABLE(Map, var_holder_map); VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32); Label return_undefined(this), is_private_symbol(this); Variable* merged_variables[] = {&var_holder_map, &var_holder_instance_type}; Label loop(this, arraysize(merged_variables), merged_variables); - var_holder_map.Bind(receiver_map); + var_holder_map = receiver_map; var_holder_instance_type.Bind(instance_type); GotoIf(IsPrivateSymbol(name), &is_private_symbol); @@ -2338,7 +2456,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map, GotoIf(TaggedEqual(proto, NullConstant()), &return_undefined); TNode<Map> proto_map = LoadMap(proto); TNode<Uint16T> proto_instance_type = LoadMapInstanceType(proto_map); - var_holder_map.Bind(proto_map); + var_holder_map = proto_map; var_holder_instance_type.Bind(proto_instance_type); Label next_proto(this), return_value(this, &var_value), goto_slow(this); TryGetOwnProperty(p->context(), receiver, proto, proto_map, @@ -2394,8 +2512,6 @@ enum AccessorAssembler::StubCacheTable : int { }; Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) { - // See v8::internal::StubCache::PrimaryOffset(). - STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift); // Compute the hash of the name (use entire hash field). 
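The StubCachePrimaryOffset/StubCacheSecondaryOffset hunk beginning above derives table offsets from the name's hash and the receiver map, probing a primary and then a secondary table. As a rough analogue, a two-level direct-mapped cache keyed by (map, name) can look like the sketch below; the hash mixing, table sizes, and names here are illustrative only, not V8's actual constants or layout:

#include <cstddef>
#include <cstdint>

struct Entry {
  const void* name;     // interned name, compared by pointer identity
  const void* map;      // receiver map
  const void* handler;  // cached handler for this (map, name) pair
};

constexpr size_t kPrimarySize = 2048;  // illustrative power-of-two sizes
constexpr size_t kSecondarySize = 512;

Entry primary[kPrimarySize];
Entry secondary[kSecondarySize];

// Mix the name hash and the map pointer into a primary-table index.
size_t PrimaryIndex(uint32_t name_hash, const void* map) {
  uint32_t h =
      name_hash + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
  return h & (kPrimarySize - 1);
}

// Derive the secondary index from the primary seed and the name.
size_t SecondaryIndex(uint32_t primary_seed, const void* name) {
  uint32_t h =
      primary_seed - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
  return h & (kSecondarySize - 1);
}

// Probe both tables; a null result corresponds to falling through to the
// generic miss path.
const void* Probe(uint32_t name_hash, const void* name, const void* map) {
  Entry& p = primary[PrimaryIndex(name_hash, map)];
  if (p.name == name && p.map == map) return p.handler;
  Entry& s = secondary[SecondaryIndex(name_hash, name)];
  if (s.name == name && s.map == map) return s.handler;
  return nullptr;
}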
TNode<Uint32T> hash_field = LoadNameHashField(name); CSA_ASSERT(this, @@ -2422,7 +2538,7 @@ Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) { // Use the seed from the primary cache in the secondary cache. TNode<Int32T> name32 = TruncateIntPtrToInt32(BitcastTaggedToWord(name)); - TNode<Word32T> hash = Int32Sub(TruncateIntPtrToInt32(seed), name32); + TNode<Int32T> hash = Int32Sub(TruncateIntPtrToInt32(seed), name32); hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic)); int32_t mask = (StubCache::kSecondaryTableSize - 1) << StubCache::kCacheIndexShift; @@ -2436,7 +2552,8 @@ void AccessorAssembler::TryProbeStubCacheTable( StubCache::Table table = static_cast<StubCache::Table>(table_id); // The {table_offset} holds the entry offset times four (due to masking // and shifting optimizations). - const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift; + const int kMultiplier = + sizeof(StubCache::Entry) >> StubCache::kCacheIndexShift; entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier)); TNode<ExternalReference> key_base = ExternalConstant( @@ -2527,7 +2644,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p, Label try_polymorphic(this), if_handler(this, &var_handler); TNode<MaybeObject> feedback = - TryMonomorphicCase(p->slot(), p->vector(), recv_map, &if_handler, + TryMonomorphicCase(p->slot(), CAST(p->vector()), recv_map, &if_handler, &var_handler, &try_polymorphic); BIND(&if_handler); @@ -2589,8 +2706,8 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) { // Check monomorphic case. TNode<MaybeObject> feedback = - TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler, - &var_handler, &try_polymorphic); + TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map, + &if_handler, &var_handler, &try_polymorphic); BIND(&if_handler); { LazyLoadICParameters lazy_p(p); @@ -2673,21 +2790,25 @@ void AccessorAssembler::LoadIC_NoFeedback(const LoadICParameters* p) { } } -void AccessorAssembler::LoadGlobalIC(Node* vector, Node* slot, +void AccessorAssembler::LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector, + const LazyNode<Smi>& lazy_smi_slot, + const LazyNode<UintPtrT>& lazy_slot, const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name, TypeofMode typeof_mode, - ExitPoint* exit_point, - ParameterMode slot_mode) { + ExitPoint* exit_point) { Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred); - GotoIf(IsUndefined(vector), &miss); - - LoadGlobalIC_TryPropertyCellCase(CAST(vector), slot, lazy_context, exit_point, - &try_handler, &miss, slot_mode); + GotoIf(IsUndefined(maybe_feedback_vector), &miss); + { + TNode<FeedbackVector> vector = CAST(maybe_feedback_vector); + TNode<UintPtrT> slot = lazy_slot(); + LoadGlobalIC_TryPropertyCellCase(vector, slot, lazy_context, exit_point, + &try_handler, &miss); - BIND(&try_handler); - LoadGlobalIC_TryHandlerCase(CAST(vector), slot, lazy_context, lazy_name, - typeof_mode, exit_point, &miss, slot_mode); + BIND(&try_handler); + LoadGlobalIC_TryHandlerCase(vector, slot, lazy_smi_slot, lazy_context, + lazy_name, typeof_mode, exit_point, &miss); + } BIND(&miss); { @@ -2695,20 +2816,19 @@ void AccessorAssembler::LoadGlobalIC(Node* vector, Node* slot, TNode<Context> context = lazy_context(); TNode<Name> name = lazy_name(); exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, - ParameterToTagged(slot, slot_mode), vector, + lazy_smi_slot(), maybe_feedback_vector, SmiConstant(typeof_mode)); } } void 
AccessorAssembler::LoadGlobalIC_TryPropertyCellCase( - TNode<FeedbackVector> vector, Node* slot, + TNode<FeedbackVector> vector, TNode<UintPtrT> slot, const LazyNode<Context>& lazy_context, ExitPoint* exit_point, - Label* try_handler, Label* miss, ParameterMode slot_mode) { + Label* try_handler, Label* miss) { Comment("LoadGlobalIC_TryPropertyCellCase"); Label if_lexical_var(this), if_property_cell(this); - TNode<MaybeObject> maybe_weak_ref = - LoadFeedbackVectorSlot(vector, slot, 0, slot_mode); + TNode<MaybeObject> maybe_weak_ref = LoadFeedbackVectorSlot(vector, slot); Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_property_cell); BIND(&if_property_cell); @@ -2739,16 +2859,16 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase( } void AccessorAssembler::LoadGlobalIC_TryHandlerCase( - TNode<FeedbackVector> vector, Node* slot, - const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name, - TypeofMode typeof_mode, ExitPoint* exit_point, Label* miss, - ParameterMode slot_mode) { + TNode<FeedbackVector> vector, TNode<UintPtrT> slot, + const LazyNode<Smi>& lazy_smi_slot, const LazyNode<Context>& lazy_context, + const LazyNode<Name>& lazy_name, TypeofMode typeof_mode, + ExitPoint* exit_point, Label* miss) { Comment("LoadGlobalIC_TryHandlerCase"); Label call_handler(this), non_smi(this); TNode<MaybeObject> feedback_element = - LoadFeedbackVectorSlot(vector, slot, kTaggedSize, slot_mode); + LoadFeedbackVectorSlot(vector, slot, kTaggedSize); TNode<Object> handler = CAST(feedback_element); GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), miss); @@ -2757,14 +2877,14 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase( : OnNonExistent::kReturnUndefined; TNode<Context> context = lazy_context(); - TNode<Context> native_context = LoadNativeContext(context); + TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSGlobalProxy> receiver = CAST(LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX)); TNode<Object> holder = LoadContextElement(native_context, Context::EXTENSION_INDEX); LazyLoadICParameters p([=] { return context; }, receiver, lazy_name, - ParameterToTagged(slot, slot_mode), vector, holder); + lazy_smi_slot, vector, holder); HandleLoadICHandlerCase(&p, handler, miss, exit_point, ICMode::kGlobalIC, on_nonexistent); @@ -2788,8 +2908,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p, // Check monomorphic case. TNode<MaybeObject> feedback = - TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler, - &var_handler, &try_polymorphic); + TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map, + &if_handler, &var_handler, &try_polymorphic); BIND(&if_handler); { LazyLoadICParameters lazy_p(p); @@ -2840,13 +2960,13 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p, // We might have a name in feedback, and a weak fixed array in the next // slot. Comment("KeyedLoadIC_try_polymorphic_name"); - TVARIABLE(Object, var_name, p->name()); + TVARIABLE(Name, var_name); TVARIABLE(IntPtrT, var_index); - Label if_polymorphic_name(this, &var_name), if_internalized(this), - if_notinternalized(this, Label::kDeferred); + Label if_polymorphic_name(this), feedback_matches(this), + if_internalized(this), if_notinternalized(this, Label::kDeferred); // Fast-case: The recorded {feedback} matches the {name}. - GotoIf(TaggedEqual(strong_feedback, p->name()), &if_polymorphic_name); + GotoIf(TaggedEqual(strong_feedback, p->name()), &feedback_matches); // Try to internalize the {name} if it isn't already. 
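The LoadGlobalIC refactoring above threads the feedback slot through as lazy thunks (lazy_smi_slot / lazy_slot), so the Smi-tagged form is only materialized on the paths that need it, such as the runtime miss call. A minimal sketch of that deferred-computation pattern, using std::function in place of V8's LazyNode and made-up names throughout:

#include <cstdint>
#include <functional>
#include <iostream>

// A "lazy" value: computed only if and when the consumer asks for it.
template <typename T>
using Lazy = std::function<T()>;

void LoadGlobal(Lazy<uint32_t> lazy_slot, Lazy<int64_t> lazy_tagged_slot,
                bool fast_path_hits) {
  uint32_t slot = lazy_slot();  // always needed to index the feedback vector
  std::cout << "probe feedback slot " << slot << "\n";
  if (fast_path_hits) return;  // tagged form never computed on the fast path
  // Only the miss path pays for producing the tagged (Smi-like) slot value.
  std::cout << "call runtime with tagged slot " << lazy_tagged_slot() << "\n";
}

int main() {
  uint32_t raw_slot = 7;
  LoadGlobal([=] { return raw_slot; },
             [=] { return static_cast<int64_t>(raw_slot) << 1; },  // fake tagging
             /*fast_path_hits=*/true);
}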
TryToName(p->name(), &miss, &var_index, &if_internalized, &var_name, &miss, @@ -2861,16 +2981,15 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p, BIND(&if_notinternalized); { - // Try to internalize the {name}. - TNode<ExternalReference> function = ExternalConstant( - ExternalReference::try_internalize_string_function()); - TNode<ExternalReference> const isolate_ptr = - ExternalConstant(ExternalReference::isolate_address(isolate())); - var_name = CAST( - CallCFunction(function, MachineType::AnyTagged(), - std::make_pair(MachineType::Pointer(), isolate_ptr), - std::make_pair(MachineType::AnyTagged(), p->name()))); - Goto(&if_internalized); + TVARIABLE(IntPtrT, var_index); + TryInternalizeString(CAST(p->name()), &miss, &var_index, &if_internalized, + &var_name, &miss, &miss); + } + + BIND(&feedback_matches); + { + var_name = CAST(p->name()); + Goto(&if_polymorphic_name); } BIND(&if_polymorphic_name); @@ -2896,71 +3015,74 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p, } void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) { - TVARIABLE(IntPtrT, var_index); - TVARIABLE(Object, var_unique, p->name()); - Label if_index(this), if_unique_name(this), if_notunique(this), - if_other(this, Label::kDeferred), if_runtime(this, Label::kDeferred); + TVARIABLE(Object, var_name, p->name()); + Label if_runtime(this, Label::kDeferred); Node* receiver = p->receiver(); GotoIf(TaggedIsSmi(receiver), &if_runtime); GotoIf(IsNullOrUndefined(receiver), &if_runtime); - TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique, - &if_other, &if_notunique); - - BIND(&if_other); { - TNode<Name> name = - CAST(CallBuiltin(Builtins::kToName, p->context(), p->name())); - var_unique = name; - TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique, - &if_runtime, &if_notunique); - } + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); + Label if_index(this), if_unique_name(this, &var_name), if_notunique(this), + if_other(this, Label::kDeferred); - BIND(&if_index); - { - TNode<Map> receiver_map = LoadMap(receiver); - TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); - GenericElementLoad(receiver, receiver_map, instance_type, var_index.value(), - &if_runtime); - } + TryToName(var_name.value(), &if_index, &var_index, &if_unique_name, + &var_unique, &if_other, &if_notunique); - BIND(&if_unique_name); - { - LoadICParameters pp(p, var_unique.value()); - TNode<Map> receiver_map = LoadMap(receiver); - TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); - GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, - &if_runtime); - } + BIND(&if_unique_name); + { + LoadICParameters pp(p, var_unique.value()); + TNode<Map> receiver_map = LoadMap(receiver); + TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); + GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, + &if_runtime); + } - BIND(&if_notunique); - { - if (FLAG_internalize_on_the_fly) { - // Ideally we could return undefined directly here if the name is not - // found in the string table, i.e. it was never internalized, but that - // invariant doesn't hold with named property interceptors (at this - // point), so we take the {if_runtime} path instead. 
- Label if_in_string_table(this); - TryInternalizeString(var_unique.value(), &if_index, &var_index, - &if_in_string_table, &var_unique, &if_runtime, - &if_runtime); + BIND(&if_other); + { + var_name = CallBuiltin(Builtins::kToName, p->context(), var_name.value()); + TryToName(var_name.value(), &if_index, &var_index, &if_unique_name, + &var_unique, &if_runtime, &if_notunique); + } - BIND(&if_in_string_table); - { - // TODO(bmeurer): We currently use a version of GenericPropertyLoad - // here, where we don't try to probe the megamorphic stub cache after - // successfully internalizing the incoming string. Past experiments - // with this have shown that it causes too much traffic on the stub - // cache. We may want to re-evaluate that in the future. - LoadICParameters pp(p, var_unique.value()); - TNode<Map> receiver_map = LoadMap(receiver); - TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); - GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, - &if_runtime, kDontUseStubCache); + BIND(&if_notunique); + { + if (FLAG_internalize_on_the_fly) { + // Ideally we could return undefined directly here if the name is not + // found in the string table, i.e. it was never internalized, but that + // invariant doesn't hold with named property interceptors (at this + // point), so we take the {if_runtime} path instead. + Label if_in_string_table(this); + TryInternalizeString(CAST(var_name.value()), &if_index, &var_index, + &if_in_string_table, &var_unique, &if_runtime, + &if_runtime); + + BIND(&if_in_string_table); + { + // TODO(bmeurer): We currently use a version of GenericPropertyLoad + // here, where we don't try to probe the megamorphic stub cache + // after successfully internalizing the incoming string. Past + // experiments with this have shown that it causes too much traffic + // on the stub cache. We may want to re-evaluate that in the future. + LoadICParameters pp(p, var_unique.value()); + TNode<Map> receiver_map = LoadMap(receiver); + TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); + GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, + &if_runtime, kDontUseStubCache); + } + } else { + Goto(&if_runtime); } - } else { - Goto(&if_runtime); + } + + BIND(&if_index); + { + TNode<Map> receiver_map = LoadMap(receiver); + TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); + GenericElementLoad(receiver, receiver_map, instance_type, + var_index.value(), &if_runtime); } } @@ -2970,7 +3092,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) { IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1); // TODO(jkummerow): Should we use the GetProperty TF stub instead? TailCallRuntime(Runtime::kGetProperty, p->context(), p->receiver(), - var_unique.value()); + var_name.value()); } } @@ -2982,22 +3104,20 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p, Node* receiver = p->receiver(); TNode<Map> receiver_map = LoadReceiverMap(receiver); TNode<Name> name = CAST(p->name()); - Node* vector = p->vector(); - Node* slot = p->slot(); + TNode<FeedbackVector> vector = CAST(p->vector()); + TNode<Smi> slot = p->slot(); TNode<Context> context = p->context(); // When we get here, we know that the {name} matches the recorded // feedback name in the {vector} and can safely be used for the // LoadIC handler logic below. 
CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(receiver_map))); - CSA_ASSERT(this, - TaggedEqual( - name, LoadFeedbackVectorSlot(vector, slot, 0, SMI_PARAMETERS)), + CSA_ASSERT(this, TaggedEqual(name, LoadFeedbackVectorSlot(vector, slot)), name, vector); // Check if we have a matching handler for the {receiver_map}. TNode<MaybeObject> feedback_element = - LoadFeedbackVectorSlot(vector, slot, kTaggedSize, SMI_PARAMETERS); + LoadFeedbackVectorSlot(vector, slot, kTaggedSize); TNode<WeakFixedArray> array = CAST(feedback_element); HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss); @@ -3038,8 +3158,8 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) { // Check monomorphic case. TNode<MaybeObject> feedback = - TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler, - &var_handler, &try_polymorphic); + TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map, + &if_handler, &var_handler, &try_polymorphic); BIND(&if_handler); { Comment("StoreIC_if_handler"); @@ -3082,17 +3202,12 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) { void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) { Label if_lexical_var(this), if_heapobject(this); TNode<MaybeObject> maybe_weak_ref = - LoadFeedbackVectorSlot(pp->vector(), pp->slot(), 0, SMI_PARAMETERS); + LoadFeedbackVectorSlot(CAST(pp->vector()), pp->slot()); Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_heapobject); BIND(&if_heapobject); { Label try_handler(this), miss(this, Label::kDeferred); - // We use pre-monomorphic state for global stores that run into - // interceptors because the property doesn't exist yet. Using - // pre-monomorphic state gives it a chance to find more information the - // second time. - GotoIf(TaggedEqual(maybe_weak_ref, PremonomorphicSymbolConstant()), &miss); CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref)); TNode<PropertyCell> property_cell = @@ -3105,13 +3220,13 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) { BIND(&try_handler); { Comment("StoreGlobalIC_try_handler"); - TNode<MaybeObject> handler = LoadFeedbackVectorSlot( - pp->vector(), pp->slot(), kTaggedSize, SMI_PARAMETERS); + TNode<MaybeObject> handler = + LoadFeedbackVectorSlot(CAST(pp->vector()), pp->slot(), kTaggedSize); GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), &miss); DCHECK_NULL(pp->receiver()); - TNode<Context> native_context = LoadNativeContext(pp->context()); + TNode<NativeContext> native_context = LoadNativeContext(pp->context()); StoreICParameters p( pp->context(), LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX), @@ -3225,8 +3340,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) { // Check monomorphic case. TNode<MaybeObject> feedback = - TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler, - &var_handler, &try_polymorphic); + TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map, + &if_handler, &var_handler, &try_polymorphic); BIND(&if_handler); { Comment("KeyedStoreIC_if_handler"); @@ -3266,8 +3381,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) { GotoIfNot(TaggedEqual(strong_feedback, p->name()), &miss); // If the name comparison succeeded, we know we have a feedback vector // with at least one map/handler pair. 
- TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot( - p->vector(), p->slot(), kTaggedSize, SMI_PARAMETERS); + TNode<MaybeObject> feedback_element = + LoadFeedbackVectorSlot(CAST(p->vector()), p->slot(), kTaggedSize); TNode<WeakFixedArray> array = CAST(feedback_element); HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss); @@ -3296,16 +3411,20 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) { GotoIf(IsUndefined(p->vector()), &miss); TNode<MaybeObject> feedback = - TryMonomorphicCase(p->slot(), p->vector(), array_map, &if_handler, + TryMonomorphicCase(p->slot(), CAST(p->vector()), array_map, &if_handler, &var_handler, &try_polymorphic); BIND(&if_handler); { Comment("StoreInArrayLiteralIC_if_handler"); // This is a stripped-down version of HandleStoreICHandlerCase. + Label if_transitioning_element_store(this), if_smi_handler(this); + + // Check used to identify the Slow case. + // Currently only the Slow case uses a Smi handler. + GotoIf(TaggedIsSmi(var_handler.value()), &if_smi_handler); TNode<HeapObject> handler = CAST(var_handler.value()); - Label if_transitioning_element_store(this); GotoIfNot(IsCode(handler), &if_transitioning_element_store); TailCallStub(StoreWithVectorDescriptor{}, CAST(handler), p->context(), p->receiver(), p->name(), p->value(), p->slot(), @@ -3324,6 +3443,22 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) { p->receiver(), p->name(), transition_map, p->value(), p->slot(), p->vector()); } + + BIND(&if_smi_handler); + { +#ifdef DEBUG + // A check to ensure that no other Smi handler uses this path. + TNode<Int32T> handler_word = SmiToInt32(CAST(var_handler.value())); + TNode<Uint32T> handler_kind = + DecodeWord32<StoreHandler::KindBits>(handler_word); + CSA_ASSERT(this, Word32Equal(handler_kind, + Int32Constant(StoreHandler::kSlow))); +#endif + + Comment("StoreInArrayLiteralIC_Slow"); + TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context(), + p->value(), p->receiver(), p->name()); + } } BIND(&try_polymorphic); @@ -3366,7 +3501,7 @@ void AccessorAssembler::GenerateLoadIC() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3379,7 +3514,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3392,7 +3527,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() { BIND(&if_handler); LazyLoadICParameters p([=] { return context; }, receiver, - [=] { return name; }, slot, vector); + [=] { return name; }, [=] { return slot; }, vector); HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit); BIND(&miss); @@ -3405,8 +3540,8 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); + TNode<FeedbackVector> 
vector = CAST(Parameter(Descriptor::kVector)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ExitPoint direct_exit(this); @@ -3414,8 +3549,7 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() { Label if_handler(this, &var_handler), miss(this, Label::kDeferred); TNode<Map> receiver_map = LoadReceiverMap(receiver); - TNode<MaybeObject> feedback_element = - LoadFeedbackVectorSlot(vector, slot, 0, SMI_PARAMETERS); + TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(vector, slot); TNode<HeapObject> feedback = CAST(feedback_element); LoadICParameters p(context, receiver, name, slot, vector); @@ -3439,7 +3573,7 @@ void AccessorAssembler::GenerateLoadIC_NoFeedback() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); LoadICParameters p(context, receiver, name, slot, UndefinedConstant()); @@ -3475,13 +3609,17 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) { using Descriptor = LoadGlobalWithVectorDescriptor; TNode<Name> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); + TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); ExitPoint direct_exit(this); LoadGlobalIC( - vector, slot, + vector, + // lazy_smi_slot + [=] { return slot; }, + // lazy_slot + [=] { return Unsigned(SmiUntag(slot)); }, // lazy_context [=] { return context; }, // lazy_name @@ -3506,7 +3644,7 @@ void AccessorAssembler::GenerateKeyedLoadIC() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3519,7 +3657,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3532,7 +3670,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<FeedbackVector> vector = LoadFeedbackVectorForStub(); @@ -3545,7 +3683,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<FeedbackVector> vector = LoadFeedbackVectorForStub(); @@ -3558,7 +3696,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = 
CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3571,7 +3709,7 @@ void AccessorAssembler::GenerateStoreGlobalIC() { TNode<Object> name = CAST(Parameter(Descriptor::kName)); Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3584,7 +3722,7 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() { TNode<Object> name = CAST(Parameter(Descriptor::kName)); Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<FeedbackVector> vector = LoadFeedbackVectorForStub(); @@ -3597,7 +3735,7 @@ void AccessorAssembler::GenerateStoreIC() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3611,7 +3749,7 @@ void AccessorAssembler::GenerateStoreICTrampoline() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<FeedbackVector> vector = LoadFeedbackVectorForStub(); @@ -3625,7 +3763,7 @@ void AccessorAssembler::GenerateKeyedStoreIC() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3639,7 +3777,7 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TNode<FeedbackVector> vector = LoadFeedbackVectorForStub(); @@ -3653,7 +3791,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() { Node* array = Parameter(Descriptor::kReceiver); TNode<Object> index = CAST(Parameter(Descriptor::kName)); Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3671,7 +3809,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() { // can be tail called from it. However, the feedback slot and vector are not // used. 
- TNode<Context> native_context = LoadNativeContext(context); + TNode<NativeContext> native_context = LoadNativeContext(context); TNode<JSFunction> object_fn = CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); TNode<Map> initial_map = CAST( @@ -3724,7 +3862,7 @@ void AccessorAssembler::GenerateCloneObjectIC() { TNode<Object> source = CAST(Parameter(Descriptor::kSource)); TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags)); TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); - TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector)); + TNode<HeapObject> maybe_vector = CAST(Parameter(Descriptor::kVector)); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); TVARIABLE(MaybeObject, var_handler); Label if_handler(this, &var_handler), miss(this, Label::kDeferred), @@ -3734,10 +3872,11 @@ void AccessorAssembler::GenerateCloneObjectIC() { TNode<Map> source_map = LoadReceiverMap(source); GotoIf(IsDeprecatedMap(source_map), &miss); - GotoIf(IsUndefined(vector), &slow); + GotoIf(IsUndefined(maybe_vector), &slow); - TNode<MaybeObject> feedback = TryMonomorphicCase( - slot, vector, source_map, &if_handler, &var_handler, &try_polymorphic); + TNode<MaybeObject> feedback = + TryMonomorphicCase(slot, CAST(maybe_vector), source_map, &if_handler, + &var_handler, &try_polymorphic); BIND(&if_handler); { @@ -3801,30 +3940,28 @@ void AccessorAssembler::GenerateCloneObjectIC() { // Just copy the fields as raw data (pretending that there are no mutable // HeapNumbers). This doesn't need write barriers. - BuildFastLoop( + BuildFastLoop<IntPtrT>( source_start, source_size, - [=](Node* field_index) { - TNode<IntPtrT> field_offset = - TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)); + [=](TNode<IntPtrT> field_index) { + TNode<IntPtrT> field_offset = TimesTaggedSize(field_index); TNode<TaggedT> field = LoadObjectField<TaggedT>(CAST(source), field_offset); TNode<IntPtrT> result_offset = IntPtrAdd(field_offset, field_offset_difference); StoreObjectFieldNoWriteBarrier(object, result_offset, field); }, - 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); // If mutable HeapNumbers can occur, we need to go through the {object} // again here and properly clone them. We use a second loop here to // ensure that the GC (and heap verifier) always sees properly initialized // objects, i.e. never hits undefined values in double fields. 
if (!FLAG_unbox_double_fields) { - BuildFastLoop( + BuildFastLoop<IntPtrT>( source_start, source_size, - [=](Node* field_index) { - TNode<IntPtrT> result_offset = - IntPtrAdd(TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)), - field_offset_difference); + [=](TNode<IntPtrT> field_index) { + TNode<IntPtrT> result_offset = IntPtrAdd( + TimesTaggedSize(field_index), field_offset_difference); TNode<Object> field = LoadObjectField(object, result_offset); Label if_done(this), if_mutableheapnumber(this, Label::kDeferred); GotoIf(TaggedIsSmi(field), &if_done); @@ -3838,7 +3975,7 @@ void AccessorAssembler::GenerateCloneObjectIC() { } BIND(&if_done); }, - 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); } Return(object); @@ -3867,14 +4004,15 @@ void AccessorAssembler::GenerateCloneObjectIC() { BIND(&slow); { TailCallBuiltin(Builtins::kCloneObjectIC_Slow, context, source, flags, slot, - vector); + maybe_vector); } BIND(&miss); { Comment("CloneObjectIC_miss"); - TNode<HeapObject> map_or_result = CAST(CallRuntime( - Runtime::kCloneObjectIC_Miss, context, source, flags, slot, vector)); + TNode<HeapObject> map_or_result = + CAST(CallRuntime(Runtime::kCloneObjectIC_Miss, context, source, flags, + slot, maybe_vector)); var_handler = UncheckedCast<MaybeObject>(map_or_result); GotoIf(IsMap(map_or_result), &if_handler); CSA_ASSERT(this, IsJSObject(map_or_result)); @@ -3887,7 +4025,7 @@ void AccessorAssembler::GenerateKeyedHasIC() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3911,7 +4049,7 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() { Node* receiver = Parameter(Descriptor::kReceiver); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* slot = Parameter(Descriptor::kSlot); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); Node* vector = Parameter(Descriptor::kVector); TNode<Context> context = CAST(Parameter(Descriptor::kContext)); @@ -3919,5 +4057,54 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() { KeyedLoadICPolymorphicName(&p, LoadAccessMode::kHas); } +void AccessorAssembler::BranchIfPrototypesHaveNoElements( + TNode<Map> receiver_map, Label* definitely_no_elements, + Label* possibly_elements) { + TVARIABLE(Map, var_map, receiver_map); + Label loop_body(this, &var_map); + TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant(); + TNode<NumberDictionary> empty_slow_element_dictionary = + EmptySlowElementDictionaryConstant(); + Goto(&loop_body); + + BIND(&loop_body); + { + TNode<Map> map = var_map.value(); + TNode<HeapObject> prototype = LoadMapPrototype(map); + GotoIf(IsNull(prototype), definitely_no_elements); + TNode<Map> prototype_map = LoadMap(prototype); + TNode<Uint16T> prototype_instance_type = LoadMapInstanceType(prototype_map); + + // Pessimistically assume elements if a Proxy, Special API Object, + // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this + // instance type check, it's not necessary to check for interceptors or + // access checks. 
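BranchIfPrototypesHaveNoElements, added in the hunk above, walks the prototype chain and bails out as soon as a prototype could contribute elements (it pessimistically treats proxies, special API objects, and wrappers as "possibly elements", with an empty-string wrapper exception omitted here). A simplified analogue with hypothetical types:

struct ProtoObject {
  const ProtoObject* prototype;  // nullptr terminates the chain
  bool is_custom_receiver;       // proxies, special API objects, wrappers
  int element_count;             // 0 means "empty elements backing store"
};

// Returns true only if no object on the prototype chain can have elements.
bool PrototypesHaveNoElements(const ProtoObject* receiver) {
  for (const ProtoObject* p = receiver->prototype; p != nullptr;
       p = p->prototype) {
    if (p->is_custom_receiver) return false;  // interceptors, access checks
    if (p->element_count != 0) return false;  // fast or dictionary elements
  }
  return true;  // reached null without finding any elements
}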
+ Label if_custom(this, Label::kDeferred), if_notcustom(this); + Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type), + &if_custom, &if_notcustom); + + BIND(&if_custom); + { + // For string JSPrimitiveWrapper wrappers we still support the checks as + // long as they wrap the empty string. + GotoIfNot( + InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), + possibly_elements); + TNode<Object> prototype_value = + LoadJSPrimitiveWrapperValue(CAST(prototype)); + Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements); + } + + BIND(&if_notcustom); + { + TNode<FixedArrayBase> prototype_elements = LoadElements(CAST(prototype)); + var_map = prototype_map; + GotoIf(TaggedEqual(prototype_elements, empty_fixed_array), &loop_body); + Branch(TaggedEqual(prototype_elements, empty_slow_element_dictionary), + &loop_body, possibly_elements); + } + } +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/ic/accessor-assembler.h b/chromium/v8/src/ic/accessor-assembler.h index 0de2292fd6d..ccc2de9323a 100644 --- a/chromium/v8/src/ic/accessor-assembler.h +++ b/chromium/v8/src/ic/accessor-assembler.h @@ -5,6 +5,7 @@ #ifndef V8_IC_ACCESSOR_ASSEMBLER_H_ #define V8_IC_ACCESSOR_ASSEMBLER_H_ +#include "src/base/optional.h" #include "src/codegen/code-stub-assembler.h" namespace v8 { @@ -19,10 +20,6 @@ class ExitPoint; class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { public: using Node = compiler::Node; - template <class T> - using TNode = compiler::TNode<T>; - template <class T> - using SloppyTNode = compiler::SloppyTNode<T>; explicit AccessorAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} @@ -69,7 +66,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { struct LoadICParameters { LoadICParameters(TNode<Context> context, Node* receiver, TNode<Object> name, - Node* slot, Node* vector, Node* holder = nullptr) + TNode<Smi> slot, Node* vector, Node* holder = nullptr) : context_(context), receiver_(receiver), name_(name), @@ -88,7 +85,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { TNode<Context> context() const { return context_; } Node* receiver() const { return receiver_; } TNode<Object> name() const { return name_; } - Node* slot() const { return slot_; } + TNode<Smi> slot() const { return slot_; } Node* vector() const { return vector_; } Node* holder() const { return holder_; } @@ -96,15 +93,15 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { TNode<Context> context_; Node* receiver_; TNode<Object> name_; - Node* slot_; + TNode<Smi> slot_; Node* vector_; Node* holder_; }; struct LazyLoadICParameters { LazyLoadICParameters(LazyNode<Context> context, Node* receiver, - LazyNode<Object> name, Node* slot, Node* vector, - Node* holder = nullptr) + LazyNode<Object> name, LazyNode<Smi> slot, + Node* vector, Node* holder = nullptr) : context_(context), receiver_(receiver), name_(name), @@ -114,19 +111,17 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { explicit LazyLoadICParameters(const LoadICParameters* p) : receiver_(p->receiver()), - slot_(p->slot()), vector_(p->vector()), holder_(p->holder()) { - TNode<Context> p_context = p->context(); - context_ = [=] { return p_context; }; - TNode<Object> p_name = p->name(); - name_ = [=] { return p_name; }; + slot_ = [=] { return p->slot(); }; + context_ = [=] { return p->context(); }; + name_ = [=] { return p->name(); }; } TNode<Context> context() const { return 
context_(); } Node* receiver() const { return receiver_; } TNode<Object> name() const { return name_(); } - Node* slot() const { return slot_; } + TNode<Smi> slot() const { return slot_(); } Node* vector() const { return vector_; } Node* holder() const { return holder_; } @@ -134,16 +129,17 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { LazyNode<Context> context_; Node* receiver_; LazyNode<Object> name_; - Node* slot_; + LazyNode<Smi> slot_; Node* vector_; Node* holder_; }; - void LoadGlobalIC(Node* vector, Node* slot, + void LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector, + const LazyNode<Smi>& lazy_smi_slot, + const LazyNode<UintPtrT>& lazy_slot, const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name, TypeofMode typeof_mode, - ExitPoint* exit_point, - ParameterMode slot_mode = SMI_PARAMETERS); + ExitPoint* exit_point); // Specialized LoadIC for inlined bytecode handler, hand-tuned to omit frame // construction on common paths. @@ -157,8 +153,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { protected: struct StoreICParameters : public LoadICParameters { StoreICParameters(TNode<Context> context, Node* receiver, - TNode<Object> name, SloppyTNode<Object> value, Node* slot, - Node* vector) + TNode<Object> name, SloppyTNode<Object> value, + TNode<Smi> slot, Node* vector) : LoadICParameters(context, receiver, name, slot, vector), value_(value) {} @@ -185,20 +181,22 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { Label* miss, StoreTransitionMapFlags flags); - void JumpIfDataProperty(Node* details, Label* writable, Label* readonly); + void JumpIfDataProperty(TNode<Uint32T> details, Label* writable, + Label* readonly); void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield3 = nullptr); - void OverwriteExistingFastDataProperty(Node* object, Node* object_map, - Node* descriptors, - Node* descriptor_name_index, - Node* details, TNode<Object> value, - Label* slow, + void OverwriteExistingFastDataProperty(SloppyTNode<HeapObject> object, + TNode<Map> object_map, + TNode<DescriptorArray> descriptors, + TNode<IntPtrT> descriptor_name_index, + TNode<Uint32T> details, + TNode<Object> value, Label* slow, bool do_transitioning_store); - void CheckFieldType(TNode<DescriptorArray> descriptors, Node* name_index, - TNode<Word32T> representation, Node* value, - Label* bailout); + void CheckFieldType(TNode<DescriptorArray> descriptors, + TNode<IntPtrT> name_index, TNode<Word32T> representation, + Node* value, Label* bailout); private: // Stub generation entry points. @@ -232,12 +230,11 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { // IC dispatcher behavior. // Checks monomorphic case. Returns {feedback} entry of the vector. 
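TryMonomorphicCase, declared above, checks whether the feedback slot records the receiver's current map and, if so, dispatches to the handler stored alongside it. A rough sketch of that dispatch shape, ignoring weak references and the polymorphic/megamorphic states; the types below are invented for illustration:

struct Map;      // stand-in for a hidden class
struct Handler;  // stand-in for a compiled IC handler

struct FeedbackSlot {
  const Map* cached_map;   // monomorphic feedback: the one map seen so far
  const Handler* handler;  // handler specialized for that map
};

enum class Outcome { kHandled, kMiss };

Outcome TryMonomorphic(const FeedbackSlot& slot, const Map* receiver_map,
                       const Handler** out_handler) {
  if (slot.cached_map != receiver_map) return Outcome::kMiss;  // try other states
  *out_handler = slot.handler;  // fast path: reuse the specialized handler
  return Outcome::kHandled;
}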
- TNode<MaybeObject> TryMonomorphicCase(Node* slot, Node* vector, - Node* receiver_map, Label* if_handler, - TVariable<MaybeObject>* var_handler, - Label* if_miss); - void HandlePolymorphicCase(Node* receiver_map, TNode<WeakFixedArray> feedback, - Label* if_handler, + TNode<MaybeObject> TryMonomorphicCase( + TNode<Smi> slot, TNode<FeedbackVector> vector, TNode<Map> receiver_map, + Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss); + void HandlePolymorphicCase(TNode<Map> receiver_map, + TNode<WeakFixedArray> feedback, Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss); @@ -249,15 +246,14 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { ElementSupport support_elements = kOnlyProperties, LoadAccessMode access_mode = LoadAccessMode::kLoad); - void HandleLoadICSmiHandlerCase(const LazyLoadICParameters* p, Node* holder, - SloppyTNode<Smi> smi_handler, - SloppyTNode<Object> handler, Label* miss, - ExitPoint* exit_point, - OnNonExistent on_nonexistent, - ElementSupport support_elements, - LoadAccessMode access_mode); + void HandleLoadICSmiHandlerCase( + const LazyLoadICParameters* p, SloppyTNode<HeapObject> holder, + SloppyTNode<Smi> smi_handler, SloppyTNode<Object> handler, Label* miss, + ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent, + ElementSupport support_elements, LoadAccessMode access_mode); - void HandleLoadICProtoHandler(const LazyLoadICParameters* p, Node* handler, + void HandleLoadICProtoHandler(const LazyLoadICParameters* p, + TNode<DataHandler> handler, Variable* var_holder, Variable* var_smi_handler, Label* if_smi_handler, Label* miss, ExitPoint* exit_point, ICMode ic_mode, @@ -273,40 +269,43 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { TNode<WordT> handler_word, TNode<DataHandler> handler, TNode<IntPtrT> handler_kind, ExitPoint* exit_point); - void HandleLoadField(Node* holder, Node* handler_word, + void HandleLoadField(SloppyTNode<JSObject> holder, TNode<WordT> handler_word, Variable* var_double_value, Label* rebox_double, - ExitPoint* exit_point); + Label* miss, ExitPoint* exit_point); void EmitAccessCheck(TNode<Context> expected_native_context, - TNode<Context> context, Node* receiver, + TNode<Context> context, TNode<Object> receiver, Label* can_access, Label* miss); void HandleLoadICSmiHandlerLoadNamedCase( - const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind, - TNode<WordT> handler_word, Label* rebox_double, - Variable* var_double_value, SloppyTNode<Object> handler, Label* miss, - ExitPoint* exit_point, OnNonExistent on_nonexistent, + const LazyLoadICParameters* p, TNode<HeapObject> holder, + TNode<IntPtrT> handler_kind, TNode<WordT> handler_word, + Label* rebox_double, Variable* var_double_value, + SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point, + ICMode ic_mode, OnNonExistent on_nonexistent, ElementSupport support_elements); void HandleLoadICSmiHandlerHasNamedCase(const LazyLoadICParameters* p, - Node* holder, + TNode<HeapObject> holder, TNode<IntPtrT> handler_kind, - Label* miss, ExitPoint* exit_point); + Label* miss, ExitPoint* exit_point, + ICMode ic_mode); // LoadGlobalIC implementation. 
- void LoadGlobalIC_TryPropertyCellCase( - TNode<FeedbackVector> vector, Node* slot, - const LazyNode<Context>& lazy_context, ExitPoint* exit_point, - Label* try_handler, Label* miss, - ParameterMode slot_mode = SMI_PARAMETERS); + void LoadGlobalIC_TryPropertyCellCase(TNode<FeedbackVector> vector, + TNode<UintPtrT> slot, + const LazyNode<Context>& lazy_context, + ExitPoint* exit_point, + Label* try_handler, Label* miss); - void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector, Node* slot, + void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector, + TNode<UintPtrT> slot, + const LazyNode<Smi>& lazy_smi_slot, const LazyNode<Context>& lazy_context, const LazyNode<Name>& lazy_name, TypeofMode typeof_mode, - ExitPoint* exit_point, Label* miss, - ParameterMode slot_mode); + ExitPoint* exit_point, Label* miss); // StoreIC implementation. @@ -314,59 +313,66 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { TNode<StoreHandler> handler, Label* miss, ICMode ic_mode, ElementSupport support_elements); - void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder, - Node* value, Label* miss); - void HandleStoreFieldAndReturn(Node* handler_word, Node* holder, - Representation representation, Node* value, - Label* miss); + void HandleStoreICSmiHandlerCase(SloppyTNode<Word32T> handler_word, + SloppyTNode<JSObject> holder, + SloppyTNode<Object> value, Label* miss); + void HandleStoreFieldAndReturn(TNode<Word32T> handler_word, + TNode<JSObject> holder, TNode<Object> value, + base::Optional<TNode<Float64T>> double_value, + Representation representation, Label* miss); void CheckPrototypeValidityCell(TNode<Object> maybe_validity_cell, Label* miss); - void HandleStoreICNativeDataProperty(const StoreICParameters* p, Node* holder, - Node* handler_word); + void HandleStoreICNativeDataProperty(const StoreICParameters* p, + SloppyTNode<HeapObject> holder, + TNode<Word32T> handler_word); void HandleStoreToProxy(const StoreICParameters* p, Node* proxy, Label* miss, ElementSupport support_elements); - void HandleStoreAccessor(const StoreICParameters* p, Node* holder, - Node* handler_word); + void HandleStoreAccessor(const StoreICParameters* p, + SloppyTNode<HeapObject> holder, + TNode<Word32T> handler_word); // KeyedLoadIC_Generic implementation. - void GenericElementLoad(Node* receiver, Node* receiver_map, + void GenericElementLoad(Node* receiver, TNode<Map> receiver_map, SloppyTNode<Int32T> instance_type, Node* index, Label* slow); enum UseStubCache { kUseStubCache, kDontUseStubCache }; - void GenericPropertyLoad(Node* receiver, Node* receiver_map, + void GenericPropertyLoad(Node* receiver, TNode<Map> receiver_map, SloppyTNode<Int32T> instance_type, const LoadICParameters* p, Label* slow, UseStubCache use_stub_cache = kUseStubCache); // Low-level helpers. 
- using OnCodeHandler = std::function<void(Node* code_handler)>; - using OnFoundOnReceiver = - std::function<void(Node* properties, Node* name_index)>; + using OnCodeHandler = std::function<void(TNode<Code> code_handler)>; + using OnFoundOnReceiver = std::function<void(TNode<NameDictionary> properties, + TNode<IntPtrT> name_index)>; template <typename ICHandler, typename ICParameters> - Node* HandleProtoHandler(const ICParameters* p, Node* handler, - const OnCodeHandler& on_code_handler, - const OnFoundOnReceiver& on_found_on_receiver, - Label* miss, ICMode ic_mode); - - Node* PrepareValueForStore(Node* handler_word, Node* holder, - Representation representation, Node* value, - Label* bailout); + TNode<Object> HandleProtoHandler( + const ICParameters* p, TNode<DataHandler> handler, + const OnCodeHandler& on_code_handler, + const OnFoundOnReceiver& on_found_on_receiver, Label* miss, + ICMode ic_mode); + + void CheckHeapObjectTypeMatchesDescriptor(TNode<Word32T> handler_word, + TNode<JSObject> holder, + TNode<Object> value, + Label* bailout); + // Double fields store double values in a mutable box, where stores are + // writes into this box rather than HeapNumber assignment. + void CheckDescriptorConsidersNumbersMutable(TNode<Word32T> handler_word, + TNode<JSObject> holder, + Label* bailout); // Extends properties backing store by JSObject::kFieldsAdded elements, // returns updated properties backing store. Node* ExtendPropertiesBackingStore(Node* object, Node* index); - void StoreNamedField(Node* handler_word, Node* object, bool is_inobject, - Representation representation, Node* value, - Label* bailout); - void EmitFastElementsBoundsCheck(Node* object, Node* elements, Node* intptr_index, Node* is_jsarray_condition, Label* miss); @@ -379,7 +385,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { LoadAccessMode access_mode = LoadAccessMode::kLoad); void NameDictionaryNegativeLookup(Node* object, SloppyTNode<Name> name, Label* miss); - TNode<BoolT> IsPropertyDetailsConst(Node* details); + TNode<BoolT> IsPropertyDetailsConst(TNode<Uint32T> details); // Stub cache access helpers. @@ -395,6 +401,10 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler { TNode<Map> map, Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss); + + void BranchIfPrototypesHaveNoElements(TNode<Map> receiver_map, + Label* definitely_no_elements, + Label* possibly_elements); }; // Abstraction over direct and indirect exit points. Direct exits correspond to diff --git a/chromium/v8/src/ic/binary-op-assembler.cc b/chromium/v8/src/ic/binary-op-assembler.cc index f6bec6eab9f..ee488100e9e 100644 --- a/chromium/v8/src/ic/binary-op-assembler.cc +++ b/chromium/v8/src/ic/binary-op-assembler.cc @@ -9,21 +9,19 @@ namespace v8 { namespace internal { -using compiler::Node; - -Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, - Node* rhs, Node* slot_id, - Node* feedback_vector, - bool rhs_is_smi) { +TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback( + TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi) { // Shared entry for floating point addition. 
Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred), check_rhsisoddball(this, Label::kDeferred), call_with_oddball_feedback(this), call_with_any_feedback(this), call_add_stub(this), end(this), bigint(this, Label::kDeferred); - VARIABLE(var_fadd_lhs, MachineRepresentation::kFloat64); - VARIABLE(var_fadd_rhs, MachineRepresentation::kFloat64); - VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned); - VARIABLE(var_result, MachineRepresentation::kTagged); + TVARIABLE(Float64T, var_fadd_lhs); + TVARIABLE(Float64T, var_fadd_rhs); + TVARIABLE(Smi, var_type_feedback); + TVARIABLE(Object, var_result); // Check if the {lhs} is a Smi or a HeapObject. Label if_lhsissmi(this); @@ -32,13 +30,14 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, // both Smi and Number operations, so this path should not be marked as // Deferred. Label if_lhsisnotsmi(this, - rhs_is_smi ? Label::kDeferred : Label::kNonDeferred); + rhs_known_smi ? Label::kDeferred : Label::kNonDeferred); Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi); BIND(&if_lhsissmi); { Comment("lhs is Smi"); - if (!rhs_is_smi) { + TNode<Smi> lhs_smi = CAST(lhs); + if (!rhs_known_smi) { // Check if the {rhs} is also a Smi. Label if_rhsissmi(this), if_rhsisnotsmi(this); Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi); @@ -46,10 +45,11 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, BIND(&if_rhsisnotsmi); { // Check if the {rhs} is a HeapNumber. - GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball); + TNode<HeapObject> rhs_heap_object = CAST(rhs); + GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball); - var_fadd_lhs.Bind(SmiToFloat64(lhs)); - var_fadd_rhs.Bind(LoadHeapNumberValue(rhs)); + var_fadd_lhs = SmiToFloat64(lhs_smi); + var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object); Goto(&do_fadd); } @@ -62,21 +62,21 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, // is for AddSmi operation. For the normal Add operation, we want to fast // path both Smi and Number operations, so this path should not be marked // as Deferred. + TNode<Smi> rhs_smi = CAST(rhs); Label if_overflow(this, - rhs_is_smi ? Label::kDeferred : Label::kNonDeferred); - TNode<Smi> smi_result = TrySmiAdd(CAST(lhs), CAST(rhs), &if_overflow); + rhs_known_smi ? Label::kDeferred : Label::kNonDeferred); + TNode<Smi> smi_result = TrySmiAdd(lhs_smi, rhs_smi, &if_overflow); // Not overflowed. { - var_type_feedback.Bind( - SmiConstant(BinaryOperationFeedback::kSignedSmall)); - var_result.Bind(smi_result); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall); + var_result = smi_result; Goto(&end); } BIND(&if_overflow); { - var_fadd_lhs.Bind(SmiToFloat64(lhs)); - var_fadd_rhs.Bind(SmiToFloat64(rhs)); + var_fadd_lhs = SmiToFloat64(lhs_smi); + var_fadd_rhs = SmiToFloat64(rhs_smi); Goto(&do_fadd); } } @@ -85,9 +85,10 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, BIND(&if_lhsisnotsmi); { // Check if {lhs} is a HeapNumber. - GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber); + TNode<HeapObject> lhs_heap_object = CAST(lhs); + GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber); - if (!rhs_is_smi) { + if (!rhs_known_smi) { // Check if the {rhs} is Smi. Label if_rhsissmi(this), if_rhsisnotsmi(this); Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi); @@ -95,29 +96,30 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, BIND(&if_rhsisnotsmi); { // Check if the {rhs} is a HeapNumber. 
- GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball); + TNode<HeapObject> rhs_heap_object = CAST(rhs); + GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball); - var_fadd_lhs.Bind(LoadHeapNumberValue(lhs)); - var_fadd_rhs.Bind(LoadHeapNumberValue(rhs)); + var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object); + var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object); Goto(&do_fadd); } BIND(&if_rhsissmi); } { - var_fadd_lhs.Bind(LoadHeapNumberValue(lhs)); - var_fadd_rhs.Bind(SmiToFloat64(rhs)); + var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object); + var_fadd_rhs = SmiToFloat64(CAST(rhs)); Goto(&do_fadd); } } BIND(&do_fadd); { - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber); TNode<Float64T> value = Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value()); TNode<HeapNumber> result = AllocateHeapNumberWithValue(value); - var_result.Bind(result); + var_result = result; Goto(&end); } @@ -125,7 +127,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, { // No checks on rhs are done yet. We just know lhs is not a number or Smi. Label if_lhsisoddball(this), if_lhsisnotoddball(this); - TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs); + TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs)); TNode<BoolT> lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE); Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball); @@ -135,39 +137,40 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback); // Check if {rhs} is a HeapNumber. - Branch(IsHeapNumber(rhs), &call_with_oddball_feedback, + Branch(IsHeapNumber(CAST(rhs)), &call_with_oddball_feedback, &check_rhsisoddball); } BIND(&if_lhsisnotoddball); { + // Check if the {rhs} is a smi, and exit the string and bigint check early + // if it is. + GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback); + TNode<HeapObject> rhs_heap_object = CAST(rhs); + Label lhs_is_string(this), lhs_is_bigint(this); GotoIf(IsStringInstanceType(lhs_instance_type), &lhs_is_string); GotoIf(IsBigIntInstanceType(lhs_instance_type), &lhs_is_bigint); Goto(&call_with_any_feedback); BIND(&lhs_is_bigint); - { - GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback); - Branch(IsBigInt(rhs), &bigint, &call_with_any_feedback); - } + Branch(IsBigInt(rhs_heap_object), &bigint, &call_with_any_feedback); BIND(&lhs_is_string); - // Check if the {rhs} is a smi, and exit the string check early if it is. - GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback); - - TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs); + { + TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs_heap_object); - // Exit unless {rhs} is a string. Since {lhs} is a string we no longer - // need an Oddball check. - GotoIfNot(IsStringInstanceType(rhs_instance_type), - &call_with_any_feedback); + // Exit unless {rhs} is a string. Since {lhs} is a string we no longer + // need an Oddball check. 
+ GotoIfNot(IsStringInstanceType(rhs_instance_type), + &call_with_any_feedback); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kString)); - var_result.Bind( - CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kString); + var_result = + CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs); - Goto(&end); + Goto(&end); + } } } @@ -175,7 +178,7 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, { // Check if rhs is an oddball. At this point we know lhs is either a // Smi or number or oddball and rhs is not a number or Smi. - TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs); + TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs)); TNode<BoolT> rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE); GotoIf(rhs_is_oddball, &call_with_oddball_feedback); @@ -186,59 +189,58 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs, { // Both {lhs} and {rhs} are of BigInt type. Label bigint_too_big(this); - var_result.Bind( - CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs)); + var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs); // Check for sentinel that signals BigIntTooBig exception. GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt); Goto(&end); BIND(&bigint_too_big); { // Update feedback to prevent deopt loop. UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny), - feedback_vector, slot_id); + maybe_feedback_vector, slot_id); ThrowRangeError(context, MessageTemplate::kBigIntTooBig); } } BIND(&call_with_oddball_feedback); { - var_type_feedback.Bind( - SmiConstant(BinaryOperationFeedback::kNumberOrOddball)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball); Goto(&call_add_stub); } BIND(&call_with_any_feedback); { - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny); Goto(&call_add_stub); } BIND(&call_add_stub); { - var_result.Bind(CallBuiltin(Builtins::kAdd, context, lhs, rhs)); + var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs); Goto(&end); } BIND(&end); - UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id); + UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id); return var_result.value(); } -Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( - Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector, +TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback( + TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector, const SmiOperation& smiOperation, const FloatOperation& floatOperation, - Operation op, bool rhs_is_smi) { + Operation op, bool rhs_known_smi) { Label do_float_operation(this), end(this), call_stub(this), check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this), if_lhsisnotnumber(this, Label::kDeferred), if_bigint(this, Label::kDeferred); - VARIABLE(var_float_lhs, MachineRepresentation::kFloat64); - VARIABLE(var_float_rhs, MachineRepresentation::kFloat64); - VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned); - VARIABLE(var_result, MachineRepresentation::kTagged); + TVARIABLE(Float64T, var_float_lhs); + TVARIABLE(Float64T, 
var_float_rhs); + TVARIABLE(Smi, var_type_feedback); + TVARIABLE(Object, var_result); Label if_lhsissmi(this); // If rhs is known to be an Smi (in the SubSmi, MulSmi, DivSmi, ModSmi @@ -246,25 +248,28 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( // operation, we want to fast path both Smi and Number operations, so this // path should not be marked as Deferred. Label if_lhsisnotsmi(this, - rhs_is_smi ? Label::kDeferred : Label::kNonDeferred); + rhs_known_smi ? Label::kDeferred : Label::kNonDeferred); Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi); // Check if the {lhs} is a Smi or a HeapObject. BIND(&if_lhsissmi); { Comment("lhs is Smi"); - if (!rhs_is_smi) { + TNode<Smi> lhs_smi = CAST(lhs); + if (!rhs_known_smi) { // Check if the {rhs} is also a Smi. Label if_rhsissmi(this), if_rhsisnotsmi(this); Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi); + BIND(&if_rhsisnotsmi); { // Check if {rhs} is a HeapNumber. - GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball); + TNode<HeapObject> rhs_heap_object = CAST(rhs); + GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball); // Perform a floating point operation. - var_float_lhs.Bind(SmiToFloat64(lhs)); - var_float_rhs.Bind(LoadHeapNumberValue(rhs)); + var_float_lhs = SmiToFloat64(lhs_smi); + var_float_rhs = LoadHeapNumberValue(rhs_heap_object); Goto(&do_float_operation); } @@ -273,7 +278,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( { Comment("perform smi operation"); - var_result.Bind(smiOperation(lhs, rhs, &var_type_feedback)); + var_result = smiOperation(lhs_smi, CAST(rhs), &var_type_feedback); Goto(&end); } } @@ -282,9 +287,10 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( { Comment("lhs is not Smi"); // Check if the {lhs} is a HeapNumber. - GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber); + TNode<HeapObject> lhs_heap_object = CAST(lhs); + GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber); - if (!rhs_is_smi) { + if (!rhs_known_smi) { // Check if the {rhs} is a Smi. Label if_rhsissmi(this), if_rhsisnotsmi(this); Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi); @@ -292,11 +298,12 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( BIND(&if_rhsisnotsmi); { // Check if the {rhs} is a HeapNumber. - GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball); + TNode<HeapObject> rhs_heap_object = CAST(rhs); + GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball); // Perform a floating point operation. - var_float_lhs.Bind(LoadHeapNumberValue(lhs)); - var_float_rhs.Bind(LoadHeapNumberValue(rhs)); + var_float_lhs = LoadHeapNumberValue(lhs_heap_object); + var_float_rhs = LoadHeapNumberValue(rhs_heap_object); Goto(&do_float_operation); } @@ -305,19 +312,19 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( { // Perform floating point operation. 
- var_float_lhs.Bind(LoadHeapNumberValue(lhs)); - var_float_rhs.Bind(SmiToFloat64(rhs)); + var_float_lhs = LoadHeapNumberValue(lhs_heap_object); + var_float_rhs = SmiToFloat64(CAST(rhs)); Goto(&do_float_operation); } } BIND(&do_float_operation); { - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber)); - Node* lhs_value = var_float_lhs.value(); - Node* rhs_value = var_float_rhs.value(); - Node* value = floatOperation(lhs_value, rhs_value); - var_result.Bind(AllocateHeapNumberWithValue(value)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber); + TNode<Float64T> lhs_value = var_float_lhs.value(); + TNode<Float64T> rhs_value = var_float_rhs.value(); + TNode<Float64T> value = floatOperation(lhs_value, rhs_value); + var_result = AllocateHeapNumberWithValue(value); Goto(&end); } @@ -325,7 +332,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( { // No checks on rhs are done yet. We just know lhs is not a number or Smi. Label if_left_bigint(this), if_left_oddball(this); - TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs); + TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs)); GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_left_bigint); TNode<BoolT> lhs_is_oddball = InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE); @@ -338,18 +345,18 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( BIND(&if_rhsissmi); { - var_type_feedback.Bind( - SmiConstant(BinaryOperationFeedback::kNumberOrOddball)); + var_type_feedback = + SmiConstant(BinaryOperationFeedback::kNumberOrOddball); Goto(&call_stub); } BIND(&if_rhsisnotsmi); { // Check if {rhs} is a HeapNumber. - GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball); + GotoIfNot(IsHeapNumber(CAST(rhs)), &check_rhsisoddball); - var_type_feedback.Bind( - SmiConstant(BinaryOperationFeedback::kNumberOrOddball)); + var_type_feedback = + SmiConstant(BinaryOperationFeedback::kNumberOrOddball); Goto(&call_stub); } } @@ -357,7 +364,7 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( BIND(&if_left_bigint); { GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback); - Branch(IsBigInt(rhs), &if_bigint, &call_with_any_feedback); + Branch(IsBigInt(CAST(rhs)), &if_bigint, &call_with_any_feedback); } } @@ -365,39 +372,38 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( { // Check if rhs is an oddball. At this point we know lhs is either a // Smi or number or oddball and rhs is not a number or Smi. - TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs); + TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs)); GotoIf(IsBigIntInstanceType(rhs_instance_type), &if_bigint); TNode<BoolT> rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE); GotoIfNot(rhs_is_oddball, &call_with_any_feedback); - var_type_feedback.Bind( - SmiConstant(BinaryOperationFeedback::kNumberOrOddball)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball); Goto(&call_stub); } // This handles the case where at least one input is a BigInt. 
BIND(&if_bigint); { - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt); if (op == Operation::kAdd) { - var_result.Bind(CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs)); + var_result = CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs); } else { - var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs, - SmiConstant(op))); + var_result = CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs, + SmiConstant(op)); } Goto(&end); } BIND(&call_with_any_feedback); { - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny); Goto(&call_stub); } BIND(&call_stub); { - Node* result; + TNode<Object> result; switch (op) { case Operation::kSubtract: result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs); @@ -414,34 +420,35 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback( default: UNREACHABLE(); } - var_result.Bind(result); + var_result = result; Goto(&end); } BIND(&end); - UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id); + UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id); return var_result.value(); } -Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs, - Node* rhs, Node* slot_id, - Node* feedback_vector, - bool rhs_is_smi) { - auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) { +TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback( + TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi) { + auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs, + TVariable<Smi>* var_type_feedback) { Label end(this); TVARIABLE(Number, var_result); // If rhs is known to be an Smi (for SubSmi) we want to fast path Smi // operation. For the normal Sub operation, we want to fast path both // Smi and Number operations, so this path should not be marked as Deferred. Label if_overflow(this, - rhs_is_smi ? Label::kDeferred : Label::kNonDeferred); - var_result = TrySmiSub(CAST(lhs), CAST(rhs), &if_overflow); - var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall)); + rhs_known_smi ? 
Label::kDeferred : Label::kNonDeferred); + var_result = TrySmiSub(lhs, rhs, &if_overflow); + *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall); Goto(&end); BIND(&if_overflow); { - var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber)); + *var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber); TNode<Float64T> value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs)); var_result = AllocateHeapNumberWithValue(value); Goto(&end); @@ -450,91 +457,97 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs, BIND(&end); return var_result.value(); }; - auto floatFunction = [=](Node* lhs, Node* rhs) { + auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) { return Float64Sub(lhs, rhs); }; return Generate_BinaryOperationWithFeedback( - context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction, - Operation::kSubtract, rhs_is_smi); + context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction, + floatFunction, Operation::kSubtract, rhs_known_smi); } -Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs, - Node* rhs, Node* slot_id, - Node* feedback_vector, - bool rhs_is_smi) { - auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) { - TNode<Number> result = SmiMul(CAST(lhs), CAST(rhs)); - var_type_feedback->Bind(SelectSmiConstant( +TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback( + TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi) { + auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs, + TVariable<Smi>* var_type_feedback) { + TNode<Number> result = SmiMul(lhs, rhs); + *var_type_feedback = SelectSmiConstant( TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, - BinaryOperationFeedback::kNumber)); + BinaryOperationFeedback::kNumber); return result; }; - auto floatFunction = [=](Node* lhs, Node* rhs) { + auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) { return Float64Mul(lhs, rhs); }; return Generate_BinaryOperationWithFeedback( - context, lhs, rhs, slot_id, feedback_vector, smiFunction, floatFunction, - Operation::kMultiply, rhs_is_smi); + context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction, + floatFunction, Operation::kMultiply, rhs_known_smi); } -Node* BinaryOpAssembler::Generate_DivideWithFeedback( - Node* context, Node* dividend, Node* divisor, Node* slot_id, - Node* feedback_vector, bool rhs_is_smi) { - auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) { - VARIABLE(var_result, MachineRepresentation::kTagged); +TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback( + TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi) { + auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs, + TVariable<Smi>* var_type_feedback) { + TVARIABLE(Object, var_result); // If rhs is known to be an Smi (for DivSmi) we want to fast path Smi // operation. For the normal Div operation, we want to fast path both // Smi and Number operations, so this path should not be marked as Deferred. - Label bailout(this, rhs_is_smi ? Label::kDeferred : Label::kNonDeferred), + Label bailout(this, rhs_known_smi ? 
Label::kDeferred : Label::kNonDeferred), end(this); - var_result.Bind(TrySmiDiv(CAST(lhs), CAST(rhs), &bailout)); - var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall)); + var_result = TrySmiDiv(lhs, rhs, &bailout); + *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall); Goto(&end); BIND(&bailout); { - var_type_feedback->Bind( - SmiConstant(BinaryOperationFeedback::kSignedSmallInputs)); + *var_type_feedback = + SmiConstant(BinaryOperationFeedback::kSignedSmallInputs); TNode<Float64T> value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs)); - var_result.Bind(AllocateHeapNumberWithValue(value)); + var_result = AllocateHeapNumberWithValue(value); Goto(&end); } BIND(&end); return var_result.value(); }; - auto floatFunction = [=](Node* lhs, Node* rhs) { + auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) { return Float64Div(lhs, rhs); }; return Generate_BinaryOperationWithFeedback( - context, dividend, divisor, slot_id, feedback_vector, smiFunction, - floatFunction, Operation::kDivide, rhs_is_smi); + context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction, + floatFunction, Operation::kDivide, rhs_known_smi); } -Node* BinaryOpAssembler::Generate_ModulusWithFeedback( - Node* context, Node* dividend, Node* divisor, Node* slot_id, - Node* feedback_vector, bool rhs_is_smi) { - auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) { - TNode<Number> result = SmiMod(CAST(lhs), CAST(rhs)); - var_type_feedback->Bind(SelectSmiConstant( +TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback( + TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi) { + auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs, + TVariable<Smi>* var_type_feedback) { + TNode<Number> result = SmiMod(lhs, rhs); + *var_type_feedback = SelectSmiConstant( TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, - BinaryOperationFeedback::kNumber)); + BinaryOperationFeedback::kNumber); return result; }; - auto floatFunction = [=](Node* lhs, Node* rhs) { + auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) { return Float64Mod(lhs, rhs); }; return Generate_BinaryOperationWithFeedback( - context, dividend, divisor, slot_id, feedback_vector, smiFunction, - floatFunction, Operation::kModulus, rhs_is_smi); + context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction, + floatFunction, Operation::kModulus, rhs_known_smi); } -Node* BinaryOpAssembler::Generate_ExponentiateWithFeedback( - Node* context, Node* base, Node* exponent, Node* slot_id, - Node* feedback_vector, bool rhs_is_smi) { +TNode<Object> BinaryOpAssembler::Generate_ExponentiateWithFeedback( + TNode<Context> context, TNode<Object> base, TNode<Object> exponent, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi) { // We currently don't optimize exponentiation based on feedback. 
TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny); - UpdateFeedback(dummy_feedback, feedback_vector, slot_id); + UpdateFeedback(dummy_feedback, maybe_feedback_vector, slot_id); return CallBuiltin(Builtins::kExponentiate, context, base, exponent); } diff --git a/chromium/v8/src/ic/binary-op-assembler.h b/chromium/v8/src/ic/binary-op-assembler.h index 26324660c85..37484909d42 100644 --- a/chromium/v8/src/ic/binary-op-assembler.h +++ b/chromium/v8/src/ic/binary-op-assembler.h @@ -17,44 +17,50 @@ class CodeAssemblerState; class BinaryOpAssembler : public CodeStubAssembler { public: - using Node = compiler::Node; - explicit BinaryOpAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - Node* Generate_AddWithFeedback(Node* context, Node* lhs, Node* rhs, - Node* slot_id, Node* feedback_vector, - bool rhs_is_smi); + TNode<Object> Generate_AddWithFeedback( + TNode<Context> context, TNode<Object> left, TNode<Object> right, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi); - Node* Generate_SubtractWithFeedback(Node* context, Node* lhs, Node* rhs, - Node* slot_id, Node* feedback_vector, - bool rhs_is_smi); + TNode<Object> Generate_SubtractWithFeedback( + TNode<Context> context, TNode<Object> left, TNode<Object> right, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi); - Node* Generate_MultiplyWithFeedback(Node* context, Node* lhs, Node* rhs, - Node* slot_id, Node* feedback_vector, - bool rhs_is_smi); + TNode<Object> Generate_MultiplyWithFeedback( + TNode<Context> context, TNode<Object> left, TNode<Object> right, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi); - Node* Generate_DivideWithFeedback(Node* context, Node* dividend, - Node* divisor, Node* slot_id, - Node* feedback_vector, bool rhs_is_smi); + TNode<Object> Generate_DivideWithFeedback( + TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi); - Node* Generate_ModulusWithFeedback(Node* context, Node* dividend, - Node* divisor, Node* slot_id, - Node* feedback_vector, bool rhs_is_smi); + TNode<Object> Generate_ModulusWithFeedback( + TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi); - Node* Generate_ExponentiateWithFeedback(Node* context, Node* dividend, - Node* divisor, Node* slot_id, - Node* feedback_vector, - bool rhs_is_smi); + TNode<Object> Generate_ExponentiateWithFeedback( + TNode<Context> context, TNode<Object> base, TNode<Object> exponent, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi); private: - using SmiOperation = std::function<Node*(Node*, Node*, Variable*)>; - using FloatOperation = std::function<Node*(Node*, Node*)>; - - Node* Generate_BinaryOperationWithFeedback( - Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector, + using SmiOperation = + std::function<TNode<Object>(TNode<Smi>, TNode<Smi>, TVariable<Smi>*)>; + using FloatOperation = + std::function<TNode<Float64T>(TNode<Float64T>, TNode<Float64T>)>; + + TNode<Object> Generate_BinaryOperationWithFeedback( + TNode<Context> context, TNode<Object> left, TNode<Object> right, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, const SmiOperation& smiOperation, const FloatOperation& floatOperation, - Operation op, bool rhs_is_smi); + Operation op, 
bool rhs_known_smi); }; } // namespace internal diff --git a/chromium/v8/src/ic/handler-configuration-inl.h b/chromium/v8/src/ic/handler-configuration-inl.h index c0ff8a4c9b1..95ef3532778 100644 --- a/chromium/v8/src/ic/handler-configuration-inl.h +++ b/chromium/v8/src/ic/handler-configuration-inl.h @@ -43,6 +43,11 @@ Handle<Smi> LoadHandler::LoadInterceptor(Isolate* isolate) { return handle(Smi::FromInt(config), isolate); } +Handle<Smi> LoadHandler::LoadSlow(Isolate* isolate) { + int config = KindBits::encode(kSlow); + return handle(Smi::FromInt(config), isolate); +} + Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) { int config = KindBits::encode(kField) | IsInobjectBits::encode(field_index.is_inobject()) | @@ -127,6 +132,16 @@ Handle<Smi> StoreHandler::StoreNormal(Isolate* isolate) { return handle(Smi::FromInt(config), isolate); } +Handle<Smi> StoreHandler::StoreInterceptor(Isolate* isolate) { + int config = KindBits::encode(kInterceptor); + return handle(Smi::FromInt(config), isolate); +} + +Handle<Smi> StoreHandler::StoreSlow(Isolate* isolate) { + int config = KindBits::encode(kSlow); + return handle(Smi::FromInt(config), isolate); +} + Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) { int config = KindBits::encode(kProxy); return handle(Smi::FromInt(config), isolate); @@ -135,29 +150,12 @@ Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) { Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind, int descriptor, FieldIndex field_index, Representation representation) { - FieldRepresentation field_rep; - switch (representation.kind()) { - case Representation::kSmi: - field_rep = kSmi; - break; - case Representation::kDouble: - field_rep = kDouble; - break; - case Representation::kHeapObject: - field_rep = kHeapObject; - break; - case Representation::kTagged: - field_rep = kTagged; - break; - default: - UNREACHABLE(); - } - + DCHECK(!representation.IsNone()); DCHECK(kind == kField || kind == kConstField); int config = KindBits::encode(kind) | IsInobjectBits::encode(field_index.is_inobject()) | - FieldRepresentationBits::encode(field_rep) | + RepresentationBits::encode(representation.kind()) | DescriptorBits::encode(descriptor) | FieldIndexBits::encode(field_index.index()); return handle(Smi::FromInt(config), isolate); diff --git a/chromium/v8/src/ic/handler-configuration.cc b/chromium/v8/src/ic/handler-configuration.cc index 814935c6ebe..3af5fe49537 100644 --- a/chromium/v8/src/ic/handler-configuration.cc +++ b/chromium/v8/src/ic/handler-configuration.cc @@ -196,7 +196,7 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate, bool is_dictionary_map = transition_map->is_dictionary_map(); #ifdef DEBUG if (!is_dictionary_map) { - int descriptor = transition_map->LastAdded(); + InternalIndex descriptor = transition_map->LastAdded(); Handle<DescriptorArray> descriptors(transition_map->instance_descriptors(), isolate); PropertyDetails details = descriptors->GetDetails(descriptor); diff --git a/chromium/v8/src/ic/handler-configuration.h b/chromium/v8/src/ic/handler-configuration.h index 80d19d73ecf..fd0cee29206 100644 --- a/chromium/v8/src/ic/handler-configuration.h +++ b/chromium/v8/src/ic/handler-configuration.h @@ -43,6 +43,7 @@ class LoadHandler final : public DataHandler { kApiGetter, kApiGetterHolderIsPrototype, kInterceptor, + kSlow, kProxy, kNonExistent, kModuleExport @@ -113,6 +114,9 @@ class LoadHandler final : public DataHandler { // interceptor. 
static inline Handle<Smi> LoadInterceptor(Isolate* isolate); + // Creates a Smi-handler for loading a property from a slow object. + static inline Handle<Smi> LoadSlow(Isolate* isolate); + // Creates a Smi-handler for loading a field from fast object. static inline Handle<Smi> LoadField(Isolate* isolate, FieldIndex field_index); @@ -197,13 +201,13 @@ class StoreHandler final : public DataHandler { kApiSetterHolderIsPrototype, kGlobalProxy, kNormal, + kInterceptor, + kSlow, kProxy, kKindsNumber // Keep last }; using KindBits = BitField<Kind, 0, 4>; - enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged }; - // Applicable to kGlobalProxy, kProxy kinds. // Defines whether access rights check should be done on receiver object. @@ -231,10 +235,10 @@ class StoreHandler final : public DataHandler { // Encoding when KindBits contains kField or kTransitionToField. // using IsInobjectBits = DescriptorBits::Next<bool, 1>; - using FieldRepresentationBits = IsInobjectBits::Next<FieldRepresentation, 2>; + using RepresentationBits = IsInobjectBits::Next<Representation::Kind, 3>; // +1 here is to cover all possible JSObject header sizes. using FieldIndexBits = - FieldRepresentationBits::Next<unsigned, kDescriptorIndexBitCount + 1>; + RepresentationBits::Next<unsigned, kDescriptorIndexBitCount + 1>; // Make sure we don't overflow the smi. STATIC_ASSERT(FieldIndexBits::kLastUsedBit < kSmiValueSize); @@ -283,6 +287,12 @@ class StoreHandler final : public DataHandler { // Creates a Smi-handler for storing a property to a slow object. static inline Handle<Smi> StoreNormal(Isolate* isolate); + // Creates a Smi-handler for storing a property to an interceptor. + static inline Handle<Smi> StoreInterceptor(Isolate* isolate); + // Creates a Smi-handler for storing a property. + static inline Handle<Smi> StoreSlow(Isolate* isolate); + // Creates a Smi-handler for storing a property on a proxy. 
static inline Handle<Smi> StoreProxy(Isolate* isolate); diff --git a/chromium/v8/src/ic/ic-stats.cc b/chromium/v8/src/ic/ic-stats.cc index f387239aeee..54d48566310 100644 --- a/chromium/v8/src/ic/ic-stats.cc +++ b/chromium/v8/src/ic/ic-stats.cc @@ -94,6 +94,7 @@ ICInfo::ICInfo() script_offset(0), script_name(nullptr), line_num(-1), + column_num(-1), is_constructor(false), is_optimized(false), map(nullptr), @@ -106,6 +107,7 @@ void ICInfo::Reset() { script_offset = 0; script_name = nullptr; line_num = -1; + column_num = -1; is_constructor = false; is_optimized = false; state.clear(); @@ -127,6 +129,7 @@ void ICInfo::AppendToTracedValue(v8::tracing::TracedValue* value) const { if (script_offset) value->SetInteger("offset", script_offset); if (script_name) value->SetString("scriptName", script_name); if (line_num != -1) value->SetInteger("lineNum", line_num); + if (column_num != -1) value->SetInteger("columnNum", column_num); if (is_constructor) value->SetInteger("constructor", is_constructor); if (!state.empty()) value->SetString("state", state); if (map) { diff --git a/chromium/v8/src/ic/ic-stats.h b/chromium/v8/src/ic/ic-stats.h index 76c65c3862c..44b968c6c0e 100644 --- a/chromium/v8/src/ic/ic-stats.h +++ b/chromium/v8/src/ic/ic-stats.h @@ -34,6 +34,7 @@ struct ICInfo { int script_offset; const char* script_name; int line_num; + int column_num; bool is_constructor; bool is_optimized; std::string state; diff --git a/chromium/v8/src/ic/ic.cc b/chromium/v8/src/ic/ic.cc index 54f4be7a221..4ac5fd7abef 100644 --- a/chromium/v8/src/ic/ic.cc +++ b/chromium/v8/src/ic/ic.cc @@ -15,6 +15,7 @@ #include "src/execution/execution.h" #include "src/execution/frames-inl.h" #include "src/execution/isolate-inl.h" +#include "src/execution/protectors-inl.h" #include "src/execution/runtime-profiler.h" #include "src/handles/handles-inl.h" #include "src/ic/call-optimization.h" @@ -47,8 +48,6 @@ char IC::TransitionMarkFromState(IC::State state) { return 'X'; case UNINITIALIZED: return '0'; - case PREMONOMORPHIC: - return '.'; case MONOMORPHIC: return '1'; case RECOMPUTE_HANDLER: @@ -343,11 +342,6 @@ bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) { return changed; } -void IC::ConfigureVectorState(Handle<Map> map) { - nexus()->ConfigurePremonomorphic(map); - OnFeedbackChanged("Premonomorphic"); -} - void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map, Handle<Object> handler) { ConfigureVectorState(name, map, MaybeObjectHandle(handler)); @@ -383,11 +377,11 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) { // of its properties; throw a TypeError in that case. if (IsAnyHas() ? !object->IsJSReceiver() : object->IsNullOrUndefined(isolate())) { - if (use_ic && state() != PREMONOMORPHIC) { + if (use_ic) { // Ensure the IC state progresses. TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver); update_receiver_map(object); - PatchCache(name, slow_stub()); + SetCache(name, LoadHandler::LoadSlow(isolate())); TraceIC("LoadIC", name); } @@ -490,7 +484,7 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) { } else { // Given combination of indices can't be encoded, so use slow stub. 
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_SlowStub); - PatchCache(name, slow_stub()); + SetCache(name, LoadHandler::LoadSlow(isolate())); } TraceIC("LoadGlobalIC", name); } @@ -613,11 +607,11 @@ bool IC::IsTransitionOfMonomorphicTarget(Map source_map, Map target_map) { return transitioned_map == target_map; } -void IC::PatchCache(Handle<Name> name, Handle<Object> handler) { - PatchCache(name, MaybeObjectHandle(handler)); +void IC::SetCache(Handle<Name> name, Handle<Object> handler) { + SetCache(name, MaybeObjectHandle(handler)); } -void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) { +void IC::SetCache(Handle<Name> name, const MaybeObjectHandle& handler) { DCHECK(IsHandler(*handler)); // Currently only load and store ICs support non-code handlers. DCHECK(IsAnyLoad() || IsAnyStore() || IsAnyHas()); @@ -625,7 +619,6 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) { case NO_FEEDBACK: UNREACHABLE(); case UNINITIALIZED: - case PREMONOMORPHIC: UpdateMonomorphicIC(handler, name); break; case RECOMPUTE_HANDLER: @@ -659,7 +652,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) { Handle<Object> code; if (lookup->state() == LookupIterator::ACCESS_CHECK) { - code = slow_stub(); + code = LoadHandler::LoadSlow(isolate()); } else if (!lookup->IsFound()) { TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH); Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate()); @@ -683,7 +676,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) { code = ComputeHandler(lookup); } - PatchCache(lookup->name(), code); + SetCache(lookup->name(), code); TraceIC("LoadIC", lookup->name()); } @@ -798,7 +791,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) { isolate()); if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) { TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub); - return slow_stub(); + return LoadHandler::LoadSlow(isolate()); } if ((getter->IsFunctionTemplateInfo() && @@ -807,7 +800,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) { JSFunction::cast(*getter).shared().BreakAtEntry())) { // Do not install an IC if the api function has a breakpoint. TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub); - return slow_stub(); + return LoadHandler::LoadSlow(isolate()); } Handle<Smi> smi_handler; @@ -817,7 +810,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) { if (!call_optimization.IsCompatibleReceiverMap(map, holder) || !holder->HasFastProperties()) { TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub); - return slow_stub(); + return LoadHandler::LoadSlow(isolate()); } CallOptimization::HolderLookup holder_lookup; @@ -868,7 +861,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) { !holder->HasFastProperties() || (info->is_sloppy() && !receiver->IsJSReceiver())) { TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub); - return slow_stub(); + return LoadHandler::LoadSlow(isolate()); } Handle<Smi> smi_handler = LoadHandler::LoadNativeDataProperty( @@ -1076,7 +1069,7 @@ bool AllowConvertHoleElementToUndefined(Isolate* isolate, } // For other {receiver}s we need to check the "no elements" protector. 
- if (isolate->IsNoElementsProtectorIntact()) { + if (Protectors::IsNoElementsIntact(isolate)) { if (receiver_map->IsStringMap()) { return true; } @@ -1315,12 +1308,11 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value, case LookupIterator::INTERCEPTOR: { Handle<JSObject> holder = it->GetHolder<JSObject>(); InterceptorInfo info = holder->GetNamedInterceptor(); - if (it->HolderIsReceiverOrHiddenPrototype()) { - return !info.non_masking() && receiver.is_identical_to(holder) && - !info.setter().IsUndefined(isolate()); - } else if (!info.getter().IsUndefined(isolate()) || - !info.query().IsUndefined(isolate())) { - return false; + if ((it->HolderIsReceiverOrHiddenPrototype() && + !info.non_masking()) || + !info.getter().IsUndefined(isolate()) || + !info.query().IsUndefined(isolate())) { + return true; } break; } @@ -1403,7 +1395,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name, } else { // Given combination of indices can't be encoded, so use slow stub. TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_SlowStub); - PatchCache(name, slow_stub()); + SetCache(name, StoreHandler::StoreSlow(isolate())); } TraceIC("StoreGlobalIC", name); } @@ -1432,11 +1424,11 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name, // If the object is undefined or null it's illegal to try to set any // properties on it; throw a TypeError in that case. if (object->IsNullOrUndefined(isolate())) { - if (use_ic && state() != PREMONOMORPHIC) { + if (use_ic) { // Ensure the IC state progresses. TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver); update_receiver_map(object); - PatchCache(name, slow_stub()); + SetCache(name, StoreHandler::StoreSlow(isolate())); TraceIC("StoreIC", name); } return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name); @@ -1481,30 +1473,11 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value, } handler = ComputeHandler(lookup); } else { - if (state() == UNINITIALIZED && IsStoreGlobalIC() && - lookup->state() == LookupIterator::INTERCEPTOR) { - InterceptorInfo info = - lookup->GetHolder<JSObject>()->GetNamedInterceptor(); - if (!lookup->HolderIsReceiverOrHiddenPrototype() && - !info.getter().IsUndefined(isolate())) { - // Utilize premonomorphic state for global store ics that run into - // an interceptor because the property doesn't exist yet. - // After we actually set the property, we'll have more information. - // Premonomorphism gives us a chance to find more information the - // second time. - TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_Premonomorphic); - ConfigureVectorState(receiver_map()); - TraceIC("StoreGlobalIC", lookup->name()); - return; - } - } - set_slow_stub_reason("LookupForWrite said 'false'"); - // TODO(marja): change slow_stub to return MaybeObjectHandle. 
- handler = MaybeObjectHandle(slow_stub()); + handler = MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } - PatchCache(lookup->name(), handler); + SetCache(lookup->name(), handler); TraceIC("StoreIC", lookup->name()); } @@ -1542,12 +1515,27 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { case LookupIterator::INTERCEPTOR: { Handle<JSObject> holder = lookup->GetHolder<JSObject>(); - USE(holder); + InterceptorInfo info = holder->GetNamedInterceptor(); + + // If the interceptor is on the receiver + if (lookup->HolderIsReceiverOrHiddenPrototype() && !info.non_masking()) { + // return a store interceptor smi handler if there is one, + if (!info.setter().IsUndefined(isolate())) { + return MaybeObjectHandle(StoreHandler::StoreInterceptor(isolate())); + } + // otherwise return a slow-case smi handler. + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); + } - DCHECK(!holder->GetNamedInterceptor().setter().IsUndefined(isolate())); - // TODO(jgruber): Update counter name. - TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub); - return MaybeObjectHandle(BUILTIN_CODE(isolate(), StoreInterceptorIC)); + // If the interceptor is a getter/query interceptor on the prototype + // chain, return an invalidatable slow handler so it can turn fast if the + // interceptor is masked by a regular property later. + DCHECK(!info.getter().IsUndefined(isolate()) || + !info.query().IsUndefined(isolate())); + Handle<Object> handler = StoreHandler::StoreThroughPrototype( + isolate(), receiver_map(), holder, + StoreHandler::StoreSlow(isolate())); + return MaybeObjectHandle(handler); } case LookupIterator::ACCESSOR: { @@ -1559,7 +1547,9 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { if (!holder->HasFastProperties()) { set_slow_stub_reason("accessor on slow map"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + MaybeObjectHandle handler = + MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); + return handler; } Handle<Object> accessors = lookup->GetAccessors(); if (accessors->IsAccessorInfo()) { @@ -1567,18 +1557,18 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { if (v8::ToCData<Address>(info->setter()) == kNullAddress) { set_slow_stub_reason("setter == kNullAddress"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } if (AccessorInfo::cast(*accessors).is_special_data_property() && !lookup->HolderIsReceiverOrHiddenPrototype()) { set_slow_stub_reason("special data property in prototype chain"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } if (!AccessorInfo::IsCompatibleReceiverMap(info, receiver_map())) { set_slow_stub_reason("incompatible receiver type"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } Handle<Smi> smi_handler = StoreHandler::StoreNativeDataProperty( @@ -1598,7 +1588,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) { set_slow_stub_reason("setter not a function"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } if ((setter->IsFunctionTemplateInfo() && 
@@ -1607,7 +1597,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { JSFunction::cast(*setter).shared().BreakAtEntry())) { // Do not install an IC if the api function has a breakpoint. TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } CallOptimization call_optimization(isolate(), setter); @@ -1631,11 +1621,11 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { } set_slow_stub_reason("incompatible receiver"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } else if (setter->IsFunctionTemplateInfo()) { set_slow_stub_reason("setter non-simple template"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } Handle<Smi> smi_handler = @@ -1651,7 +1641,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { isolate(), receiver_map(), holder, smi_handler)); } TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } case LookupIterator::DATA: { @@ -1694,7 +1684,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) { DCHECK_EQ(kDescriptor, lookup->property_details().location()); set_slow_stub_reason("constant property"); TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub); - return MaybeObjectHandle(slow_stub()); + return MaybeObjectHandle(StoreHandler::StoreSlow(isolate())); } case LookupIterator::JSPROXY: { Handle<JSReceiver> receiver = @@ -1905,7 +1895,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers( // TODO(mvstanton): Consider embedding store_mode in the state of the slow // keyed store ic for uniformity. TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub); - handler = slow_stub(); + handler = StoreHandler::StoreSlow(isolate()); } else { { @@ -2532,7 +2522,7 @@ static bool CanFastCloneObject(Handle<Map> map) { } DescriptorArray descriptors = map->instance_descriptors(); - for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { PropertyDetails details = descriptors.GetDetails(i); Name key = descriptors.GetKey(i); if (details.kind() != kData || !details.IsEnumerable() || diff --git a/chromium/v8/src/ic/ic.h b/chromium/v8/src/ic/ic.h index 29f3b4a60a2..a3c68f4fbf9 100644 --- a/chromium/v8/src/ic/ic.h +++ b/chromium/v8/src/ic/ic.h @@ -74,8 +74,6 @@ class IC { // Configure for most states. bool ConfigureVectorState(IC::State new_state, Handle<Object> key); - // Configure the vector for PREMONOMORPHIC. - void ConfigureVectorState(Handle<Map> map); // Configure the vector for MONOMORPHIC. 
void ConfigureVectorState(Handle<Name> name, Handle<Map> map, Handle<Object> handler); @@ -103,8 +101,8 @@ class IC { void CopyICToMegamorphicCache(Handle<Name> name); bool IsTransitionOfMonomorphicTarget(Map source_map, Map target_map); - void PatchCache(Handle<Name> name, Handle<Object> handler); - void PatchCache(Handle<Name> name, const MaybeObjectHandle& handler); + void SetCache(Handle<Name> name, Handle<Object> handler); + void SetCache(Handle<Name> name, const MaybeObjectHandle& handler); FeedbackSlotKind kind() const { return kind_; } bool IsGlobalIC() const { return IsLoadGlobalIC() || IsStoreGlobalIC(); } bool IsLoadIC() const { return IsLoadICKind(kind_); } @@ -188,11 +186,6 @@ class LoadIC : public IC { Handle<Name> name); protected: - virtual Handle<Code> slow_stub() const { - return IsAnyHas() ? BUILTIN_CODE(isolate(), HasIC_Slow) - : BUILTIN_CODE(isolate(), LoadIC_Slow); - } - // Update the inline cache and the global stub cache based on the // lookup result. void UpdateCaches(LookupIterator* lookup); @@ -211,11 +204,6 @@ class LoadGlobalIC : public LoadIC { : LoadIC(isolate, vector, slot, kind) {} V8_WARN_UNUSED_RESULT MaybeHandle<Object> Load(Handle<Name> name); - - protected: - Handle<Code> slow_stub() const override { - return BUILTIN_CODE(isolate(), LoadGlobalIC_Slow); - } }; class KeyedLoadIC : public LoadIC { @@ -268,11 +256,6 @@ class StoreIC : public IC { protected: // Stub accessors. - virtual Handle<Code> slow_stub() const { - // All StoreICs share the same slow stub. - return BUILTIN_CODE(isolate(), KeyedStoreIC_Slow); - } - // Update the inline cache and the global stub cache based on the // lookup result. void UpdateCaches(LookupIterator* lookup, Handle<Object> value, @@ -292,11 +275,6 @@ class StoreGlobalIC : public StoreIC { V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Name> name, Handle<Object> value); - - protected: - Handle<Code> slow_stub() const override { - return BUILTIN_CODE(isolate(), StoreGlobalIC_Slow); - } }; enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap }; @@ -328,10 +306,6 @@ class KeyedStoreIC : public StoreIC { KeyedAccessStoreMode store_mode, Handle<Map> new_receiver_map); - Handle<Code> slow_stub() const override { - return BUILTIN_CODE(isolate(), KeyedStoreIC_Slow); - } - private: Handle<Map> ComputeTransitionedMap(Handle<Map> map, TransitionMode transition_mode); @@ -356,11 +330,6 @@ class StoreInArrayLiteralIC : public KeyedStoreIC { } void Store(Handle<JSArray> array, Handle<Object> index, Handle<Object> value); - - private: - Handle<Code> slow_stub() const override { - return BUILTIN_CODE(isolate(), StoreInArrayLiteralIC_Slow); - } }; } // namespace internal diff --git a/chromium/v8/src/ic/keyed-store-generic.cc b/chromium/v8/src/ic/keyed-store-generic.cc index bb4e6cb4278..ff830a022e5 100644 --- a/chromium/v8/src/ic/keyed-store-generic.cc +++ b/chromium/v8/src/ic/keyed-store-generic.cc @@ -16,10 +16,6 @@ namespace v8 { namespace internal { -using Node = compiler::Node; -template <class T> -using TNode = compiler::TNode<T>; - enum class StoreMode { kOrdinary, kInLiteral }; class KeyedStoreGenericAssembler : public AccessorAssembler { @@ -62,9 +58,11 @@ class KeyedStoreGenericAssembler : public AccessorAssembler { TNode<Object> key, TNode<Object> value, Maybe<LanguageMode> language_mode); - void EmitGenericElementStore(Node* receiver, TNode<Map> receiver_map, - Node* instance_type, TNode<IntPtrT> index, - Node* value, Node* context, Label* slow); + void EmitGenericElementStore(TNode<JSObject> receiver, + TNode<Map> 
receiver_map, + TNode<Uint16T> instance_type, + TNode<IntPtrT> index, TNode<Object> value, + TNode<Context> context, Label* slow); // If language mode is not provided it is deduced from the feedback slot's // kind. @@ -86,38 +84,46 @@ class KeyedStoreGenericAssembler : public AccessorAssembler { Label* non_fast_elements, Label* only_fast_elements); - void TryRewriteElements(Node* receiver, TNode<Map> receiver_map, - Node* elements, Node* native_context, + void TryRewriteElements(TNode<JSObject> receiver, TNode<Map> receiver_map, + TNode<FixedArrayBase> elements, + TNode<NativeContext> native_context, ElementsKind from_kind, ElementsKind to_kind, Label* bailout); - void StoreElementWithCapacity(Node* receiver, TNode<Map> receiver_map, + void StoreElementWithCapacity(TNode<JSObject> receiver, + TNode<Map> receiver_map, SloppyTNode<FixedArrayBase> elements, TNode<Word32T> elements_kind, - TNode<IntPtrT> index, Node* value, - Node* context, Label* slow, + TNode<IntPtrT> index, SloppyTNode<Object> value, + TNode<Context> context, Label* slow, UpdateLength update_length); - void MaybeUpdateLengthAndReturn(Node* receiver, Node* index, Node* value, + void MaybeUpdateLengthAndReturn(TNode<JSObject> receiver, + TNode<IntPtrT> index, TNode<Object> value, UpdateLength update_length); - void TryChangeToHoleyMapHelper(Node* receiver, TNode<Map> receiver_map, - Node* native_context, ElementsKind packed_kind, + void TryChangeToHoleyMapHelper(TNode<JSObject> receiver, + TNode<Map> receiver_map, + TNode<NativeContext> native_context, + ElementsKind packed_kind, ElementsKind holey_kind, Label* done, Label* map_mismatch, Label* bailout); - void TryChangeToHoleyMap(Node* receiver, TNode<Map> receiver_map, - TNode<Word32T> current_elements_kind, Node* context, - ElementsKind packed_kind, Label* bailout); - void TryChangeToHoleyMapMulti(Node* receiver, TNode<Map> receiver_map, + void TryChangeToHoleyMap(TNode<JSObject> receiver, TNode<Map> receiver_map, + TNode<Word32T> current_elements_kind, + TNode<Context> context, ElementsKind packed_kind, + Label* bailout); + void TryChangeToHoleyMapMulti(TNode<JSObject> receiver, + TNode<Map> receiver_map, TNode<Word32T> current_elements_kind, - Node* context, ElementsKind packed_kind, + TNode<Context> context, + ElementsKind packed_kind, ElementsKind packed_kind_2, Label* bailout); - void LookupPropertyOnPrototypeChain(TNode<Map> receiver_map, Node* name, - Label* accessor, - Variable* var_accessor_pair, - Variable* var_accessor_holder, - Label* readonly, Label* bailout); + void LookupPropertyOnPrototypeChain( + TNode<Map> receiver_map, TNode<Name> name, Label* accessor, + TVariable<Object>* var_accessor_pair, + TVariable<HeapObject>* var_accessor_holder, Label* readonly, + Label* bailout); TNode<Map> FindCandidateStoreICTransitionMapHandler(TNode<Map> map, TNode<Name> name, @@ -173,18 +179,18 @@ void KeyedStoreGenericGenerator::SetPropertyInLiteral( void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements( TNode<Map> receiver_map, Label* non_fast_elements, Label* only_fast_elements) { - VARIABLE(var_map, MachineRepresentation::kTagged); - var_map.Bind(receiver_map); + TVARIABLE(Map, var_map); + var_map = receiver_map; Label loop_body(this, &var_map); Goto(&loop_body); BIND(&loop_body); { - Node* map = var_map.value(); + TNode<Map> map = var_map.value(); TNode<HeapObject> prototype = LoadMapPrototype(map); GotoIf(IsNull(prototype), only_fast_elements); TNode<Map> prototype_map = LoadMap(prototype); - var_map.Bind(prototype_map); + var_map = prototype_map; 
TNode<Uint16T> instance_type = LoadMapInstanceType(prototype_map); GotoIf(IsCustomElementsReceiverInstanceType(instance_type), non_fast_elements); @@ -196,9 +202,9 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements( } void KeyedStoreGenericAssembler::TryRewriteElements( - Node* receiver, TNode<Map> receiver_map, Node* elements, - Node* native_context, ElementsKind from_kind, ElementsKind to_kind, - Label* bailout) { + TNode<JSObject> receiver, TNode<Map> receiver_map, + TNode<FixedArrayBase> elements, TNode<NativeContext> native_context, + ElementsKind from_kind, ElementsKind to_kind, Label* bailout) { DCHECK(IsFastPackedElementsKind(from_kind)); ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind); ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind); @@ -206,12 +212,12 @@ void KeyedStoreGenericAssembler::TryRewriteElements( TrapAllocationMemento(receiver, bailout); } Label perform_transition(this), check_holey_map(this); - VARIABLE(var_target_map, MachineRepresentation::kTagged); + TVARIABLE(Map, var_target_map); // Check if the receiver has the default |from_kind| map. { TNode<Map> packed_map = LoadJSArrayElementsMap(from_kind, native_context); GotoIf(TaggedNotEqual(receiver_map, packed_map), &check_holey_map); - var_target_map.Bind( + var_target_map = CAST( LoadContextElement(native_context, Context::ArrayMapIndex(to_kind))); Goto(&perform_transition); } @@ -222,7 +228,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements( TNode<Object> holey_map = LoadContextElement( native_context, Context::ArrayMapIndex(holey_from_kind)); GotoIf(TaggedNotEqual(receiver_map, holey_map), bailout); - var_target_map.Bind(LoadContextElement( + var_target_map = CAST(LoadContextElement( native_context, Context::ArrayMapIndex(holey_to_kind))); Goto(&perform_transition); } @@ -240,9 +246,9 @@ void KeyedStoreGenericAssembler::TryRewriteElements( } void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper( - Node* receiver, TNode<Map> receiver_map, Node* native_context, - ElementsKind packed_kind, ElementsKind holey_kind, Label* done, - Label* map_mismatch, Label* bailout) { + TNode<JSObject> receiver, TNode<Map> receiver_map, + TNode<NativeContext> native_context, ElementsKind packed_kind, + ElementsKind holey_kind, Label* done, Label* map_mismatch, Label* bailout) { TNode<Map> packed_map = LoadJSArrayElementsMap(packed_kind, native_context); GotoIf(TaggedNotEqual(receiver_map, packed_map), map_mismatch); if (AllocationSite::ShouldTrack(packed_kind, holey_kind)) { @@ -255,23 +261,23 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper( } void KeyedStoreGenericAssembler::TryChangeToHoleyMap( - Node* receiver, TNode<Map> receiver_map, - TNode<Word32T> current_elements_kind, Node* context, + TNode<JSObject> receiver, TNode<Map> receiver_map, + TNode<Word32T> current_elements_kind, TNode<Context> context, ElementsKind packed_kind, Label* bailout) { ElementsKind holey_kind = GetHoleyElementsKind(packed_kind); Label already_holey(this); GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)), &already_holey); - TNode<Context> native_context = LoadNativeContext(context); + TNode<NativeContext> native_context = LoadNativeContext(context); TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind, holey_kind, &already_holey, bailout, bailout); BIND(&already_holey); } void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti( - Node* receiver, TNode<Map> receiver_map, - TNode<Word32T> current_elements_kind, Node* context, + 
TNode<JSObject> receiver, TNode<Map> receiver_map, + TNode<Word32T> current_elements_kind, TNode<Context> context, ElementsKind packed_kind, ElementsKind packed_kind_2, Label* bailout) { ElementsKind holey_kind = GetHoleyElementsKind(packed_kind); ElementsKind holey_kind_2 = GetHoleyElementsKind(packed_kind_2); @@ -282,7 +288,7 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti( GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind_2)), &already_holey); - TNode<Context> native_context = LoadNativeContext(context); + TNode<NativeContext> native_context = LoadNativeContext(context); TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind, holey_kind, &already_holey, &check_other_kind, bailout); @@ -294,7 +300,8 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti( } void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn( - Node* receiver, Node* index, Node* value, UpdateLength update_length) { + TNode<JSObject> receiver, TNode<IntPtrT> index, TNode<Object> value, + UpdateLength update_length) { if (update_length != kDontChangeLength) { TNode<Smi> new_length = SmiTag(Signed(IntPtrAdd(index, IntPtrConstant(1)))); StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset, new_length, @@ -304,10 +311,10 @@ void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn( } void KeyedStoreGenericAssembler::StoreElementWithCapacity( - Node* receiver, TNode<Map> receiver_map, + TNode<JSObject> receiver, TNode<Map> receiver_map, SloppyTNode<FixedArrayBase> elements, TNode<Word32T> elements_kind, - TNode<IntPtrT> index, Node* value, Node* context, Label* slow, - UpdateLength update_length) { + TNode<IntPtrT> index, SloppyTNode<Object> value, TNode<Context> context, + Label* slow, UpdateLength update_length) { if (update_length != kDontChangeLength) { CSA_ASSERT(this, InstanceTypeEqual(LoadMapInstanceType(receiver_map), JS_ARRAY_TYPE)); @@ -331,8 +338,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( // FixedArray backing store -> Smi or object elements. { - TNode<IntPtrT> offset = ElementOffsetFromIndex( - index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize); + TNode<IntPtrT> offset = + ElementOffsetFromIndex(index, PACKED_ELEMENTS, kHeaderSize); // Check if we're about to overwrite the hole. We can safely do that // only if there can be no setters on the prototype chain. // If we know that we're storing beyond the previous array length, we @@ -387,8 +394,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( // Transition to the required ElementsKind. { Label transition_to_double(this), transition_to_object(this); - TNode<Context> native_context = LoadNativeContext(context); - Branch(TaggedEqual(LoadMap(value), HeapNumberMapConstant()), + TNode<NativeContext> native_context = LoadNativeContext(context); + Branch(TaggedEqual(LoadMap(CAST(value)), HeapNumberMapConstant()), &transition_to_double, &transition_to_object); BIND(&transition_to_double); { @@ -401,11 +408,11 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( PACKED_SMI_ELEMENTS, target_kind, slow); // Reload migrated elements. TNode<FixedArrayBase> double_elements = LoadElements(receiver); - TNode<IntPtrT> double_offset = ElementOffsetFromIndex( - index, PACKED_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize); + TNode<IntPtrT> double_offset = + ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, kHeaderSize); // Make sure we do not store signalling NaNs into double arrays. 
TNode<Float64T> double_value = - Float64SilenceNaN(LoadHeapNumberValue(value)); + Float64SilenceNaN(LoadHeapNumberValue(CAST(value))); StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements, double_offset, double_value); MaybeUpdateLengthAndReturn(receiver, index, value, update_length); @@ -434,8 +441,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( &check_cow_elements); // FixedDoubleArray backing store -> double elements. { - TNode<IntPtrT> offset = ElementOffsetFromIndex( - index, PACKED_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize); + TNode<IntPtrT> offset = + ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, kHeaderSize); // Check if we're about to overwrite the hole. We can safely do that // only if there can be no setters on the prototype chain. { @@ -457,7 +464,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( // Try to store the value as a double. { Label non_number_value(this); - Node* double_value = TryTaggedToFloat64(value, &non_number_value); + TNode<Float64T> double_value = + TryTaggedToFloat64(value, &non_number_value); // Make sure we do not store signalling NaNs into double arrays. double_value = Float64SilenceNaN(double_value); @@ -475,7 +483,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( // Transition to object elements. { - TNode<Context> native_context = LoadNativeContext(context); + TNode<NativeContext> native_context = LoadNativeContext(context); ElementsKind target_kind = update_length == kBumpLengthWithGap ? HOLEY_ELEMENTS : PACKED_ELEMENTS; @@ -483,8 +491,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( PACKED_DOUBLE_ELEMENTS, target_kind, slow); // Reload migrated elements. TNode<FixedArrayBase> fast_elements = LoadElements(receiver); - TNode<IntPtrT> fast_offset = ElementOffsetFromIndex( - index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize); + TNode<IntPtrT> fast_offset = + ElementOffsetFromIndex(index, PACKED_ELEMENTS, kHeaderSize); Store(fast_elements, fast_offset, value); MaybeUpdateLengthAndReturn(receiver, index, value, update_length); } @@ -498,8 +506,9 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity( } void KeyedStoreGenericAssembler::EmitGenericElementStore( - Node* receiver, TNode<Map> receiver_map, Node* instance_type, - TNode<IntPtrT> index, Node* value, Node* context, Label* slow) { + TNode<JSObject> receiver, TNode<Map> receiver_map, + TNode<Uint16T> instance_type, TNode<IntPtrT> index, TNode<Object> value, + TNode<Context> context, Label* slow) { Label if_fast(this), if_in_bounds(this), if_out_of_bounds(this), if_increment_length_by_one(this), if_bump_length_with_gap(this), if_grow(this), if_nonfast(this), if_typed_array(this), @@ -517,7 +526,7 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore( } BIND(&if_array); { - TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(receiver)); + TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(CAST(receiver))); GotoIf(UintPtrLessThan(index, length), &if_in_bounds); TNode<IntPtrT> capacity = SmiUntag(LoadFixedArrayBaseLength(elements)); GotoIf(UintPtrGreaterThanOrEqual(index, capacity), &if_grow); @@ -595,32 +604,32 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore( } void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain( - TNode<Map> receiver_map, Node* name, Label* accessor, - Variable* var_accessor_pair, Variable* var_accessor_holder, Label* readonly, + TNode<Map> receiver_map, TNode<Name> name, Label* accessor, + TVariable<Object>* var_accessor_pair, + 
TVariable<HeapObject>* var_accessor_holder, Label* readonly, Label* bailout) { Label ok_to_write(this); - VARIABLE(var_holder, MachineRepresentation::kTagged); - var_holder.Bind(LoadMapPrototype(receiver_map)); - VARIABLE(var_holder_map, MachineRepresentation::kTagged); - var_holder_map.Bind(LoadMap(var_holder.value())); + TVARIABLE(HeapObject, var_holder); + TVARIABLE(Map, var_holder_map); + var_holder = LoadMapPrototype(receiver_map); + var_holder_map = LoadMap(var_holder.value()); - Variable* merged_variables[] = {&var_holder, &var_holder_map}; - Label loop(this, arraysize(merged_variables), merged_variables); + Label loop(this, {&var_holder, &var_holder_map}); Goto(&loop); BIND(&loop); { - Node* holder = var_holder.value(); + TNode<HeapObject> holder = var_holder.value(); GotoIf(IsNull(holder), &ok_to_write); - Node* holder_map = var_holder_map.value(); + TNode<Map> holder_map = var_holder_map.value(); TNode<Uint16T> instance_type = LoadMapInstanceType(holder_map); Label next_proto(this); { Label found(this), found_fast(this), found_dict(this), found_global(this); TVARIABLE(HeapObject, var_meta_storage); TVARIABLE(IntPtrT, var_entry); - TryLookupProperty(holder, holder_map, instance_type, name, &found_fast, - &found_dict, &found_global, &var_meta_storage, - &var_entry, &next_proto, bailout); + TryLookupProperty(CAST(holder), holder_map, instance_type, name, + &found_fast, &found_dict, &found_global, + &var_meta_storage, &var_entry, &next_proto, bailout); BIND(&found_fast); { TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value()); @@ -631,10 +640,10 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain( // Accessor case. // TODO(jkummerow): Implement a trimmed-down // LoadAccessorFromFastObject. - VARIABLE(var_details, MachineRepresentation::kWord32); + TVARIABLE(Uint32T, var_details); LoadPropertyFromFastObject(holder, holder_map, descriptors, name_index, &var_details, var_accessor_pair); - var_accessor_holder->Bind(holder); + *var_accessor_holder = holder; Goto(accessor); } @@ -648,9 +657,9 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain( if (accessor != nullptr) { // Accessor case. - var_accessor_pair->Bind( - LoadValueByKeyIndex<NameDictionary>(dictionary, entry)); - var_accessor_holder->Bind(holder); + *var_accessor_pair = + LoadValueByKeyIndex<NameDictionary>(dictionary, entry); + *var_accessor_holder = holder; Goto(accessor); } else { Goto(&ok_to_write); @@ -666,14 +675,14 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain( TNode<Object> value = LoadObjectField(property_cell, PropertyCell::kValueOffset); GotoIf(TaggedEqual(value, TheHoleConstant()), &next_proto); - TNode<Int32T> details = LoadAndUntagToWord32ObjectField( - property_cell, PropertyCell::kPropertyDetailsRawOffset); + TNode<Uint32T> details = Unsigned(LoadAndUntagToWord32ObjectField( + property_cell, PropertyCell::kPropertyDetailsRawOffset)); JumpIfDataProperty(details, &ok_to_write, readonly); if (accessor != nullptr) { // Accessor case. 
- var_accessor_pair->Bind(value); - var_accessor_holder->Bind(holder); + *var_accessor_pair = value; + *var_accessor_holder = holder; Goto(accessor); } else { Goto(&ok_to_write); @@ -686,8 +695,8 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain( GotoIf(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), bailout); TNode<HeapObject> proto = LoadMapPrototype(holder_map); GotoIf(IsNull(proto), &ok_to_write); - var_holder.Bind(proto); - var_holder_map.Bind(LoadMap(proto)); + var_holder = proto; + var_holder_map = LoadMap(proto); Goto(&loop); } BIND(&ok_to_write); @@ -763,8 +772,10 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( const StoreICParameters* p, ExitPoint* exit_point, Label* slow, Maybe<LanguageMode> maybe_language_mode) { CSA_ASSERT(this, IsSimpleObjectMap(receiver_map)); - VARIABLE(var_accessor_pair, MachineRepresentation::kTagged); - VARIABLE(var_accessor_holder, MachineRepresentation::kTagged); + // TODO(rmcilroy) Type as Struct once we use a trimmed down + // LoadAccessorFromFastObject instead of LoadPropertyFromFastObject. + TVARIABLE(Object, var_accessor_pair); + TVARIABLE(HeapObject, var_accessor_holder); Label fast_properties(this), dictionary_properties(this), accessor(this), readonly(this); TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map); @@ -792,11 +803,11 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( if (ShouldCallSetter()) { // Accessor case. // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject. - VARIABLE(var_details, MachineRepresentation::kWord32); + TVARIABLE(Uint32T, var_details); LoadPropertyFromFastObject(receiver, receiver_map, descriptors, name_index, &var_details, &var_accessor_pair); - var_accessor_holder.Bind(receiver); + var_accessor_holder = receiver; Goto(&accessor); } else { // Handle accessor to data property reconfiguration in runtime. @@ -836,7 +847,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( TVARIABLE(IntPtrT, var_name_index); Label dictionary_found(this, &var_name_index), not_found(this); - TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver))); + TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver)); NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found, &var_name_index, ¬_found); BIND(&dictionary_found); @@ -849,9 +860,9 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( if (ShouldCallSetter()) { // Accessor case. - var_accessor_pair.Bind(LoadValueByKeyIndex<NameDictionary>( - properties, var_name_index.value())); - var_accessor_holder.Bind(receiver); + var_accessor_pair = LoadValueByKeyIndex<NameDictionary>( + properties, var_name_index.value()); + var_accessor_holder = receiver; Goto(&accessor); } else { // We must reconfigure an accessor property to a data property @@ -870,6 +881,11 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( BIND(¬_found); { + // TODO(jkummerow): Also add support to correctly handle integer exotic + // cases for typed arrays and remove this check here. 
+ GotoIf(InstanceTypeEqual(LoadMapInstanceType(receiver_map), + JS_TYPED_ARRAY_TYPE), + slow); CheckForAssociatedProtector(name, slow); Label extensible(this), is_private_symbol(this); TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map); @@ -909,7 +925,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( BIND(&accessor); { Label not_callable(this); - Node* accessor_pair = var_accessor_pair.value(); + TNode<Struct> accessor_pair = CAST(var_accessor_pair.value()); GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow); CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE)); TNode<HeapObject> setter = @@ -951,7 +967,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( LanguageMode language_mode; if (maybe_language_mode.To(&language_mode)) { if (language_mode == LanguageMode::kStrict) { - Node* type = Typeof(p->receiver()); + TNode<String> type = Typeof(p->receiver()); ThrowTypeError(p->context(), MessageTemplate::kStrictReadOnlyProperty, name, type, p->receiver()); } else { @@ -969,15 +985,16 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore( // Helper that is used by the public KeyedStoreGeneric and by SetProperty. void KeyedStoreGenericAssembler::KeyedStoreGeneric( - TNode<Context> context, TNode<Object> receiver, TNode<Object> key, + TNode<Context> context, TNode<Object> receiver_maybe_smi, TNode<Object> key, TNode<Object> value, Maybe<LanguageMode> language_mode) { TVARIABLE(IntPtrT, var_index); - TVARIABLE(Object, var_unique, key); + TVARIABLE(Name, var_unique); Label if_index(this), if_unique_name(this), not_internalized(this), slow(this); - GotoIf(TaggedIsSmi(receiver), &slow); - TNode<Map> receiver_map = LoadMap(CAST(receiver)); + GotoIf(TaggedIsSmi(receiver_maybe_smi), &slow); + TNode<HeapObject> receiver = CAST(receiver_maybe_smi); + TNode<Map> receiver_map = LoadMap(receiver); TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); // Receivers requiring non-standard element accesses (interceptors, access // checks, strings and string wrappers, proxies) are handled in the runtime. 
@@ -989,14 +1006,14 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric( BIND(&if_index); { Comment("integer index"); - EmitGenericElementStore(receiver, receiver_map, instance_type, + EmitGenericElementStore(CAST(receiver), receiver_map, instance_type, var_index.value(), value, context, &slow); } BIND(&if_unique_name); { Comment("key is unique name"); - StoreICParameters p(context, receiver, var_unique.value(), value, nullptr, + StoreICParameters p(context, receiver, var_unique.value(), value, {}, nullptr); ExitPoint direct_exit(this); EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &direct_exit, @@ -1006,7 +1023,7 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric( BIND(¬_internalized); { if (FLAG_internalize_on_the_fly) { - TryInternalizeString(key, &if_index, &var_index, &if_unique_name, + TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name, &var_unique, &slow, &slow); } else { Goto(&slow); @@ -1049,30 +1066,34 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context, void KeyedStoreGenericAssembler::StoreIC_NoFeedback() { using Descriptor = StoreDescriptor; - Node* receiver = Parameter(Descriptor::kReceiver); + TNode<Object> receiver_maybe_smi = CAST(Parameter(Descriptor::kReceiver)); TNode<Object> name = CAST(Parameter(Descriptor::kName)); - Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); - Node* context = Parameter(Descriptor::kContext); + TNode<Object> value = CAST(Parameter(Descriptor::kValue)); + TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot)); + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); Label miss(this, Label::kDeferred), store_property(this); - GotoIf(TaggedIsSmi(receiver), &miss); - TNode<Map> receiver_map = LoadMap(receiver); - TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); - // Receivers requiring non-standard element accesses (interceptors, access - // checks, strings and string wrappers, proxies) are handled in the runtime. - GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss); + GotoIf(TaggedIsSmi(receiver_maybe_smi), &miss); + { - StoreICParameters p(CAST(context), receiver, name, value, slot, - UndefinedConstant()); - EmitGenericPropertyStore(receiver, receiver_map, &p, &miss); + TNode<HeapObject> receiver = CAST(receiver_maybe_smi); + TNode<Map> receiver_map = LoadMap(receiver); + TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map); + // Receivers requiring non-standard element accesses (interceptors, access + // checks, strings and string wrappers, proxies) are handled in the runtime. 
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss); + { + StoreICParameters p(context, receiver, name, value, slot, + UndefinedConstant()); + EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &miss); + } } BIND(&miss); { TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, - UndefinedConstant(), receiver, name); + UndefinedConstant(), receiver_maybe_smi, name); } } @@ -1082,7 +1103,7 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context, TNode<Name> unique_name, TNode<Object> value, LanguageMode language_mode) { - StoreICParameters p(context, receiver, unique_name, value, nullptr, nullptr); + StoreICParameters p(context, receiver, unique_name, value, {}, nullptr); Label done(this), slow(this, Label::kDeferred); ExitPoint exit_point(this, [&](Node* result) { Goto(&done); }); diff --git a/chromium/v8/src/ic/keyed-store-generic.h b/chromium/v8/src/ic/keyed-store-generic.h index efee0da80e6..8047fe64934 100644 --- a/chromium/v8/src/ic/keyed-store-generic.h +++ b/chromium/v8/src/ic/keyed-store-generic.h @@ -13,9 +13,6 @@ namespace internal { class KeyedStoreGenericGenerator { public: - template <class T> - using TNode = compiler::TNode<T>; - static void Generate(compiler::CodeAssemblerState* state); // Building block for fast path of Object.assign implementation. diff --git a/chromium/v8/src/ic/stub-cache.cc b/chromium/v8/src/ic/stub-cache.cc index 04381bf693c..c1d9aea3748 100644 --- a/chromium/v8/src/ic/stub-cache.cc +++ b/chromium/v8/src/ic/stub-cache.cc @@ -26,11 +26,10 @@ void StubCache::Initialize() { Clear(); } -// Hash algorithm for the primary table. This algorithm is replicated in -// assembler for every architecture. Returns an index into the table that +// Hash algorithm for the primary table. This algorithm is replicated in +// the AccessorAssembler. Returns an index into the table that // is scaled by 1 << kCacheIndexShift. int StubCache::PrimaryOffset(Name name, Map map) { - STATIC_ASSERT(kCacheIndexShift == Name::kHashShift); // Compute the hash of the name (use entire hash field). DCHECK(name.HasHashCode()); uint32_t field = name.hash_field(); diff --git a/chromium/v8/src/ic/stub-cache.h b/chromium/v8/src/ic/stub-cache.h index 87acc0e007e..dc3317588db 100644 --- a/chromium/v8/src/ic/stub-cache.h +++ b/chromium/v8/src/ic/stub-cache.h @@ -78,10 +78,15 @@ class V8_EXPORT_PRIVATE StubCache { Isolate* isolate() { return isolate_; } - // Setting the entry size such that the index is shifted by Name::kHashShift - // is convenient; shifting down the length field (to extract the hash code) - // automatically discards the hash bit field. - static const int kCacheIndexShift = Name::kHashShift; + // Ideally we would set kCacheIndexShift to Name::kHashShift, such that + // the bit field inside the hash field gets shifted out implicitly. However, + // sizeof(Entry) needs to be a multiple of 1 << kCacheIndexShift, and it + // isn't clear whether letting one bit of the bit field leak into the index + // computation is bad enough to warrant an additional shift to get rid of it. + static const int kCacheIndexShift = 2; + // The purpose of the static assert is to make us reconsider this choice + // if the bit field ever grows even more. + STATIC_ASSERT(kCacheIndexShift == Name::kHashShift - 1); static const int kPrimaryTableBits = 11; static const int kPrimaryTableSize = (1 << kPrimaryTableBits); @@ -125,7 +130,10 @@ class V8_EXPORT_PRIVATE StubCache { // of sizeof(Entry). 
This makes it easier to avoid making mistakes // in the hashed offset computations. static Entry* entry(Entry* table, int offset) { - const int multiplier = sizeof(*table) >> Name::kHashShift; + // The size of {Entry} must be a multiple of 1 << kCacheIndexShift. + STATIC_ASSERT((sizeof(*table) >> kCacheIndexShift) << kCacheIndexShift == + sizeof(*table)); + const int multiplier = sizeof(*table) >> kCacheIndexShift; return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) + offset * multiplier); } diff --git a/chromium/v8/src/init/bootstrapper.cc b/chromium/v8/src/init/bootstrapper.cc index f7e25ca0bbc..148c60d89d3 100644 --- a/chromium/v8/src/init/bootstrapper.cc +++ b/chromium/v8/src/init/bootstrapper.cc @@ -12,6 +12,7 @@ #include "src/debug/debug.h" #include "src/execution/isolate-inl.h" #include "src/execution/microtask-queue.h" +#include "src/execution/protectors.h" #include "src/extensions/cputracemark-extension.h" #include "src/extensions/externalize-string-extension.h" #include "src/extensions/free-buffer-extension.h" @@ -130,15 +131,15 @@ static bool isValidCpuTraceMarkFunctionName() { } void Bootstrapper::InitializeOncePerProcess() { - v8::RegisterExtension(v8::base::make_unique<FreeBufferExtension>()); - v8::RegisterExtension(v8::base::make_unique<GCExtension>(GCFunctionName())); - v8::RegisterExtension(v8::base::make_unique<ExternalizeStringExtension>()); - v8::RegisterExtension(v8::base::make_unique<StatisticsExtension>()); - v8::RegisterExtension(v8::base::make_unique<TriggerFailureExtension>()); - v8::RegisterExtension(v8::base::make_unique<IgnitionStatisticsExtension>()); + v8::RegisterExtension(std::make_unique<FreeBufferExtension>()); + v8::RegisterExtension(std::make_unique<GCExtension>(GCFunctionName())); + v8::RegisterExtension(std::make_unique<ExternalizeStringExtension>()); + v8::RegisterExtension(std::make_unique<StatisticsExtension>()); + v8::RegisterExtension(std::make_unique<TriggerFailureExtension>()); + v8::RegisterExtension(std::make_unique<IgnitionStatisticsExtension>()); if (isValidCpuTraceMarkFunctionName()) { - v8::RegisterExtension(v8::base::make_unique<CpuTraceMarkExtension>( - FLAG_expose_cputracemark_as)); + v8::RegisterExtension( + std::make_unique<CpuTraceMarkExtension>(FLAG_expose_cputracemark_as)); } } @@ -284,6 +285,9 @@ class Genesis { void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to); void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to); + Handle<Map> CreateInitialMapForArraySubclass(int size, + int inobject_properties); + static bool CompileExtension(Isolate* isolate, v8::Extension* extension); Isolate* isolate_; @@ -867,6 +871,29 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) { generator_next_internal->shared().set_native(false); native_context()->set_generator_next_internal(*generator_next_internal); + // Internal version of async module functions, flagged as non-native such + // that they don't show up in Error traces. 
+ { + Handle<JSFunction> async_module_evaluate_internal = + SimpleCreateFunction(isolate(), factory()->next_string(), + Builtins::kAsyncModuleEvaluate, 1, false); + async_module_evaluate_internal->shared().set_native(false); + native_context()->set_async_module_evaluate_internal( + *async_module_evaluate_internal); + + Handle<JSFunction> call_async_module_fulfilled = + SimpleCreateFunction(isolate(), factory()->empty_string(), + Builtins::kCallAsyncModuleFulfilled, 1, false); + native_context()->set_call_async_module_fulfilled( + *call_async_module_fulfilled); + + Handle<JSFunction> call_async_module_rejected = + SimpleCreateFunction(isolate(), factory()->empty_string(), + Builtins::kCallAsyncModuleRejected, 1, false); + native_context()->set_call_async_module_rejected( + *call_async_module_rejected); + } + // Create maps for generator functions and their prototypes. Store those // maps in the native context. The "prototype" property descriptor is // writable, non-enumerable, and non-configurable (as per ES6 draft @@ -1098,9 +1125,9 @@ void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name, PropertyAttributes attributes, Handle<AccessorPair> accessor_pair) { DescriptorArray descriptors = map->instance_descriptors(); - int idx = descriptors.SearchWithCache(isolate, *name, *map); + InternalIndex entry = descriptors.SearchWithCache(isolate, *name, *map); Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes); - descriptors.Replace(idx, &d); + descriptors.Replace(entry, &d); } } // namespace @@ -1274,8 +1301,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals( DCHECK(native_context() ->get(Context::GLOBAL_PROXY_INDEX) .IsUndefined(isolate()) || - native_context()->global_proxy() == *global_proxy); - native_context()->set_global_proxy(*global_proxy); + native_context()->global_proxy_object() == *global_proxy); + native_context()->set_global_proxy_object(*global_proxy); return global_object; } @@ -2432,7 +2459,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, { // -- R e g E x p // Builtin functions for RegExp.prototype. 
Handle<JSFunction> regexp_fun = InstallFunction( - isolate_, global, "RegExp", JS_REGEXP_TYPE, + isolate_, global, "RegExp", JS_REG_EXP_TYPE, JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize, JSRegExp::kInObjectFieldCount, factory->the_hole_value(), Builtins::kRegExpConstructor); @@ -2455,7 +2482,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Builtins::kRegExpPrototypeExec, 1, true); native_context()->set_regexp_exec_function(*fun); DCHECK_EQ(JSRegExp::kExecFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); } SimpleInstallGetter(isolate_, prototype, factory->dotAll_string(), @@ -2488,7 +2515,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Builtins::kRegExpPrototypeMatch, 1, true); native_context()->set_regexp_match_function(*fun); DCHECK_EQ(JSRegExp::kSymbolMatchFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); } { @@ -2497,7 +2524,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, "[Symbol.matchAll]", Builtins::kRegExpPrototypeMatchAll, 1, true); native_context()->set_regexp_match_all_function(*fun); DCHECK_EQ(JSRegExp::kSymbolMatchAllFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); } { @@ -2506,7 +2533,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Builtins::kRegExpPrototypeReplace, 2, false); native_context()->set_regexp_replace_function(*fun); DCHECK_EQ(JSRegExp::kSymbolReplaceFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); } { @@ -2515,7 +2542,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Builtins::kRegExpPrototypeSearch, 1, true); native_context()->set_regexp_search_function(*fun); DCHECK_EQ(JSRegExp::kSymbolSearchFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); } { @@ -2524,7 +2551,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Builtins::kRegExpPrototypeSplit, 2, false); native_context()->set_regexp_split_function(*fun); DCHECK_EQ(JSRegExp::kSymbolSplitFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); } Handle<Map> prototype_map(prototype->map(), isolate()); @@ -2616,7 +2643,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, { Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string()); - cell->set_value(Smi::FromInt(Isolate::kProtectorValid)); + cell->set_value(Smi::FromInt(Protectors::kProtectorValid)); native_context()->set_regexp_species_protector(*cell); } @@ -2647,7 +2674,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, true); Handle<JSFunction> regexp_string_iterator_function = CreateFunction( - isolate(), "RegExpStringIterator", JS_REGEXP_STRING_ITERATOR_TYPE, + isolate(), "RegExpStringIterator", JS_REG_EXP_STRING_ITERATOR_TYPE, JSRegExpStringIterator::kSize, 0, regexp_string_iterator_prototype, Builtins::kIllegal); regexp_string_iterator_function->shared().set_native(false); @@ -2886,7 +2913,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, { // -- D a t e T i m e F o r m a t Handle<JSFunction> date_time_format_constructor = InstallFunction( - isolate_, intl, "DateTimeFormat", JS_INTL_DATE_TIME_FORMAT_TYPE, + isolate_, intl, "DateTimeFormat", JS_DATE_TIME_FORMAT_TYPE, JSDateTimeFormat::kSize, 0, 
factory->the_hole_value(), Builtins::kDateTimeFormatConstructor); date_time_format_constructor->shared().set_length(0); @@ -2914,13 +2941,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, SimpleInstallGetter(isolate_, prototype, factory->format_string(), Builtins::kDateTimeFormatPrototypeFormat, false); + + SimpleInstallFunction(isolate_, prototype, "formatRange", + Builtins::kDateTimeFormatPrototypeFormatRange, 2, + false); + SimpleInstallFunction( + isolate_, prototype, "formatRangeToParts", + Builtins::kDateTimeFormatPrototypeFormatRangeToParts, 2, false); } { // -- N u m b e r F o r m a t - Handle<JSFunction> number_format_constructor = InstallFunction( - isolate_, intl, "NumberFormat", JS_INTL_NUMBER_FORMAT_TYPE, - JSNumberFormat::kSize, 0, factory->the_hole_value(), - Builtins::kNumberFormatConstructor); + Handle<JSFunction> number_format_constructor = + InstallFunction(isolate_, intl, "NumberFormat", JS_NUMBER_FORMAT_TYPE, + JSNumberFormat::kSize, 0, factory->the_hole_value(), + Builtins::kNumberFormatConstructor); number_format_constructor->shared().set_length(0); number_format_constructor->shared().DontAdaptArguments(); InstallWithIntrinsicDefaultProto( @@ -2949,8 +2983,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, { // -- C o l l a t o r Handle<JSFunction> collator_constructor = InstallFunction( - isolate_, intl, "Collator", JS_INTL_COLLATOR_TYPE, JSCollator::kSize, - 0, factory->the_hole_value(), Builtins::kCollatorConstructor); + isolate_, intl, "Collator", JS_COLLATOR_TYPE, JSCollator::kSize, 0, + factory->the_hole_value(), Builtins::kCollatorConstructor); collator_constructor->shared().DontAdaptArguments(); InstallWithIntrinsicDefaultProto(isolate_, collator_constructor, Context::INTL_COLLATOR_FUNCTION_INDEX); @@ -2974,7 +3008,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, { // -- V 8 B r e a k I t e r a t o r Handle<JSFunction> v8_break_iterator_constructor = InstallFunction( - isolate_, intl, "v8BreakIterator", JS_INTL_V8_BREAK_ITERATOR_TYPE, + isolate_, intl, "v8BreakIterator", JS_V8_BREAK_ITERATOR_TYPE, JSV8BreakIterator::kSize, 0, factory->the_hole_value(), Builtins::kV8BreakIteratorConstructor); v8_break_iterator_constructor->shared().DontAdaptArguments(); @@ -3009,11 +3043,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, } { // -- P l u r a l R u l e s - Handle<JSFunction> plural_rules_constructor = InstallFunction( - isolate_, intl, "PluralRules", JS_INTL_PLURAL_RULES_TYPE, - JSPluralRules::kSize, 0, factory->the_hole_value(), - Builtins::kPluralRulesConstructor); + Handle<JSFunction> plural_rules_constructor = + InstallFunction(isolate_, intl, "PluralRules", JS_PLURAL_RULES_TYPE, + JSPluralRules::kSize, 0, factory->the_hole_value(), + Builtins::kPluralRulesConstructor); plural_rules_constructor->shared().DontAdaptArguments(); + InstallWithIntrinsicDefaultProto( + isolate_, plural_rules_constructor, + Context::INTL_PLURAL_RULES_FUNCTION_INDEX); SimpleInstallFunction(isolate(), plural_rules_constructor, "supportedLocalesOf", @@ -3032,13 +3069,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Builtins::kPluralRulesPrototypeSelect, 1, false); } - { // -- R e l a t i v e T i m e F o r m a t e + { // -- R e l a t i v e T i m e F o r m a t Handle<JSFunction> relative_time_format_fun = InstallFunction( - isolate(), intl, "RelativeTimeFormat", - JS_INTL_RELATIVE_TIME_FORMAT_TYPE, JSRelativeTimeFormat::kSize, 0, - factory->the_hole_value(), 
Builtins::kRelativeTimeFormatConstructor); + isolate(), intl, "RelativeTimeFormat", JS_RELATIVE_TIME_FORMAT_TYPE, + JSRelativeTimeFormat::kSize, 0, factory->the_hole_value(), + Builtins::kRelativeTimeFormatConstructor); relative_time_format_fun->shared().set_length(0); relative_time_format_fun->shared().DontAdaptArguments(); + InstallWithIntrinsicDefaultProto( + isolate_, relative_time_format_fun, + Context::INTL_RELATIVE_TIME_FORMAT_FUNCTION_INDEX); SimpleInstallFunction( isolate(), relative_time_format_fun, "supportedLocalesOf", @@ -3063,12 +3103,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, } { // -- L i s t F o r m a t - Handle<JSFunction> list_format_fun = InstallFunction( - isolate(), intl, "ListFormat", JS_INTL_LIST_FORMAT_TYPE, - JSListFormat::kSize, 0, factory->the_hole_value(), - Builtins::kListFormatConstructor); + Handle<JSFunction> list_format_fun = + InstallFunction(isolate(), intl, "ListFormat", JS_LIST_FORMAT_TYPE, + JSListFormat::kSize, 0, factory->the_hole_value(), + Builtins::kListFormatConstructor); list_format_fun->shared().set_length(0); list_format_fun->shared().DontAdaptArguments(); + InstallWithIntrinsicDefaultProto( + isolate_, list_format_fun, Context::INTL_LIST_FORMAT_FUNCTION_INDEX); SimpleInstallFunction(isolate(), list_format_fun, "supportedLocalesOf", Builtins::kListFormatSupportedLocalesOf, 1, false); @@ -3091,7 +3133,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, { // -- L o c a l e Handle<JSFunction> locale_fun = InstallFunction( - isolate(), intl, "Locale", JS_INTL_LOCALE_TYPE, JSLocale::kSize, 0, + isolate(), intl, "Locale", JS_LOCALE_TYPE, JSLocale::kSize, 0, factory->the_hole_value(), Builtins::kLocaleConstructor); InstallWithIntrinsicDefaultProto(isolate(), locale_fun, Context::INTL_LOCALE_FUNCTION_INDEX); @@ -3394,7 +3436,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, isolate_, prototype, "set", Builtins::kMapPrototypeSet, 2, true); // Check that index of "set" function in JSCollection is correct. DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); native_context()->set_map_set(*map_set); Handle<JSFunction> map_has = SimpleInstallFunction( @@ -3490,7 +3532,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, isolate_, prototype, "add", Builtins::kSetPrototypeAdd, 1, true); // Check that index of "add" function in JSCollection is correct. DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); native_context()->set_set_add(*set_add); Handle<JSFunction> set_delete = SimpleInstallFunction( @@ -3523,6 +3565,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Handle<Map> map = factory->NewMap( JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize, TERMINAL_FAST_ELEMENTS_KIND, JSModuleNamespace::kInObjectFieldCount); + map->SetConstructor(native_context()->object_function()); Map::SetPrototype(isolate(), map, isolate_->factory()->null_value()); Map::EnsureDescriptorSlack(isolate_, map, 1); native_context()->set_js_module_namespace_map(*map); @@ -3593,7 +3636,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, isolate_, prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true); // Check that index of "set" function in JSWeakCollection is correct. 
DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); native_context()->set_weakmap_set(*weakmap_set); SimpleInstallFunction(isolate_, prototype, "has", @@ -3628,7 +3671,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, isolate_, prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true); // Check that index of "add" function in JSWeakCollection is correct. DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex, - prototype->map().LastAdded()); + prototype->map().LastAdded().as_int()); native_context()->set_weakset_add(*weakset_add); @@ -3748,7 +3791,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, Handle<String> arguments_string = factory->Arguments_string(); NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype( arguments_string, isolate_->initial_object_prototype(), - JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, 2, + JS_ARGUMENTS_OBJECT_TYPE, JSSloppyArgumentsObject::kSize, 2, Builtins::kIllegal, MUTABLE); Handle<JSFunction> function = factory->NewFunction(args); Handle<Map> map(function->initial_map(), isolate()); @@ -3805,8 +3848,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object, callee->set_setter(*poison); // Create the map. Allocate one in-object field for length. - Handle<Map> map = factory->NewMap( - JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, PACKED_ELEMENTS, 1); + Handle<Map> map = + factory->NewMap(JS_ARGUMENTS_OBJECT_TYPE, + JSStrictArgumentsObject::kSize, PACKED_ELEMENTS, 1); // Create the descriptor array for the arguments object. Map::EnsureDescriptorSlack(isolate_, map, 2); @@ -4265,16 +4309,14 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_chaining) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_nullish) +EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await) #ifdef V8_INTL_SUPPORT EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_add_calendar_numbering_system) -EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_bigint) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE( harmony_intl_dateformat_fractional_second_digits) -EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_quarter) -EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_datetime_style) -EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_numberformat_unified) +EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_other_calendars) #endif // V8_INTL_SUPPORT #undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE @@ -4419,34 +4461,20 @@ void Genesis::InitializeGlobal_harmony_promise_all_settled() { } } -#ifdef V8_INTL_SUPPORT - -void Genesis::InitializeGlobal_harmony_intl_date_format_range() { - if (!FLAG_harmony_intl_date_format_range) return; - - Handle<JSObject> intl = Handle<JSObject>::cast( - JSReceiver::GetProperty( - isolate(), - Handle<JSReceiver>(native_context()->global_object(), isolate()), - factory()->InternalizeUtf8String("Intl")) - .ToHandleChecked()); - - Handle<JSFunction> date_time_format_constructor = Handle<JSFunction>::cast( - JSReceiver::GetProperty( - isolate(), intl, factory()->InternalizeUtf8String("DateTimeFormat")) - .ToHandleChecked()); +void Genesis::InitializeGlobal_harmony_regexp_match_indices() { + if (!FLAG_harmony_regexp_match_indices) return; - Handle<JSObject> prototype( - JSObject::cast(date_time_format_constructor->prototype()), 
isolate_); - - SimpleInstallFunction(isolate_, prototype, "formatRange", - Builtins::kDateTimeFormatPrototypeFormatRange, 2, - false); - SimpleInstallFunction(isolate_, prototype, "formatRangeToParts", - Builtins::kDateTimeFormatPrototypeFormatRangeToParts, 2, - false); + // Add indices accessor to JSRegExpResult's initial map. + Handle<Map> initial_map(native_context()->regexp_result_map(), isolate()); + Descriptor d = Descriptor::AccessorConstant( + factory()->indices_string(), factory()->regexp_result_indices_accessor(), + NONE); + Map::EnsureDescriptorSlack(isolate(), initial_map, 1); + initial_map->AppendDescriptor(isolate(), &d); } +#ifdef V8_INTL_SUPPORT + void Genesis::InitializeGlobal_harmony_intl_segmenter() { if (!FLAG_harmony_intl_segmenter) return; Handle<JSObject> intl = Handle<JSObject>::cast( @@ -4457,10 +4485,12 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() { .ToHandleChecked()); Handle<JSFunction> segmenter_fun = InstallFunction( - isolate(), intl, "Segmenter", JS_INTL_SEGMENTER_TYPE, JSSegmenter::kSize, - 0, factory()->the_hole_value(), Builtins::kSegmenterConstructor); + isolate(), intl, "Segmenter", JS_SEGMENTER_TYPE, JSSegmenter::kSize, 0, + factory()->the_hole_value(), Builtins::kSegmenterConstructor); segmenter_fun->shared().set_length(0); segmenter_fun->shared().DontAdaptArguments(); + InstallWithIntrinsicDefaultProto(isolate_, segmenter_fun, + Context::INTL_SEGMENTER_FUNCTION_INDEX); SimpleInstallFunction(isolate(), segmenter_fun, "supportedLocalesOf", Builtins::kSegmenterSupportedLocalesOf, 1, false); @@ -4515,7 +4545,7 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() { isolate()->factory()->SegmentIterator_string()) .ToHandleChecked(); Handle<JSFunction> segment_iterator_fun = CreateFunction( - isolate(), name_string, JS_INTL_SEGMENT_ITERATOR_TYPE, + isolate(), name_string, JS_SEGMENT_ITERATOR_TYPE, JSSegmentIterator::kSize, 0, prototype, Builtins::kIllegal); segment_iterator_fun->shared().set_native(false); @@ -4900,42 +4930,10 @@ bool Genesis::InstallNatives() { // predefines the properties index, input, and groups). { // JSRegExpResult initial map. - - // Find global.Array.prototype to inherit from. - Handle<JSFunction> array_constructor(native_context()->array_function(), - isolate()); - Handle<JSObject> array_prototype( - JSObject::cast(array_constructor->instance_prototype()), isolate()); - - // Add initial map. - Handle<Map> initial_map = factory()->NewMap( - JS_ARRAY_TYPE, JSRegExpResult::kSize, TERMINAL_FAST_ELEMENTS_KIND, - JSRegExpResult::kInObjectPropertyCount); - initial_map->SetConstructor(*array_constructor); - - // Set prototype on map. - initial_map->set_has_non_instance_prototype(false); - Map::SetPrototype(isolate(), initial_map, array_prototype); - - // Update map with length accessor from Array and add "index", "input" and - // "groups". - Map::EnsureDescriptorSlack(isolate(), initial_map, - JSRegExpResult::kInObjectPropertyCount + 1); - - // length descriptor. 
- { - JSFunction array_function = native_context()->array_function(); - Handle<DescriptorArray> array_descriptors( - array_function.initial_map().instance_descriptors(), isolate()); - Handle<String> length = factory()->length_string(); - int old = array_descriptors->SearchWithCache( - isolate(), *length, array_function.initial_map()); - DCHECK_NE(old, DescriptorArray::kNotFound); - Descriptor d = Descriptor::AccessorConstant( - length, handle(array_descriptors->GetStrongValue(old), isolate()), - array_descriptors->GetDetails(old).attributes()); - initial_map->AppendDescriptor(isolate(), &d); - } + // Add additional slack to the initial map in case regexp_match_indices + // are enabled to account for the additional descriptor. + Handle<Map> initial_map = CreateInitialMapForArraySubclass( + JSRegExpResult::kSize, JSRegExpResult::kInObjectPropertyCount); // index descriptor. { @@ -4961,9 +4959,53 @@ bool Genesis::InstallNatives() { initial_map->AppendDescriptor(isolate(), &d); } + // Private internal only fields. All of the remaining fields have special + // symbols to prevent their use in Javascript. + // cached_indices_or_match_info descriptor. + { + PropertyAttributes attribs = DONT_ENUM; + { + Descriptor d = Descriptor::DataField( + isolate(), + factory()->regexp_result_cached_indices_or_match_info_symbol(), + JSRegExpResult::kCachedIndicesOrMatchInfoIndex, attribs, + Representation::Tagged()); + initial_map->AppendDescriptor(isolate(), &d); + } + + // names descriptor. + { + Descriptor d = Descriptor::DataField( + isolate(), factory()->regexp_result_names_symbol(), + JSRegExpResult::kNamesIndex, attribs, Representation::Tagged()); + initial_map->AppendDescriptor(isolate(), &d); + } + } + native_context()->set_regexp_result_map(*initial_map); } + // Create a constructor for JSRegExpResultIndices (a variant of Array that + // predefines the groups property). + { + // JSRegExpResultIndices initial map. + Handle<Map> initial_map = CreateInitialMapForArraySubclass( + JSRegExpResultIndices::kSize, + JSRegExpResultIndices::kInObjectPropertyCount); + + // groups descriptor. + { + Descriptor d = Descriptor::DataField( + isolate(), factory()->groups_string(), + JSRegExpResultIndices::kGroupsIndex, NONE, Representation::Tagged()); + initial_map->AppendDescriptor(isolate(), &d); + DCHECK_EQ(initial_map->LastAdded().as_int(), + JSRegExpResultIndices::kGroupsDescriptorIndex); + } + + native_context()->set_regexp_result_indices_map(*initial_map); + } + // Add @@iterator method to the arguments object maps. { PropertyAttributes attribs = DONT_ENUM; @@ -5263,7 +5305,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, if (from->HasFastProperties()) { Handle<DescriptorArray> descs = Handle<DescriptorArray>(from->map().instance_descriptors(), isolate()); - for (int i = 0; i < from->map().NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : from->map().IterateOwnDescriptors()) { PropertyDetails details = descs->GetDetails(i); if (details.location() == kField) { if (details.kind() == kData) { @@ -5365,6 +5407,45 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) { JSObject::ForceSetPrototype(to, proto); } +Handle<Map> Genesis::CreateInitialMapForArraySubclass(int size, + int inobject_properties) { + // Find global.Array.prototype to inherit from. + Handle<JSFunction> array_constructor(native_context()->array_function(), + isolate()); + Handle<JSObject> array_prototype(native_context()->initial_array_prototype(), + isolate()); + + // Add initial map. 
+ Handle<Map> initial_map = factory()->NewMap( + JS_ARRAY_TYPE, size, TERMINAL_FAST_ELEMENTS_KIND, inobject_properties); + initial_map->SetConstructor(*array_constructor); + + // Set prototype on map. + initial_map->set_has_non_instance_prototype(false); + Map::SetPrototype(isolate(), initial_map, array_prototype); + + // Update map with length accessor from Array. + static constexpr int kTheLengthAccessor = 1; + Map::EnsureDescriptorSlack(isolate(), initial_map, + inobject_properties + kTheLengthAccessor); + + // length descriptor. + { + JSFunction array_function = native_context()->array_function(); + Handle<DescriptorArray> array_descriptors( + array_function.initial_map().instance_descriptors(), isolate()); + Handle<String> length = factory()->length_string(); + InternalIndex old = array_descriptors->SearchWithCache( + isolate(), *length, array_function.initial_map()); + DCHECK(old.is_found()); + Descriptor d = Descriptor::AccessorConstant( + length, handle(array_descriptors->GetStrongValue(old), isolate()), + array_descriptors->GetDetails(old).attributes()); + initial_map->AppendDescriptor(isolate(), &d); + } + return initial_map; +} + Genesis::Genesis( Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy, v8::Local<v8::ObjectTemplate> global_proxy_template, diff --git a/chromium/v8/src/init/heap-symbols.h b/chromium/v8/src/init/heap-symbols.h index ce5a4f1a8b2..9d630f547bd 100644 --- a/chromium/v8/src/init/heap-symbols.h +++ b/chromium/v8/src/init/heap-symbols.h @@ -82,6 +82,7 @@ V(_, plusSign_string, "plusSign") \ V(_, quarter_string, "quarter") \ V(_, region_string, "region") \ + V(_, relatedYear_string, "relatedYear") \ V(_, scientific_string, "scientific") \ V(_, second_string, "second") \ V(_, segment_string, "segment") \ @@ -107,7 +108,8 @@ V(_, unit_string, "unit") \ V(_, unitDisplay_string, "unitDisplay") \ V(_, weekday_string, "weekday") \ - V(_, year_string, "year") + V(_, year_string, "year") \ + V(_, yearName_string, "yearName") #else // V8_INTL_SUPPORT #define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) #endif // V8_INTL_SUPPORT @@ -202,6 +204,7 @@ V(_, illegal_access_string, "illegal access") \ V(_, illegal_argument_string, "illegal argument") \ V(_, index_string, "index") \ + V(_, indices_string, "indices") \ V(_, Infinity_string, "Infinity") \ V(_, infinity_string, "infinity") \ V(_, input_string, "input") \ @@ -209,6 +212,8 @@ V(_, Int32Array_string, "Int32Array") \ V(_, Int8Array_string, "Int8Array") \ V(_, isExtensible_string, "isExtensible") \ + V(_, jsMemoryEstimate_string, "jsMemoryEstimate") \ + V(_, jsMemoryRange_string, "jsMemoryRange") \ V(_, keys_string, "keys") \ V(_, lastIndex_string, "lastIndex") \ V(_, length_string, "length") \ @@ -299,6 +304,7 @@ V(_, toJSON_string, "toJSON") \ V(_, toString_string, "toString") \ V(_, true_string, "true") \ + V(_, total_string, "total") \ V(_, TypeError_string, "TypeError") \ V(_, Uint16Array_string, "Uint16Array") \ V(_, Uint32Array_string, "Uint32Array") \ @@ -318,35 +324,36 @@ V(_, writable_string, "writable") \ V(_, zero_string, "0") -#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \ - V(_, call_site_frame_array_symbol) \ - V(_, call_site_frame_index_symbol) \ - V(_, console_context_id_symbol) \ - V(_, console_context_name_symbol) \ - V(_, class_fields_symbol) \ - V(_, class_positions_symbol) \ - V(_, detailed_stack_trace_symbol) \ - V(_, elements_transition_symbol) \ - V(_, error_end_pos_symbol) \ - V(_, error_script_symbol) \ - V(_, error_start_pos_symbol) \ - V(_, frozen_symbol) \ - V(_, 
generic_symbol) \ - V(_, home_object_symbol) \ - V(_, interpreter_trampoline_symbol) \ - V(_, megamorphic_symbol) \ - V(_, native_context_index_symbol) \ - V(_, nonextensible_symbol) \ - V(_, not_mapped_symbol) \ - V(_, premonomorphic_symbol) \ - V(_, promise_debug_marker_symbol) \ - V(_, promise_forwarding_handler_symbol) \ - V(_, promise_handled_by_symbol) \ - V(_, sealed_symbol) \ - V(_, stack_trace_symbol) \ - V(_, strict_function_transition_symbol) \ - V(_, wasm_exception_tag_symbol) \ - V(_, wasm_exception_values_symbol) \ +#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \ + V(_, call_site_frame_array_symbol) \ + V(_, call_site_frame_index_symbol) \ + V(_, console_context_id_symbol) \ + V(_, console_context_name_symbol) \ + V(_, class_fields_symbol) \ + V(_, class_positions_symbol) \ + V(_, detailed_stack_trace_symbol) \ + V(_, elements_transition_symbol) \ + V(_, error_end_pos_symbol) \ + V(_, error_script_symbol) \ + V(_, error_start_pos_symbol) \ + V(_, frozen_symbol) \ + V(_, generic_symbol) \ + V(_, home_object_symbol) \ + V(_, interpreter_trampoline_symbol) \ + V(_, megamorphic_symbol) \ + V(_, native_context_index_symbol) \ + V(_, nonextensible_symbol) \ + V(_, not_mapped_symbol) \ + V(_, promise_debug_marker_symbol) \ + V(_, promise_forwarding_handler_symbol) \ + V(_, promise_handled_by_symbol) \ + V(_, regexp_result_cached_indices_or_match_info_symbol) \ + V(_, regexp_result_names_symbol) \ + V(_, sealed_symbol) \ + V(_, stack_trace_symbol) \ + V(_, strict_function_transition_symbol) \ + V(_, wasm_exception_tag_symbol) \ + V(_, wasm_exception_values_symbol) \ V(_, uninitialized_symbol) #define PUBLIC_SYMBOL_LIST_GENERATOR(V, _) \ diff --git a/chromium/v8/src/init/icu_util.cc b/chromium/v8/src/init/icu_util.cc index 81c66e6a20f..22ea3837cde 100644 --- a/chromium/v8/src/init/icu_util.cc +++ b/chromium/v8/src/init/icu_util.cc @@ -40,26 +40,23 @@ bool InitializeICUDefaultLocation(const char* exec_path, const char* icu_data_file) { #if !defined(V8_INTL_SUPPORT) return true; -#else -#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE +#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE if (icu_data_file) { return InitializeICU(icu_data_file); } - char* icu_data_file_default; #if defined(V8_TARGET_LITTLE_ENDIAN) - base::RelativePath(&icu_data_file_default, exec_path, "icudtl.dat"); + std::unique_ptr<char[]> icu_data_file_default = + base::RelativePath(exec_path, "icudtl.dat"); #elif defined(V8_TARGET_BIG_ENDIAN) - base::RelativePath(&icu_data_file_default, exec_path, "icudtb.dat"); + std::unique_ptr<char[]> icu_data_file_default = + base::RelativePath(exec_path, "icudtb.dat"); #else #error Unknown byte ordering #endif - bool result = InitializeICU(icu_data_file_default); - free(icu_data_file_default); - return result; + return InitializeICU(icu_data_file_default.get()); #else return InitializeICU(nullptr); #endif -#endif } bool InitializeICU(const char* icu_data_file) { diff --git a/chromium/v8/src/init/isolate-allocator.cc b/chromium/v8/src/init/isolate-allocator.cc index 6a9b4c33cda..b9ec6c3f43b 100644 --- a/chromium/v8/src/init/isolate-allocator.cc +++ b/chromium/v8/src/init/isolate-allocator.cc @@ -6,6 +6,7 @@ #include "src/base/bounded-page-allocator.h" #include "src/common/ptr-compr.h" #include "src/execution/isolate.h" +#include "src/utils/memcopy.h" #include "src/utils/utils.h" namespace v8 { @@ -38,21 +39,39 @@ IsolateAllocator::~IsolateAllocator() { } #if V8_TARGET_ARCH_64_BIT + +namespace { + +// "IsolateRootBiasPage" is an optional region before the 4Gb aligned +// reservation. 
This "IsolateRootBiasPage" page is supposed to be used for +// storing part of the Isolate object when Isolate::isolate_root_bias() is +// not zero. +inline size_t GetIsolateRootBiasPageSize( + v8::PageAllocator* platform_page_allocator) { + return RoundUp(Isolate::isolate_root_bias(), + platform_page_allocator->AllocatePageSize()); +} + +} // namespace + Address IsolateAllocator::InitReservation() { v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator(); - // Reserve a 4Gb region so that the middle is 4Gb aligned. - // The VirtualMemory API does not support such an constraint so we have to - // implement it manually here. - size_t reservation_size = kPtrComprHeapReservationSize; - size_t base_alignment = kPtrComprIsolateRootAlignment; + const size_t kIsolateRootBiasPageSize = + GetIsolateRootBiasPageSize(platform_page_allocator); + + // Reserve a |4Gb + kIsolateRootBiasPageSize| region such as that the + // resevation address plus |kIsolateRootBiasPageSize| is 4Gb aligned. + const size_t reservation_size = + kPtrComprHeapReservationSize + kIsolateRootBiasPageSize; + const size_t base_alignment = kPtrComprIsolateRootAlignment; const int kMaxAttempts = 4; for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { Address hint = RoundDown(reinterpret_cast<Address>( platform_page_allocator->GetRandomMmapAddr()), - base_alignment) + - kPtrComprIsolateRootBias; + base_alignment) - + kIsolateRootBiasPageSize; // Within this reservation there will be a sub-region with proper alignment. VirtualMemory padded_reservation(platform_page_allocator, @@ -60,12 +79,11 @@ Address IsolateAllocator::InitReservation() { reinterpret_cast<void*>(hint)); if (!padded_reservation.IsReserved()) break; - // Find such a sub-region inside the reservation that it's middle is - // |base_alignment|-aligned. + // Find properly aligned sub-region inside the reservation. Address address = - RoundUp(padded_reservation.address() + kPtrComprIsolateRootBias, + RoundUp(padded_reservation.address() + kIsolateRootBiasPageSize, base_alignment) - - kPtrComprIsolateRootBias; + kIsolateRootBiasPageSize; CHECK(padded_reservation.InVM(address, reservation_size)); #if defined(V8_OS_FUCHSIA) @@ -98,16 +116,16 @@ Address IsolateAllocator::InitReservation() { if (!reservation.IsReserved()) break; // The reservation could still be somewhere else but we can accept it - // if the reservation has the required alignment. - Address aligned_address = - RoundUp(reservation.address() + kPtrComprIsolateRootBias, + // if it has the required alignment. 
+ Address address = + RoundUp(reservation.address() + kIsolateRootBiasPageSize, base_alignment) - - kPtrComprIsolateRootBias; + kIsolateRootBiasPageSize; - if (reservation.address() == aligned_address) { + if (reservation.address() == address) { reservation_ = std::move(reservation); CHECK_EQ(reservation_.size(), reservation_size); - return aligned_address; + return address; } } } @@ -116,21 +134,26 @@ Address IsolateAllocator::InitReservation() { return kNullAddress; } -void IsolateAllocator::CommitPagesForIsolate(Address heap_address) { - CHECK(reservation_.InVM(heap_address, kPtrComprHeapReservationSize)); +void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) { + v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator(); + + const size_t kIsolateRootBiasPageSize = + GetIsolateRootBiasPageSize(platform_page_allocator); - Address isolate_root = heap_address + kPtrComprIsolateRootBias; + Address isolate_root = heap_reservation_address + kIsolateRootBiasPageSize; CHECK(IsAligned(isolate_root, kPtrComprIsolateRootAlignment)); - v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator(); + CHECK(reservation_.InVM( + heap_reservation_address, + kPtrComprHeapReservationSize + kIsolateRootBiasPageSize)); // Simplify BoundedPageAllocator's life by configuring it to use same page // size as the Heap will use (MemoryChunk::kPageSize). size_t page_size = RoundUp(size_t{1} << kPageSizeBits, platform_page_allocator->AllocatePageSize()); - page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>( - platform_page_allocator, heap_address, kPtrComprHeapReservationSize, + page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>( + platform_page_allocator, isolate_root, kPtrComprHeapReservationSize, page_size); page_allocator_ = page_allocator_instance_.get(); @@ -139,7 +162,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) { // Inform the bounded page allocator about reserved pages. { - Address reserved_region_address = RoundDown(isolate_address, page_size); + Address reserved_region_address = isolate_root; size_t reserved_region_size = RoundUp(isolate_end, page_size) - reserved_region_address; @@ -163,10 +186,8 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) { PageAllocator::kReadWrite)); if (Heap::ShouldZapGarbage()) { - for (Address address = committed_region_address; - address < committed_region_size; address += kSystemPointerSize) { - base::Memory<Address>(address) = static_cast<Address>(kZapValue); - } + MemsetPointer(reinterpret_cast<Address*>(committed_region_address), + kZapValue, committed_region_size / kSystemPointerSize); } } isolate_memory_ = reinterpret_cast<void*>(isolate_address); diff --git a/chromium/v8/src/init/isolate-allocator.h b/chromium/v8/src/init/isolate-allocator.h index cd0e102d40a..5f8b48ef3a3 100644 --- a/chromium/v8/src/init/isolate-allocator.h +++ b/chromium/v8/src/init/isolate-allocator.h @@ -5,6 +5,8 @@ #ifndef V8_INIT_ISOLATE_ALLOCATOR_H_ #define V8_INIT_ISOLATE_ALLOCATOR_H_ +#include <memory> + #include "src/base/bounded-page-allocator.h" #include "src/base/page-allocator.h" #include "src/common/globals.h" @@ -46,7 +48,7 @@ class V8_EXPORT_PRIVATE IsolateAllocator final { private: Address InitReservation(); - void CommitPagesForIsolate(Address heap_address); + void CommitPagesForIsolate(Address heap_reservation_address); // The allocated memory for Isolate instance. 
void* isolate_memory_ = nullptr; diff --git a/chromium/v8/src/init/startup-data-util.cc b/chromium/v8/src/init/startup-data-util.cc index 54d697c591d..d234c152f88 100644 --- a/chromium/v8/src/init/startup-data-util.cc +++ b/chromium/v8/src/init/startup-data-util.cc @@ -38,6 +38,10 @@ void FreeStartupData() { DeleteStartupData(&g_snapshot); } +// TODO(jgruber): Rename to FreeStartupData once natives support has been +// removed (https://crbug.com/v8/7624). +void FreeStartupDataSnapshotOnly() { DeleteStartupData(&g_snapshot); } + void Load(const char* blob_file, v8::StartupData* startup_data, void (*setter_fn)(v8::StartupData*)) { ClearStartupData(startup_data); @@ -67,7 +71,7 @@ void Load(const char* blob_file, v8::StartupData* startup_data, } void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) { - Load(natives_blob, &g_natives, v8::V8::SetNativesDataBlob); + Load(natives_blob, &g_natives, i::V8::SetNativesBlob); Load(snapshot_blob, &g_snapshot, v8::V8::SetSnapshotDataBlob); atexit(&FreeStartupData); @@ -78,19 +82,17 @@ void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) { void InitializeExternalStartupData(const char* directory_path) { #ifdef V8_USE_EXTERNAL_STARTUP_DATA - char* natives; - char* snapshot; const char* snapshot_name = "snapshot_blob.bin"; #ifdef V8_MULTI_SNAPSHOTS if (!FLAG_untrusted_code_mitigations) { snapshot_name = "snapshot_blob_trusted.bin"; } #endif - LoadFromFiles( - base::RelativePath(&natives, directory_path, "natives_blob.bin"), - base::RelativePath(&snapshot, directory_path, snapshot_name)); - free(natives); - free(snapshot); + std::unique_ptr<char[]> natives = + base::RelativePath(directory_path, "natives_blob.bin"); + std::unique_ptr<char[]> snapshot = + base::RelativePath(directory_path, snapshot_name); + LoadFromFiles(natives.get(), snapshot.get()); #endif // V8_USE_EXTERNAL_STARTUP_DATA } @@ -101,5 +103,12 @@ void InitializeExternalStartupData(const char* natives_blob, #endif // V8_USE_EXTERNAL_STARTUP_DATA } +void InitializeExternalStartupDataFromFile(const char* snapshot_blob) { +#ifdef V8_USE_EXTERNAL_STARTUP_DATA + Load(snapshot_blob, &g_snapshot, v8::V8::SetSnapshotDataBlob); + atexit(&FreeStartupDataSnapshotOnly); +#endif // V8_USE_EXTERNAL_STARTUP_DATA +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/init/startup-data-util.h b/chromium/v8/src/init/startup-data-util.h index dfa26510abd..e4d1e540f76 100644 --- a/chromium/v8/src/init/startup-data-util.h +++ b/chromium/v8/src/init/startup-data-util.h @@ -21,6 +21,7 @@ void InitializeExternalStartupData(const char* directory_path); void InitializeExternalStartupData(const char* natives_blob, const char* snapshot_blob); +void InitializeExternalStartupDataFromFile(const char* snapshot_blob); } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/init/v8.cc b/chromium/v8/src/init/v8.cc index 15eb929332a..fd26c60848e 100644 --- a/chromium/v8/src/init/v8.cc +++ b/chromium/v8/src/init/v8.cc @@ -90,6 +90,12 @@ void V8::InitializeOncePerProcessImpl() { FLAG_expose_wasm = false; } + if (FLAG_regexp_interpret_all && FLAG_regexp_tier_up) { + // Turning off the tier-up strategy, because the --regexp-interpret-all and + // --regexp-tier-up flags are incompatible. + FLAG_regexp_tier_up = false; + } + // The --jitless and --interpreted-frames-native-stack flags are incompatible // since the latter requires code generation while the former prohibits code // generation. 
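(Editor's aside, not part of the diff: the src/init/isolate-allocator.cc hunk above reserves a |4 GB + bias-page| region and then picks a start address such that the address plus the bias page is 4 GB aligned, so the isolate root keeps the alignment pointer compression needs. The short, self-contained C++ sketch below illustrates only that rounding arithmetic. RoundDown/RoundUp are simplified stand-ins for V8's helpers, and kBaseAlignment, kBiasPageSize and reservation_start are made-up example values, not the real constants from the patch.)

// Standalone sketch of the alignment arithmetic used by the patched
// InitReservation(): assumed, illustrative values only.
#include <cstdint>
#include <cstdio>

// Power-of-two rounding helpers, simplified stand-ins for V8's utilities.
constexpr uint64_t RoundDown(uint64_t x, uint64_t alignment) {
  return x & ~(alignment - 1);
}
constexpr uint64_t RoundUp(uint64_t x, uint64_t alignment) {
  return RoundDown(x + alignment - 1, alignment);
}

int main() {
  const uint64_t kBaseAlignment = uint64_t{4} << 30;  // 4 GB, power of two
  const uint64_t kBiasPageSize = 64 * 1024;           // hypothetical bias page
  // Pretend the OS handed back this |4 GB + bias-page| sized reservation.
  const uint64_t reservation_start = 0x123456789000;
  // Pick the sub-region start so that (start + bias page) is 4 GB aligned,
  // mirroring the RoundUp(...) - kIsolateRootBiasPageSize expression above.
  const uint64_t address =
      RoundUp(reservation_start + kBiasPageSize, kBaseAlignment) -
      kBiasPageSize;
  const uint64_t isolate_root = address + kBiasPageSize;
  std::printf("isolate_root = %llx, 4GB-aligned: %s\n",
              static_cast<unsigned long long>(isolate_root),
              isolate_root % kBaseAlignment == 0 ? "yes" : "no");
}

The design point of the change is that the optional bias page is carved out in front of the 4 GB-aligned isolate root, rather than biasing into a plain 4 GB reservation as the removed kPtrComprIsolateRootBias-based code did.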
diff --git a/chromium/v8/src/inspector/custom-preview.h b/chromium/v8/src/inspector/custom-preview.h index 1e8c74a154c..d7b24adcce3 100644 --- a/chromium/v8/src/inspector/custom-preview.h +++ b/chromium/v8/src/inspector/custom-preview.h @@ -5,6 +5,8 @@ #ifndef V8_INSPECTOR_CUSTOM_PREVIEW_H_ #define V8_INSPECTOR_CUSTOM_PREVIEW_H_ +#include <memory> + #include "src/inspector/protocol/Protocol.h" #include "src/inspector/protocol/Runtime.h" diff --git a/chromium/v8/src/inspector/injected-script.cc b/chromium/v8/src/inspector/injected-script.cc index 18a10285dd0..6afc6486e42 100644 --- a/chromium/v8/src/inspector/injected-script.cc +++ b/chromium/v8/src/inspector/injected-script.cc @@ -289,7 +289,7 @@ Response InjectedScript::getProperties( int sessionId = m_sessionId; v8::TryCatch tryCatch(isolate); - *properties = v8::base::make_unique<Array<PropertyDescriptor>>(); + *properties = std::make_unique<Array<PropertyDescriptor>>(); std::vector<PropertyMirror> mirrors; PropertyAccumulator accumulator(&mirrors); if (!ValueMirror::getProperties(context, object, ownProperties, @@ -366,10 +366,8 @@ Response InjectedScript::getInternalAndPrivateProperties( internalProperties, std::unique_ptr<protocol::Array<PrivatePropertyDescriptor>>* privateProperties) { - *internalProperties = - v8::base::make_unique<Array<InternalPropertyDescriptor>>(); - *privateProperties = - v8::base::make_unique<Array<PrivatePropertyDescriptor>>(); + *internalProperties = std::make_unique<Array<InternalPropertyDescriptor>>(); + *privateProperties = std::make_unique<Array<PrivatePropertyDescriptor>>(); if (!value->IsObject()) return Response::OK(); @@ -521,7 +519,7 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable( if (columnSet.find(property->getName()) == columnSet.end()) continue; columnMap[property->getName()] = property.get(); } - auto filtered = v8::base::make_unique<Array<PropertyPreview>>(); + auto filtered = std::make_unique<Array<PropertyPreview>>(); for (const String16& column : selectedColumns) { if (columnMap.find(column) == columnMap.end()) continue; filtered->push_back(columnMap[column]->clone()); diff --git a/chromium/v8/src/inspector/injected-script.h b/chromium/v8/src/inspector/injected-script.h index d007e9121ec..080769f7121 100644 --- a/chromium/v8/src/inspector/injected-script.h +++ b/chromium/v8/src/inspector/injected-script.h @@ -31,6 +31,7 @@ #ifndef V8_INSPECTOR_INJECTED_SCRIPT_H_ #define V8_INSPECTOR_INJECTED_SCRIPT_H_ +#include <memory> #include <unordered_map> #include <unordered_set> diff --git a/chromium/v8/src/inspector/inspected-context.cc b/chromium/v8/src/inspector/inspected-context.cc index 8098aa5cacc..03a799cb5b4 100644 --- a/chromium/v8/src/inspector/inspected-context.cc +++ b/chromium/v8/src/inspector/inspected-context.cc @@ -112,7 +112,7 @@ InjectedScript* InspectedContext::getInjectedScript(int sessionId) { InjectedScript* InspectedContext::createInjectedScript(int sessionId) { std::unique_ptr<InjectedScript> injectedScript = - v8::base::make_unique<InjectedScript>(this, sessionId); + std::make_unique<InjectedScript>(this, sessionId); CHECK(m_injectedScripts.find(sessionId) == m_injectedScripts.end()); m_injectedScripts[sessionId] = std::move(injectedScript); return getInjectedScript(sessionId); diff --git a/chromium/v8/src/inspector/inspected-context.h b/chromium/v8/src/inspector/inspected-context.h index 4ec52dc1e4f..68b865de672 100644 --- a/chromium/v8/src/inspector/inspected-context.h +++ b/chromium/v8/src/inspector/inspected-context.h @@ -5,6 +5,7 @@ 
#ifndef V8_INSPECTOR_INSPECTED_CONTEXT_H_ #define V8_INSPECTOR_INSPECTED_CONTEXT_H_ +#include <memory> #include <unordered_map> #include <unordered_set> diff --git a/chromium/v8/src/inspector/remote-object-id.h b/chromium/v8/src/inspector/remote-object-id.h index b199032359e..5a35c13e58f 100644 --- a/chromium/v8/src/inspector/remote-object-id.h +++ b/chromium/v8/src/inspector/remote-object-id.h @@ -5,6 +5,8 @@ #ifndef V8_INSPECTOR_REMOTE_OBJECT_ID_H_ #define V8_INSPECTOR_REMOTE_OBJECT_ID_H_ +#include <memory> + #include "src/inspector/protocol/Forward.h" namespace v8_inspector { diff --git a/chromium/v8/src/inspector/search-util.h b/chromium/v8/src/inspector/search-util.h index 3c8a9fe31ca..5958a404f04 100644 --- a/chromium/v8/src/inspector/search-util.h +++ b/chromium/v8/src/inspector/search-util.h @@ -5,6 +5,8 @@ #ifndef V8_INSPECTOR_SEARCH_UTIL_H_ #define V8_INSPECTOR_SEARCH_UTIL_H_ +#include <memory> + #include "src/inspector/protocol/Debugger.h" #include "src/inspector/string-util.h" diff --git a/chromium/v8/src/inspector/string-16.cc b/chromium/v8/src/inspector/string-16.cc index 3a91169ac8e..3adffeddf1c 100644 --- a/chromium/v8/src/inspector/string-16.cc +++ b/chromium/v8/src/inspector/string-16.cc @@ -84,6 +84,13 @@ String16 String16::fromInteger(size_t number) { } // static +String16 String16::fromInteger64(int64_t number) { + char buffer[50]; + v8::base::OS::SNPrintF(buffer, arraysize(buffer), "%" PRId64 "", number); + return String16(buffer); +} + +// static String16 String16::fromDouble(double number) { char arr[50]; v8::internal::Vector<char> buffer(arr, arraysize(arr)); diff --git a/chromium/v8/src/inspector/string-16.h b/chromium/v8/src/inspector/string-16.h index c1dd5cb9291..910a2e49c63 100644 --- a/chromium/v8/src/inspector/string-16.h +++ b/chromium/v8/src/inspector/string-16.h @@ -37,6 +37,7 @@ class String16 { static String16 fromInteger(int); static String16 fromInteger(size_t); + static String16 fromInteger64(int64_t); static String16 fromDouble(double); static String16 fromDouble(double, int precision); diff --git a/chromium/v8/src/inspector/string-util.cc b/chromium/v8/src/inspector/string-util.cc index 20c8951e2a8..bae5dd91a90 100644 --- a/chromium/v8/src/inspector/string-util.cc +++ b/chromium/v8/src/inspector/string-util.cc @@ -169,15 +169,6 @@ StringBufferImpl::StringBufferImpl(String16& string) { m_string = toStringView(m_owner); } -String16 debuggerIdToString(const std::pair<int64_t, int64_t>& debuggerId) { - const size_t kBufferSize = 35; - - char buffer[kBufferSize]; - v8::base::OS::SNPrintF(buffer, kBufferSize, "(%08" PRIX64 "%08" PRIX64 ")", - debuggerId.first, debuggerId.second); - return String16(buffer); -} - String16 stackTraceIdToString(uintptr_t id) { String16Builder builder; builder.appendNumber(static_cast<size_t>(id)); diff --git a/chromium/v8/src/inspector/string-util.h b/chromium/v8/src/inspector/string-util.h index 513f436136e..9b6a8bdd5d7 100644 --- a/chromium/v8/src/inspector/string-util.h +++ b/chromium/v8/src/inspector/string-util.h @@ -101,13 +101,23 @@ class StringUtil { // therefore it's unnecessary to provide an implementation here. 
class Binary { public: - const uint8_t* data() const { UNIMPLEMENTED(); } - size_t size() const { UNIMPLEMENTED(); } + Binary() = default; + + const uint8_t* data() const { return bytes_->data(); } + size_t size() const { return bytes_->size(); } String toBase64() const { UNIMPLEMENTED(); } static Binary fromBase64(const String& base64, bool* success) { UNIMPLEMENTED(); } - static Binary fromSpan(const uint8_t* data, size_t size) { UNIMPLEMENTED(); } + static Binary fromSpan(const uint8_t* data, size_t size) { + return Binary(std::make_shared<std::vector<uint8_t>>(data, data + size)); + } + + private: + std::shared_ptr<std::vector<uint8_t>> bytes_; + + explicit Binary(std::shared_ptr<std::vector<uint8_t>> bytes) + : bytes_(bytes) {} }; } // namespace protocol @@ -149,7 +159,6 @@ class BinaryStringBuffer : public StringBuffer { DISALLOW_COPY_AND_ASSIGN(BinaryStringBuffer); }; -String16 debuggerIdToString(const std::pair<int64_t, int64_t>& debuggerId); String16 stackTraceIdToString(uintptr_t id); } // namespace v8_inspector diff --git a/chromium/v8/src/inspector/v8-console-message.cc b/chromium/v8/src/inspector/v8-console-message.cc index 458e4d40279..e4c678a2729 100644 --- a/chromium/v8/src/inspector/v8-console-message.cc +++ b/chromium/v8/src/inspector/v8-console-message.cc @@ -258,7 +258,7 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session, v8::Local<v8::Context> context = inspectedContext->context(); auto args = - v8::base::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>(); + std::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>(); v8::Local<v8::Value> value = m_arguments[0]->Get(isolate); if (value->IsObject() && m_type == ConsoleAPIType::kTable && @@ -341,8 +341,8 @@ void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend, arguments = wrapArguments(session, generatePreview); if (!inspector->hasConsoleMessageStorage(contextGroupId)) return; if (!arguments) { - arguments = v8::base::make_unique< - protocol::Array<protocol::Runtime::RemoteObject>>(); + arguments = + std::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>(); if (!m_message.isEmpty()) { std::unique_ptr<protocol::Runtime::RemoteObject> messageArg = protocol::Runtime::RemoteObject::create() diff --git a/chromium/v8/src/inspector/v8-console-message.h b/chromium/v8/src/inspector/v8-console-message.h index cca5b47265f..04bd10ff73f 100644 --- a/chromium/v8/src/inspector/v8-console-message.h +++ b/chromium/v8/src/inspector/v8-console-message.h @@ -7,7 +7,9 @@ #include <deque> #include <map> +#include <memory> #include <set> + #include "include/v8.h" #include "src/inspector/protocol/Console.h" #include "src/inspector/protocol/Forward.h" diff --git a/chromium/v8/src/inspector/v8-console.cc b/chromium/v8/src/inspector/v8-console.cc index 0f476f23161..f4d0ffa0550 100644 --- a/chromium/v8/src/inspector/v8-console.cc +++ b/chromium/v8/src/inspector/v8-console.cc @@ -691,7 +691,7 @@ v8::Local<v8::Object> V8Console::createCommandLineAPI( v8::Local<v8::ArrayBuffer> data = v8::ArrayBuffer::New(isolate, sizeof(CommandLineAPIData)); - *static_cast<CommandLineAPIData*>(data->GetContents().Data()) = + *static_cast<CommandLineAPIData*>(data->GetBackingStore()->Data()) = CommandLineAPIData(this, sessionId); createBoundFunctionProperty(context, commandLineAPI, data, "dir", &V8Console::call<&V8Console::Dir>, diff --git a/chromium/v8/src/inspector/v8-console.h b/chromium/v8/src/inspector/v8-console.h index 03d89ced109..4d38c51a2a2 100644 --- 
a/chromium/v8/src/inspector/v8-console.h +++ b/chromium/v8/src/inspector/v8-console.h @@ -106,14 +106,14 @@ class V8Console : public v8::debug::ConsoleDelegate { int)> static void call(const v8::FunctionCallbackInfo<v8::Value>& info) { CommandLineAPIData* data = static_cast<CommandLineAPIData*>( - info.Data().As<v8::ArrayBuffer>()->GetContents().Data()); + info.Data().As<v8::ArrayBuffer>()->GetBackingStore()->Data()); (data->first->*func)(info, data->second); } template <void (V8Console::*func)(const v8::debug::ConsoleCallArguments&, const v8::debug::ConsoleContext&)> static void call(const v8::FunctionCallbackInfo<v8::Value>& info) { CommandLineAPIData* data = static_cast<CommandLineAPIData*>( - info.Data().As<v8::ArrayBuffer>()->GetContents().Data()); + info.Data().As<v8::ArrayBuffer>()->GetBackingStore()->Data()); v8::debug::ConsoleCallArguments args(info); (data->first->*func)(args, v8::debug::ConsoleContext()); } diff --git a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc index e5458823ea7..18bf43fbbc8 100644 --- a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc +++ b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc @@ -262,7 +262,7 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type) { Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator, InjectedScript* injectedScript, std::unique_ptr<Array<Scope>>* scopes) { - *scopes = v8::base::make_unique<Array<Scope>>(); + *scopes = std::make_unique<Array<Scope>>(); if (!injectedScript) return Response::OK(); if (iterator->Done()) return Response::OK(); @@ -353,8 +353,8 @@ Response V8DebuggerAgentImpl::enable(Maybe<double> maxScriptsCacheSize, String16* outDebuggerId) { m_maxScriptCacheSize = v8::base::saturated_cast<size_t>( maxScriptsCacheSize.fromMaybe(std::numeric_limits<double>::max())); - *outDebuggerId = debuggerIdToString( - m_debugger->debuggerIdFor(m_session->contextGroupId())); + *outDebuggerId = + m_debugger->debuggerIdFor(m_session->contextGroupId()).toString(); if (enabled()) return Response::OK(); if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId())) @@ -472,7 +472,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl( Maybe<int> optionalColumnNumber, Maybe<String16> optionalCondition, String16* outBreakpointId, std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) { - *locations = v8::base::make_unique<Array<protocol::Debugger::Location>>(); + *locations = std::make_unique<Array<protocol::Debugger::Location>>(); int specified = (optionalURL.isJust() ? 1 : 0) + (optionalURLRegex.isJust() ? 
1 : 0) + @@ -708,8 +708,8 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints( v8Start, v8End, restrictToFunction.fromMaybe(false), &v8Locations); } - *locations = v8::base::make_unique< - protocol::Array<protocol::Debugger::BreakLocation>>(); + *locations = + std::make_unique<protocol::Array<protocol::Debugger::BreakLocation>>(); for (size_t i = 0; i < v8Locations.size(); ++i) { std::unique_ptr<protocol::Debugger::BreakLocation> breakLocation = protocol::Debugger::BreakLocation::create() @@ -752,17 +752,19 @@ Response V8DebuggerAgentImpl::getStackTrace( std::unique_ptr<protocol::Runtime::StackTrace>* outStackTrace) { bool isOk = false; int64_t id = inStackTraceId->getId().toInteger64(&isOk); - std::pair<int64_t, int64_t> debuggerId; + if (!isOk) return Response::Error("Invalid stack trace id"); + + V8DebuggerId debuggerId; if (inStackTraceId->hasDebuggerId()) { - debuggerId = - m_debugger->debuggerIdFor(inStackTraceId->getDebuggerId(String16())); + debuggerId = V8DebuggerId(inStackTraceId->getDebuggerId(String16())); } else { debuggerId = m_debugger->debuggerIdFor(m_session->contextGroupId()); } - V8StackTraceId v8StackTraceId(id, debuggerId); - if (!isOk || v8StackTraceId.IsInvalid()) { + if (!debuggerId.isValid()) return Response::Error("Invalid stack trace id"); + + V8StackTraceId v8StackTraceId(id, debuggerId.pair()); + if (v8StackTraceId.IsInvalid()) return Response::Error("Invalid stack trace id"); - } auto stack = m_debugger->stackTraceFor(m_session->contextGroupId(), v8StackTraceId); if (!stack) { @@ -872,11 +874,10 @@ Response V8DebuggerAgentImpl::searchInContent( if (it == m_scripts.end()) return Response::Error("No script for id: " + scriptId); - *results = - v8::base::make_unique<protocol::Array<protocol::Debugger::SearchMatch>>( - searchInTextByLinesImpl(m_session, it->second->source(0), query, - optionalCaseSensitive.fromMaybe(false), - optionalIsRegex.fromMaybe(false))); + *results = std::make_unique<protocol::Array<protocol::Debugger::SearchMatch>>( + searchInTextByLinesImpl(m_session, it->second->source(0), query, + optionalCaseSensitive.fromMaybe(false), + optionalIsRegex.fromMaybe(false))); return Response::OK(); } @@ -961,6 +962,20 @@ Response V8DebuggerAgentImpl::getScriptSource(const String16& scriptId, return Response::OK(); } +Response V8DebuggerAgentImpl::getWasmBytecode(const String16& scriptId, + protocol::Binary* bytecode) { + if (!enabled()) return Response::Error(kDebuggerNotEnabled); + ScriptsMap::iterator it = m_scripts.find(scriptId); + if (it == m_scripts.end()) + return Response::Error("No script for id: " + scriptId); + v8::MemorySpan<const uint8_t> span; + if (!it->second->wasmBytecode().To(&span)) + return Response::Error("Script with id " + scriptId + + " is not WebAssembly"); + *bytecode = protocol::Binary::fromSpan(span.data(), span.size()); + return Response::OK(); +} + void V8DebuggerAgentImpl::pushBreakDetails( const String16& breakReason, std::unique_ptr<protocol::DictionaryValue> breakAuxData) { @@ -1040,13 +1055,7 @@ Response V8DebuggerAgentImpl::stepOut() { Response V8DebuggerAgentImpl::pauseOnAsyncCall( std::unique_ptr<protocol::Runtime::StackTraceId> inParentStackTraceId) { - bool isOk = false; - int64_t stackTraceId = inParentStackTraceId->getId().toInteger64(&isOk); - if (!isOk) { - return Response::Error("Invalid stack trace id"); - } - m_debugger->pauseOnAsyncCall(m_session->contextGroupId(), stackTraceId, - inParentStackTraceId->getDebuggerId(String16())); + // Deprecated, just return OK. 
return Response::OK(); } @@ -1270,11 +1279,11 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges( Response V8DebuggerAgentImpl::currentCallFrames( std::unique_ptr<Array<CallFrame>>* result) { if (!isPaused()) { - *result = v8::base::make_unique<Array<CallFrame>>(); + *result = std::make_unique<Array<CallFrame>>(); return Response::OK(); } v8::HandleScope handles(m_isolate); - *result = v8::base::make_unique<Array<CallFrame>>(); + *result = std::make_unique<Array<CallFrame>>(); auto iterator = v8::debug::StackTraceIterator::Create(m_isolate); int frameOrdinal = 0; for (; !iterator->Done(); iterator->Advance(), frameOrdinal++) { @@ -1373,28 +1382,10 @@ V8DebuggerAgentImpl::currentExternalStackTrace() { if (externalParent.IsInvalid()) return nullptr; return protocol::Runtime::StackTraceId::create() .setId(stackTraceIdToString(externalParent.id)) - .setDebuggerId(debuggerIdToString(externalParent.debugger_id)) + .setDebuggerId(V8DebuggerId(externalParent.debugger_id).toString()) .build(); } -std::unique_ptr<protocol::Runtime::StackTraceId> -V8DebuggerAgentImpl::currentScheduledAsyncCall() { - v8_inspector::V8StackTraceId scheduledAsyncCall = - m_debugger->scheduledAsyncCall(); - if (scheduledAsyncCall.IsInvalid()) return nullptr; - std::unique_ptr<protocol::Runtime::StackTraceId> asyncCallStackTrace = - protocol::Runtime::StackTraceId::create() - .setId(stackTraceIdToString(scheduledAsyncCall.id)) - .build(); - // TODO(kozyatinskiy): extract this check to IsLocal function. - if (scheduledAsyncCall.debugger_id.first || - scheduledAsyncCall.debugger_id.second) { - asyncCallStackTrace->setDebuggerId( - debuggerIdToString(scheduledAsyncCall.debugger_id)); - } - return asyncCallStackTrace; -} - bool V8DebuggerAgentImpl::isPaused() const { return m_debugger->isPausedInContextGroup(m_session->contextGroupId()); } @@ -1602,7 +1593,7 @@ void V8DebuggerAgentImpl::didPause( } } - auto hitBreakpointIds = v8::base::make_unique<Array<String16>>(); + auto hitBreakpointIds = std::make_unique<Array<String16>>(); for (const auto& id : hitBreakpoints) { auto it = m_breakpointsOnScriptRun.find(id); @@ -1655,12 +1646,11 @@ void V8DebuggerAgentImpl::didPause( std::unique_ptr<Array<CallFrame>> protocolCallFrames; Response response = currentCallFrames(&protocolCallFrames); if (!response.isSuccess()) - protocolCallFrames = v8::base::make_unique<Array<CallFrame>>(); + protocolCallFrames = std::make_unique<Array<CallFrame>>(); m_frontend.paused(std::move(protocolCallFrames), breakReason, std::move(breakAuxData), std::move(hitBreakpointIds), - currentAsyncStackTrace(), currentExternalStackTrace(), - currentScheduledAsyncCall()); + currentAsyncStackTrace(), currentExternalStackTrace()); } void V8DebuggerAgentImpl::didContinue() { diff --git a/chromium/v8/src/inspector/v8-debugger-agent-impl.h b/chromium/v8/src/inspector/v8-debugger-agent-impl.h index 0a5a169907c..e6b35b845a9 100644 --- a/chromium/v8/src/inspector/v8-debugger-agent-impl.h +++ b/chromium/v8/src/inspector/v8-debugger-agent-impl.h @@ -6,6 +6,7 @@ #define V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_ #include <deque> +#include <memory> #include <unordered_map> #include <vector> @@ -94,6 +95,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend { Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId) override; Response getScriptSource(const String16& scriptId, String16* scriptSource) override; + Response getWasmBytecode(const String16& scriptId, + protocol::Binary* bytecode) override; Response pause() override; Response resume() override; Response 
stepOver() override; @@ -165,7 +168,6 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend { std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*); std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace(); std::unique_ptr<protocol::Runtime::StackTraceId> currentExternalStackTrace(); - std::unique_ptr<protocol::Runtime::StackTraceId> currentScheduledAsyncCall(); void setPauseOnExceptionsImpl(int); diff --git a/chromium/v8/src/inspector/v8-debugger-script.cc b/chromium/v8/src/inspector/v8-debugger-script.cc index b83eafc96a9..99511fc144e 100644 --- a/chromium/v8/src/inspector/v8-debugger-script.cc +++ b/chromium/v8/src/inspector/v8-debugger-script.cc @@ -141,6 +141,12 @@ class ActualScript : public V8DebuggerScript { static_cast<int>(pos), static_cast<int>(substringLength)); return String16(buffer.get(), substringLength); } + v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const override { + v8::HandleScope scope(m_isolate); + auto script = this->script(); + if (!script->IsWasm()) return v8::Nothing<v8::MemorySpan<const uint8_t>>(); + return v8::Just(v8::debug::WasmScript::Cast(*script)->Bytecode()); + } int startLine() const override { return m_startLine; } int startColumn() const override { return m_startColumn; } int endLine() const override { return m_endLine; } @@ -281,9 +287,8 @@ class ActualScript : public V8DebuggerScript { m_startLine = script->LineOffset(); m_startColumn = script->ColumnOffset(); std::vector<int> lineEnds = script->LineEnds(); - CHECK(lineEnds.size()); - int source_length = lineEnds[lineEnds.size() - 1]; if (lineEnds.size()) { + int source_length = lineEnds[lineEnds.size() - 1]; m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1; if (lineEnds.size() > 1) { m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1; @@ -356,6 +361,9 @@ class WasmVirtualScript : public V8DebuggerScript { return m_wasmTranslation->GetSource(m_id, m_functionIndex) .substring(pos, len); } + v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const override { + return v8::Nothing<v8::MemorySpan<const uint8_t>>(); + } int startLine() const override { return m_wasmTranslation->GetStartLine(m_id, m_functionIndex); } @@ -462,17 +470,17 @@ class WasmVirtualScript : public V8DebuggerScript { std::unique_ptr<V8DebuggerScript> V8DebuggerScript::Create( v8::Isolate* isolate, v8::Local<v8::debug::Script> scriptObj, bool isLiveEdit, V8DebuggerAgentImpl* agent, V8InspectorClient* client) { - return v8::base::make_unique<ActualScript>(isolate, scriptObj, isLiveEdit, - agent, client); + return std::make_unique<ActualScript>(isolate, scriptObj, isLiveEdit, agent, + client); } std::unique_ptr<V8DebuggerScript> V8DebuggerScript::CreateWasm( v8::Isolate* isolate, WasmTranslation* wasmTranslation, v8::Local<v8::debug::WasmScript> underlyingScript, String16 id, String16 url, int functionIndex) { - return v8::base::make_unique<WasmVirtualScript>( - isolate, wasmTranslation, underlyingScript, std::move(id), std::move(url), - functionIndex); + return std::make_unique<WasmVirtualScript>(isolate, wasmTranslation, + underlyingScript, std::move(id), + std::move(url), functionIndex); } V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate, String16 id, diff --git a/chromium/v8/src/inspector/v8-debugger-script.h b/chromium/v8/src/inspector/v8-debugger-script.h index 547bb0a2ccb..b53d2c15aa8 100644 --- a/chromium/v8/src/inspector/v8-debugger-script.h +++ b/chromium/v8/src/inspector/v8-debugger-script.h @@ -30,6 +30,8 @@ #ifndef 
V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_ #define V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_ +#include <memory> + #include "src/base/macros.h" #include "src/inspector/string-16.h" #include "src/inspector/string-util.h" @@ -61,6 +63,7 @@ class V8DebuggerScript { virtual const String16& sourceMappingURL() const = 0; virtual String16 source(size_t pos, size_t len = UINT_MAX) const = 0; + virtual v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const = 0; virtual const String16& hash() const = 0; virtual int startLine() const = 0; virtual int startColumn() const = 0; diff --git a/chromium/v8/src/inspector/v8-debugger.cc b/chromium/v8/src/inspector/v8-debugger.cc index 5ddc375a80c..bd127b2c1ce 100644 --- a/chromium/v8/src/inspector/v8-debugger.cc +++ b/chromium/v8/src/inspector/v8-debugger.cc @@ -64,6 +64,42 @@ class MatchPrototypePredicate : public v8::debug::QueryObjectPredicate { } // namespace +V8DebuggerId::V8DebuggerId(std::pair<int64_t, int64_t> pair) + : m_first(pair.first), m_second(pair.second) {} + +// static +V8DebuggerId V8DebuggerId::generate(v8::Isolate* isolate) { + V8DebuggerId debuggerId; + debuggerId.m_first = v8::debug::GetNextRandomInt64(isolate); + debuggerId.m_second = v8::debug::GetNextRandomInt64(isolate); + if (!debuggerId.m_first && !debuggerId.m_second) ++debuggerId.m_first; + return debuggerId; +} + +V8DebuggerId::V8DebuggerId(const String16& debuggerId) { + const UChar dot = '.'; + size_t pos = debuggerId.find(dot); + if (pos == String16::kNotFound) return; + bool ok = false; + int64_t first = debuggerId.substring(0, pos).toInteger64(&ok); + if (!ok) return; + int64_t second = debuggerId.substring(pos + 1).toInteger64(&ok); + if (!ok) return; + m_first = first; + m_second = second; +} + +String16 V8DebuggerId::toString() const { + return String16::fromInteger64(m_first) + "." 
+ + String16::fromInteger64(m_second); +} + +bool V8DebuggerId::isValid() const { return m_first || m_second; } + +std::pair<int64_t, int64_t> V8DebuggerId::pair() const { + return std::make_pair(m_first, m_second); +} + V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector) : m_isolate(isolate), m_inspector(inspector), @@ -107,7 +143,9 @@ void V8Debugger::disable() { if (--m_enableCount) return; clearContinueToLocation(); m_taskWithScheduledBreak = nullptr; - m_taskWithScheduledBreakDebuggerId = String16(); + m_externalAsyncTaskPauseRequested = false; + m_taskWithScheduledBreakPauseRequested = false; + m_pauseOnNextCallRequested = false; m_pauseOnAsyncCall = false; m_wasmTranslation.Clear(); v8::debug::SetDebugDelegate(m_isolate, nullptr); @@ -171,12 +209,19 @@ void V8Debugger::setPauseOnNextCall(bool pause, int targetContextGroupId) { m_targetContextGroupId != targetContextGroupId) { return; } - m_targetContextGroupId = targetContextGroupId; - m_breakRequested = pause; - if (pause) - v8::debug::SetBreakOnNextFunctionCall(m_isolate); - else - v8::debug::ClearBreakOnNextFunctionCall(m_isolate); + if (pause) { + bool didHaveBreak = hasScheduledBreakOnNextFunctionCall(); + m_pauseOnNextCallRequested = true; + if (!didHaveBreak) { + m_targetContextGroupId = targetContextGroupId; + v8::debug::SetBreakOnNextFunctionCall(m_isolate); + } + } else { + m_pauseOnNextCallRequested = false; + if (!hasScheduledBreakOnNextFunctionCall()) { + v8::debug::ClearBreakOnNextFunctionCall(m_isolate); + } + } } bool V8Debugger::canBreakProgram() { @@ -275,21 +320,12 @@ bool V8Debugger::asyncStepOutOfFunction(int targetContextGroupId, void* parentTask = std::shared_ptr<AsyncStackTrace>(parent)->suspendedTaskId(); if (!parentTask) return false; - pauseOnAsyncCall(targetContextGroupId, - reinterpret_cast<uintptr_t>(parentTask), String16()); + m_targetContextGroupId = targetContextGroupId; + m_taskWithScheduledBreak = parentTask; continueProgram(targetContextGroupId); return true; } -void V8Debugger::pauseOnAsyncCall(int targetContextGroupId, uintptr_t task, - const String16& debuggerId) { - DCHECK(targetContextGroupId); - m_targetContextGroupId = targetContextGroupId; - - m_taskWithScheduledBreak = reinterpret_cast<void*>(task); - m_taskWithScheduledBreakDebuggerId = debuggerId; -} - void V8Debugger::terminateExecution( std::unique_ptr<TerminateExecutionCallback> callback) { if (m_terminateExecutionCallback) { @@ -390,10 +426,11 @@ void V8Debugger::handleProgramBreak( return; } m_targetContextGroupId = 0; - m_breakRequested = false; + m_pauseOnNextCallRequested = false; m_pauseOnAsyncCall = false; m_taskWithScheduledBreak = nullptr; - m_taskWithScheduledBreakDebuggerId = String16(); + m_externalAsyncTaskPauseRequested = false; + m_taskWithScheduledBreakPauseRequested = false; bool scheduledOOMBreak = m_scheduledOOMBreak; bool scheduledAssertBreak = m_scheduledAssertBreak; @@ -470,31 +507,30 @@ size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit, void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script, bool is_live_edited, bool has_compile_error) { + if (m_ignoreScriptParsedEventsCounter != 0) return; + int contextId; if (!script->ContextId().To(&contextId)) return; - if (script->IsWasm() && script->SourceMappingURL().IsEmpty()) { - WasmTranslation* wasmTranslation = &m_wasmTranslation; - m_inspector->forEachSession( - m_inspector->contextGroupId(contextId), - [&script, &wasmTranslation](V8InspectorSessionImpl* session) { - if 
(!session->debuggerAgent()->enabled()) return; - wasmTranslation->AddScript(script.As<v8::debug::WasmScript>(), - session->debuggerAgent()); - }); - } else if (m_ignoreScriptParsedEventsCounter == 0) { - v8::Isolate* isolate = m_isolate; - V8InspectorClient* client = m_inspector->client(); - m_inspector->forEachSession( - m_inspector->contextGroupId(contextId), - [&isolate, &script, &has_compile_error, &is_live_edited, - &client](V8InspectorSessionImpl* session) { - if (!session->debuggerAgent()->enabled()) return; - session->debuggerAgent()->didParseSource( - V8DebuggerScript::Create(isolate, script, is_live_edited, - session->debuggerAgent(), client), + + v8::Isolate* isolate = m_isolate; + V8InspectorClient* client = m_inspector->client(); + WasmTranslation& wasmTranslation = m_wasmTranslation; + + m_inspector->forEachSession( + m_inspector->contextGroupId(contextId), + [isolate, &script, has_compile_error, is_live_edited, client, + &wasmTranslation](V8InspectorSessionImpl* session) { + auto agent = session->debuggerAgent(); + if (!agent->enabled()) return; + if (script->IsWasm() && script->SourceMappingURL().IsEmpty()) { + wasmTranslation.AddScript(script.As<v8::debug::WasmScript>(), agent); + } else { + agent->didParseSource( + V8DebuggerScript::Create(isolate, script, is_live_edited, agent, + client), !has_compile_error); - }); - } + } + }); } void V8Debugger::BreakProgramRequested( @@ -540,15 +576,15 @@ void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type, switch (type) { case v8::debug::kDebugPromiseThen: asyncTaskScheduledForStack("Promise.then", task, false); - if (!isBlackboxed) asyncTaskCandidateForStepping(task, true); + if (!isBlackboxed) asyncTaskCandidateForStepping(task); break; case v8::debug::kDebugPromiseCatch: asyncTaskScheduledForStack("Promise.catch", task, false); - if (!isBlackboxed) asyncTaskCandidateForStepping(task, true); + if (!isBlackboxed) asyncTaskCandidateForStepping(task); break; case v8::debug::kDebugPromiseFinally: asyncTaskScheduledForStack("Promise.finally", task, false); - if (!isBlackboxed) asyncTaskCandidateForStepping(task, true); + if (!isBlackboxed) asyncTaskCandidateForStepping(task); break; case v8::debug::kDebugWillHandle: asyncTaskStartedForStack(task); @@ -786,7 +822,7 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) { std::shared_ptr<AsyncStackTrace> V8Debugger::stackTraceFor( int contextGroupId, const V8StackTraceId& id) { - if (debuggerIdFor(contextGroupId) != id.debugger_id) return nullptr; + if (debuggerIdFor(contextGroupId).pair() != id.debugger_id) return nullptr; auto it = m_storedStackTraces.find(id.id); if (it == m_storedStackTraces.end()) return nullptr; return it->second.lock(); @@ -811,9 +847,13 @@ V8StackTraceId V8Debugger::storeCurrentStackTrace( ++m_asyncStacksCount; collectOldAsyncStacksIfNeeded(); - asyncTaskCandidateForStepping(reinterpret_cast<void*>(id), false); - - return V8StackTraceId(id, debuggerIdFor(contextGroupId)); + bool shouldPause = + m_pauseOnAsyncCall && contextGroupId == m_targetContextGroupId; + if (shouldPause) { + m_pauseOnAsyncCall = false; + v8::debug::ClearStepping(m_isolate); // Cancel step into. 
+ } + return V8StackTraceId(id, debuggerIdFor(contextGroupId).pair(), shouldPause); } uintptr_t V8Debugger::storeStackTrace( @@ -829,13 +869,12 @@ void V8Debugger::externalAsyncTaskStarted(const V8StackTraceId& parent) { m_currentAsyncParent.emplace_back(); m_currentTasks.push_back(reinterpret_cast<void*>(parent.id)); - if (m_breakRequested) return; - if (!m_taskWithScheduledBreakDebuggerId.isEmpty() && - reinterpret_cast<uintptr_t>(m_taskWithScheduledBreak) == parent.id && - m_taskWithScheduledBreakDebuggerId == - debuggerIdToString(parent.debugger_id)) { - v8::debug::SetBreakOnNextFunctionCall(m_isolate); - } + if (!parent.should_pause) return; + bool didHaveBreak = hasScheduledBreakOnNextFunctionCall(); + m_externalAsyncTaskPauseRequested = true; + if (didHaveBreak) return; + m_targetContextGroupId = currentContextGroupId(); + v8::debug::SetBreakOnNextFunctionCall(m_isolate); } void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) { @@ -845,22 +884,16 @@ void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) { DCHECK(m_currentTasks.back() == reinterpret_cast<void*>(parent.id)); m_currentTasks.pop_back(); - if (m_taskWithScheduledBreakDebuggerId.isEmpty() || - reinterpret_cast<uintptr_t>(m_taskWithScheduledBreak) != parent.id || - m_taskWithScheduledBreakDebuggerId != - debuggerIdToString(parent.debugger_id)) { - return; - } - m_taskWithScheduledBreak = nullptr; - m_taskWithScheduledBreakDebuggerId = String16(); - if (m_breakRequested) return; + if (!parent.should_pause) return; + m_externalAsyncTaskPauseRequested = false; + if (hasScheduledBreakOnNextFunctionCall()) return; v8::debug::ClearBreakOnNextFunctionCall(m_isolate); } void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task, bool recurring) { asyncTaskScheduledForStack(toString16(taskName), task, recurring); - asyncTaskCandidateForStepping(task, true); + asyncTaskCandidateForStepping(task); } void V8Debugger::asyncTaskCanceled(void* task) { @@ -936,46 +969,36 @@ void V8Debugger::asyncTaskFinishedForStack(void* task) { } } -void V8Debugger::asyncTaskCandidateForStepping(void* task, bool isLocal) { +void V8Debugger::asyncTaskCandidateForStepping(void* task) { if (!m_pauseOnAsyncCall) return; int contextGroupId = currentContextGroupId(); if (contextGroupId != m_targetContextGroupId) return; - if (isLocal) { - m_scheduledAsyncCall = v8_inspector::V8StackTraceId( - reinterpret_cast<uintptr_t>(task), std::make_pair(0, 0)); - } else { - m_scheduledAsyncCall = v8_inspector::V8StackTraceId( - reinterpret_cast<uintptr_t>(task), debuggerIdFor(contextGroupId)); - } - breakProgram(m_targetContextGroupId); - m_scheduledAsyncCall = v8_inspector::V8StackTraceId(); + m_taskWithScheduledBreak = task; + m_pauseOnAsyncCall = false; + v8::debug::ClearStepping(m_isolate); // Cancel step into. } void V8Debugger::asyncTaskStartedForStepping(void* task) { - if (m_breakRequested) return; // TODO(kozyatinskiy): we should search task in async chain to support // blackboxing. 
- if (m_taskWithScheduledBreakDebuggerId.isEmpty() && - task == m_taskWithScheduledBreak) { - v8::debug::SetBreakOnNextFunctionCall(m_isolate); - } + if (task != m_taskWithScheduledBreak) return; + bool didHaveBreak = hasScheduledBreakOnNextFunctionCall(); + m_taskWithScheduledBreakPauseRequested = true; + if (didHaveBreak) return; + m_targetContextGroupId = currentContextGroupId(); + v8::debug::SetBreakOnNextFunctionCall(m_isolate); } void V8Debugger::asyncTaskFinishedForStepping(void* task) { - if (!m_taskWithScheduledBreakDebuggerId.isEmpty() || - task != m_taskWithScheduledBreak) { - return; - } + if (task != m_taskWithScheduledBreak) return; m_taskWithScheduledBreak = nullptr; - if (m_breakRequested) return; + m_taskWithScheduledBreakPauseRequested = false; + if (hasScheduledBreakOnNextFunctionCall()) return; v8::debug::ClearBreakOnNextFunctionCall(m_isolate); } void V8Debugger::asyncTaskCanceledForStepping(void* task) { - if (!m_taskWithScheduledBreakDebuggerId.isEmpty() || - task != m_taskWithScheduledBreak) - return; - m_taskWithScheduledBreak = nullptr; + asyncTaskFinishedForStepping(task); } void V8Debugger::allAsyncTasksCanceled() { @@ -1058,7 +1081,7 @@ std::shared_ptr<StackFrame> V8Debugger::symbolize( return std::shared_ptr<StackFrame>(it->second); } std::shared_ptr<StackFrame> frame(new StackFrame(isolate(), v8Frame)); - // TODO(clemensh): Figure out a way to do this translation only right before + // TODO(clemensb): Figure out a way to do this translation only right before // sending the stack trace over wire. if (v8Frame->IsWasm()) frame->translate(&m_wasmTranslation); if (m_maxAsyncCallStackDepth) { @@ -1073,27 +1096,15 @@ void V8Debugger::setMaxAsyncTaskStacksForTest(int limit) { m_maxAsyncCallStacks = limit; } -std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor(int contextGroupId) { +V8DebuggerId V8Debugger::debuggerIdFor(int contextGroupId) { auto it = m_contextGroupIdToDebuggerId.find(contextGroupId); if (it != m_contextGroupIdToDebuggerId.end()) return it->second; - std::pair<int64_t, int64_t> debuggerId( - v8::debug::GetNextRandomInt64(m_isolate), - v8::debug::GetNextRandomInt64(m_isolate)); - if (!debuggerId.first && !debuggerId.second) ++debuggerId.first; + V8DebuggerId debuggerId = V8DebuggerId::generate(m_isolate); m_contextGroupIdToDebuggerId.insert( it, std::make_pair(contextGroupId, debuggerId)); - m_serializedDebuggerIdToDebuggerId.insert( - std::make_pair(debuggerIdToString(debuggerId), debuggerId)); return debuggerId; } -std::pair<int64_t, int64_t> V8Debugger::debuggerIdFor( - const String16& serializedDebuggerId) { - auto it = m_serializedDebuggerIdToDebuggerId.find(serializedDebuggerId); - if (it != m_serializedDebuggerIdToDebuggerId.end()) return it->second; - return std::make_pair(0, 0); -} - bool V8Debugger::addInternalObject(v8::Local<v8::Context> context, v8::Local<v8::Object> object, V8InternalValueType type) { @@ -1110,4 +1121,9 @@ void V8Debugger::dumpAsyncTaskStacksStateForTest() { fprintf(stdout, "\n"); } +bool V8Debugger::hasScheduledBreakOnNextFunctionCall() const { + return m_pauseOnNextCallRequested || m_taskWithScheduledBreakPauseRequested || + m_externalAsyncTaskPauseRequested; +} + } // namespace v8_inspector diff --git a/chromium/v8/src/inspector/v8-debugger.h b/chromium/v8/src/inspector/v8-debugger.h index ba64c4c0326..a078d14f3d2 100644 --- a/chromium/v8/src/inspector/v8-debugger.h +++ b/chromium/v8/src/inspector/v8-debugger.h @@ -6,6 +6,7 @@ #define V8_INSPECTOR_V8_DEBUGGER_H_ #include <list> +#include <memory> #include 
<unordered_map> #include <unordered_set> #include <vector> @@ -36,6 +37,31 @@ using protocol::Response; using TerminateExecutionCallback = protocol::Runtime::Backend::TerminateExecutionCallback; +// This debugger id tries to be unique by generating two random +// numbers, which should most likely avoid collisions. +// Debugger id has a 1:1 mapping to context group. It is used to +// attribute stack traces to a particular debugging, when doing any +// cross-debugger operations (e.g. async step in). +// See also Runtime.UniqueDebuggerId in the protocol. +class V8DebuggerId { + public: + V8DebuggerId() = default; + explicit V8DebuggerId(std::pair<int64_t, int64_t>); + explicit V8DebuggerId(const String16&); + V8DebuggerId(const V8DebuggerId&) V8_NOEXCEPT = default; + ~V8DebuggerId() = default; + + static V8DebuggerId generate(v8::Isolate*); + + String16 toString() const; + bool isValid() const; + std::pair<int64_t, int64_t> pair() const; + + private: + int64_t m_first = 0; + int64_t m_second = 0; +}; + class V8Debugger : public v8::debug::DebugDelegate, public v8::debug::AsyncEventDelegate { public: @@ -59,8 +85,6 @@ class V8Debugger : public v8::debug::DebugDelegate, void stepIntoStatement(int targetContextGroupId, bool breakOnAsyncCall); void stepOverStatement(int targetContextGroupId); void stepOutOfFunction(int targetContextGroupId); - void pauseOnAsyncCall(int targetContextGroupId, uintptr_t task, - const String16& debuggerId); void terminateExecution(std::unique_ptr<TerminateExecutionCallback> callback); @@ -121,13 +145,7 @@ class V8Debugger : public v8::debug::DebugDelegate, void setMaxAsyncTaskStacksForTest(int limit); void dumpAsyncTaskStacksStateForTest(); - v8_inspector::V8StackTraceId scheduledAsyncCall() { - return m_scheduledAsyncCall; - } - - std::pair<int64_t, int64_t> debuggerIdFor(int contextGroupId); - std::pair<int64_t, int64_t> debuggerIdFor( - const String16& serializedDebuggerId); + V8DebuggerId debuggerIdFor(int contextGroupId); std::shared_ptr<AsyncStackTrace> stackTraceFor(int contextGroupId, const V8StackTraceId& id); @@ -173,7 +191,7 @@ class V8Debugger : public v8::debug::DebugDelegate, void asyncTaskStartedForStack(void* task); void asyncTaskFinishedForStack(void* task); - void asyncTaskCandidateForStepping(void* task, bool isLocal); + void asyncTaskCandidateForStepping(void* task); void asyncTaskStartedForStepping(void* task); void asyncTaskFinishedForStepping(void* task); void asyncTaskCanceledForStepping(void* task); @@ -197,6 +215,8 @@ class V8Debugger : public v8::debug::DebugDelegate, int currentContextGroupId(); bool asyncStepOutOfFunction(int targetContextGroupId, bool onlyAtReturn); + bool hasScheduledBreakOnNextFunctionCall() const; + v8::Isolate* m_isolate; V8InspectorImpl* m_inspector; int m_enableCount; @@ -233,23 +253,24 @@ class V8Debugger : public v8::debug::DebugDelegate, std::unordered_map<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap; void* m_taskWithScheduledBreak = nullptr; - String16 m_taskWithScheduledBreakDebuggerId; - bool m_breakRequested = false; + // If any of the following three is true, we schedule pause on next JS + // execution using SetBreakOnNextFunctionCall. + bool m_externalAsyncTaskPauseRequested = false; // External async task. + bool m_taskWithScheduledBreakPauseRequested = false; // Local async task. + bool m_pauseOnNextCallRequested = false; // setPauseOnNextCall API call. v8::debug::ExceptionBreakState m_pauseOnExceptionsState; + // Whether we should pause on async call execution (if any) while stepping in. 
+ // See Debugger.stepInto for details. bool m_pauseOnAsyncCall = false; - v8_inspector::V8StackTraceId m_scheduledAsyncCall; using StackTraceIdToStackTrace = std::unordered_map<uintptr_t, std::weak_ptr<AsyncStackTrace>>; StackTraceIdToStackTrace m_storedStackTraces; uintptr_t m_lastStackTraceId = 0; - std::unordered_map<int, std::pair<int64_t, int64_t>> - m_contextGroupIdToDebuggerId; - std::unordered_map<String16, std::pair<int64_t, int64_t>> - m_serializedDebuggerIdToDebuggerId; + std::unordered_map<int, V8DebuggerId> m_contextGroupIdToDebuggerId; std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback; diff --git a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc index fcee8a6ef3d..02aa1ad9feb 100644 --- a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc +++ b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc @@ -4,7 +4,6 @@ #include "src/inspector/v8-heap-profiler-agent-impl.h" -#include "src/base/template-utils.h" #include "src/inspector/injected-script.h" #include "src/inspector/inspected-context.h" #include "src/inspector/protocol/Protocol.h" @@ -128,7 +127,7 @@ class HeapStatsStream final : public v8::OutputStream { WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* updateData, int count) override { DCHECK_GT(count, 0); - auto statsDiff = v8::base::make_unique<protocol::Array<int>>(); + auto statsDiff = std::make_unique<protocol::Array<int>>(); for (int i = 0; i < count; ++i) { statsDiff->emplace_back(updateData[i].index); statsDiff->emplace_back(updateData[i].count); @@ -337,7 +336,7 @@ namespace { std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode> buildSampingHeapProfileNode(v8::Isolate* isolate, const v8::AllocationProfile::Node* node) { - auto children = v8::base::make_unique< + auto children = std::make_unique< protocol::Array<protocol::HeapProfiler::SamplingHeapProfileNode>>(); for (const auto* child : node->children) children->emplace_back(buildSampingHeapProfileNode(isolate, child)); @@ -384,7 +383,7 @@ Response V8HeapProfilerAgentImpl::getSamplingProfile( if (!v8Profile) return Response::Error("V8 sampling heap profiler was not started."); v8::AllocationProfile::Node* root = v8Profile->GetRootNode(); - auto samples = v8::base::make_unique< + auto samples = std::make_unique< protocol::Array<protocol::HeapProfiler::SamplingHeapProfileSample>>(); for (const auto& sample : v8Profile->GetSamples()) { samples->emplace_back( diff --git a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h index 5c2107d5739..665e30be945 100644 --- a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h +++ b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h @@ -5,6 +5,8 @@ #ifndef V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_ #define V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_ +#include <memory> + #include "src/base/macros.h" #include "src/inspector/protocol/Forward.h" #include "src/inspector/protocol/HeapProfiler.h" diff --git a/chromium/v8/src/inspector/v8-inspector-impl.cc b/chromium/v8/src/inspector/v8-inspector-impl.cc index b7641180796..e91dd7f7f46 100644 --- a/chromium/v8/src/inspector/v8-inspector-impl.cc +++ b/chromium/v8/src/inspector/v8-inspector-impl.cc @@ -439,7 +439,7 @@ protocol::Response V8InspectorImpl::EvaluateScope::setTimeout(double timeout) { } m_cancelToken.reset(new CancelToken()); v8::debug::GetCurrentPlatform()->CallDelayedOnWorkerThread( - v8::base::make_unique<TerminateTask>(m_isolate, 
m_cancelToken), timeout); + std::make_unique<TerminateTask>(m_isolate, m_cancelToken), timeout); return protocol::Response::OK(); } diff --git a/chromium/v8/src/inspector/v8-inspector-impl.h b/chromium/v8/src/inspector/v8-inspector-impl.h index 5b89cb09209..6276d6d7f6f 100644 --- a/chromium/v8/src/inspector/v8-inspector-impl.h +++ b/chromium/v8/src/inspector/v8-inspector-impl.h @@ -33,6 +33,7 @@ #include <functional> #include <map> +#include <memory> #include <unordered_map> #include "src/base/macros.h" diff --git a/chromium/v8/src/inspector/v8-inspector-session-impl.h b/chromium/v8/src/inspector/v8-inspector-session-impl.h index 7a976bcd40d..786dc2a048b 100644 --- a/chromium/v8/src/inspector/v8-inspector-session-impl.h +++ b/chromium/v8/src/inspector/v8-inspector-session-impl.h @@ -5,6 +5,7 @@ #ifndef V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_ #define V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_ +#include <memory> #include <vector> #include "src/base/macros.h" diff --git a/chromium/v8/src/inspector/v8-profiler-agent-impl.cc b/chromium/v8/src/inspector/v8-profiler-agent-impl.cc index 3b02f7faa15..286a18a673c 100644 --- a/chromium/v8/src/inspector/v8-profiler-agent-impl.cc +++ b/chromium/v8/src/inspector/v8-profiler-agent-impl.cc @@ -44,8 +44,8 @@ std::unique_ptr<protocol::Array<protocol::Profiler::PositionTickInfo>> buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) { unsigned lineCount = node->GetHitLineCount(); if (!lineCount) return nullptr; - auto array = v8::base::make_unique< - protocol::Array<protocol::Profiler::PositionTickInfo>>(); + auto array = + std::make_unique<protocol::Array<protocol::Profiler::PositionTickInfo>>(); std::vector<v8::CpuProfileNode::LineTick> entries(lineCount); if (node->GetLineTicks(&entries[0], lineCount)) { for (unsigned i = 0; i < lineCount; i++) { @@ -80,7 +80,7 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor( const int childrenCount = node->GetChildrenCount(); if (childrenCount) { - auto children = v8::base::make_unique<protocol::Array<int>>(); + auto children = std::make_unique<protocol::Array<int>>(); for (int i = 0; i < childrenCount; i++) children->emplace_back(node->GetChild(i)->GetNodeId()); result->setChildren(std::move(children)); @@ -98,7 +98,7 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor( std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples( v8::CpuProfile* v8profile) { - auto array = v8::base::make_unique<protocol::Array<int>>(); + auto array = std::make_unique<protocol::Array<int>>(); int count = v8profile->GetSamplesCount(); for (int i = 0; i < count; i++) array->emplace_back(v8profile->GetSample(i)->GetNodeId()); @@ -107,7 +107,7 @@ std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples( std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps( v8::CpuProfile* v8profile) { - auto array = v8::base::make_unique<protocol::Array<int>>(); + auto array = std::make_unique<protocol::Array<int>>(); int count = v8profile->GetSamplesCount(); uint64_t lastTime = v8profile->GetStartTime(); for (int i = 0; i < count; i++) { @@ -130,7 +130,7 @@ void flattenNodesTree(V8InspectorImpl* inspector, std::unique_ptr<protocol::Profiler::Profile> createCPUProfile( V8InspectorImpl* inspector, v8::CpuProfile* v8profile) { auto nodes = - v8::base::make_unique<protocol::Array<protocol::Profiler::ProfileNode>>(); + std::make_unique<protocol::Array<protocol::Profiler::ProfileNode>>(); flattenNodesTree(inspector, v8profile->GetTopDownRoot(), 
nodes.get()); return protocol::Profiler::Profile::create() .setNodes(std::move(nodes)) @@ -338,18 +338,18 @@ Response coverageToProtocol( V8InspectorImpl* inspector, const v8::debug::Coverage& coverage, std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>* out_result) { - auto result = v8::base::make_unique< - protocol::Array<protocol::Profiler::ScriptCoverage>>(); + auto result = + std::make_unique<protocol::Array<protocol::Profiler::ScriptCoverage>>(); v8::Isolate* isolate = inspector->isolate(); for (size_t i = 0; i < coverage.ScriptCount(); i++) { v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i); v8::Local<v8::debug::Script> script = script_data.GetScript(); - auto functions = v8::base::make_unique< + auto functions = std::make_unique< protocol::Array<protocol::Profiler::FunctionCoverage>>(); for (size_t j = 0; j < script_data.FunctionCount(); j++) { v8::debug::Coverage::FunctionData function_data = script_data.GetFunctionData(j); - auto ranges = v8::base::make_unique< + auto ranges = std::make_unique< protocol::Array<protocol::Profiler::CoverageRange>>(); // Add function range. @@ -418,19 +418,19 @@ namespace { std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>> typeProfileToProtocol(V8InspectorImpl* inspector, const v8::debug::TypeProfile& type_profile) { - auto result = v8::base::make_unique< + auto result = std::make_unique< protocol::Array<protocol::Profiler::ScriptTypeProfile>>(); v8::Isolate* isolate = inspector->isolate(); for (size_t i = 0; i < type_profile.ScriptCount(); i++) { v8::debug::TypeProfile::ScriptData script_data = type_profile.GetScriptData(i); v8::Local<v8::debug::Script> script = script_data.GetScript(); - auto entries = v8::base::make_unique< + auto entries = std::make_unique< protocol::Array<protocol::Profiler::TypeProfileEntry>>(); for (const auto& entry : script_data.Entries()) { - auto types = v8::base::make_unique< - protocol::Array<protocol::Profiler::TypeObject>>(); + auto types = + std::make_unique<protocol::Array<protocol::Profiler::TypeObject>>(); for (const auto& type : entry.Types()) { types->emplace_back( protocol::Profiler::TypeObject::create() diff --git a/chromium/v8/src/inspector/v8-profiler-agent-impl.h b/chromium/v8/src/inspector/v8-profiler-agent-impl.h index 5370d39eb48..832d2ce139f 100644 --- a/chromium/v8/src/inspector/v8-profiler-agent-impl.h +++ b/chromium/v8/src/inspector/v8-profiler-agent-impl.h @@ -5,6 +5,7 @@ #ifndef V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_ #define V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_ +#include <memory> #include <vector> #include "src/base/macros.h" diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc index a8aee0b7f36..4dfc210edc4 100644 --- a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc +++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc @@ -235,7 +235,8 @@ void V8RuntimeAgentImpl::evaluate( Maybe<int> executionContextId, Maybe<bool> returnByValue, Maybe<bool> generatePreview, Maybe<bool> userGesture, Maybe<bool> awaitPromise, Maybe<bool> throwOnSideEffect, - Maybe<double> timeout, std::unique_ptr<EvaluateCallback> callback) { + Maybe<double> timeout, Maybe<bool> disableBreaks, + std::unique_ptr<EvaluateCallback> callback) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "EvaluateScript"); int contextId = 0; @@ -272,9 +273,16 @@ void V8RuntimeAgentImpl::evaluate( } v8::MicrotasksScope microtasksScope(m_inspector->isolate(), v8::MicrotasksScope::kRunMicrotasks); + 
v8::debug::EvaluateGlobalMode mode = + v8::debug::EvaluateGlobalMode::kDefault; + if (throwOnSideEffect.fromMaybe(false)) { + mode = v8::debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect; + } else if (disableBreaks.fromMaybe(false)) { + mode = v8::debug::EvaluateGlobalMode::kDisableBreaks; + } maybeResultValue = v8::debug::EvaluateGlobal( m_inspector->isolate(), toV8String(m_inspector->isolate(), expression), - throwOnSideEffect.fromMaybe(false)); + mode); } // Run microtasks before returning result. // Re-initialize after running client's code, as it could have destroyed @@ -613,7 +621,7 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames( v8::PersistentValueVector<v8::String> names(m_inspector->isolate()); v8::debug::GlobalLexicalScopeNames(scope.context(), &names); - *outNames = v8::base::make_unique<protocol::Array<String16>>(); + *outNames = std::make_unique<protocol::Array<String16>>(); for (size_t i = 0; i < names.Size(); ++i) { (*outNames)->emplace_back( toProtocolString(m_inspector->isolate(), names.Get(i))); diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.h b/chromium/v8/src/inspector/v8-runtime-agent-impl.h index a2002e36609..7ecbafd6116 100644 --- a/chromium/v8/src/inspector/v8-runtime-agent-impl.h +++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.h @@ -31,6 +31,7 @@ #ifndef V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_ #define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_ +#include <memory> #include <unordered_map> #include "src/base/macros.h" @@ -66,7 +67,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend { Maybe<int> executionContextId, Maybe<bool> returnByValue, Maybe<bool> generatePreview, Maybe<bool> userGesture, Maybe<bool> awaitPromise, Maybe<bool> throwOnSideEffect, - Maybe<double> timeout, + Maybe<double> timeout, Maybe<bool> disableBreaks, std::unique_ptr<EvaluateCallback>) override; void awaitPromise(const String16& promiseObjectId, Maybe<bool> returnByValue, Maybe<bool> generatePreview, diff --git a/chromium/v8/src/inspector/v8-schema-agent-impl.cc b/chromium/v8/src/inspector/v8-schema-agent-impl.cc index 808f59b0bfb..ae19416d1f2 100644 --- a/chromium/v8/src/inspector/v8-schema-agent-impl.cc +++ b/chromium/v8/src/inspector/v8-schema-agent-impl.cc @@ -4,7 +4,6 @@ #include "src/inspector/v8-schema-agent-impl.h" -#include "src/base/template-utils.h" #include "src/inspector/protocol/Protocol.h" #include "src/inspector/v8-inspector-session-impl.h" @@ -19,9 +18,9 @@ V8SchemaAgentImpl::~V8SchemaAgentImpl() = default; Response V8SchemaAgentImpl::getDomains( std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) { - *result = v8::base::make_unique< - std::vector<std::unique_ptr<protocol::Schema::Domain>>>( - m_session->supportedDomainsImpl()); + *result = + std::make_unique<std::vector<std::unique_ptr<protocol::Schema::Domain>>>( + m_session->supportedDomainsImpl()); return Response::OK(); } diff --git a/chromium/v8/src/inspector/v8-schema-agent-impl.h b/chromium/v8/src/inspector/v8-schema-agent-impl.h index b96cce14013..1251e98bc52 100644 --- a/chromium/v8/src/inspector/v8-schema-agent-impl.h +++ b/chromium/v8/src/inspector/v8-schema-agent-impl.h @@ -5,6 +5,8 @@ #ifndef V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_ #define V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_ +#include <memory> + #include "src/base/macros.h" #include "src/inspector/protocol/Forward.h" #include "src/inspector/protocol/Schema.h" diff --git a/chromium/v8/src/inspector/v8-stack-trace-impl.cc b/chromium/v8/src/inspector/v8-stack-trace-impl.cc index e2be8110696..04feca284c5 
100644 --- a/chromium/v8/src/inspector/v8-stack-trace-impl.cc +++ b/chromium/v8/src/inspector/v8-stack-trace-impl.cc @@ -6,7 +6,6 @@ #include <algorithm> -#include "src/base/template-utils.h" #include "src/inspector/v8-debugger.h" #include "src/inspector/v8-inspector-impl.h" #include "src/inspector/wasm-translation.h" @@ -17,6 +16,10 @@ int V8StackTraceImpl::maxCallStackSizeToCapture = 200; namespace { +static const char kId[] = "id"; +static const char kDebuggerId[] = "debuggerId"; +static const char kShouldPause[] = "shouldPause"; + static const v8::StackTrace::StackTraceOptions stackTraceOptions = static_cast<v8::StackTrace::StackTraceOptions>( v8::StackTrace::kDetailed | @@ -74,7 +77,7 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon( } auto inspectorFrames = - v8::base::make_unique<protocol::Array<protocol::Runtime::CallFrame>>(); + std::make_unique<protocol::Array<protocol::Runtime::CallFrame>>(); for (const std::shared_ptr<StackFrame>& frame : frames) { V8InspectorClient* client = nullptr; if (debugger && debugger->inspector()) @@ -102,7 +105,7 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon( stackTrace->setParentId( protocol::Runtime::StackTraceId::create() .setId(stackTraceIdToString(externalParent.id)) - .setDebuggerId(debuggerIdToString(externalParent.debugger_id)) + .setDebuggerId(V8DebuggerId(externalParent.debugger_id).toString()) .build()); } return stackTrace; @@ -110,14 +113,47 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon( } // namespace -V8StackTraceId::V8StackTraceId() : id(0), debugger_id(std::make_pair(0, 0)) {} +V8StackTraceId::V8StackTraceId() : id(0), debugger_id(V8DebuggerId().pair()) {} V8StackTraceId::V8StackTraceId(uintptr_t id, const std::pair<int64_t, int64_t> debugger_id) : id(id), debugger_id(debugger_id) {} +V8StackTraceId::V8StackTraceId(uintptr_t id, + const std::pair<int64_t, int64_t> debugger_id, + bool should_pause) + : id(id), debugger_id(debugger_id), should_pause(should_pause) {} + +V8StackTraceId::V8StackTraceId(const StringView& json) + : id(0), debugger_id(V8DebuggerId().pair()) { + auto dict = + protocol::DictionaryValue::cast(protocol::StringUtil::parseJSON(json)); + if (!dict) return; + String16 s; + if (!dict->getString(kId, &s)) return; + bool isOk = false; + int64_t parsedId = s.toInteger64(&isOk); + if (!isOk || !parsedId) return; + if (!dict->getString(kDebuggerId, &s)) return; + V8DebuggerId debuggerId(s); + if (!debuggerId.isValid()) return; + if (!dict->getBoolean(kShouldPause, &should_pause)) return; + id = parsedId; + debugger_id = debuggerId.pair(); +} + bool V8StackTraceId::IsInvalid() const { return !id; } +std::unique_ptr<StringBuffer> V8StackTraceId::ToString() { + if (IsInvalid()) return nullptr; + auto dict = protocol::DictionaryValue::create(); + dict->setString(kId, String16::fromInteger64(id)); + dict->setString(kDebuggerId, V8DebuggerId(debugger_id).toString()); + dict->setBoolean(kShouldPause, should_pause); + String16 json = dict->toJSONString(); + return StringBufferImpl::adopt(json); +} + StackFrame::StackFrame(v8::Isolate* isolate, v8::Local<v8::StackFrame> v8Frame) : m_functionName(toProtocolString(isolate, v8Frame->GetFunctionName())), m_scriptId(String16::fromInteger(v8Frame->GetScriptId())), diff --git a/chromium/v8/src/inspector/value-mirror.cc b/chromium/v8/src/inspector/value-mirror.cc index 9edfbc1a212..903a5c6b020 100644 --- a/chromium/v8/src/inspector/value-mirror.cc +++ b/chromium/v8/src/inspector/value-mirror.cc @@ 
-372,8 +372,7 @@ class PrimitiveValueMirror final : public ValueMirror { .setType(m_type) .setDescription(descriptionForPrimitiveType(context, m_value)) .setOverflow(false) - .setProperties( - v8::base::make_unique<protocol::Array<PropertyPreview>>()) + .setProperties(std::make_unique<protocol::Array<PropertyPreview>>()) .build(); if (m_value->IsNull()) (*preview)->setSubtype(RemoteObject::SubtypeEnum::Null); @@ -438,8 +437,7 @@ class NumberMirror final : public ValueMirror { .setType(RemoteObject::TypeEnum::Number) .setDescription(description(&unserializable)) .setOverflow(false) - .setProperties( - v8::base::make_unique<protocol::Array<PropertyPreview>>()) + .setProperties(std::make_unique<protocol::Array<PropertyPreview>>()) .build(); } @@ -496,8 +494,7 @@ class BigIntMirror final : public ValueMirror { .setType(RemoteObject::TypeEnum::Bigint) .setDescription(descriptionForBigInt(context, m_value)) .setOverflow(false) - .setProperties( - v8::base::make_unique<protocol::Array<PropertyPreview>>()) + .setProperties(std::make_unique<protocol::Array<PropertyPreview>>()) .build(); } @@ -656,8 +653,7 @@ class FunctionMirror final : public ValueMirror { .setType(RemoteObject::TypeEnum::Function) .setDescription(descriptionForFunction(context, m_value)) .setOverflow(false) - .setProperties( - v8::base::make_unique<protocol::Array<PropertyPreview>>()) + .setProperties(std::make_unique<protocol::Array<PropertyPreview>>()) .build(); } @@ -939,7 +935,7 @@ class ObjectMirror final : public ValueMirror { v8::Local<v8::Context> context, bool forEntry, bool generatePreviewForTable, int* nameLimit, int* indexLimit, std::unique_ptr<ObjectPreview>* result) const { - auto properties = v8::base::make_unique<protocol::Array<PropertyPreview>>(); + auto properties = std::make_unique<protocol::Array<PropertyPreview>>(); std::unique_ptr<protocol::Array<EntryPreview>> entriesPreview; bool overflow = false; @@ -996,8 +992,7 @@ class ObjectMirror final : public ValueMirror { if (forEntry) { overflow = true; } else { - entriesPreview = - v8::base::make_unique<protocol::Array<EntryPreview>>(); + entriesPreview = std::make_unique<protocol::Array<EntryPreview>>(); for (const auto& entry : entries) { std::unique_ptr<ObjectPreview> valuePreview; entry.value->buildEntryPreview(context, nameLimit, indexLimit, @@ -1545,11 +1540,11 @@ std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context, const String16& subtype) { // TODO(alph): description and length retrieval should move to embedder. 
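The inspector hunks above and below all make the same mechanical change: V8's local v8::base::make_unique helper is replaced by C++14's std::make_unique, with the <memory> includes it needs added to the headers. A minimal standalone sketch of the resulting pattern, using a placeholder Item type rather than the inspector's protocol classes:

// Illustrative only: Item stands in for a protocol object. The shape matches
// the hunks above: build a protocol::Array-like container of unique_ptrs with
// std::make_unique, then emplace elements into it.
#include <memory>
#include <string>
#include <vector>

struct Item {
  std::string name;
  explicit Item(std::string n) : name(std::move(n)) {}
};

int main() {
  // Before the patch this would have been
  // v8::base::make_unique<std::vector<std::unique_ptr<Item>>>().
  auto items = std::make_unique<std::vector<std::unique_ptr<Item>>>();
  items->emplace_back(std::make_unique<Item>("first"));
  items->emplace_back(std::make_unique<Item>("second"));
  return items->size() == 2 ? 0 : 1;
}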
if (subtype == "node") { - return v8::base::make_unique<ObjectMirror>( - value, subtype, descriptionForNode(context, value)); + return std::make_unique<ObjectMirror>(value, subtype, + descriptionForNode(context, value)); } if (subtype == "error") { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Error, descriptionForError(context, value.As<v8::Object>(), ErrorType::kClient)); @@ -1562,14 +1557,14 @@ std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context, if (object->Get(context, toV8String(isolate, "length")) .ToLocal(&lengthValue)) { if (lengthValue->IsInt32()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Array, descriptionForCollection(isolate, object, lengthValue.As<v8::Int32>()->Value())); } } } - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, descriptionForObject(context->GetIsolate(), value.As<v8::Object>())); } @@ -1577,26 +1572,26 @@ std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context, std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context, v8::Local<v8::Value> value) { if (value->IsNull()) { - return v8::base::make_unique<PrimitiveValueMirror>( + return std::make_unique<PrimitiveValueMirror>( value, RemoteObject::TypeEnum::Object); } if (value->IsBoolean()) { - return v8::base::make_unique<PrimitiveValueMirror>( + return std::make_unique<PrimitiveValueMirror>( value, RemoteObject::TypeEnum::Boolean); } if (value->IsNumber()) { - return v8::base::make_unique<NumberMirror>(value.As<v8::Number>()); + return std::make_unique<NumberMirror>(value.As<v8::Number>()); } v8::Isolate* isolate = context->GetIsolate(); if (value->IsString()) { - return v8::base::make_unique<PrimitiveValueMirror>( + return std::make_unique<PrimitiveValueMirror>( value, RemoteObject::TypeEnum::String); } if (value->IsBigInt()) { - return v8::base::make_unique<BigIntMirror>(value.As<v8::BigInt>()); + return std::make_unique<BigIntMirror>(value.As<v8::BigInt>()); } if (value->IsSymbol()) { - return v8::base::make_unique<SymbolMirror>(value.As<v8::Symbol>()); + return std::make_unique<SymbolMirror>(value.As<v8::Symbol>()); } auto clientSubtype = (value->IsUndefined() || value->IsObject()) ? 
clientFor(context)->valueSubtype(value) @@ -1606,121 +1601,121 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context, return clientMirror(context, value, subtype); } if (value->IsUndefined()) { - return v8::base::make_unique<PrimitiveValueMirror>( + return std::make_unique<PrimitiveValueMirror>( value, RemoteObject::TypeEnum::Undefined); } if (value->IsRegExp()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Regexp, descriptionForRegExp(isolate, value.As<v8::RegExp>())); } if (value->IsProxy()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Proxy, "Proxy"); } if (value->IsFunction()) { - return v8::base::make_unique<FunctionMirror>(value); + return std::make_unique<FunctionMirror>(value); } if (value->IsDate()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Date, descriptionForDate(context, value.As<v8::Date>())); } if (value->IsPromise()) { v8::Local<v8::Promise> promise = value.As<v8::Promise>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( promise, RemoteObject::SubtypeEnum::Promise, descriptionForObject(isolate, promise)); } if (value->IsNativeError()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Error, descriptionForError(context, value.As<v8::Object>(), ErrorType::kNative)); } if (value->IsMap()) { v8::Local<v8::Map> map = value.As<v8::Map>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Map, descriptionForCollection(isolate, map, map->Size())); } if (value->IsSet()) { v8::Local<v8::Set> set = value.As<v8::Set>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Set, descriptionForCollection(isolate, set, set->Size())); } if (value->IsWeakMap()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Weakmap, descriptionForObject(isolate, value.As<v8::Object>())); } if (value->IsWeakSet()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Weakset, descriptionForObject(isolate, value.As<v8::Object>())); } if (value->IsMapIterator() || value->IsSetIterator()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Iterator, descriptionForObject(isolate, value.As<v8::Object>())); } if (value->IsGeneratorObject()) { v8::Local<v8::Object> object = value.As<v8::Object>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( object, RemoteObject::SubtypeEnum::Generator, descriptionForObject(isolate, object)); } if (value->IsTypedArray()) { v8::Local<v8::TypedArray> array = value.As<v8::TypedArray>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Typedarray, descriptionForCollection(isolate, array, array->Length())); } if (value->IsArrayBuffer()) { v8::Local<v8::ArrayBuffer> buffer = value.As<v8::ArrayBuffer>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Arraybuffer, 
descriptionForCollection(isolate, buffer, buffer->ByteLength())); } if (value->IsSharedArrayBuffer()) { v8::Local<v8::SharedArrayBuffer> buffer = value.As<v8::SharedArrayBuffer>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Arraybuffer, descriptionForCollection(isolate, buffer, buffer->ByteLength())); } if (value->IsDataView()) { v8::Local<v8::DataView> view = value.As<v8::DataView>(); - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Dataview, descriptionForCollection(isolate, view, view->ByteLength())); } V8InternalValueType internalType = v8InternalValueTypeFrom(context, v8::Local<v8::Object>::Cast(value)); if (value->IsArray() && internalType == V8InternalValueType::kScopeList) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, "internal#scopeList", descriptionForScopeList(value.As<v8::Array>())); } if (value->IsObject() && internalType == V8InternalValueType::kEntry) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, "internal#entry", descriptionForEntry(context, value.As<v8::Object>())); } if (value->IsObject() && internalType == V8InternalValueType::kScope) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, "internal#scope", descriptionForScope(context, value.As<v8::Object>())); } size_t length = 0; if (value->IsArray() || isArrayLike(context, value, &length)) { length = value->IsArray() ? value.As<v8::Array>()->Length() : length; - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, RemoteObject::SubtypeEnum::Array, descriptionForCollection(isolate, value.As<v8::Object>(), length)); } if (value->IsObject()) { - return v8::base::make_unique<ObjectMirror>( + return std::make_unique<ObjectMirror>( value, descriptionForObject(isolate, value.As<v8::Object>())); } return nullptr; diff --git a/chromium/v8/src/inspector/wasm-translation.cc b/chromium/v8/src/inspector/wasm-translation.cc index 4836a6bc4a3..5a1526d142b 100644 --- a/chromium/v8/src/inspector/wasm-translation.cc +++ b/chromium/v8/src/inspector/wasm-translation.cc @@ -67,15 +67,20 @@ class WasmTranslation::TranslatorImpl { column(column) {} }; - TranslatorImpl(v8::Isolate* isolate, v8::Local<v8::debug::WasmScript> script) + TranslatorImpl(v8::Isolate* isolate, WasmTranslation* translation, + v8::Local<v8::debug::WasmScript> script) : script_(isolate, script) { script_.AnnotateStrongRetainer(kGlobalScriptHandleLabel); + + ForEachFunction(script, [this, translation](String16& script_id, + int func_idx) { + translation->AddFakeScript(GetFakeScriptId(script_id, func_idx), this); + }); } - void Init(v8::Isolate* isolate, WasmTranslation* translation, - V8DebuggerAgentImpl* agent) { - // Register fake scripts for each function in this wasm module/script. 
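The wasm-translation change above moves fake-script registration into the TranslatorImpl constructor and funnels the per-function loop through a ForEachFunction template that takes a callback. A standalone sketch of that shape, where FakeScript is a stand-in and not v8::debug::WasmScript:

// Illustrative only: FakeScript mimics the accessors the real loop relies on
// (NumFunctions, NumImportedFunctions, Id). The callback receives the script
// id and each declared (non-imported) function index, the same iteration used
// both when registering and when reporting fake scripts.
#include <cstdio>
#include <string>

struct FakeScript {
  int NumFunctions() const { return 5; }
  int NumImportedFunctions() const { return 2; }
  int Id() const { return 7; }
};

template <typename Callback>
void ForEachFunction(const FakeScript& script, Callback callback) {
  const std::string script_id = std::to_string(script.Id());
  for (int func_idx = script.NumImportedFunctions();
       func_idx < script.NumFunctions(); ++func_idx) {
    callback(script_id, func_idx);
  }
}

int main() {
  // One lambda can register, another can report; both reuse the same loop.
  ForEachFunction(FakeScript{}, [](const std::string& id, int func_idx) {
    std::printf("wasm-script-%s-%d\n", id.c_str(), func_idx);
  });
  return 0;
}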
- v8::Local<v8::debug::WasmScript> script = script_.Get(isolate); + template <typename Callback> + void ForEachFunction(v8::Local<v8::debug::WasmScript> script, + Callback callback) { int num_functions = script->NumFunctions(); int num_imported_functions = script->NumImportedFunctions(); DCHECK_LE(0, num_imported_functions); @@ -84,10 +89,18 @@ class WasmTranslation::TranslatorImpl { String16 script_id = String16::fromInteger(script->Id()); for (int func_idx = num_imported_functions; func_idx < num_functions; ++func_idx) { - AddFakeScript(isolate, script_id, func_idx, translation, agent); + callback(script_id, func_idx); } } + void ReportFakeScripts(v8::Isolate* isolate, WasmTranslation* translation, + V8DebuggerAgentImpl* agent) { + ForEachFunction( + script_.Get(isolate), [=](String16& script_id, int func_idx) { + ReportFakeScript(isolate, script_id, func_idx, translation, agent); + }); + } + void Translate(TransLocation* loc) { const OffsetTable& offset_table = GetOffsetTable(loc); DCHECK(!offset_table.empty()); @@ -212,9 +225,10 @@ class WasmTranslation::TranslatorImpl { return GetFakeScriptId(loc->script_id, loc->line); } - void AddFakeScript(v8::Isolate* isolate, const String16& underlyingScriptId, - int func_idx, WasmTranslation* translation, - V8DebuggerAgentImpl* agent) { + void ReportFakeScript(v8::Isolate* isolate, + const String16& underlyingScriptId, int func_idx, + WasmTranslation* translation, + V8DebuggerAgentImpl* agent) { String16 fake_script_id = GetFakeScriptId(underlyingScriptId, func_idx); String16 fake_script_url = GetFakeScriptUrl(isolate, func_idx); @@ -223,7 +237,6 @@ class WasmTranslation::TranslatorImpl { fake_script_id, std::move(fake_script_url), func_idx); - translation->AddFakeScript(fake_script->scriptId(), this); agent->didParseSource(std::move(fake_script), true); } @@ -254,6 +267,9 @@ class WasmTranslation::TranslatorImpl { // We assume to only disassemble a subset of the functions, so store them in a // map instead of an array. std::unordered_map<int, WasmSourceInformation> source_informations_; + + // Disallow copies, because our pointer is registered in translation. + DISALLOW_COPY_AND_ASSIGN(TranslatorImpl); }; constexpr char WasmTranslation::TranslatorImpl::kGlobalScriptHandleLabel[]; @@ -264,15 +280,11 @@ WasmTranslation::~WasmTranslation() { Clear(); } void WasmTranslation::AddScript(v8::Local<v8::debug::WasmScript> script, V8DebuggerAgentImpl* agent) { - std::unique_ptr<TranslatorImpl> impl; - impl.reset(new TranslatorImpl(isolate_, script)); - DCHECK(impl); - auto inserted = - wasm_translators_.insert(std::make_pair(script->Id(), std::move(impl))); - // Check that no mapping for this script id existed before. - DCHECK(inserted.second); - // impl has been moved, use the returned iterator to call Init. 
- inserted.first->second->Init(isolate_, this, agent); + auto& impl = wasm_translators_[script->Id()]; + if (impl == nullptr) { + impl = std::make_unique<TranslatorImpl>(isolate_, this, script); + } + impl->ReportFakeScripts(isolate_, this, agent); } void WasmTranslation::Clear() { diff --git a/chromium/v8/src/inspector/wasm-translation.h b/chromium/v8/src/inspector/wasm-translation.h index 2d41822e59a..a19aa852051 100644 --- a/chromium/v8/src/inspector/wasm-translation.h +++ b/chromium/v8/src/inspector/wasm-translation.h @@ -5,6 +5,7 @@ #ifndef V8_INSPECTOR_WASM_TRANSLATION_H_ #define V8_INSPECTOR_WASM_TRANSLATION_H_ +#include <memory> #include <unordered_map> #include "include/v8.h" diff --git a/chromium/v8/src/interpreter/bytecode-array-accessor.cc b/chromium/v8/src/interpreter/bytecode-array-accessor.cc index d460c1a45f7..0690e16aa9a 100644 --- a/chromium/v8/src/interpreter/bytecode-array-accessor.cc +++ b/chromium/v8/src/interpreter/bytecode-array-accessor.cc @@ -66,7 +66,7 @@ BytecodeArrayAccessor::BytecodeArrayAccessor( BytecodeArrayAccessor::BytecodeArrayAccessor( Handle<BytecodeArray> bytecode_array, int initial_offset) : BytecodeArrayAccessor( - base::make_unique<OnHeapBytecodeArray>(bytecode_array), + std::make_unique<OnHeapBytecodeArray>(bytecode_array), initial_offset) {} void BytecodeArrayAccessor::SetOffset(int offset) { diff --git a/chromium/v8/src/interpreter/bytecode-array-accessor.h b/chromium/v8/src/interpreter/bytecode-array-accessor.h index 97278af7bd0..92d0da66071 100644 --- a/chromium/v8/src/interpreter/bytecode-array-accessor.h +++ b/chromium/v8/src/interpreter/bytecode-array-accessor.h @@ -5,6 +5,8 @@ #ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_ #define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_ +#include <memory> + #include "src/base/optional.h" #include "src/common/globals.h" #include "src/handles/handles.h" diff --git a/chromium/v8/src/interpreter/bytecode-array-builder.cc b/chromium/v8/src/interpreter/bytecode-array-builder.cc index cfc3eb36c15..1c61776cdfa 100644 --- a/chromium/v8/src/interpreter/bytecode-array-builder.cc +++ b/chromium/v8/src/interpreter/bytecode-array-builder.cc @@ -824,9 +824,16 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty( return *this; } -BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator(Register object, - int feedback_slot) { - OutputGetIterator(object, feedback_slot); +BytecodeArrayBuilder& BytecodeArrayBuilder::LoadIteratorProperty( + Register object, int feedback_slot) { + size_t name_index = IteratorSymbolConstantPoolEntry(); + OutputLdaNamedProperty(object, name_index, feedback_slot); + return *this; +} + +BytecodeArrayBuilder& BytecodeArrayBuilder::GetIterator( + Register object, int load_feedback_slot, int call_feedback_slot) { + OutputGetIterator(object, load_feedback_slot, call_feedback_slot); return *this; } diff --git a/chromium/v8/src/interpreter/bytecode-array-builder.h b/chromium/v8/src/interpreter/bytecode-array-builder.h index 06230f9270d..39cd4fa6f60 100644 --- a/chromium/v8/src/interpreter/bytecode-array-builder.h +++ b/chromium/v8/src/interpreter/bytecode-array-builder.h @@ -135,7 +135,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot); // Named load property of the @@iterator symbol. 
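The rewritten WasmTranslation::AddScript above tolerates repeated calls for the same script id: unordered_map::operator[] creates an empty slot on first use, the translator is constructed only while that slot is still null, and the fake scripts are reported on every call. A minimal sketch of that idiom with placeholder types:

// Illustrative only: Translator stands in for TranslatorImpl. The point is
// the operator[] plus null-check idiom, which makes registration idempotent
// while still repeating the per-call work (here, Report).
#include <cassert>
#include <memory>
#include <unordered_map>

struct Translator {
  int reports = 0;
  void Report() { ++reports; }
};

class Registry {
 public:
  void AddScript(int script_id) {
    auto& impl = translators_[script_id];     // creates a null slot if absent
    if (impl == nullptr) {
      impl = std::make_unique<Translator>();  // construct only once
    }
    impl->Report();                           // repeated on every call
  }
  const Translator& at(int id) const { return *translators_.at(id); }

 private:
  std::unordered_map<int, std::unique_ptr<Translator>> translators_;
};

int main() {
  Registry registry;
  registry.AddScript(42);
  registry.AddScript(42);  // same id: reuses the existing translator
  assert(registry.at(42).reports == 2);
  return 0;
}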
- BytecodeArrayBuilder& GetIterator(Register object, int feedback_slot); + BytecodeArrayBuilder& LoadIteratorProperty(Register object, + int feedback_slot); + + // Load and call property of the @@iterator symbol + BytecodeArrayBuilder& GetIterator(Register object, int load_feedback_slot, + int call_feedback_slot); // Named load property of the @@asyncIterator symbol. BytecodeArrayBuilder& LoadAsyncIteratorProperty(Register object, diff --git a/chromium/v8/src/interpreter/bytecode-array-iterator.h b/chromium/v8/src/interpreter/bytecode-array-iterator.h index e6b58deadc4..b992ffc0374 100644 --- a/chromium/v8/src/interpreter/bytecode-array-iterator.h +++ b/chromium/v8/src/interpreter/bytecode-array-iterator.h @@ -5,6 +5,8 @@ #ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_ #define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_ +#include <memory> + #include "src/interpreter/bytecode-array-accessor.h" namespace v8 { diff --git a/chromium/v8/src/interpreter/bytecode-array-random-iterator.h b/chromium/v8/src/interpreter/bytecode-array-random-iterator.h index a3b69b70158..68905a146cc 100644 --- a/chromium/v8/src/interpreter/bytecode-array-random-iterator.h +++ b/chromium/v8/src/interpreter/bytecode-array-random-iterator.h @@ -5,6 +5,8 @@ #ifndef V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_ #define V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_ +#include <memory> + #include "src/interpreter/bytecode-array-accessor.h" #include "src/zone/zone-containers.h" #include "src/zone/zone.h" diff --git a/chromium/v8/src/interpreter/bytecode-generator.cc b/chromium/v8/src/interpreter/bytecode-generator.cc index 29065d6a55a..92ae15127e4 100644 --- a/chromium/v8/src/interpreter/bytecode-generator.cc +++ b/chromium/v8/src/interpreter/bytecode-generator.cc @@ -2042,7 +2042,71 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) { VisitDeclarations(expr->scope()->declarations()); Register class_constructor = register_allocator()->NewRegister(); + // Create the class brand symbol and store it on the context during class + // evaluation. This will be stored in the instance later in the constructor. + // We do this early so that invalid access to private methods or accessors + // in computed property keys throw. + if (expr->scope()->brand() != nullptr) { + Register brand = register_allocator()->NewRegister(); + const AstRawString* class_name = + expr->scope()->class_variable() != nullptr + ? expr->scope()->class_variable()->raw_name() + : ast_string_constants()->empty_string(); + builder() + ->LoadLiteral(class_name) + .StoreAccumulatorInRegister(brand) + .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand); + BuildVariableAssignment(expr->scope()->brand(), Token::INIT, + HoleCheckMode::kElided); + } + AccessorTable<ClassLiteral::Property> private_accessors(zone()); + for (int i = 0; i < expr->private_members()->length(); i++) { + ClassLiteral::Property* property = expr->private_members()->at(i); + DCHECK(property->is_private()); + switch (property->kind()) { + case ClassLiteral::Property::FIELD: { + // Initialize the private field variables early. + // Create the private name symbols for fields during class + // evaluation and store them on the context. These will be + // used as keys later during instance or static initialization. 
+ RegisterAllocationScope private_name_register_scope(this); + Register private_name = register_allocator()->NewRegister(); + VisitForRegisterValue(property->key(), private_name); + builder() + ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName()) + .StoreAccumulatorInRegister(private_name) + .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name); + DCHECK_NOT_NULL(property->private_name_var()); + BuildVariableAssignment(property->private_name_var(), Token::INIT, + HoleCheckMode::kElided); + break; + } + case ClassLiteral::Property::METHOD: { + // We can initialize the private methods and accessors later so that the + // home objects can be assigned right after the creation of the + // closures, and those are guarded by the brand checks. + break; + } + // Collect private accessors into a table to merge the creation of + // those closures later. + case ClassLiteral::Property::GETTER: { + Literal* key = property->key()->AsLiteral(); + DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter); + private_accessors.LookupOrInsert(key)->getter = property; + break; + } + case ClassLiteral::Property::SETTER: { + Literal* key = property->key()->AsLiteral(); + DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter); + private_accessors.LookupOrInsert(key)->setter = property; + break; + } + default: + UNREACHABLE(); + } + } + { RegisterAllocationScope register_scope(this); RegisterList args = register_allocator()->NewGrowableRegisterList(); @@ -2065,8 +2129,8 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) { .StoreAccumulatorInRegister(class_boilerplate); // Create computed names and method values nodes to store into the literal. - for (int i = 0; i < expr->properties()->length(); i++) { - ClassLiteral::Property* property = expr->properties()->at(i); + for (int i = 0; i < expr->public_members()->length(); i++) { + ClassLiteral::Property* property = expr->public_members()->at(i); if (property->is_computed_name()) { Register key = register_allocator()->GrowRegisterList(&args); @@ -2099,50 +2163,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) { } } - if (property->is_private()) { - // Assign private class member's name variables. - switch (property->kind()) { - case ClassLiteral::Property::FIELD: { - // Create the private name symbols for fields during class - // evaluation and store them on the context. These will be - // used as keys later during instance or static initialization. - RegisterAllocationScope private_name_register_scope(this); - Register private_name = register_allocator()->NewRegister(); - VisitForRegisterValue(property->key(), private_name); - builder() - ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName()) - .StoreAccumulatorInRegister(private_name) - .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name); - DCHECK_NOT_NULL(property->private_name_var()); - BuildVariableAssignment(property->private_name_var(), Token::INIT, - HoleCheckMode::kElided); - break; - } - case ClassLiteral::Property::METHOD: { - // Create the closures for private methods. 
- VisitForAccumulatorValue(property->value()); - BuildVariableAssignment(property->private_name_var(), Token::INIT, - HoleCheckMode::kElided); - break; - } - case ClassLiteral::Property::GETTER: { - Literal* key = property->key()->AsLiteral(); - DCHECK_NULL(private_accessors.LookupOrInsert(key)->getter); - private_accessors.LookupOrInsert(key)->getter = property; - break; - } - case ClassLiteral::Property::SETTER: { - Literal* key = property->key()->AsLiteral(); - DCHECK_NULL(private_accessors.LookupOrInsert(key)->setter); - private_accessors.LookupOrInsert(key)->setter = property; - break; - } - } - // The private fields are initialized in the initializer function and - // the private brand for the private methods are initialized in the - // constructor instead. - continue; - } + DCHECK(!property->is_private()); if (property->kind() == ClassLiteral::Property::FIELD) { // We don't compute field's value here, but instead do it in the @@ -2160,60 +2181,55 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) { builder()->StoreAccumulatorInRegister(prototype); // Assign to class variable. - if (expr->class_variable() != nullptr) { - DCHECK(expr->class_variable()->IsStackLocal() || - expr->class_variable()->IsContextSlot()); + Variable* class_variable = expr->scope()->class_variable(); + if (class_variable != nullptr && class_variable->is_used()) { + DCHECK(class_variable->IsStackLocal() || class_variable->IsContextSlot()); builder()->LoadAccumulatorWithRegister(class_constructor); - BuildVariableAssignment(expr->class_variable(), Token::INIT, + BuildVariableAssignment(class_variable, Token::INIT, HoleCheckMode::kElided); } - // Create the class brand symbol and store it on the context - // during class evaluation. This will be stored in the - // receiver later in the constructor. - if (expr->scope()->brand() != nullptr) { - Register brand = register_allocator()->NewRegister(); - const AstRawString* class_name = - expr->class_variable() != nullptr - ? expr->class_variable()->raw_name() - : ast_string_constants()->empty_string(); - builder() - ->LoadLiteral(class_name) - .StoreAccumulatorInRegister(brand) - .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand); - BuildVariableAssignment(expr->scope()->brand(), Token::INIT, - HoleCheckMode::kElided); - - // Store the home object for any private methods that need - // them. We do this here once the prototype and brand symbol has - // been created. Private accessors have their home object set later - // when they are defined. - for (int i = 0; i < expr->properties()->length(); i++) { + // Create the closures of private methods, and store the home object for + // any private methods that need them. + if (expr->has_private_methods()) { + for (int i = 0; i < expr->private_members()->length(); i++) { + ClassLiteral::Property* property = expr->private_members()->at(i); + if (property->kind() != ClassLiteral::Property::METHOD) { + continue; + } RegisterAllocationScope register_scope(this); - ClassLiteral::Property* property = expr->properties()->at(i); + VisitForAccumulatorValue(property->value()); + BuildVariableAssignment(property->private_name_var(), Token::INIT, + HoleCheckMode::kElided); + Register home_object = property->private_name_var()->is_static() + ? 
class_constructor + : prototype; if (property->NeedsHomeObjectOnClassPrototype()) { Register func = register_allocator()->NewRegister(); - BuildVariableLoad(property->private_name_var(), HoleCheckMode::kElided); builder()->StoreAccumulatorInRegister(func); - VisitSetHomeObject(func, prototype, property); + VisitSetHomeObject(func, home_object, property); } } + } - // Define accessors, using only a single call to the runtime for each pair - // of corresponding getters and setters. - for (auto accessors : private_accessors.ordered_accessors()) { - RegisterAllocationScope inner_register_scope(this); - RegisterList accessors_reg = register_allocator()->NewRegisterList(2); - ClassLiteral::Property* getter = accessors.second->getter; - ClassLiteral::Property* setter = accessors.second->setter; - VisitLiteralAccessor(prototype, getter, accessors_reg[0]); - VisitLiteralAccessor(prototype, setter, accessors_reg[1]); - builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg); - Variable* var = getter != nullptr ? getter->private_name_var() - : setter->private_name_var(); - DCHECK_NOT_NULL(var); - BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided); - } + // Define private accessors, using only a single call to the runtime for + // each pair of corresponding getters and setters, in the order the first + // component is declared. Store the home objects if necessary. + for (auto accessors : private_accessors.ordered_accessors()) { + RegisterAllocationScope inner_register_scope(this); + RegisterList accessors_reg = register_allocator()->NewRegisterList(2); + ClassLiteral::Property* getter = accessors.second->getter; + ClassLiteral::Property* setter = accessors.second->setter; + bool is_static = + getter != nullptr ? getter->is_static() : setter->is_static(); + Register home_object = is_static ? class_constructor : prototype; + VisitLiteralAccessor(home_object, getter, accessors_reg[0]); + VisitLiteralAccessor(home_object, setter, accessors_reg[1]); + builder()->CallRuntime(Runtime::kCreatePrivateAccessors, accessors_reg); + Variable* var = getter != nullptr ? getter->private_name_var() + : setter->private_name_var(); + DCHECK_NOT_NULL(var); + BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kElided); } if (expr->instance_members_initializer_function() != nullptr) { @@ -3086,7 +3102,8 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) { .StoreAccumulatorInRegister(args[2]) // done .CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args); } else { - DCHECK(IsAsyncFunction(info()->literal()->kind())); + DCHECK(IsAsyncFunction(info()->literal()->kind()) || + IsAsyncModule(info()->literal()->kind())); RegisterList args = register_allocator()->NewRegisterList(3); builder() ->MoveRegister(generator_object(), args[0]) // generator @@ -3921,7 +3938,8 @@ void BytecodeGenerator::BuildAssignment( Property* property = lhs_data.expr()->AsProperty(); Register object = VisitForRegisterValue(property->obj()); Register key = VisitForRegisterValue(property->key()); - BuildPrivateBrandCheck(property, object); + BuildPrivateBrandCheck(property, object, + MessageTemplate::kInvalidPrivateMemberWrite); BuildPrivateSetterAccess(object, key, value); if (!execution_result()->IsEffect()) { builder()->LoadAccumulatorWithRegister(value); @@ -4004,6 +4022,12 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) { // in the accumulator. When the generator is resumed, the sent value is loaded // in the accumulator. 
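The BuildClassLiteral hunks above first collect private getters and setters into a table keyed by name, then walk that table and define each getter/setter pair in a single step, choosing the class constructor as home object for static members and the prototype otherwise. A compact standalone sketch of the collect-then-pair step; the names and types here are illustrative and not the AccessorTable API:

// Illustrative only: Property/Accessors are stand-ins. Collecting both halves
// of a pair under one key lets one "define" step install getter and setter
// together, mirroring the single CreatePrivateAccessors runtime call.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Property {
  std::string name;
  bool is_getter;
  bool is_static;
};

struct Accessors {
  const Property* getter = nullptr;
  const Property* setter = nullptr;
};

int main() {
  std::vector<Property> private_members = {
      {"#x", true, false}, {"#x", false, false}, {"#s", true, true}};

  // Keyed by name; the real table preserves declaration order of the first
  // component, this sketch simply uses key order.
  std::map<std::string, Accessors> table;
  for (const Property& p : private_members) {
    Accessors& slot = table[p.name];
    (p.is_getter ? slot.getter : slot.setter) = &p;
  }

  for (const auto& entry : table) {
    const Property* any =
        entry.second.getter ? entry.second.getter : entry.second.setter;
    const char* home = any->is_static ? "class constructor" : "prototype";
    std::printf("define pair %s on %s (getter=%d, setter=%d)\n",
                entry.first.c_str(), home, entry.second.getter != nullptr,
                entry.second.setter != nullptr);
  }
  return 0;
}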
void BytecodeGenerator::BuildSuspendPoint(int position) { + // Because we eliminate jump targets in dead code, we also eliminate resumes + // when the suspend is not emitted because otherwise the below call to Bind + // would start a new basic block and the code would be considered alive. + if (builder()->RemainderOfBlockIsDead()) { + return; + } const int suspend_id = suspend_count_++; RegisterList registers = register_allocator()->AllLiveRegisters(); @@ -4454,12 +4478,14 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) { case PRIVATE_GETTER_ONLY: case PRIVATE_GETTER_AND_SETTER: { Register key = VisitForRegisterValue(property->key()); - BuildPrivateBrandCheck(property, obj); + BuildPrivateBrandCheck(property, obj, + MessageTemplate::kInvalidPrivateMemberRead); BuildPrivateGetterAccess(obj, key); break; } case PRIVATE_METHOD: { - BuildPrivateBrandCheck(property, obj); + BuildPrivateBrandCheck(property, obj, + MessageTemplate::kInvalidPrivateMemberRead); // In the case of private methods, property->key() is the function to be // loaded (stored in a context slot), so load this directly. VisitForAccumulatorValue(property->key()); @@ -4499,15 +4525,29 @@ void BytecodeGenerator::BuildPrivateSetterAccess(Register object, } void BytecodeGenerator::BuildPrivateBrandCheck(Property* property, - Register object) { + Register object, + MessageTemplate tmpl) { Variable* private_name = property->key()->AsVariableProxy()->var(); - DCHECK(private_name->requires_brand_check()); + DCHECK(IsPrivateMethodOrAccessorVariableMode(private_name->mode())); ClassScope* scope = private_name->scope()->AsClassScope(); - Variable* brand = scope->brand(); - BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided); - builder()->SetExpressionPosition(property); - builder()->LoadKeyedProperty( - object, feedback_index(feedback_spec()->AddKeyedLoadICSlot())); + if (private_name->is_static()) { + DCHECK_NOT_NULL(scope->class_variable()); + // For static private methods, the only valid receiver is the class. + // Load the class constructor. 
+ BuildVariableLoadForAccumulatorValue(scope->class_variable(), + HoleCheckMode::kElided); + BytecodeLabel return_check; + builder()->CompareReference(object).JumpIfTrue( + ToBooleanMode::kAlreadyBoolean, &return_check); + BuildInvalidPropertyAccess(tmpl, property); + builder()->Bind(&return_check); + } else { + BuildVariableLoadForAccumulatorValue(scope->brand(), + HoleCheckMode::kElided); + builder()->SetExpressionPosition(property); + builder()->LoadKeyedProperty( + object, feedback_index(feedback_spec()->AddKeyedLoadICSlot())); + } } void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj, @@ -5113,7 +5153,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) { case PRIVATE_GETTER_AND_SETTER: { object = VisitForRegisterValue(property->obj()); key = VisitForRegisterValue(property->key()); - BuildPrivateBrandCheck(property, object); + BuildPrivateBrandCheck(property, object, + MessageTemplate::kInvalidPrivateMemberRead); BuildPrivateGetterAccess(object, key); break; } @@ -5407,7 +5448,8 @@ void BytecodeGenerator::BuildGetIterator(IteratorType hint) { // If method is undefined, // Let syncMethod be GetMethod(obj, @@iterator) builder() - ->GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot())) + ->LoadIteratorProperty(obj, + feedback_index(feedback_spec()->AddLoadICSlot())) .StoreAccumulatorInRegister(method); // Let syncIterator be Call(syncMethod, obj) @@ -5426,24 +5468,17 @@ void BytecodeGenerator::BuildGetIterator(IteratorType hint) { RegisterAllocationScope scope(this); Register obj = register_allocator()->NewRegister(); - Register method = register_allocator()->NewRegister(); - - // Let method be GetMethod(obj, @@iterator). - builder() - ->StoreAccumulatorInRegister(obj) - .GetIterator(obj, feedback_index(feedback_spec()->AddLoadICSlot())) - .StoreAccumulatorInRegister(method); + int load_feedback_index = + feedback_index(feedback_spec()->AddLoadICSlot()); + int call_feedback_index = + feedback_index(feedback_spec()->AddCallICSlot()); - // Let iterator be Call(method, obj). - builder()->CallProperty(method, RegisterList(obj), - feedback_index(feedback_spec()->AddCallICSlot())); + // Let method be GetMethod(obj, @@iterator) and + // iterator be Call(method, obj). If Type(iterator) is not Object, + // throw a SymbolIteratorInvalid exception. + builder()->StoreAccumulatorInRegister(obj).GetIterator( + obj, load_feedback_index, call_feedback_index); } - - // If Type(iterator) is not Object, throw a TypeError exception. - BytecodeLabel no_type_error; - builder()->JumpIfJSReceiver(&no_type_error); - builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid); - builder()->Bind(&no_type_error); } } @@ -6102,8 +6137,9 @@ void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() { RegisterAllocationScope register_scope(this); RegisterList args = register_allocator()->NewRegisterList(2); Runtime::FunctionId function_id = - (IsAsyncFunction(info()->literal()->kind()) && - !IsAsyncGeneratorFunction(info()->literal()->kind())) + ((IsAsyncFunction(info()->literal()->kind()) && + !IsAsyncGeneratorFunction(info()->literal()->kind())) || + IsAsyncModule(info()->literal()->kind())) ? 
Runtime::kInlineAsyncFunctionEnter : Runtime::kInlineCreateJSGeneratorObject; builder() diff --git a/chromium/v8/src/interpreter/bytecode-generator.h b/chromium/v8/src/interpreter/bytecode-generator.h index 134b1b463ab..ecfe50ba5a4 100644 --- a/chromium/v8/src/interpreter/bytecode-generator.h +++ b/chromium/v8/src/interpreter/bytecode-generator.h @@ -250,12 +250,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op); void BuildThrowIfHole(Variable* variable); - // Build jump to targets[value], where - // start_index <= value < start_index + size. - void BuildIndexedJump( - Register value, size_t start_index, size_t size, - ZoneVector<BytecodeLabel>& targets); // NOLINT(runtime/references) - void BuildNewLocalActivationContext(); void BuildLocalActivationContextInitialization(); void BuildNewLocalBlockContext(Scope* scope); @@ -307,10 +301,13 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { void VisitRestArgumentsArray(Variable* rest); void VisitCallSuper(Call* call); void BuildInvalidPropertyAccess(MessageTemplate tmpl, Property* property); - void BuildPrivateBrandCheck(Property* property, Register object); + void BuildPrivateBrandCheck(Property* property, Register object, + MessageTemplate tmpl); void BuildPrivateGetterAccess(Register obj, Register access_pair); void BuildPrivateSetterAccess(Register obj, Register access_pair, Register value); + void BuildPrivateMethods(ClassLiteral* expr, bool is_static, + Register home_object); void BuildClassLiteral(ClassLiteral* expr, Register name); void VisitClassLiteral(ClassLiteral* expr, Register name); void VisitNewTargetVariable(Variable* variable); diff --git a/chromium/v8/src/interpreter/bytecodes.cc b/chromium/v8/src/interpreter/bytecodes.cc index 60f30ee1d98..88e80b96135 100644 --- a/chromium/v8/src/interpreter/bytecodes.cc +++ b/chromium/v8/src/interpreter/bytecodes.cc @@ -217,6 +217,7 @@ bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) { case Bytecode::kCreateBlockContext: case Bytecode::kCreateCatchContext: case Bytecode::kCreateRegExpLiteral: + case Bytecode::kGetIterator: return true; default: return false; diff --git a/chromium/v8/src/interpreter/bytecodes.h b/chromium/v8/src/interpreter/bytecodes.h index 6802d53c955..80f9e4d3112 100644 --- a/chromium/v8/src/interpreter/bytecodes.h +++ b/chromium/v8/src/interpreter/bytecodes.h @@ -356,7 +356,8 @@ namespace interpreter { OperandType::kRegOutList, OperandType::kRegCount) \ \ /* Iterator protocol operations */ \ - V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx) \ + V(GetIterator, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx, \ + OperandType::kIdx) \ \ /* Debugger */ \ V(Debugger, AccumulatorUse::kNone) \ diff --git a/chromium/v8/src/interpreter/constant-array-builder.cc b/chromium/v8/src/interpreter/constant-array-builder.cc index 167b0ee7e24..0a4bdd62f76 100644 --- a/chromium/v8/src/interpreter/constant-array-builder.cc +++ b/chromium/v8/src/interpreter/constant-array-builder.cc @@ -378,7 +378,7 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const { case Tag::kRawString: return raw_string_->string(); case Tag::kHeapNumber: - return isolate->factory()->NewNumber(heap_number_, AllocationType::kOld); + return isolate->factory()->NewNumber<AllocationType::kOld>(heap_number_); case Tag::kBigInt: // This should never fail: the parser will never create a BigInt // literal that cannot be 
allocated. diff --git a/chromium/v8/src/interpreter/interpreter-assembler.cc b/chromium/v8/src/interpreter/interpreter-assembler.cc index f01821b5651..a55e074b3ae 100644 --- a/chromium/v8/src/interpreter/interpreter-assembler.cc +++ b/chromium/v8/src/interpreter/interpreter-assembler.cc @@ -22,8 +22,6 @@ namespace interpreter { using compiler::CodeAssemblerState; using compiler::Node; -template <class T> -using TNode = compiler::TNode<T>; InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state, Bytecode bytecode, @@ -32,19 +30,19 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state, bytecode_(bytecode), operand_scale_(operand_scale), TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_), - VARIABLE_CONSTRUCTOR( - bytecode_array_, MachineRepresentation::kTagged, - Parameter(InterpreterDispatchDescriptor::kBytecodeArray)), + TVARIABLE_CONSTRUCTOR( + bytecode_array_, + CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))), TVARIABLE_CONSTRUCTOR( bytecode_offset_, UncheckedCast<IntPtrT>( Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))), - VARIABLE_CONSTRUCTOR( - dispatch_table_, MachineType::PointerRepresentation(), - Parameter(InterpreterDispatchDescriptor::kDispatchTable)), - VARIABLE_CONSTRUCTOR( - accumulator_, MachineRepresentation::kTagged, - Parameter(InterpreterDispatchDescriptor::kAccumulator)), + TVARIABLE_CONSTRUCTOR( + dispatch_table_, UncheckedCast<ExternalReference>(Parameter( + InterpreterDispatchDescriptor::kDispatchTable))), + TVARIABLE_CONSTRUCTOR( + accumulator_, + CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))), accumulator_use_(AccumulatorUse::kNone), made_call_(false), reloaded_frame_ptr_(false), @@ -129,27 +127,27 @@ void InterpreterAssembler::SaveBytecodeOffset() { } } -Node* InterpreterAssembler::BytecodeArrayTaggedPointer() { +TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() { // Force a re-load of the bytecode array after every call in case the debugger // has been activated. if (!bytecode_array_valid_) { - bytecode_array_.Bind(LoadRegister(Register::bytecode_array())); + bytecode_array_ = CAST(LoadRegister(Register::bytecode_array())); bytecode_array_valid_ = true; } return bytecode_array_.value(); } -Node* InterpreterAssembler::DispatchTableRawPointer() { +TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() { if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ && (dispatch_table_.value() == Parameter(InterpreterDispatchDescriptor::kDispatchTable))) { - dispatch_table_.Bind(ExternalConstant( - ExternalReference::interpreter_dispatch_table_address(isolate()))); + dispatch_table_ = ExternalConstant( + ExternalReference::interpreter_dispatch_table_address(isolate())); } return dispatch_table_.value(); } -Node* InterpreterAssembler::GetAccumulatorUnchecked() { +TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() { return accumulator_.value(); } @@ -159,10 +157,11 @@ TNode<Object> InterpreterAssembler::GetAccumulator() { return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked()); } -void InterpreterAssembler::SetAccumulator(Node* value) { +// TODO(v8:6949): Remove sloppy-ness from SetAccumulator's value argument. 
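BytecodeArrayTaggedPointer and DispatchTablePointer above keep a cached value but deliberately re-load it once a call has been made, since a call may have activated the debugger and swapped the underlying data. A standalone sketch of that cache-and-invalidate-on-call shape; Frame and GetFresh are placeholders, not V8 APIs:

// Illustrative only: Frame::GetFresh stands in for "re-read from the frame or
// external reference". The cached copy is trusted only while no call has been
// made since it was loaded; any call clears the valid flag, mirroring how the
// assembler re-loads the bytecode array after calls.
#include <cassert>

struct Frame {
  int generation = 0;  // bumped whenever a call might replace the data
  int GetFresh() const { return generation; }
};

class Cache {
 public:
  explicit Cache(Frame* frame) : frame_(frame) {}

  int Value() {
    if (!valid_) {
      cached_ = frame_->GetFresh();  // force a re-load after a call
      valid_ = true;
    }
    return cached_;
  }

  void NoteCallMade() { valid_ = false; }  // analogous to made_call_ = true

 private:
  Frame* frame_;
  int cached_ = 0;
  bool valid_ = false;
};

int main() {
  Frame frame;
  Cache cache(&frame);
  assert(cache.Value() == 0);
  frame.generation = 1;  // e.g. debugger activated during a call
  cache.NoteCallMade();
  assert(cache.Value() == 1);  // re-loaded, not the stale 0
  return 0;
}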
+void InterpreterAssembler::SetAccumulator(SloppyTNode<Object> value) { DCHECK(Bytecodes::WritesAccumulator(bytecode_)); accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; - accumulator_.Bind(value); + accumulator_ = value; } TNode<Context> InterpreterAssembler::GetContext() { @@ -173,15 +172,14 @@ void InterpreterAssembler::SetContext(TNode<Context> value) { StoreRegister(value, Register::current_context()); } -Node* InterpreterAssembler::GetContextAtDepth(TNode<Context> context, - TNode<Uint32T> depth) { +TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context, + TNode<Uint32T> depth) { TVARIABLE(Context, cur_context, context); TVARIABLE(Uint32T, cur_depth, depth); Label context_found(this); - Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context}; - Label context_search(this, 2, context_search_loop_variables); + Label context_search(this, {&cur_depth, &cur_context}); // Fast path if the depth is 0. Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search); @@ -206,33 +204,38 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth( TVARIABLE(Context, cur_context, context); TVARIABLE(Uint32T, cur_depth, depth); - Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context}; - Label context_search(this, 2, context_search_loop_variables); + Label context_search(this, {&cur_depth, &cur_context}); + Label no_extension(this); // Loop until the depth is 0. Goto(&context_search); BIND(&context_search); { - // TODO(leszeks): We only need to do this check if the context had a sloppy - // eval, we could pass in a context chain bitmask to figure out which - // contexts actually need to be checked. + // Check if context has an extension slot + TNode<BoolT> has_extension = + LoadContextHasExtensionField(cur_context.value()); + GotoIfNot(has_extension, &no_extension); + // Jump to the target if the extension slot is not a hole. TNode<Object> extension_slot = LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX); + Branch(TaggedNotEqual(extension_slot, TheHoleConstant()), target, + &no_extension); - // Jump to the target if the extension slot is not a hole. 
- GotoIf(TaggedNotEqual(extension_slot, TheHoleConstant()), target); - - cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1))); - cur_context = - CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); + BIND(&no_extension); + { + cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1))); + cur_context = CAST( + LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); - GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)), - &context_search); + GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)), + &context_search); + } } } -TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Node* reg_index) { +TNode<IntPtrT> InterpreterAssembler::RegisterLocation( + TNode<IntPtrT> reg_index) { return Signed(WordPoisonOnSpeculation( IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)))); } @@ -241,11 +244,11 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) { return RegisterLocation(IntPtrConstant(reg.ToOperand())); } -TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(Node* index) { - return Signed(TimesSystemPointerSize(index)); +TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) { + return TimesSystemPointerSize(index); } -TNode<Object> InterpreterAssembler::LoadRegister(Node* reg_index) { +TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) { return LoadFullTagged(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index), LoadSensitivity::kCritical); @@ -281,7 +284,7 @@ std::pair<TNode<Object>, TNode<Object>> InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) { DCHECK_EQ(OperandType::kRegPair, Bytecodes::GetOperandType(bytecode_, operand_index)); - Node* first_reg_index = + TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index); return std::make_pair(LoadRegister(first_reg_index), @@ -300,7 +303,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) { return RegListNodePair(base_reg, reg_count); } -Node* InterpreterAssembler::LoadRegisterFromRegisterList( +TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList( const RegListNodePair& reg_list, int index) { TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index); // Location is already poisoned on speculation, so no need to poison here. 
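The GotoIfHasContextExtensionUpToDepth hunk above adds a cheap per-context "has extension slot" check before touching the extension slot itself, walking outward through the context chain for at most the given depth. A plain C++ sketch of the same walk over a linked list of contexts; Context here is a toy struct, not v8::Context:

// Illustrative only: each Context either has no extension slot at all,
// has a slot that is still "the hole" (modelled as nullptr), or has a real
// extension. The walk reports true as soon as a real extension is found
// within `depth` links.
#include <cassert>
#include <cstdint>

struct Context {
  bool has_extension_slot;
  const void* extension;      // nullptr plays the role of "the hole"
  const Context* previous;
};

bool HasContextExtensionUpToDepth(const Context* context, uint32_t depth) {
  const Context* cur = context;
  for (uint32_t d = depth; d > 0 && cur != nullptr;
       --d, cur = cur->previous) {
    if (!cur->has_extension_slot) continue;      // the new fast check
    if (cur->extension != nullptr) return true;  // slot present, not a hole
  }
  return false;
}

int main() {
  int dummy = 0;
  Context outer{true, &dummy, nullptr};   // has a real extension
  Context middle{true, nullptr, &outer};  // slot exists but is a hole
  Context inner{false, nullptr, &middle}; // no slot at all
  assert(!HasContextExtensionUpToDepth(&inner, 2));  // stops before outer
  assert(HasContextExtensionUpToDepth(&inner, 3));   // reaches outer
  return 0;
}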
@@ -317,29 +320,30 @@ TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList( return Signed(IntPtrSub(reg_list.base_reg_location(), offset)); } -void InterpreterAssembler::StoreRegister(Node* value, Register reg) { +void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) { StoreFullTaggedNoWriteBarrier( GetInterpretedFramePointer(), IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value); } -void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) { +void InterpreterAssembler::StoreRegister(TNode<Object> value, + TNode<IntPtrT> reg_index) { StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index), value); } -void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value, +void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value, int operand_index) { StoreRegister(value, BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); } -void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1, - Node* value2, +void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1, + TNode<Object> value2, int operand_index) { DCHECK_EQ(OperandType::kRegOutPair, Bytecodes::GetOperandType(bytecode_, operand_index)); - Node* first_reg_index = + TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); StoreRegister(value1, first_reg_index); TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index); @@ -347,10 +351,11 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1, } void InterpreterAssembler::StoreRegisterTripleAtOperandIndex( - Node* value1, Node* value2, Node* value3, int operand_index) { + TNode<Object> value1, TNode<Object> value2, TNode<Object> value3, + int operand_index) { DCHECK_EQ(OperandType::kRegOutTriple, Bytecodes::GetOperandType(bytecode_, operand_index)); - Node* first_reg_index = + TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); StoreRegister(value1, first_reg_index); TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index); @@ -359,12 +364,12 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex( StoreRegister(value3, third_reg_index); } -TNode<IntPtrT> InterpreterAssembler::NextRegister(Node* reg_index) { +TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) { // Register indexes are negative, so the next index is minus one. 
return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1))); } -Node* InterpreterAssembler::OperandOffset(int operand_index) { +TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) { return IntPtrConstant( Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale())); } @@ -374,7 +379,7 @@ TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte( DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); - Node* operand_offset = OperandOffset(operand_index); + TNode<IntPtrT> operand_offset = OperandOffset(operand_index); return Load<Uint8T>(BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning); @@ -385,7 +390,7 @@ TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte( DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); - Node* operand_offset = OperandOffset(operand_index); + TNode<IntPtrT> operand_offset = OperandOffset(operand_index); return Load<Int8T>(BytecodeArrayTaggedPointer(), IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning); @@ -429,7 +434,7 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned( MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8(); TNode<IntPtrT> offset = IntPtrConstant(relative_offset + msb_offset + i * kStep); - TNode<WordT> array_offset = IntPtrAdd(BytecodeOffset(), offset); + TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset); bytes[i] = UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(), array_offset, needs_poisoning)); @@ -561,7 +566,7 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) { DCHECK_EQ(OperandType::kFlag8, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -578,15 +583,16 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) { +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord( + int operand_index) { return ChangeUint32ToWord(BytecodeOperandUImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) { - return SmiFromInt32(Signed(BytecodeOperandUImm(operand_index))); +TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) { + return SmiFromUint32(BytecodeOperandUImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) { +TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) { DCHECK_EQ(OperandType::kImm, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -594,15 +600,17 @@ Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) { return BytecodeSignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) { +TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr( + int operand_index) { return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) { 
+TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) { return SmiFromInt32(BytecodeOperandImm(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32( + int operand_index) { DCHECK_EQ(OperandType::kIdx, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -610,15 +618,15 @@ Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) { +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) { return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index)); } -Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) { - return SmiTag(BytecodeOperandIdx(operand_index)); +TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) { + return SmiTag(Signed(BytecodeOperandIdx(operand_index))); } -Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx( +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx( int operand_index, LoadSensitivity needs_poisoning) { DCHECK_EQ(OperandType::kIdx, Bytecodes::GetOperandType(bytecode_, operand_index)); @@ -628,7 +636,7 @@ Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx( BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning)); } -Node* InterpreterAssembler::BytecodeOperandReg( +TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg( int operand_index, LoadSensitivity needs_poisoning) { DCHECK(Bytecodes::IsRegisterOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); @@ -638,7 +646,8 @@ Node* InterpreterAssembler::BytecodeOperandReg( BytecodeSignedOperand(operand_index, operand_size, needs_poisoning)); } -Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId( + int operand_index) { DCHECK_EQ(OperandType::kRuntimeId, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -647,7 +656,7 @@ Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::BytecodeOperandNativeContextIndex( +TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex( int operand_index) { DCHECK_EQ(OperandType::kNativeContextIndex, Bytecodes::GetOperandType(bytecode_, operand_index)); @@ -657,7 +666,8 @@ Node* InterpreterAssembler::BytecodeOperandNativeContextIndex( BytecodeUnsignedOperand(operand_index, operand_size)); } -Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) { +TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId( + int operand_index) { DCHECK_EQ(OperandType::kIntrinsicId, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = @@ -666,7 +676,7 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) { return BytecodeUnsignedOperand(operand_index, operand_size); } -Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { +TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) { TNode<FixedArray> constant_pool = CAST(LoadObjectField( BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset)); return UnsafeLoadFixedArrayElement( @@ -674,13 +684,13 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { } TNode<IntPtrT> 
InterpreterAssembler::LoadAndUntagConstantPoolEntry( - Node* index) { - return SmiUntag(LoadConstantPoolEntry(index)); + TNode<WordT> index) { + return SmiUntag(CAST(LoadConstantPoolEntry(index))); } -Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( +TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( int operand_index) { - Node* index = + TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe); return LoadConstantPoolEntry(index); } @@ -688,7 +698,7 @@ Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex( int operand_index) { - return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index)); + return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index))); } TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() { @@ -713,151 +723,15 @@ void InterpreterAssembler::CallPrologue() { void InterpreterAssembler::CallEpilogue() { } -void InterpreterAssembler::IncrementCallCount(Node* feedback_vector, - Node* slot_id) { - Comment("increment call count"); - TNode<Smi> call_count = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize)); - // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call - // count are used as flags. To increment the call count by 1 we hence - // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}. - TNode<Smi> new_count = SmiAdd( - call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift)); - // Count is Smi, so we don't need a write barrier. - StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count, - SKIP_WRITE_BARRIER, kTaggedSize); -} - -void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context, - Node* feedback_vector, - Node* slot_id) { - Label extra_checks(this, Label::kDeferred), done(this); - - // Check if we have monomorphic {target} feedback already. - TNode<MaybeObject> feedback = - LoadFeedbackVectorSlot(feedback_vector, slot_id); - Comment("check if monomorphic"); - TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target)); - GotoIf(is_monomorphic, &done); - - // Check if it is a megamorphic {target}. - Comment("check if megamorphic"); - TNode<BoolT> is_megamorphic = TaggedEqual( - feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate()))); - Branch(is_megamorphic, &done, &extra_checks); - - BIND(&extra_checks); - { - Label initialize(this), mark_megamorphic(this); - - Comment("check if weak reference"); - TNode<BoolT> is_uninitialized = TaggedEqual( - feedback, - HeapConstant(FeedbackVector::UninitializedSentinel(isolate()))); - GotoIf(is_uninitialized, &initialize); - CSA_ASSERT(this, IsWeakOrCleared(feedback)); - - // If the weak reference is cleared, we have a new chance to become - // monomorphic. - Comment("check if weak reference is cleared"); - Branch(IsCleared(feedback), &initialize, &mark_megamorphic); - - BIND(&initialize); - { - // Check if {target} is a JSFunction in the current native context. - Comment("check if function in same native context"); - GotoIf(TaggedIsSmi(target), &mark_megamorphic); - // Check if the {target} is a JSFunction or JSBoundFunction - // in the current native context. 
- VARIABLE(var_current, MachineRepresentation::kTagged, target); - Label loop(this, &var_current), done_loop(this); - Goto(&loop); - BIND(&loop); - { - Label if_boundfunction(this), if_function(this); - Node* current = var_current.value(); - CSA_ASSERT(this, TaggedIsNotSmi(current)); - TNode<Uint16T> current_instance_type = LoadInstanceType(current); - GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), - &if_boundfunction); - Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE), - &if_function, &mark_megamorphic); - - BIND(&if_function); - { - // Check that the JSFunction {current} is in the current native - // context. - TNode<Context> current_context = - CAST(LoadObjectField(current, JSFunction::kContextOffset)); - TNode<Context> current_native_context = - LoadNativeContext(current_context); - Branch( - TaggedEqual(LoadNativeContext(context), current_native_context), - &done_loop, &mark_megamorphic); - } - - BIND(&if_boundfunction); - { - // Continue with the [[BoundTargetFunction]] of {target}. - var_current.Bind(LoadObjectField( - current, JSBoundFunction::kBoundTargetFunctionOffset)); - Goto(&loop); - } - } - BIND(&done_loop); - StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, - CAST(target)); - ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize"); - Goto(&done); - } - - BIND(&mark_megamorphic); - { - // MegamorphicSentinel is an immortal immovable object so - // write-barrier is not needed. - Comment("transition to megamorphic"); - DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol)); - StoreFeedbackVectorSlot( - feedback_vector, slot_id, - HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())), - SKIP_WRITE_BARRIER); - ReportFeedbackUpdate(feedback_vector, slot_id, - "Call:TransitionMegamorphic"); - Goto(&done); - } - } - - BIND(&done); -} - -void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context, - Node* maybe_feedback_vector, - Node* slot_id) { - Label feedback_done(this); - // If feedback_vector is not valid, then nothing to do. - GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done); - - CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector)); - - // Increment the call count. - IncrementCallCount(maybe_feedback_vector, slot_id); - - // Collect the callable {target} feedback. - CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id); - Goto(&feedback_done); - - BIND(&feedback_done); -} - void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, const RegListNodePair& args, + TNode<Object> function, TNode<Context> context, const RegListNodePair& args, ConvertReceiverMode receiver_mode) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) || bytecode_ == Bytecode::kInvokeIntrinsic); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode); - Node* args_count; + TNode<Word32T> args_count; if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // The receiver is implied, so it is not in the argument list. args_count = args.reg_count(); @@ -879,8 +753,9 @@ void InterpreterAssembler::CallJSAndDispatch( } template <class... TArgs> -void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context, - Node* arg_count, +void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function, + TNode<Context> context, + TNode<Word32T> arg_count, ConvertReceiverMode receiver_mode, TArgs... 
args) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); @@ -892,9 +767,9 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context, if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // The first argument parameter (the receiver) is implied to be undefined. - TailCallStubThenBytecodeDispatch( - callable.descriptor(), code_target, context, function, arg_count, - static_cast<Node*>(UndefinedConstant()), args...); + TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, + context, function, arg_count, + UndefinedConstant(), args...); } else { TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context, function, arg_count, args...); @@ -906,21 +781,22 @@ void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context, // Instantiate CallJSAndDispatch() for argument counts used by interpreter // generator. template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, ConvertReceiverMode receiver_mode); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, - ConvertReceiverMode receiver_mode, Node*); + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, + ConvertReceiverMode receiver_mode, TNode<Object>); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, - ConvertReceiverMode receiver_mode, Node*, Node*); + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, + ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>); template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch( - Node* function, Node* context, Node* arg_count, - ConvertReceiverMode receiver_mode, Node*, Node*, Node*); + TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count, + ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>, + TNode<Object>); void InterpreterAssembler::CallJSWithSpreadAndDispatch( - Node* function, Node* context, const RegListNodePair& args, Node* slot_id, - Node* maybe_feedback_vector) { + TNode<Object> function, TNode<Context> context, const RegListNodePair& args, + TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny); CollectCallFeedback(function, context, maybe_feedback_vector, slot_id); @@ -939,16 +815,18 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch( accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite; } -Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, - SloppyTNode<Object> new_target, - const RegListNodePair& args, - Node* slot_id, Node* feedback_vector) { +TNode<Object> InterpreterAssembler::Construct( + TNode<Object> target, TNode<Context> context, TNode<Object> new_target, + const RegListNodePair& args, TNode<UintPtrT> slot_id, + TNode<HeapObject> maybe_feedback_vector) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); - VARIABLE(var_result, MachineRepresentation::kTagged); - VARIABLE(var_site, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result); + TVARIABLE(AllocationSite, var_site); Label extra_checks(this, Label::kDeferred), return_result(this, &var_result), construct(this), construct_array(this, &var_site); - 
GotoIf(IsUndefined(feedback_vector), &construct); + GotoIf(IsUndefined(maybe_feedback_vector), &construct); + + TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector); // Increment the call count. IncrementCallCount(feedback_vector, slot_id); @@ -956,7 +834,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, // Check if we have monomorphic {new_target} feedback already. TNode<MaybeObject> feedback = LoadFeedbackVectorSlot(feedback_vector, slot_id); - Branch(IsWeakReferenceTo(feedback, new_target), &construct, &extra_checks); + Branch(IsWeakReferenceToObject(feedback, new_target), &construct, + &extra_checks); BIND(&extra_checks); { @@ -989,7 +868,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX); GotoIfNot(TaggedEqual(target, array_function), &mark_megamorphic); GotoIfNot(TaggedEqual(new_target, array_function), &mark_megamorphic); - var_site.Bind(strong_feedback); + var_site = CAST(strong_feedback); Goto(&construct_array); } @@ -1008,14 +887,13 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, GotoIf(TaggedIsSmi(new_target), &mark_megamorphic); // Check if the {new_target} is a JSFunction or JSBoundFunction // in the current native context. - VARIABLE(var_current, MachineRepresentation::kTagged, new_target); + TVARIABLE(HeapObject, var_current, CAST(new_target)); Label loop(this, &var_current), done_loop(this); Goto(&loop); BIND(&loop); { Label if_boundfunction(this), if_function(this); - Node* current = var_current.value(); - CSA_ASSERT(this, TaggedIsNotSmi(current)); + TNode<HeapObject> current = var_current.value(); TNode<Uint16T> current_instance_type = LoadInstanceType(current); GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), &if_boundfunction); @@ -1028,7 +906,7 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, // context. TNode<Context> current_context = CAST(LoadObjectField(current, JSFunction::kContextOffset)); - TNode<Context> current_native_context = + TNode<NativeContext> current_native_context = LoadNativeContext(current_context); Branch( TaggedEqual(LoadNativeContext(context), current_native_context), @@ -1038,8 +916,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, BIND(&if_boundfunction); { // Continue with the [[BoundTargetFunction]] of {current}. 
- var_current.Bind(LoadObjectField( - current, JSBoundFunction::kBoundTargetFunctionOffset)); + var_current = LoadObjectField<HeapObject>( + current, JSBoundFunction::kBoundTargetFunctionOffset); Goto(&loop); } } @@ -1056,8 +934,8 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, BIND(&create_allocation_site); { - var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector, - SmiTag(slot_id))); + var_site = + CreateAllocationSiteInFeedbackVector(feedback_vector, slot_id); ReportFeedbackUpdate(feedback_vector, slot_id, "Construct:CreateAllocationSite"); Goto(&construct_array); @@ -1097,9 +975,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), InterpreterPushArgsMode::kArrayFunction); TNode<Code> code_target = HeapConstant(callable.code()); - var_result.Bind(CallStub(callable.descriptor(), code_target, context, - args.reg_count(), args.base_reg_location(), target, - new_target, var_site.value())); + var_result = CallStub(callable.descriptor(), code_target, context, + args.reg_count(), args.base_reg_location(), target, + new_target, var_site.value()); Goto(&return_result); } @@ -1110,9 +988,9 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, Callable callable = CodeFactory::InterpreterPushArgsThenConstruct( isolate(), InterpreterPushArgsMode::kOther); TNode<Code> code_target = HeapConstant(callable.code()); - var_result.Bind(CallStub(callable.descriptor(), code_target, context, - args.reg_count(), args.base_reg_location(), target, - new_target, UndefinedConstant())); + var_result = CallStub(callable.descriptor(), code_target, context, + args.reg_count(), args.base_reg_location(), target, + new_target, UndefinedConstant()); Goto(&return_result); } @@ -1120,17 +998,18 @@ Node* InterpreterAssembler::Construct(SloppyTNode<Object> target, Node* context, return var_result.value(); } -Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, - Node* new_target, - const RegListNodePair& args, - Node* slot_id, - Node* feedback_vector) { +TNode<Object> InterpreterAssembler::ConstructWithSpread( + TNode<Object> target, TNode<Context> context, TNode<Object> new_target, + const RegListNodePair& args, TNode<UintPtrT> slot_id, + TNode<HeapObject> maybe_feedback_vector) { // TODO(bmeurer): Unify this with the Construct bytecode feedback // above once we have a way to pass the AllocationSite to the Array // constructor _and_ spread the last argument at the same time. DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); Label extra_checks(this, Label::kDeferred), construct(this); - GotoIf(IsUndefined(feedback_vector), &construct); + GotoIf(IsUndefined(maybe_feedback_vector), &construct); + + TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector); // Increment the call count. IncrementCallCount(feedback_vector, slot_id); @@ -1138,7 +1017,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, // Check if we have monomorphic {new_target} feedback already. 
TNode<MaybeObject> feedback = LoadFeedbackVectorSlot(feedback_vector, slot_id); - Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct, + Branch(IsWeakReferenceToObject(feedback, new_target), &construct, &extra_checks); BIND(&extra_checks); @@ -1174,14 +1053,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, GotoIf(TaggedIsSmi(new_target), &mark_megamorphic); // Check if the {new_target} is a JSFunction or JSBoundFunction // in the current native context. - VARIABLE(var_current, MachineRepresentation::kTagged, new_target); + TVARIABLE(HeapObject, var_current, CAST(new_target)); Label loop(this, &var_current), done_loop(this); Goto(&loop); BIND(&loop); { Label if_boundfunction(this), if_function(this); - Node* current = var_current.value(); - CSA_ASSERT(this, TaggedIsNotSmi(current)); + TNode<HeapObject> current = var_current.value(); TNode<Uint16T> current_instance_type = LoadInstanceType(current); GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), &if_boundfunction); @@ -1194,7 +1072,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, // context. TNode<Context> current_context = CAST(LoadObjectField(current, JSFunction::kContextOffset)); - TNode<Context> current_native_context = + TNode<NativeContext> current_native_context = LoadNativeContext(current_context); Branch( TaggedEqual(LoadNativeContext(context), current_native_context), @@ -1204,8 +1082,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, BIND(&if_boundfunction); { // Continue with the [[BoundTargetFunction]] of {current}. - var_current.Bind(LoadObjectField( - current, JSBoundFunction::kBoundTargetFunctionOffset)); + var_current = LoadObjectField<HeapObject>( + current, JSBoundFunction::kBoundTargetFunctionOffset); Goto(&loop); } } @@ -1243,7 +1121,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context, UndefinedConstant()); } -Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, +Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id, + TNode<Context> context, const RegListNodePair& args, int result_size) { DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_)); @@ -1252,22 +1131,22 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, TNode<Code> code_target = HeapConstant(callable.code()); // Get the function entry from the function id. 
- Node* function_table = ExternalConstant( - ExternalReference::runtime_function_table_address(isolate())); + TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant( + ExternalReference::runtime_function_table_address(isolate()))); TNode<Word32T> function_offset = Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function))); TNode<WordT> function = IntPtrAdd(function_table, ChangeUint32ToWord(function_offset)); - Node* function_entry = - Load(MachineType::Pointer(), function, - IntPtrConstant(offsetof(Runtime::Function, entry))); + TNode<RawPtrT> function_entry = Load<RawPtrT>( + function, IntPtrConstant(offsetof(Runtime::Function, entry))); return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(), result_size, code_target, context, args.reg_count(), args.base_reg_location(), function_entry); } -void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { +void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight, + bool backward) { Comment("[ UpdateInterruptBudget"); // Assert that the weight is positive (negative weights should be implemented @@ -1289,7 +1168,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { TVARIABLE(Int32T, new_budget); if (backward) { // Update budget by |weight| and check if it reaches zero. - new_budget = Signed(Int32Sub(budget_after_bytecode, weight)); + new_budget = Int32Sub(budget_after_bytecode, weight); TNode<BoolT> condition = Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0)); Label ok(this), interrupt_check(this, Label::kDeferred); @@ -1303,7 +1182,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { } else { // For a forward jump, we know we only increase the interrupt budget, so // no need to check if it's below zero. - new_budget = Signed(Int32Add(budget_after_bytecode, weight)); + new_budget = Int32Add(budget_after_bytecode, weight); } // Update budget. 
@@ -1323,7 +1202,7 @@ TNode<IntPtrT> InterpreterAssembler::Advance(int delta) { return Advance(IntPtrConstant(delta)); } -TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta, +TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta, bool backward) { #ifdef V8_TRACE_IGNITION TraceBytecode(Runtime::kInterpreterTraceBytecodeExit); @@ -1334,45 +1213,51 @@ TNode<IntPtrT> InterpreterAssembler::Advance(SloppyTNode<IntPtrT> delta, return next_offset; } -Node* InterpreterAssembler::Jump(Node* delta, bool backward) { +void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) { DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_)); - UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward); - Node* new_bytecode_offset = Advance(delta, backward); - TNode<WordT> target_bytecode = LoadBytecode(new_bytecode_offset); - return DispatchToBytecode(target_bytecode, new_bytecode_offset); + UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward); + TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward); + TNode<RawPtrT> target_bytecode = + UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset)); + DispatchToBytecode(target_bytecode, new_bytecode_offset); } -Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); } +void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) { + Jump(jump_offset, false); +} -Node* InterpreterAssembler::JumpBackward(Node* delta) { - return Jump(delta, true); +void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) { + Jump(jump_offset, true); } -void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) { +void InterpreterAssembler::JumpConditional(TNode<BoolT> condition, + TNode<IntPtrT> jump_offset) { Label match(this), no_match(this); Branch(condition, &match, &no_match); BIND(&match); - Jump(delta); + Jump(jump_offset); BIND(&no_match); Dispatch(); } void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs, - TNode<Object> rhs, Node* delta) { - JumpConditional(TaggedEqual(lhs, rhs), delta); + TNode<Object> rhs, + TNode<IntPtrT> jump_offset) { + JumpConditional(TaggedEqual(lhs, rhs), jump_offset); } void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs, TNode<Object> rhs, - Node* delta) { - JumpConditional(TaggedNotEqual(lhs, rhs), delta); + TNode<IntPtrT> jump_offset) { + JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset); } -TNode<WordT> InterpreterAssembler::LoadBytecode(Node* bytecode_offset) { - Node* bytecode = - Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset); +TNode<WordT> InterpreterAssembler::LoadBytecode( + TNode<IntPtrT> bytecode_offset) { + TNode<Uint8T> bytecode = + Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset); return ChangeUint32ToWord(bytecode); } @@ -1418,51 +1303,39 @@ void InterpreterAssembler::InlineStar() { accumulator_use_ = previous_acc_use; } -Node* InterpreterAssembler::Dispatch() { +void InterpreterAssembler::Dispatch() { Comment("========= Dispatch"); DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_); - Node* target_offset = Advance(); + TNode<IntPtrT> target_offset = Advance(); TNode<WordT> target_bytecode = LoadBytecode(target_offset); if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) { target_bytecode = StarDispatchLookahead(target_bytecode); } - return DispatchToBytecode(target_bytecode, BytecodeOffset()); + DispatchToBytecode(target_bytecode, BytecodeOffset()); } -Node* InterpreterAssembler::DispatchToBytecode(Node* 
target_bytecode, - Node* new_bytecode_offset) { +void InterpreterAssembler::DispatchToBytecode( + TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) { if (FLAG_trace_ignition_dispatches) { TraceBytecodeDispatch(target_bytecode); } - Node* target_code_entry = - Load(MachineType::Pointer(), DispatchTableRawPointer(), - TimesSystemPointerSize(target_bytecode)); - - return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset, - target_bytecode); -} + TNode<RawPtrT> target_code_entry = Load<RawPtrT>( + DispatchTablePointer(), TimesSystemPointerSize(target_bytecode)); -Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler, - Node* bytecode_offset, - Node* target_bytecode) { - // TODO(ishell): Add CSA::CodeEntryPoint(code). - TNode<IntPtrT> handler_entry = - IntPtrAdd(BitcastTaggedToWord(handler), - IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)); - return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset, - target_bytecode); + DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset); } -Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry( - Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) { +void InterpreterAssembler::DispatchToBytecodeHandlerEntry( + TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) { // Propagate speculation poisoning. - TNode<WordT> poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry); - return TailCallBytecodeDispatch( - InterpreterDispatchDescriptor{}, poisoned_handler_entry, - GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(), - DispatchTableRawPointer()); + TNode<RawPtrT> poisoned_handler_entry = + UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry)); + TailCallBytecodeDispatch(InterpreterDispatchDescriptor{}, + poisoned_handler_entry, GetAccumulatorUnchecked(), + bytecode_offset, BytecodeArrayTaggedPointer(), + DispatchTablePointer()); } void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { @@ -1474,14 +1347,14 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { // Indices 256-511 correspond to bytecodes with operand_scale == 1 // Indices 512-767 correspond to bytecodes with operand_scale == 2 DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_); - Node* next_bytecode_offset = Advance(1); + TNode<IntPtrT> next_bytecode_offset = Advance(1); TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset); if (FLAG_trace_ignition_dispatches) { TraceBytecodeDispatch(next_bytecode); } - Node* base_index; + TNode<IntPtrT> base_index; switch (operand_scale) { case OperandScale::kDouble: base_index = IntPtrConstant(1 << kBitsPerByte); @@ -1493,12 +1366,10 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { UNREACHABLE(); } TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode); - Node* target_code_entry = - Load(MachineType::Pointer(), DispatchTableRawPointer(), - TimesSystemPointerSize(target_index)); + TNode<RawPtrT> target_code_entry = Load<RawPtrT>( + DispatchTablePointer(), TimesSystemPointerSize(target_index)); - DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset, - next_bytecode); + DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset); } void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { @@ -1527,10 +1398,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { UpdateInterruptBudget(profiling_weight, true); } -Node* InterpreterAssembler::LoadOsrNestingLevel() { - return 
LoadObjectField(BytecodeArrayTaggedPointer(), - BytecodeArray::kOsrNestingLevelOffset, - MachineType::Int8()); +TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() { + return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(), + BytecodeArray::kOsrNestingLevelOffset); } void InterpreterAssembler::Abort(AbortReason abort_reason) { @@ -1551,7 +1421,7 @@ void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs, BIND(&ok); } -void InterpreterAssembler::MaybeDropFrames(Node* context) { +void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) { TNode<ExternalReference> restart_fp_address = ExternalConstant(ExternalReference::debug_restart_fp_address(isolate())); @@ -1576,7 +1446,7 @@ void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) { SmiTag(BytecodeOffset()), GetAccumulatorUnchecked()); } -void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) { +void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) { TNode<ExternalReference> counters_table = ExternalConstant( ExternalReference::interpreter_dispatch_counters(isolate())); TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant( @@ -1616,8 +1486,8 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() { } void InterpreterAssembler::AbortIfRegisterCountInvalid( - Node* parameters_and_registers, Node* formal_parameter_count, - Node* register_count) { + TNode<FixedArrayBase> parameters_and_registers, + TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) { TNode<IntPtrT> array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers); @@ -1633,13 +1503,13 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid( BIND(&ok); } -Node* InterpreterAssembler::ExportParametersAndRegisterFile( +TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile( TNode<FixedArray> array, const RegListNodePair& registers, TNode<Int32T> formal_parameter_count) { // Store the formal parameters (without receiver) followed by the // registers into the generator's internal parameters_and_registers field. TNode<IntPtrT> formal_parameter_count_intptr = - ChangeInt32ToIntPtr(formal_parameter_count); + Signed(ChangeUint32ToWord(formal_parameter_count)); TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), @@ -1649,8 +1519,8 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( } { - Variable var_index(this, MachineType::PointerRepresentation()); - var_index.Bind(IntPtrConstant(0)); + TVARIABLE(IntPtrT, var_index); + var_index = IntPtrConstant(0); // Iterate over parameters and write them into the array. Label loop(this, &var_index), done_loop(this); @@ -1662,16 +1532,16 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( Goto(&loop); BIND(&loop); { - Node* index = var_index.value(); + TNode<IntPtrT> index = var_index.value(); GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr), &done_loop); - TNode<WordT> reg_index = IntPtrSub(reg_base, index); + TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index); TNode<Object> value = LoadRegister(reg_index); StoreFixedArrayElement(array, index, value); - var_index.Bind(IntPtrAdd(index, IntPtrConstant(1))); + var_index = IntPtrAdd(index, IntPtrConstant(1)); Goto(&loop); } BIND(&done_loop); @@ -1681,25 +1551,25 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( // Iterate over register file and write values into array. 
// The mapping of register to array index must match that used in // BytecodeGraphBuilder::VisitResumeGenerator. - Variable var_index(this, MachineType::PointerRepresentation()); - var_index.Bind(IntPtrConstant(0)); + TVARIABLE(IntPtrT, var_index); + var_index = IntPtrConstant(0); Label loop(this, &var_index), done_loop(this); Goto(&loop); BIND(&loop); { - Node* index = var_index.value(); + TNode<IntPtrT> index = var_index.value(); GotoIfNot(UintPtrLessThan(index, register_count), &done_loop); - TNode<WordT> reg_index = + TNode<IntPtrT> reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index); TNode<Object> value = LoadRegister(reg_index); - TNode<WordT> array_index = + TNode<IntPtrT> array_index = IntPtrAdd(formal_parameter_count_intptr, index); StoreFixedArrayElement(array, array_index, value); - var_index.Bind(IntPtrAdd(index, IntPtrConstant(1))); + var_index = IntPtrAdd(index, IntPtrConstant(1)); Goto(&loop); } BIND(&done_loop); @@ -1708,11 +1578,11 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile( return array; } -Node* InterpreterAssembler::ImportRegisterFile( +TNode<FixedArray> InterpreterAssembler::ImportRegisterFile( TNode<FixedArray> array, const RegListNodePair& registers, TNode<Int32T> formal_parameter_count) { TNode<IntPtrT> formal_parameter_count_intptr = - ChangeInt32ToIntPtr(formal_parameter_count); + Signed(ChangeUint32ToWord(formal_parameter_count)); TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count()); if (FLAG_debug_code) { CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(), @@ -1758,8 +1628,8 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) { TNode<Object> object = GetAccumulator(); TNode<Context> context = GetContext(); - Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned); - Variable var_result(this, MachineRepresentation::kTagged); + TVARIABLE(Smi, var_type_feedback); + TVARIABLE(Numeric, var_result); Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this), if_objectisother(this, Label::kDeferred); @@ -1768,15 +1638,15 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) { BIND(&if_objectissmi); { - var_result.Bind(object); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall)); + var_result = CAST(object); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall); Goto(&if_done); } BIND(&if_objectisheapnumber); { - var_result.Bind(object); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber)); + var_result = CAST(object); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber); Goto(&if_done); } @@ -1789,23 +1659,23 @@ void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) { Label not_bigint(this); GotoIfNot(IsBigInt(CAST(object)), ¬_bigint); { - var_result.Bind(object); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt)); + var_result = CAST(object); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt); Goto(&if_done); } BIND(¬_bigint); } // Convert {object} by calling out to the appropriate builtin. - var_result.Bind(CallBuiltin(builtin, context, object)); - var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny)); + var_result = CAST(CallBuiltin(builtin, context, object)); + var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny); Goto(&if_done); } BIND(&if_done); // Record the type feedback collected for {object}. 
- Node* slot_index = BytecodeOperandIdx(0); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(0); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index); diff --git a/chromium/v8/src/interpreter/interpreter-assembler.h b/chromium/v8/src/interpreter/interpreter-assembler.h index 33fa987595d..4a1882b82ca 100644 --- a/chromium/v8/src/interpreter/interpreter-assembler.h +++ b/chromium/v8/src/interpreter/interpreter-assembler.h @@ -25,64 +25,62 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // Returns the 32-bit unsigned count immediate for bytecode operand // |operand_index| in the current bytecode. - compiler::TNode<Uint32T> BytecodeOperandCount(int operand_index); + TNode<Uint32T> BytecodeOperandCount(int operand_index); // Returns the 32-bit unsigned flag for bytecode operand |operand_index| // in the current bytecode. - compiler::Node* BytecodeOperandFlag(int operand_index); + TNode<Uint32T> BytecodeOperandFlag(int operand_index); // Returns the 32-bit zero-extended index immediate for bytecode operand // |operand_index| in the current bytecode. - compiler::Node* BytecodeOperandIdxInt32(int operand_index); + TNode<Uint32T> BytecodeOperandIdxInt32(int operand_index); // Returns the word zero-extended index immediate for bytecode operand // |operand_index| in the current bytecode. - compiler::Node* BytecodeOperandIdx(int operand_index); + TNode<UintPtrT> BytecodeOperandIdx(int operand_index); // Returns the smi index immediate for bytecode operand |operand_index| // in the current bytecode. - compiler::Node* BytecodeOperandIdxSmi(int operand_index); + TNode<Smi> BytecodeOperandIdxSmi(int operand_index); // Returns the 32-bit unsigned immediate for bytecode operand |operand_index| // in the current bytecode. - compiler::TNode<Uint32T> BytecodeOperandUImm(int operand_index); + TNode<Uint32T> BytecodeOperandUImm(int operand_index); // Returns the word-size unsigned immediate for bytecode operand // |operand_index| in the current bytecode. - compiler::Node* BytecodeOperandUImmWord(int operand_index); + TNode<UintPtrT> BytecodeOperandUImmWord(int operand_index); // Returns the unsigned smi immediate for bytecode operand |operand_index| in // the current bytecode. - compiler::Node* BytecodeOperandUImmSmi(int operand_index); + TNode<Smi> BytecodeOperandUImmSmi(int operand_index); // Returns the 32-bit signed immediate for bytecode operand |operand_index| // in the current bytecode. - compiler::Node* BytecodeOperandImm(int operand_index); + TNode<Int32T> BytecodeOperandImm(int operand_index); // Returns the word-size signed immediate for bytecode operand |operand_index| // in the current bytecode. - compiler::Node* BytecodeOperandImmIntPtr(int operand_index); + TNode<IntPtrT> BytecodeOperandImmIntPtr(int operand_index); // Returns the smi immediate for bytecode operand |operand_index| in the // current bytecode. - compiler::Node* BytecodeOperandImmSmi(int operand_index); + TNode<Smi> BytecodeOperandImmSmi(int operand_index); // Returns the 32-bit unsigned runtime id immediate for bytecode operand // |operand_index| in the current bytecode. - compiler::Node* BytecodeOperandRuntimeId(int operand_index); - // Returns the 32-bit unsigned native context index immediate for bytecode + TNode<Uint32T> BytecodeOperandRuntimeId(int operand_index); + // Returns the word zero-extended native context index immediate for bytecode // operand |operand_index| in the current bytecode. 
- compiler::Node* BytecodeOperandNativeContextIndex(int operand_index); + TNode<UintPtrT> BytecodeOperandNativeContextIndex(int operand_index); // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand // |operand_index| in the current bytecode. - compiler::Node* BytecodeOperandIntrinsicId(int operand_index); - + TNode<Uint32T> BytecodeOperandIntrinsicId(int operand_index); // Accumulator. - compiler::TNode<Object> GetAccumulator(); - void SetAccumulator(compiler::Node* value); + TNode<Object> GetAccumulator(); + void SetAccumulator(SloppyTNode<Object> value); // Context. - compiler::TNode<Context> GetContext(); - void SetContext(compiler::TNode<Context> value); + TNode<Context> GetContext(); + void SetContext(TNode<Context> value); // Context at |depth| in the context chain starting at |context|. - compiler::Node* GetContextAtDepth(compiler::TNode<Context> context, - compiler::TNode<Uint32T> depth); + TNode<Context> GetContextAtDepth(TNode<Context> context, + TNode<Uint32T> depth); // Goto the given |target| if the context chain starting at |context| has any // extensions up to the given |depth|. - void GotoIfHasContextExtensionUpToDepth(compiler::TNode<Context> context, - compiler::TNode<Uint32T> depth, - Label* target); + void GotoIfHasContextExtensionUpToDepth(TNode<Context> context, + TNode<Uint32T> depth, Label* target); // A RegListNodePair provides an abstraction over lists of registers. class RegListNodePair { @@ -90,14 +88,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { RegListNodePair(TNode<IntPtrT> base_reg_location, TNode<Word32T> reg_count) : base_reg_location_(base_reg_location), reg_count_(reg_count) {} - compiler::TNode<Word32T> reg_count() const { return reg_count_; } - compiler::TNode<IntPtrT> base_reg_location() const { - return base_reg_location_; - } + TNode<Word32T> reg_count() const { return reg_count_; } + TNode<IntPtrT> base_reg_location() const { return base_reg_location_; } private: - compiler::TNode<IntPtrT> base_reg_location_; - compiler::TNode<Word32T> reg_count_; + TNode<IntPtrT> base_reg_location_; + TNode<Word32T> reg_count_; }; // Backup/restore register file to/from a fixed array of the correct length. @@ -105,72 +101,53 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // - Suspend copies arguments and registers to the generator. // - Resume copies only the registers from the generator, the arguments // are copied by the ResumeGenerator trampoline. - compiler::Node* ExportParametersAndRegisterFile( + TNode<FixedArray> ExportParametersAndRegisterFile( TNode<FixedArray> array, const RegListNodePair& registers, TNode<Int32T> formal_parameter_count); - compiler::Node* ImportRegisterFile(TNode<FixedArray> array, - const RegListNodePair& registers, - TNode<Int32T> formal_parameter_count); + TNode<FixedArray> ImportRegisterFile(TNode<FixedArray> array, + const RegListNodePair& registers, + TNode<Int32T> formal_parameter_count); // Loads from and stores to the interpreter register file. 
-  compiler::TNode<Object> LoadRegister(Register reg);
-  compiler::TNode<IntPtrT> LoadAndUntagRegister(Register reg);
-  compiler::TNode<Object> LoadRegisterAtOperandIndex(int operand_index);
-  std::pair<compiler::TNode<Object>, compiler::TNode<Object>>
-  LoadRegisterPairAtOperandIndex(int operand_index);
-  void StoreRegister(compiler::Node* value, Register reg);
-  void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
-  void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
-                                       compiler::Node* value2,
-                                       int operand_index);
-  void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
-                                         compiler::Node* value2,
-                                         compiler::Node* value3,
+  TNode<Object> LoadRegister(Register reg);
+  TNode<IntPtrT> LoadAndUntagRegister(Register reg);
+  TNode<Object> LoadRegisterAtOperandIndex(int operand_index);
+  std::pair<TNode<Object>, TNode<Object>> LoadRegisterPairAtOperandIndex(
+      int operand_index);
+  void StoreRegister(TNode<Object> value, Register reg);
+  void StoreRegisterAtOperandIndex(TNode<Object> value, int operand_index);
+  void StoreRegisterPairAtOperandIndex(TNode<Object> value1,
+                                       TNode<Object> value2, int operand_index);
+  void StoreRegisterTripleAtOperandIndex(TNode<Object> value1,
+                                         TNode<Object> value2,
+                                         TNode<Object> value3,
                                          int operand_index);
   RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
-  Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
-                                     int index);
+  TNode<Object> LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
+                                             int index);
   TNode<IntPtrT> RegisterLocationInRegisterList(const RegListNodePair& reg_list,
                                                 int index);

   // Load constant at the index specified in operand |operand_index| from the
   // constant pool.
-  compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
+  TNode<Object> LoadConstantPoolEntryAtOperandIndex(int operand_index);
   // Load and untag constant at the index specified in operand |operand_index|
   // from the constant pool.
   TNode<IntPtrT> LoadAndUntagConstantPoolEntryAtOperandIndex(int operand_index);
   // Load constant at |index| in the constant pool.
-  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
+  TNode<Object> LoadConstantPoolEntry(TNode<WordT> index);
   // Load and untag constant at |index| in the constant pool.
-  TNode<IntPtrT> LoadAndUntagConstantPoolEntry(compiler::Node* index);
+  TNode<IntPtrT> LoadAndUntagConstantPoolEntry(TNode<WordT> index);

   // Load the FeedbackVector for the current function. The returned node could be
   // undefined.
-  compiler::TNode<HeapObject> LoadFeedbackVector();
-
-  // Increment the call count for a CALL_IC or construct call.
-  // The call count is located at feedback_vector[slot_id + 1].
-  void IncrementCallCount(compiler::Node* feedback_vector,
-                          compiler::Node* slot_id);
-
-  // Collect the callable |target| feedback for either a CALL_IC or
-  // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
-  void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
-                               compiler::Node* feedback_vector,
-                               compiler::Node* slot_id);
-
-  // Collect CALL_IC feedback for |target| function in the
-  // |feedback_vector| at |slot_id|, and the call counts in
-  // the |feedback_vector| at |slot_id+1|.
-  void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
-                           compiler::Node* maybe_feedback_vector,
-                           compiler::Node* slot_id);
+  TNode<HeapObject> LoadFeedbackVector();

   // Call JSFunction or Callable |function| with |args| arguments, possibly
   // including the receiver depending on |receiver_mode|.
After the call returns // directly dispatches to the next bytecode. - void CallJSAndDispatch(compiler::Node* function, compiler::Node* context, + void CallJSAndDispatch(TNode<Object> function, TNode<Context> context, const RegListNodePair& args, ConvertReceiverMode receiver_mode); @@ -179,93 +156,89 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // depending on |receiver_mode|. After the call returns directly dispatches to // the next bytecode. template <class... TArgs> - void CallJSAndDispatch(Node* function, Node* context, Node* arg_count, + void CallJSAndDispatch(TNode<Object> function, TNode<Context> context, + TNode<Word32T> arg_count, ConvertReceiverMode receiver_mode, TArgs... args); // Call JSFunction or Callable |function| with |args| // arguments (not including receiver), and the final argument being spread. // After the call returns directly dispatches to the next bytecode. - void CallJSWithSpreadAndDispatch(compiler::Node* function, - compiler::Node* context, + void CallJSWithSpreadAndDispatch(TNode<Object> function, + TNode<Context> context, const RegListNodePair& args, - compiler::Node* slot_id, - compiler::Node* feedback_vector); + TNode<UintPtrT> slot_id, + TNode<HeapObject> maybe_feedback_vector); // Call constructor |target| with |args| arguments (not including receiver). // The |new_target| is the same as the |target| for the new keyword, but // differs for the super keyword. - compiler::Node* Construct(compiler::SloppyTNode<Object> target, - compiler::Node* context, - compiler::SloppyTNode<Object> new_target, - const RegListNodePair& args, - compiler::Node* slot_id, - compiler::Node* feedback_vector); + TNode<Object> Construct(TNode<Object> target, TNode<Context> context, + TNode<Object> new_target, const RegListNodePair& args, + TNode<UintPtrT> slot_id, + TNode<HeapObject> maybe_feedback_vector); // Call constructor |target| with |args| arguments (not including // receiver). The last argument is always a spread. The |new_target| is the // same as the |target| for the new keyword, but differs for the super // keyword. - compiler::Node* ConstructWithSpread(compiler::Node* target, - compiler::Node* context, - compiler::Node* new_target, - const RegListNodePair& args, - compiler::Node* slot_id, - compiler::Node* feedback_vector); + TNode<Object> ConstructWithSpread(TNode<Object> target, + TNode<Context> context, + TNode<Object> new_target, + const RegListNodePair& args, + TNode<UintPtrT> slot_id, + TNode<HeapObject> maybe_feedback_vector); // Call runtime function with |args| arguments which will return |return_size| // number of values. - compiler::Node* CallRuntimeN(compiler::Node* function_id, - compiler::Node* context, + compiler::Node* CallRuntimeN(TNode<Uint32T> function_id, + TNode<Context> context, const RegListNodePair& args, int return_size = 1); // Jump forward relative to the current bytecode by the |jump_offset|. - compiler::Node* Jump(compiler::Node* jump_offset); + void Jump(TNode<IntPtrT> jump_offset); // Jump backward relative to the current bytecode by the |jump_offset|. - compiler::Node* JumpBackward(compiler::Node* jump_offset); + void JumpBackward(TNode<IntPtrT> jump_offset); // Jump forward relative to the current bytecode by |jump_offset| if the // word values |lhs| and |rhs| are equal. 
- void JumpIfTaggedEqual(compiler::TNode<Object> lhs, - compiler::TNode<Object> rhs, - compiler::Node* jump_offset); + void JumpIfTaggedEqual(TNode<Object> lhs, TNode<Object> rhs, + TNode<IntPtrT> jump_offset); // Jump forward relative to the current bytecode by |jump_offset| if the // word values |lhs| and |rhs| are not equal. - void JumpIfTaggedNotEqual(compiler::TNode<Object> lhs, - compiler::TNode<Object> rhs, - compiler::Node* jump_offset); + void JumpIfTaggedNotEqual(TNode<Object> lhs, TNode<Object> rhs, + TNode<IntPtrT> jump_offset); // Updates the profiler interrupt budget for a return. void UpdateInterruptBudgetOnReturn(); // Returns the OSR nesting level from the bytecode header. - compiler::Node* LoadOsrNestingLevel(); + TNode<Int8T> LoadOsrNestingLevel(); // Dispatch to the bytecode. - compiler::Node* Dispatch(); + void Dispatch(); // Dispatch bytecode as wide operand variant. void DispatchWide(OperandScale operand_scale); // Dispatch to |target_bytecode| at |new_bytecode_offset|. // |target_bytecode| should be equivalent to loading from the offset. - compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode, - compiler::Node* new_bytecode_offset); + void DispatchToBytecode(TNode<WordT> target_bytecode, + TNode<IntPtrT> new_bytecode_offset); // Abort with the given abort reason. void Abort(AbortReason abort_reason); - void AbortIfWordNotEqual(compiler::TNode<WordT> lhs, - compiler::TNode<WordT> rhs, + void AbortIfWordNotEqual(TNode<WordT> lhs, TNode<WordT> rhs, AbortReason abort_reason); // Abort if |register_count| is invalid for given register file array. - void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers, - compiler::Node* formal_parameter_count, - compiler::Node* register_count); + void AbortIfRegisterCountInvalid( + TNode<FixedArrayBase> parameters_and_registers, + TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count); // Dispatch to frame dropper trampoline if necessary. - void MaybeDropFrames(compiler::Node* context); + void MaybeDropFrames(TNode<Context> context); // Returns the offset from the BytecodeArrayPointer of the current bytecode. TNode<IntPtrT> BytecodeOffset(); @@ -277,27 +250,27 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { void ToNumberOrNumeric(Object::Conversion mode); private: - // Returns a tagged pointer to the current function's BytecodeArray object. - compiler::Node* BytecodeArrayTaggedPointer(); + // Returns a pointer to the current function's BytecodeArray object. + TNode<BytecodeArray> BytecodeArrayTaggedPointer(); - // Returns a raw pointer to first entry in the interpreter dispatch table. - compiler::Node* DispatchTableRawPointer(); + // Returns a pointer to first entry in the interpreter dispatch table. + TNode<ExternalReference> DispatchTablePointer(); // Returns the accumulator value without checking whether bytecode // uses it. This is intended to be used only in dispatch and in // tracing as these need to bypass accumulator use validity checks. - compiler::Node* GetAccumulatorUnchecked(); + TNode<Object> GetAccumulatorUnchecked(); // Returns the frame pointer for the interpreted frame of the function being // interpreted. TNode<RawPtrT> GetInterpretedFramePointer(); // Operations on registers. 
- compiler::TNode<IntPtrT> RegisterLocation(Register reg); - compiler::TNode<IntPtrT> RegisterLocation(compiler::Node* reg_index); - compiler::TNode<IntPtrT> NextRegister(compiler::Node* reg_index); - compiler::TNode<Object> LoadRegister(Node* reg_index); - void StoreRegister(compiler::Node* value, compiler::Node* reg_index); + TNode<IntPtrT> RegisterLocation(Register reg); + TNode<IntPtrT> RegisterLocation(TNode<IntPtrT> reg_index); + TNode<IntPtrT> NextRegister(TNode<IntPtrT> reg_index); + TNode<Object> LoadRegister(TNode<IntPtrT> reg_index); + void StoreRegister(TNode<Object> value, TNode<IntPtrT> reg_index); // Saves and restores interpreter bytecode offset to the interpreter stack // frame when performing a call. @@ -305,7 +278,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { void CallEpilogue(); // Increment the dispatch counter for the (current, next) bytecode pair. - void TraceBytecodeDispatch(compiler::Node* target_index); + void TraceBytecodeDispatch(TNode<WordT> target_bytecode); // Traces the current bytecode by calling |function_id|. void TraceBytecode(Runtime::FunctionId function_id); @@ -313,74 +286,74 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // Updates the bytecode array's interrupt budget by a 32-bit unsigned |weight| // and calls Runtime::kInterrupt if counter reaches zero. If |backward|, then // the interrupt budget is decremented, otherwise it is incremented. - void UpdateInterruptBudget(compiler::Node* weight, bool backward); + void UpdateInterruptBudget(TNode<Int32T> weight, bool backward); // Returns the offset of register |index| relative to RegisterFilePointer(). - compiler::TNode<IntPtrT> RegisterFrameOffset(compiler::Node* index); + TNode<IntPtrT> RegisterFrameOffset(TNode<IntPtrT> index); // Returns the offset of an operand relative to the current bytecode offset. - compiler::Node* OperandOffset(int operand_index); + TNode<IntPtrT> OperandOffset(int operand_index); // Returns a value built from an sequence of bytes in the bytecode // array starting at |relative_offset| from the current bytecode. // The |result_type| determines the size and signedness. of the // value read. This method should only be used on architectures that // do not support unaligned memory accesses. - compiler::TNode<Word32T> BytecodeOperandReadUnaligned( + TNode<Word32T> BytecodeOperandReadUnaligned( int relative_offset, MachineType result_type, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); // Returns zero- or sign-extended to word32 value of the operand. 
- compiler::TNode<Uint8T> BytecodeOperandUnsignedByte( + TNode<Uint8T> BytecodeOperandUnsignedByte( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - compiler::TNode<Int8T> BytecodeOperandSignedByte( + TNode<Int8T> BytecodeOperandSignedByte( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - compiler::TNode<Uint16T> BytecodeOperandUnsignedShort( + TNode<Uint16T> BytecodeOperandUnsignedShort( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - compiler::TNode<Int16T> BytecodeOperandSignedShort( + TNode<Int16T> BytecodeOperandSignedShort( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - compiler::TNode<Uint32T> BytecodeOperandUnsignedQuad( + TNode<Uint32T> BytecodeOperandUnsignedQuad( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - compiler::TNode<Int32T> BytecodeOperandSignedQuad( + TNode<Int32T> BytecodeOperandSignedQuad( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); // Returns zero- or sign-extended to word32 value of the operand of // given size. - compiler::TNode<Int32T> BytecodeSignedOperand( + TNode<Int32T> BytecodeSignedOperand( int operand_index, OperandSize operand_size, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - compiler::TNode<Uint32T> BytecodeUnsignedOperand( + TNode<Uint32T> BytecodeUnsignedOperand( int operand_index, OperandSize operand_size, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); // Returns the word-size sign-extended register index for bytecode operand // |operand_index| in the current bytecode. Value is not poisoned on // speculation since the value loaded from the register is poisoned instead. - compiler::Node* BytecodeOperandReg( + TNode<IntPtrT> BytecodeOperandReg( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); // Returns the word zero-extended index immediate for bytecode operand // |operand_index| in the current bytecode for use when loading a . - compiler::Node* BytecodeOperandConstantPoolIdx( + TNode<UintPtrT> BytecodeOperandConstantPoolIdx( int operand_index, LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); // Jump relative to the current bytecode by the |jump_offset|. If |backward|, // then jump backward (subtract the offset), otherwise jump forward (add the // offset). Helper function for Jump and JumpBackward. - compiler::Node* Jump(compiler::Node* jump_offset, bool backward); + void Jump(TNode<IntPtrT> jump_offset, bool backward); // Jump forward relative to the current bytecode by |jump_offset| if the // |condition| is true. Helper function for JumpIfTaggedEqual and // JumpIfTaggedNotEqual. - void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset); + void JumpConditional(TNode<BoolT> condition, TNode<IntPtrT> jump_offset); // Save the bytecode offset to the interpreter frame. void SaveBytecodeOffset(); @@ -394,29 +367,22 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // Updates and returns BytecodeOffset() advanced by delta bytecodes. // Traces the exit of the current bytecode. TNode<IntPtrT> Advance(int delta); - TNode<IntPtrT> Advance(SloppyTNode<IntPtrT> delta, bool backward = false); + TNode<IntPtrT> Advance(TNode<IntPtrT> delta, bool backward = false); // Load the bytecode at |bytecode_offset|. 
- compiler::TNode<WordT> LoadBytecode(compiler::Node* bytecode_offset); + TNode<WordT> LoadBytecode(TNode<IntPtrT> bytecode_offset); // Look ahead for Star and inline it in a branch. Returns a new target // bytecode node for dispatch. - compiler::TNode<WordT> StarDispatchLookahead( - compiler::TNode<WordT> target_bytecode); + TNode<WordT> StarDispatchLookahead(TNode<WordT> target_bytecode); // Build code for Star at the current BytecodeOffset() and Advance() to the // next dispatch offset. void InlineStar(); - // Dispatch to the bytecode handler with code offset |handler|. - compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler, - compiler::Node* bytecode_offset, - compiler::Node* target_bytecode); - // Dispatch to the bytecode handler with code entry point |handler_entry|. - compiler::Node* DispatchToBytecodeHandlerEntry( - compiler::Node* handler_entry, compiler::Node* bytecode_offset, - compiler::Node* target_bytecode); + void DispatchToBytecodeHandlerEntry(TNode<RawPtrT> handler_entry, + TNode<IntPtrT> bytecode_offset); int CurrentBytecodeSize() const; @@ -424,11 +390,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { Bytecode bytecode_; OperandScale operand_scale_; - TVariable<RawPtrT> interpreted_frame_pointer_; - CodeStubAssembler::Variable bytecode_array_; - TVariable<IntPtrT> bytecode_offset_; - CodeStubAssembler::Variable dispatch_table_; - CodeStubAssembler::Variable accumulator_; + CodeStubAssembler::TVariable<RawPtrT> interpreted_frame_pointer_; + CodeStubAssembler::TVariable<BytecodeArray> bytecode_array_; + CodeStubAssembler::TVariable<IntPtrT> bytecode_offset_; + CodeStubAssembler::TVariable<ExternalReference> dispatch_table_; + CodeStubAssembler::TVariable<Object> accumulator_; AccumulatorUse accumulator_use_; bool made_call_; bool reloaded_frame_ptr_; diff --git a/chromium/v8/src/interpreter/interpreter-generator.cc b/chromium/v8/src/interpreter/interpreter-generator.cc index e8569ecd55b..5f686f86b88 100644 --- a/chromium/v8/src/interpreter/interpreter-generator.cc +++ b/chromium/v8/src/interpreter/interpreter-generator.cc @@ -35,7 +35,6 @@ namespace { using compiler::Node; using Label = CodeStubAssembler::Label; -using Variable = CodeStubAssembler::Variable; #define IGNITION_HANDLER(Name, BaseAssembler) \ class Name##Assembler : public BaseAssembler { \ @@ -71,7 +70,7 @@ IGNITION_HANDLER(LdaZero, InterpreterAssembler) { // // Load an integer literal into the accumulator as a Smi. IGNITION_HANDLER(LdaSmi, InterpreterAssembler) { - Node* smi_int = BytecodeOperandImmSmi(0); + TNode<Smi> smi_int = BytecodeOperandImmSmi(0); SetAccumulator(smi_int); Dispatch(); } @@ -80,7 +79,7 @@ IGNITION_HANDLER(LdaSmi, InterpreterAssembler) { // // Load constant literal at |idx| in the constant pool into the accumulator. 
IGNITION_HANDLER(LdaConstant, InterpreterAssembler) { - Node* constant = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Object> constant = LoadConstantPoolEntryAtOperandIndex(0); SetAccumulator(constant); Dispatch(); } @@ -161,7 +160,6 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler { void LdaGlobal(int slot_operand_index, int name_operand_index, TypeofMode typeof_mode) { TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); - Node* feedback_slot = BytecodeOperandIdx(slot_operand_index); AccessorAssembler accessor_asm(state()); ExitPoint exit_point(this, [=](Node* result) { @@ -169,17 +167,25 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler { Dispatch(); }); + LazyNode<Smi> lazy_smi_slot = [=] { + return SmiTag(Signed(BytecodeOperandIdx(slot_operand_index))); + }; + + LazyNode<UintPtrT> lazy_slot = [=] { + return BytecodeOperandIdx(slot_operand_index); + }; + LazyNode<Context> lazy_context = [=] { return GetContext(); }; LazyNode<Name> lazy_name = [=] { - Node* name = LoadConstantPoolEntryAtOperandIndex(name_operand_index); - return CAST(name); + TNode<Name> name = + CAST(LoadConstantPoolEntryAtOperandIndex(name_operand_index)); + return name; }; - ParameterMode slot_mode = CodeStubAssembler::INTPTR_PARAMETERS; - accessor_asm.LoadGlobalIC(maybe_feedback_vector, feedback_slot, - lazy_context, lazy_name, typeof_mode, &exit_point, - slot_mode); + accessor_asm.LoadGlobalIC(maybe_feedback_vector, lazy_smi_slot, lazy_slot, + lazy_context, lazy_name, typeof_mode, + &exit_point); } }; @@ -213,9 +219,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) { TNode<Context> context = GetContext(); // Store the global via the StoreGlobalIC. - Node* name = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0)); TNode<Object> value = GetAccumulator(); - Node* raw_slot = BytecodeOperandIdx(1); + TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1)); TNode<Smi> smi_slot = SmiTag(raw_slot); TNode<HeapObject> maybe_vector = LoadFeedbackVector(); @@ -240,9 +246,9 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) { // chain starting at |context| into the accumulator. IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) { TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0)); - Node* slot_index = BytecodeOperandIdx(1); + TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1)); TNode<Uint32T> depth = BytecodeOperandUImm(2); - Node* slot_context = GetContextAtDepth(context, depth); + TNode<Context> slot_context = GetContextAtDepth(context, depth); TNode<Object> result = LoadContextElement(slot_context, slot_index); SetAccumulator(result); Dispatch(); @@ -254,9 +260,9 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) { // chain starting at |context| into the accumulator. IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) { TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0)); - Node* slot_index = BytecodeOperandIdx(1); + TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1)); TNode<Uint32T> depth = BytecodeOperandUImm(2); - Node* slot_context = GetContextAtDepth(context, depth); + TNode<Context> slot_context = GetContextAtDepth(context, depth); TNode<Object> result = LoadContextElement(slot_context, slot_index); SetAccumulator(result); Dispatch(); @@ -266,7 +272,7 @@ IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) { // // Load the object in |slot_index| of the current context into the accumulator. 
IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) { - Node* slot_index = BytecodeOperandIdx(0); + TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0)); TNode<Context> slot_context = GetContext(); TNode<Object> result = LoadContextElement(slot_context, slot_index); SetAccumulator(result); @@ -277,7 +283,7 @@ IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) { // // Load the object in |slot_index| of the current context into the accumulator. IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) { - Node* slot_index = BytecodeOperandIdx(0); + TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0)); TNode<Context> slot_context = GetContext(); TNode<Object> result = LoadContextElement(slot_context, slot_index); SetAccumulator(result); @@ -291,9 +297,9 @@ IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) { IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); TNode<Context> context = CAST(LoadRegisterAtOperandIndex(0)); - Node* slot_index = BytecodeOperandIdx(1); + TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1)); TNode<Uint32T> depth = BytecodeOperandUImm(2); - Node* slot_context = GetContextAtDepth(context, depth); + TNode<Context> slot_context = GetContextAtDepth(context, depth); StoreContextElement(slot_context, slot_index, value); Dispatch(); } @@ -304,7 +310,7 @@ IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) { // context. IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Node* slot_index = BytecodeOperandIdx(0); + TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(0)); TNode<Context> slot_context = GetContext(); StoreContextElement(slot_context, slot_index, value); Dispatch(); @@ -315,7 +321,7 @@ IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) { // Lookup the object with the name in constant pool entry |name_index| // dynamically. IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) { - Node* name = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0)); TNode<Context> context = GetContext(); TNode<Object> result = CallRuntime(Runtime::kLoadLookupSlot, context, name); SetAccumulator(result); @@ -327,7 +333,7 @@ IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) { // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) { - Node* name = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0)); TNode<Context> context = GetContext(); TNode<Object> result = CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name); @@ -344,7 +350,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler { void LookupContextSlot(Runtime::FunctionId function_id) { TNode<Context> context = GetContext(); - Node* slot_index = BytecodeOperandIdx(1); + TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(1)); TNode<Uint32T> depth = BytecodeOperandUImm(2); Label slowpath(this, Label::kDeferred); @@ -354,7 +360,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler { // Fast path does a normal load context. 
{ - Node* slot_context = GetContextAtDepth(context, depth); + TNode<Context> slot_context = GetContextAtDepth(context, depth); TNode<Object> result = LoadContextElement(slot_context, slot_index); SetAccumulator(result); Dispatch(); @@ -363,7 +369,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler { // Slow path when we have to call out to the runtime. BIND(&slowpath); { - Node* name = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0)); TNode<Object> result = CallRuntime(function_id, context, name); SetAccumulator(result); Dispatch(); @@ -419,7 +425,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler { // Slow path when we have to call out to the runtime BIND(&slowpath); { - Node* name = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0)); TNode<Object> result = CallRuntime(function_id, context, name); SetAccumulator(result); Dispatch(); @@ -450,10 +456,10 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof, // pool entry |name_index|. IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Node* name = LoadConstantPoolEntryAtOperandIndex(0); - Node* bytecode_flags = BytecodeOperandFlag(1); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0)); + TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(1); TNode<Context> context = GetContext(); - Variable var_result(this, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result); Label sloppy(this), strict(this), end(this); DCHECK_EQ(0, LanguageMode::kSloppy); @@ -467,8 +473,8 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) { { CSA_ASSERT(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>( bytecode_flags)); - var_result.Bind( - CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value)); + var_result = + CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value); Goto(&end); } @@ -481,15 +487,15 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) { BIND(&hoisting); { - var_result.Bind(CallRuntime(Runtime::kStoreLookupSlot_SloppyHoisting, - context, name, value)); + var_result = CallRuntime(Runtime::kStoreLookupSlot_SloppyHoisting, + context, name, value); Goto(&end); } BIND(&ordinary); { - var_result.Bind( - CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value)); + var_result = + CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value); Goto(&end); } } @@ -507,24 +513,24 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) { // constant pool entry <name_index>. IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) { TNode<HeapObject> feedback_vector = LoadFeedbackVector(); - Node* feedback_slot = BytecodeOperandIdx(2); - TNode<Smi> smi_slot = SmiTag(feedback_slot); + TNode<UintPtrT> feedback_slot = BytecodeOperandIdx(2); // Load receiver. TNode<Object> recv = LoadRegisterAtOperandIndex(0); // Load the name and context lazily. 
- LazyNode<Name> name = [=] { + LazyNode<Smi> lazy_smi_slot = [=] { return SmiTag(Signed(feedback_slot)); }; + LazyNode<Name> lazy_name = [=] { return CAST(LoadConstantPoolEntryAtOperandIndex(1)); }; - LazyNode<Context> context = [=] { return GetContext(); }; + LazyNode<Context> lazy_context = [=] { return GetContext(); }; Label done(this); - Variable var_result(this, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result); ExitPoint exit_point(this, &done, &var_result); - AccessorAssembler::LazyLoadICParameters params(context, recv, name, smi_slot, - feedback_vector); + AccessorAssembler::LazyLoadICParameters params( + lazy_context, recv, lazy_name, lazy_smi_slot, feedback_vector); AccessorAssembler accessor_asm(state()); accessor_asm.LoadIC_BytecodeHandler(¶ms, &exit_point); @@ -540,7 +546,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) { // Calls the GetProperty builtin for <object> and the key in the accumulator. IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) { TNode<Object> object = LoadRegisterAtOperandIndex(0); - Node* name = LoadConstantPoolEntryAtOperandIndex(1); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1)); TNode<Context> context = GetContext(); TNode<Object> result = CallBuiltin(Builtins::kGetProperty, context, object, name); @@ -555,14 +561,14 @@ IGNITION_HANDLER(LdaNamedPropertyNoFeedback, InterpreterAssembler) { IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) { TNode<Object> object = LoadRegisterAtOperandIndex(0); TNode<Object> name = GetAccumulator(); - Node* raw_slot = BytecodeOperandIdx(1); + TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1)); TNode<Smi> smi_slot = SmiTag(raw_slot); TNode<HeapObject> feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - VARIABLE(var_result, MachineRepresentation::kTagged); - var_result.Bind(CallBuiltin(Builtins::kKeyedLoadIC, context, object, name, - smi_slot, feedback_vector)); + TVARIABLE(Object, var_result); + var_result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name, + smi_slot, feedback_vector); SetAccumulator(var_result.value()); Dispatch(); } @@ -577,16 +583,16 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler { void StaNamedProperty(Callable ic, NamedPropertyType property_type) { TNode<Code> code_target = HeapConstant(ic.code()); TNode<Object> object = LoadRegisterAtOperandIndex(0); - Node* name = LoadConstantPoolEntryAtOperandIndex(1); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1)); TNode<Object> value = GetAccumulator(); - Node* raw_slot = BytecodeOperandIdx(2); + TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2)); TNode<Smi> smi_slot = SmiTag(raw_slot); TNode<HeapObject> maybe_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - VARIABLE(var_result, MachineRepresentation::kTagged); - var_result.Bind(CallStub(ic.descriptor(), code_target, context, object, - name, value, smi_slot, maybe_vector)); + TVARIABLE(Object, var_result); + var_result = CallStub(ic.descriptor(), code_target, context, object, name, + value, smi_slot, maybe_vector); // To avoid special logic in the deoptimizer to re-materialize the value in // the accumulator, we overwrite the accumulator after the IC call. 
It // doesn't really matter what we write to the accumulator here, since we @@ -624,7 +630,7 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) { IGNITION_HANDLER(StaNamedPropertyNoFeedback, InterpreterStoreNamedPropertyAssembler) { TNode<Object> object = LoadRegisterAtOperandIndex(0); - Node* name = LoadConstantPoolEntryAtOperandIndex(1); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1)); TNode<Object> value = GetAccumulator(); TNode<Context> context = GetContext(); @@ -642,14 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) { TNode<Object> object = LoadRegisterAtOperandIndex(0); TNode<Object> name = LoadRegisterAtOperandIndex(1); TNode<Object> value = GetAccumulator(); - Node* raw_slot = BytecodeOperandIdx(2); + TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2)); TNode<Smi> smi_slot = SmiTag(raw_slot); TNode<HeapObject> maybe_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - VARIABLE(var_result, MachineRepresentation::kTagged); - var_result.Bind(CallBuiltin(Builtins::kKeyedStoreIC, context, object, name, - value, smi_slot, maybe_vector)); + TVARIABLE(Object, var_result); + var_result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name, + value, smi_slot, maybe_vector); // To avoid special logic in the deoptimizer to re-materialize the value in // the accumulator, we overwrite the accumulator after the IC call. It // doesn't really matter what we write to the accumulator here, since we @@ -667,14 +673,14 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) { TNode<Object> array = LoadRegisterAtOperandIndex(0); TNode<Object> index = LoadRegisterAtOperandIndex(1); TNode<Object> value = GetAccumulator(); - Node* raw_slot = BytecodeOperandIdx(2); + TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2)); TNode<Smi> smi_slot = SmiTag(raw_slot); TNode<HeapObject> feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - VARIABLE(var_result, MachineRepresentation::kTagged); - var_result.Bind(CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array, - index, value, smi_slot, feedback_vector)); + TVARIABLE(Object, var_result); + var_result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array, + index, value, smi_slot, feedback_vector); // To avoid special logic in the deoptimizer to re-materialize the value in // the accumulator, we overwrite the accumulator after the IC call. 
It // doesn't really matter what we write to the accumulator here, since we @@ -696,8 +702,9 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) { TNode<Object> object = LoadRegisterAtOperandIndex(0); TNode<Object> name = LoadRegisterAtOperandIndex(1); TNode<Object> value = GetAccumulator(); - TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2)); - TNode<Smi> vector_index = SmiTag(BytecodeOperandIdx(3)); + TNode<Smi> flags = + SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2))); + TNode<Smi> vector_index = BytecodeOperandIdxSmi(3); TNode<HeapObject> feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); @@ -708,7 +715,7 @@ IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) { } IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) { - Node* position = BytecodeOperandImmSmi(0); + TNode<Smi> position = BytecodeOperandImmSmi(0); TNode<Object> value = GetAccumulator(); TNode<HeapObject> feedback_vector = LoadFeedbackVector(); @@ -725,10 +732,10 @@ IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) { // identified by <cell_index>. <depth> is the depth of the current context // relative to the module context. IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) { - Node* cell_index = BytecodeOperandImmIntPtr(0); + TNode<IntPtrT> cell_index = BytecodeOperandImmIntPtr(0); TNode<Uint32T> depth = BytecodeOperandUImm(1); - Node* module_context = GetContextAtDepth(GetContext(), depth); + TNode<Context> module_context = GetContextAtDepth(GetContext(), depth); TNode<SourceTextModule> module = CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX)); @@ -741,7 +748,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) { TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>( module, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). - TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1)); + TNode<IntPtrT> export_index = IntPtrSub(cell_index, IntPtrConstant(1)); TNode<Cell> cell = CAST(LoadFixedArrayElement(regular_exports, export_index)); SetAccumulator(LoadObjectField(cell, Cell::kValueOffset)); @@ -753,7 +760,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) { TNode<FixedArray> regular_imports = LoadObjectField<FixedArray>( module, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). - TNode<WordT> import_index = IntPtrSub(IntPtrConstant(-1), cell_index); + TNode<IntPtrT> import_index = IntPtrSub(IntPtrConstant(-1), cell_index); TNode<Cell> cell = CAST(LoadFixedArrayElement(regular_imports, import_index)); SetAccumulator(LoadObjectField(cell, Cell::kValueOffset)); @@ -770,10 +777,10 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) { // <depth> is the depth of the current context relative to the module context. 
IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Node* cell_index = BytecodeOperandImmIntPtr(0); + TNode<IntPtrT> cell_index = BytecodeOperandImmIntPtr(0); TNode<Uint32T> depth = BytecodeOperandUImm(1); - Node* module_context = GetContextAtDepth(GetContext(), depth); + TNode<Context> module_context = GetContextAtDepth(GetContext(), depth); TNode<SourceTextModule> module = CAST(LoadContextElement(module_context, Context::EXTENSION_INDEX)); @@ -786,7 +793,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) { TNode<FixedArray> regular_exports = LoadObjectField<FixedArray>( module, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). - TNode<WordT> export_index = IntPtrSub(cell_index, IntPtrConstant(1)); + TNode<IntPtrT> export_index = IntPtrSub(cell_index, IntPtrConstant(1)); TNode<Object> cell = LoadFixedArrayElement(regular_exports, export_index); StoreObjectField(cell, Cell::kValueOffset, value); Goto(&end); @@ -830,34 +837,35 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler { OperandScale operand_scale) : InterpreterAssembler(state, bytecode, operand_scale) {} - using BinaryOpGenerator = - Node* (BinaryOpAssembler::*)(Node* context, Node* left, Node* right, - Node* slot, Node* vector, bool lhs_is_smi); + using BinaryOpGenerator = TNode<Object> (BinaryOpAssembler::*)( + TNode<Context> context, TNode<Object> left, TNode<Object> right, + TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector, + bool rhs_known_smi); void BinaryOpWithFeedback(BinaryOpGenerator generator) { TNode<Object> lhs = LoadRegisterAtOperandIndex(0); TNode<Object> rhs = GetAccumulator(); TNode<Context> context = GetContext(); - Node* slot_index = BytecodeOperandIdx(1); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(1); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); BinaryOpAssembler binop_asm(state()); - Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index, - maybe_feedback_vector, false); + TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index, + maybe_feedback_vector, false); SetAccumulator(result); Dispatch(); } void BinaryOpSmiWithFeedback(BinaryOpGenerator generator) { TNode<Object> lhs = GetAccumulator(); - Node* rhs = BytecodeOperandImmSmi(0); + TNode<Smi> rhs = BytecodeOperandImmSmi(0); TNode<Context> context = GetContext(); - Node* slot_index = BytecodeOperandIdx(1); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(1); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); BinaryOpAssembler binop_asm(state()); - Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index, - maybe_feedback_vector, true); + TNode<Object> result = (binop_asm.*generator)(context, lhs, rhs, slot_index, + maybe_feedback_vector, true); SetAccumulator(result); Dispatch(); } @@ -959,15 +967,15 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler { TNode<Object> left = LoadRegisterAtOperandIndex(0); TNode<Object> right = GetAccumulator(); TNode<Context> context = GetContext(); - Node* slot_index = BytecodeOperandIdx(1); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(1); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TVARIABLE(Smi, var_left_feedback); TVARIABLE(Smi, var_right_feedback); - VARIABLE(var_left_word32, MachineRepresentation::kWord32); - VARIABLE(var_right_word32, MachineRepresentation::kWord32); - VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left); - VARIABLE(var_right_bigint, 
MachineRepresentation::kTagged); + TVARIABLE(Word32T, var_left_word32); + TVARIABLE(Word32T, var_right_word32); + TVARIABLE(Object, var_left_bigint, left); + TVARIABLE(Object, var_right_bigint); Label if_left_number(this), do_number_op(this); Label if_left_bigint(this), do_bigint_op(this); @@ -1007,14 +1015,16 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler { void BitwiseBinaryOpWithSmi(Operation bitwise_op) { TNode<Object> left = GetAccumulator(); - Node* right = BytecodeOperandImmSmi(0); - Node* slot_index = BytecodeOperandIdx(1); + TNode<Smi> right = BytecodeOperandImmSmi(0); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(1); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); TVARIABLE(Smi, var_left_feedback); - VARIABLE(var_left_word32, MachineRepresentation::kWord32); - VARIABLE(var_left_bigint, MachineRepresentation::kTagged); + TVARIABLE(Word32T, var_left_word32); + // TODO(v8:6949): var_left_bigint should be BigInt, but before that we need + // to clean up TaggedToWord32OrBigIntWithFeedback and related methods. + TVARIABLE(Object, var_left_bigint); Label do_smi_op(this), if_bigint_mix(this); TaggedToWord32OrBigIntWithFeedback(context, left, &do_smi_op, @@ -1115,13 +1125,15 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) { // Perform bitwise-not on the accumulator. IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) { TNode<Object> operand = GetAccumulator(); - Node* slot_index = BytecodeOperandIdx(0); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(0); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - VARIABLE(var_word32, MachineRepresentation::kWord32); + TVARIABLE(Word32T, var_word32); TVARIABLE(Smi, var_feedback); - VARIABLE(var_bigint, MachineRepresentation::kTagged); + // TODO(v8:6949): var_bigint should be BigInt, but before that we need to + // clean up TaggedToWord32OrBigIntWithFeedback and related methods. + TVARIABLE(Object, var_bigint); Label if_number(this), if_bigint(this); TaggedToWord32OrBigIntWithFeedback(context, operand, &if_number, &var_word32, &if_bigint, &var_bigint, &var_feedback); @@ -1184,20 +1196,20 @@ class UnaryNumericOpAssembler : public InterpreterAssembler { virtual ~UnaryNumericOpAssembler() = default; // Must return a tagged value. - virtual TNode<Number> SmiOp(TNode<Smi> smi_value, Variable* var_feedback, - Label* do_float_op, Variable* var_float) = 0; + virtual TNode<Number> SmiOp(TNode<Smi> smi_value, + TVariable<Smi>* var_feedback, Label* do_float_op, + TVariable<Float64T>* var_float) = 0; // Must return a Float64 value. - virtual Node* FloatOp(Node* float_value) = 0; + virtual TNode<Float64T> FloatOp(TNode<Float64T> float_value) = 0; // Must return a tagged value. 
- virtual Node* BigIntOp(Node* bigint_value) = 0; + virtual TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) = 0; void UnaryOpWithFeedback() { - VARIABLE(var_value, MachineRepresentation::kTagged, GetAccumulator()); - VARIABLE(var_result, MachineRepresentation::kTagged); - VARIABLE(var_float_value, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_value, GetAccumulator()); + TVARIABLE(Object, var_result); + TVARIABLE(Float64T, var_float_value); TVARIABLE(Smi, var_feedback, SmiConstant(BinaryOperationFeedback::kNone)); - Variable* loop_vars[] = {&var_value, &var_feedback}; - Label start(this, arraysize(loop_vars), loop_vars), end(this); + Label start(this, {&var_value, &var_feedback}), end(this); Label do_float_op(this, &var_float_value); Goto(&start); // We might have to try again after ToNumeric conversion. @@ -1206,9 +1218,11 @@ class UnaryNumericOpAssembler : public InterpreterAssembler { Label if_smi(this), if_heapnumber(this), if_oddball(this); Label if_bigint(this, Label::kDeferred); Label if_other(this, Label::kDeferred); - Node* value = var_value.value(); + TNode<Object> value = var_value.value(); GotoIf(TaggedIsSmi(value), &if_smi); - TNode<Map> map = LoadMap(value); + + TNode<HeapObject> value_heap_object = CAST(value); + TNode<Map> map = LoadMap(value_heap_object); GotoIf(IsHeapNumberMap(map), &if_heapnumber); TNode<Uint16T> instance_type = LoadMapInstanceType(map); GotoIf(IsBigIntInstanceType(instance_type), &if_bigint); @@ -1217,20 +1231,20 @@ class UnaryNumericOpAssembler : public InterpreterAssembler { BIND(&if_smi); { - var_result.Bind( - SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value)); + var_result = + SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value); Goto(&end); } BIND(&if_heapnumber); { - var_float_value.Bind(LoadHeapNumberValue(value)); + var_float_value = LoadHeapNumberValue(value_heap_object); Goto(&do_float_op); } BIND(&if_bigint); { - var_result.Bind(BigIntOp(value)); + var_result = BigIntOp(value_heap_object); CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt); Goto(&end); } @@ -1244,7 +1258,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler { SmiConstant(BinaryOperationFeedback::kNone))); OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kNumberOrOddball); - var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset)); + var_value = + LoadObjectField(value_heap_object, Oddball::kToNumberOffset); Goto(&start); } @@ -1256,8 +1271,8 @@ class UnaryNumericOpAssembler : public InterpreterAssembler { CSA_ASSERT(this, SmiEqual(var_feedback.value(), SmiConstant(BinaryOperationFeedback::kNone))); OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny); - var_value.Bind( - CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(), value)); + var_value = CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(), + value_heap_object); Goto(&start); } } @@ -1265,13 +1280,13 @@ class UnaryNumericOpAssembler : public InterpreterAssembler { BIND(&do_float_op); { CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber); - var_result.Bind( - AllocateHeapNumberWithValue(FloatOp(var_float_value.value()))); + var_result = + AllocateHeapNumberWithValue(FloatOp(var_float_value.value())); Goto(&end); } BIND(&end); - Node* slot_index = BytecodeOperandIdx(0); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(0); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot_index); SetAccumulator(var_result.value()); @@ 
-1285,8 +1300,9 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler { OperandScale operand_scale) : UnaryNumericOpAssembler(state, bytecode, operand_scale) {} - TNode<Number> SmiOp(TNode<Smi> smi_value, Variable* var_feedback, - Label* do_float_op, Variable* var_float) override { + TNode<Number> SmiOp(TNode<Smi> smi_value, TVariable<Smi>* var_feedback, + Label* do_float_op, + TVariable<Float64T>* var_float) override { TVARIABLE(Number, var_result); Label if_zero(this), if_min_smi(this), end(this); // Return -0 if operand is 0. @@ -1306,18 +1322,20 @@ class NegateAssemblerImpl : public UnaryNumericOpAssembler { Goto(&end); BIND(&if_min_smi); - var_float->Bind(SmiToFloat64(smi_value)); + *var_float = SmiToFloat64(smi_value); Goto(do_float_op); BIND(&end); return var_result.value(); } - Node* FloatOp(Node* float_value) override { return Float64Neg(float_value); } + TNode<Float64T> FloatOp(TNode<Float64T> float_value) override { + return Float64Neg(float_value); + } - Node* BigIntOp(Node* bigint_value) override { - return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value, - SmiConstant(Operation::kNegate)); + TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override { + return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value, + SmiConstant(Operation::kNegate))); } }; @@ -1381,8 +1399,9 @@ class IncDecAssembler : public UnaryNumericOpAssembler { return op_; } - TNode<Number> SmiOp(TNode<Smi> value, Variable* var_feedback, - Label* do_float_op, Variable* var_float) override { + TNode<Number> SmiOp(TNode<Smi> value, TVariable<Smi>* var_feedback, + Label* do_float_op, + TVariable<Float64T>* var_float) override { TNode<Smi> one = SmiConstant(1); Label if_overflow(this), if_notoverflow(this); TNode<Smi> result = op() == Operation::kIncrement @@ -1392,7 +1411,7 @@ class IncDecAssembler : public UnaryNumericOpAssembler { BIND(&if_overflow); { - var_float->Bind(SmiToFloat64(value)); + *var_float = SmiToFloat64(value); Goto(do_float_op); } @@ -1401,15 +1420,15 @@ class IncDecAssembler : public UnaryNumericOpAssembler { return result; } - Node* FloatOp(Node* float_value) override { + TNode<Float64T> FloatOp(TNode<Float64T> float_value) override { return op() == Operation::kIncrement ? Float64Add(float_value, Float64Constant(1.0)) : Float64Sub(float_value, Float64Constant(1.0)); } - Node* BigIntOp(Node* bigint_value) override { - return CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value, - SmiConstant(op())); + TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override { + return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value, + SmiConstant(op()))); } void IncWithFeedback() { @@ -1442,17 +1461,17 @@ IGNITION_HANDLER(Dec, IncDecAssembler) { DecWithFeedback(); } // accumulator to a boolean value if required. IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Variable result(this, MachineRepresentation::kTagged); + TVARIABLE(Oddball, result); Label if_true(this), if_false(this), end(this); BranchIfToBooleanIsTrue(value, &if_true, &if_false); BIND(&if_true); { - result.Bind(FalseConstant()); + result = FalseConstant(); Goto(&end); } BIND(&if_false); { - result.Bind(TrueConstant()); + result = TrueConstant(); Goto(&end); } BIND(&end); @@ -1466,20 +1485,20 @@ IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) { // value. 
IGNITION_HANDLER(LogicalNot, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Variable result(this, MachineRepresentation::kTagged); + TVARIABLE(Oddball, result); Label if_true(this), if_false(this), end(this); TNode<Oddball> true_value = TrueConstant(); TNode<Oddball> false_value = FalseConstant(); Branch(TaggedEqual(value, true_value), &if_true, &if_false); BIND(&if_true); { - result.Bind(false_value); + result = false_value; Goto(&end); } BIND(&if_false); { CSA_ASSERT(this, TaggedEqual(value, false_value)); - result.Bind(true_value); + result = true_value; Goto(&end); } BIND(&end); @@ -1493,7 +1512,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) { // object in the accumulator. IGNITION_HANDLER(TypeOf, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Node* result = Typeof(value); + TNode<String> result = Typeof(value); SetAccumulator(result); Dispatch(); } @@ -1550,7 +1569,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler { void JSCall(ConvertReceiverMode receiver_mode) { TNode<Object> function = LoadRegisterAtOperandIndex(0); RegListNodePair args = GetRegisterListAtOperandIndex(1); - Node* slot_id = BytecodeOperandIdx(3); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(3); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); @@ -1583,7 +1602,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler { kFirstArgumentOperandIndex + kRecieverAndArgOperandCount; TNode<Object> function = LoadRegisterAtOperandIndex(0); - Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(kSlotOperandIndex); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); @@ -1598,26 +1617,20 @@ class InterpreterJSCallAssembler : public InterpreterAssembler { case 1: CallJSAndDispatch( function, context, Int32Constant(arg_count), receiver_mode, - static_cast<Node*>( - LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex))); + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)); break; case 2: CallJSAndDispatch( function, context, Int32Constant(arg_count), receiver_mode, - static_cast<Node*>( - LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)), - static_cast<Node*>( - LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1))); + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex), + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)); break; case 3: CallJSAndDispatch( function, context, Int32Constant(arg_count), receiver_mode, - static_cast<Node*>( - LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex)), - static_cast<Node*>( - LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1)), - static_cast<Node*>( - LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2))); + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex), + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1), + LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2)); break; default: UNREACHABLE(); @@ -1676,7 +1689,7 @@ IGNITION_HANDLER(CallNoFeedback, InterpreterJSCallAssembler) { // register |first_arg| and |arg_count| arguments in subsequent // registers. 
IGNITION_HANDLER(CallRuntime, InterpreterAssembler) { - Node* function_id = BytecodeOperandRuntimeId(0); + TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0); RegListNodePair args = GetRegisterListAtOperandIndex(1); TNode<Context> context = GetContext(); Node* result = CallRuntimeN(function_id, context, args); @@ -1690,10 +1703,11 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) { // |function_id| with the first argument in |first_arg| and |arg_count| // arguments in subsequent registers. IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) { - Node* function_id = BytecodeOperandIntrinsicId(0); + TNode<Uint32T> function_id = BytecodeOperandIntrinsicId(0); RegListNodePair args = GetRegisterListAtOperandIndex(1); TNode<Context> context = GetContext(); - Node* result = GenerateInvokeIntrinsic(this, function_id, context, args); + TNode<Object> result = + GenerateInvokeIntrinsic(this, function_id, context, args); SetAccumulator(result); Dispatch(); } @@ -1706,13 +1720,13 @@ IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) { // <first_return + 1> IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) { // Call the runtime function. - Node* function_id = BytecodeOperandRuntimeId(0); + TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0); RegListNodePair args = GetRegisterListAtOperandIndex(1); TNode<Context> context = GetContext(); Node* result_pair = CallRuntimeN(function_id, context, args, 2); // Store the results in <first_return> and <first_return + 1> - Node* result0 = Projection(0, result_pair); - Node* result1 = Projection(1, result_pair); + TNode<Object> result0 = CAST(Projection(0, result_pair)); + TNode<Object> result1 = CAST(Projection(1, result_pair)); StoreRegisterPairAtOperandIndex(result0, result1, 3); Dispatch(); } @@ -1722,12 +1736,12 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) { // Call the JS runtime function that has the |context_index| with the receiver // in register |receiver| and |arg_count| arguments in subsequent registers. IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) { - Node* context_index = BytecodeOperandNativeContextIndex(0); + TNode<IntPtrT> context_index = Signed(BytecodeOperandNativeContextIndex(0)); RegListNodePair args = GetRegisterListAtOperandIndex(1); // Get the function to call from the native context. TNode<Context> context = GetContext(); - TNode<Context> native_context = LoadNativeContext(context); + TNode<NativeContext> native_context = LoadNativeContext(context); TNode<Object> function = LoadContextElement(native_context, context_index); // Call the function. 
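Note on the pattern in the hunks above and below: untyped compiler::Node* values are replaced with statically typed TNode<...> values (for example TNode<Uint32T> function_id in CallRuntime, or the CAST-ed projections in CallRuntimeForPair). As a rough sketch of what that typing buys, the standalone C++ snippet below models a TNode<T>-style wrapper; the Node, Object, and IntPtrT types here are stand-ins for illustration only, not the real CodeStubAssembler classes.

// Hypothetical, simplified analogue of CSA's TNode<T>; not the real V8 types.
struct Node {};  // stand-in for compiler::Node

template <typename T>
struct TNode {
  Node* node = nullptr;  // same underlying representation for every T
};

struct Object {};   // stand-in tag type for tagged values
struct IntPtrT {};  // stand-in tag type for word-sized integers

// With raw Node*, nothing stops a caller from passing a tagged value where a
// word-sized index is expected; with TNode<T> the mistake fails to compile.
TNode<Object> LoadConstantPoolEntry(TNode<IntPtrT> index) {
  (void)index;
  return TNode<Object>{};
}

int main() {
  TNode<IntPtrT> index{};
  TNode<Object> entry = LoadConstantPoolEntry(index);  // OK
  // LoadConstantPoolEntry(entry);  // would not compile: TNode<Object> is not TNode<IntPtrT>
  (void)entry;
  return 0;
}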
@@ -1744,7 +1758,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) { IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) { TNode<Object> callable = LoadRegisterAtOperandIndex(0); RegListNodePair args = GetRegisterListAtOperandIndex(1); - Node* slot_id = BytecodeOperandIdx(3); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(3); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); @@ -1763,11 +1777,11 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) { TNode<Object> new_target = GetAccumulator(); TNode<Object> constructor = LoadRegisterAtOperandIndex(0); RegListNodePair args = GetRegisterListAtOperandIndex(1); - Node* slot_id = BytecodeOperandIdx(3); - TNode<HeapObject> feedback_vector = LoadFeedbackVector(); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(3); + TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - Node* result = ConstructWithSpread(constructor, context, new_target, args, - slot_id, feedback_vector); + TNode<Object> result = ConstructWithSpread( + constructor, context, new_target, args, slot_id, maybe_feedback_vector); SetAccumulator(result); Dispatch(); } @@ -1782,11 +1796,11 @@ IGNITION_HANDLER(Construct, InterpreterAssembler) { TNode<Object> new_target = GetAccumulator(); TNode<Object> constructor = LoadRegisterAtOperandIndex(0); RegListNodePair args = GetRegisterListAtOperandIndex(1); - Node* slot_id = BytecodeOperandIdx(3); - TNode<HeapObject> feedback_vector = LoadFeedbackVector(); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(3); + TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - Node* result = Construct(constructor, context, new_target, args, slot_id, - feedback_vector); + TNode<Object> result = Construct(constructor, context, new_target, args, + slot_id, maybe_feedback_vector); SetAccumulator(result); Dispatch(); } @@ -1802,8 +1816,8 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler { TNode<Object> rhs = GetAccumulator(); TNode<Context> context = GetContext(); - Variable var_type_feedback(this, MachineRepresentation::kTagged); - Node* result; + TVARIABLE(Smi, var_type_feedback); + TNode<Oddball> result; switch (compare_op) { case Operation::kEqual: result = Equal(lhs, rhs, context, &var_type_feedback); @@ -1822,7 +1836,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler { UNREACHABLE(); } - Node* slot_index = BytecodeOperandIdx(1); + TNode<UintPtrT> slot_index = BytecodeOperandIdx(1); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index); @@ -1894,14 +1908,14 @@ IGNITION_HANDLER(TestReferenceEqual, InterpreterAssembler) { IGNITION_HANDLER(TestIn, InterpreterAssembler) { TNode<Object> name = LoadRegisterAtOperandIndex(0); TNode<Object> object = GetAccumulator(); - Node* raw_slot = BytecodeOperandIdx(1); + TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(1)); TNode<Smi> smi_slot = SmiTag(raw_slot); TNode<HeapObject> feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - VARIABLE(var_result, MachineRepresentation::kTagged); - var_result.Bind(CallBuiltin(Builtins::kKeyedHasIC, context, object, name, - smi_slot, feedback_vector)); + TVARIABLE(Object, var_result); + var_result = CallBuiltin(Builtins::kKeyedHasIC, context, object, name, + smi_slot, feedback_vector); SetAccumulator(var_result.value()); Dispatch(); } @@ -1913,15 +1927,16 @@ 
IGNITION_HANDLER(TestIn, InterpreterAssembler) { IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) { TNode<Object> object = LoadRegisterAtOperandIndex(0); TNode<Object> callable = GetAccumulator(); - Node* slot_id = BytecodeOperandIdx(1); - TNode<HeapObject> feedback_vector = LoadFeedbackVector(); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(1); + TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); Label feedback_done(this); - GotoIf(IsUndefined(feedback_vector), &feedback_done); + GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done); // Record feedback for the {callable} in the {feedback_vector}. - CollectCallableFeedback(callable, context, feedback_vector, slot_id); + CollectCallableFeedback(callable, context, CAST(maybe_feedback_vector), + slot_id); Goto(&feedback_done); BIND(&feedback_done); @@ -1980,7 +1995,7 @@ IGNITION_HANDLER(TestUndefined, InterpreterAssembler) { // by |literal_flag|. IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) { TNode<Object> object = GetAccumulator(); - Node* literal_flag = BytecodeOperandFlag(0); + TNode<Uint32T> literal_flag = BytecodeOperandFlag(0); #define MAKE_LABEL(name, lower_case) Label if_##lower_case(this); TYPEOF_LITERAL_LIST(MAKE_LABEL) @@ -2097,7 +2112,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) { // // Jump by the number of bytes represented by the immediate operand |imm|. IGNITION_HANDLER(Jump, InterpreterAssembler) { - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); Jump(relative_jump); } @@ -2117,7 +2132,7 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) { // will misbehave if passed arbitrary input values. IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); CSA_ASSERT(this, IsBoolean(CAST(accumulator))); JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump); } @@ -2141,7 +2156,7 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) { // will misbehave if passed arbitrary input values. IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); CSA_ASSERT(this, IsBoolean(CAST(accumulator))); JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump); } @@ -2164,7 +2179,7 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) { // referenced by the accumulator is true when the object is cast to boolean. IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); Label if_true(this), if_false(this); BranchIfToBooleanIsTrue(value, &if_true, &if_false); BIND(&if_true); @@ -2195,7 +2210,7 @@ IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) { // referenced by the accumulator is false when the object is cast to boolean. 
IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) { TNode<Object> value = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); Label if_true(this), if_false(this); BranchIfToBooleanIsTrue(value, &if_true, &if_false); BIND(&if_true); @@ -2226,7 +2241,7 @@ IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) { // referenced by the accumulator is the null constant. IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); JumpIfTaggedEqual(accumulator, NullConstant(), relative_jump); } @@ -2246,7 +2261,7 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) { // referenced by the accumulator is not the null constant. IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); JumpIfTaggedNotEqual(accumulator, NullConstant(), relative_jump); } @@ -2266,7 +2281,7 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) { // referenced by the accumulator is the undefined constant. IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); JumpIfTaggedEqual(accumulator, UndefinedConstant(), relative_jump); } @@ -2286,7 +2301,7 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) { // referenced by the accumulator is not the undefined constant. IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), relative_jump); } @@ -2314,7 +2329,7 @@ IGNITION_HANDLER(JumpIfUndefinedOrNull, InterpreterAssembler) { Dispatch(); BIND(&do_jump); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); Jump(relative_jump); } @@ -2342,7 +2357,7 @@ IGNITION_HANDLER(JumpIfUndefinedOrNullConstant, InterpreterAssembler) { // referenced by the accumulator is a JSReceiver. IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); - Node* relative_jump = BytecodeOperandUImmWord(0); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this); Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi); @@ -2383,9 +2398,9 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) { // performs a loop nesting check and potentially triggers OSR in case the // current OSR level matches (or exceeds) the specified |loop_depth|. 
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) { - Node* relative_jump = BytecodeOperandUImmWord(0); - Node* loop_depth = BytecodeOperandImm(1); - Node* osr_level = LoadOsrNestingLevel(); + TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); + TNode<Int32T> loop_depth = BytecodeOperandImm(1); + TNode<Int8T> osr_level = LoadOsrNestingLevel(); // Check if OSR points at the given {loop_depth} are armed by comparing it to // the current {osr_level} loaded from the header of the BytecodeArray. @@ -2415,9 +2430,9 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) { // next bytecode. IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) { TNode<Object> acc = GetAccumulator(); - Node* table_start = BytecodeOperandIdx(0); - Node* table_length = BytecodeOperandUImmWord(1); - Node* case_value_base = BytecodeOperandImmIntPtr(2); + TNode<UintPtrT> table_start = BytecodeOperandIdx(0); + TNode<UintPtrT> table_length = BytecodeOperandUImmWord(1); + TNode<IntPtrT> case_value_base = BytecodeOperandImmIntPtr(2); Label fall_through(this); @@ -2426,7 +2441,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) { // accumulator values. CSA_ASSERT(this, TaggedIsSmi(acc)); - TNode<WordT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base); + TNode<IntPtrT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base); GotoIf(IntPtrLessThan(case_value, IntPtrConstant(0)), &fall_through); GotoIf(IntPtrGreaterThanOrEqual(case_value, table_length), &fall_through); TNode<WordT> entry = IntPtrAdd(table_start, case_value); @@ -2442,17 +2457,18 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) { // Creates a regular expression literal for literal index <literal_idx> with // <flags> and the pattern in <pattern_idx>. IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) { - Node* pattern = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Object> pattern = LoadConstantPoolEntryAtOperandIndex(0); TNode<HeapObject> feedback_vector = LoadFeedbackVector(); - Node* slot_id = BytecodeOperandIdx(1); - TNode<Smi> flags = SmiFromInt32(BytecodeOperandFlag(2)); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(1); + TNode<Smi> flags = + SmiFromInt32(UncheckedCast<Int32T>(BytecodeOperandFlag(2))); TNode<Context> context = GetContext(); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(JSRegExp, result); ConstructorBuiltinsAssembler constructor_assembler(state()); - result.Bind(constructor_assembler.EmitCreateRegExpLiteral( - feedback_vector, slot_id, pattern, flags, context)); + result = constructor_assembler.EmitCreateRegExpLiteral( + feedback_vector, slot_id, pattern, flags, context); SetAccumulator(result.value()); Dispatch(); } @@ -2463,9 +2479,9 @@ IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) { // CreateArrayLiteral flags <flags> and constant elements in <element_idx>. IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) { TNode<HeapObject> feedback_vector = LoadFeedbackVector(); - Node* slot_id = BytecodeOperandIdx(1); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(1); TNode<Context> context = GetContext(); - Node* bytecode_flags = BytecodeOperandFlag(2); + TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2); Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred); // No feedback, so handle it as a slow case. 
@@ -2478,8 +2494,8 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) { BIND(&fast_shallow_clone); { ConstructorBuiltinsAssembler constructor_assembler(state()); - Node* result = constructor_assembler.EmitCreateShallowArrayLiteral( - feedback_vector, slot_id, context, &call_runtime, + TNode<JSArray> result = constructor_assembler.EmitCreateShallowArrayLiteral( + CAST(feedback_vector), slot_id, context, &call_runtime, TRACK_ALLOCATION_SITE); SetAccumulator(result); Dispatch(); @@ -2487,14 +2503,14 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) { BIND(&call_runtime); { - TNode<WordT> flags_raw = + TNode<UintPtrT> flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>( bytecode_flags); TNode<Smi> flags = SmiTag(Signed(flags_raw)); - Node* constant_elements = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Object> constant_elements = LoadConstantPoolEntryAtOperandIndex(0); TNode<Object> result = CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector, - SmiTag(slot_id), constant_elements, flags); + SmiTag(Signed(slot_id)), constant_elements, flags); SetAccumulator(result); Dispatch(); } @@ -2504,26 +2520,26 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) { // // Creates an empty JSArray literal for literal index <literal_idx>. IGNITION_HANDLER(CreateEmptyArrayLiteral, InterpreterAssembler) { - TNode<HeapObject> feedback_vector = LoadFeedbackVector(); - Node* slot_id = BytecodeOperandIdx(0); + TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(0); TNode<Context> context = GetContext(); Label no_feedback(this, Label::kDeferred), end(this); - VARIABLE(result, MachineRepresentation::kTagged); - GotoIf(IsUndefined(feedback_vector), &no_feedback); + TVARIABLE(JSArray, result); + GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback); ConstructorBuiltinsAssembler constructor_assembler(state()); - result.Bind(constructor_assembler.EmitCreateEmptyArrayLiteral( - feedback_vector, slot_id, context)); + result = constructor_assembler.EmitCreateEmptyArrayLiteral( + CAST(maybe_feedback_vector), slot_id, context); Goto(&end); BIND(&no_feedback); { TNode<Map> array_map = LoadJSArrayElementsMap(GetInitialFastElementsKind(), LoadNativeContext(context)); - result.Bind(AllocateJSArray(GetInitialFastElementsKind(), array_map, - SmiConstant(0), SmiConstant(0), nullptr, - ParameterMode::SMI_PARAMETERS)); + result = + AllocateJSArray(GetInitialFastElementsKind(), array_map, SmiConstant(0), + SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS); Goto(&end); } @@ -2551,8 +2567,8 @@ IGNITION_HANDLER(CreateArrayFromIterable, InterpreterAssembler) { // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>. IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) { TNode<HeapObject> feedback_vector = LoadFeedbackVector(); - Node* slot_id = BytecodeOperandIdx(1); - Node* bytecode_flags = BytecodeOperandFlag(2); + TNode<UintPtrT> slot_id = BytecodeOperandIdx(1); + TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(2); Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred); // No feedback, so handle it as a slow case. @@ -2567,8 +2583,9 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) { { // If we can do a fast clone do the fast-path in CreateShallowObjectLiteral. 
ConstructorBuiltinsAssembler constructor_assembler(state()); - Node* result = constructor_assembler.EmitCreateShallowObjectLiteral( - feedback_vector, slot_id, &if_not_fast_clone); + TNode<HeapObject> result = + constructor_assembler.EmitCreateShallowObjectLiteral( + CAST(feedback_vector), slot_id, &if_not_fast_clone); SetAccumulator(result); Dispatch(); } @@ -2576,18 +2593,18 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) { BIND(&if_not_fast_clone); { // If we can't do a fast clone, call into the runtime. - Node* object_boilerplate_description = - LoadConstantPoolEntryAtOperandIndex(0); + TNode<ObjectBoilerplateDescription> object_boilerplate_description = + CAST(LoadConstantPoolEntryAtOperandIndex(0)); TNode<Context> context = GetContext(); - TNode<WordT> flags_raw = + TNode<UintPtrT> flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>( bytecode_flags); TNode<Smi> flags = SmiTag(Signed(flags_raw)); - TNode<Object> result = - CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, - SmiTag(slot_id), object_boilerplate_description, flags); + TNode<Object> result = CallRuntime(Runtime::kCreateObjectLiteral, context, + feedback_vector, SmiTag(Signed(slot_id)), + object_boilerplate_description, flags); SetAccumulator(result); // TODO(klaasb) build a single dispatch once the call is inlined Dispatch(); @@ -2600,7 +2617,8 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) { IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) { TNode<Context> context = GetContext(); ConstructorBuiltinsAssembler constructor_assembler(state()); - Node* result = constructor_assembler.EmitCreateEmptyObjectLiteral(context); + TNode<JSObject> result = + constructor_assembler.EmitCreateEmptyObjectLiteral(context); SetAccumulator(result); Dispatch(); } @@ -2611,18 +2629,18 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) { // {source}, converting getters into data properties. IGNITION_HANDLER(CloneObject, InterpreterAssembler) { TNode<Object> source = LoadRegisterAtOperandIndex(0); - Node* bytecode_flags = BytecodeOperandFlag(1); - TNode<WordT> raw_flags = + TNode<Uint32T> bytecode_flags = BytecodeOperandFlag(1); + TNode<UintPtrT> raw_flags = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(bytecode_flags); TNode<Smi> smi_flags = SmiTag(Signed(raw_flags)); - Node* raw_slot = BytecodeOperandIdx(2); + TNode<IntPtrT> raw_slot = Signed(BytecodeOperandIdx(2)); TNode<Smi> smi_slot = SmiTag(raw_slot); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); TNode<Context> context = GetContext(); - Variable var_result(this, MachineRepresentation::kTagged); - var_result.Bind(CallBuiltin(Builtins::kCloneObjectIC, context, source, - smi_flags, smi_slot, maybe_feedback_vector)); + TVARIABLE(Object, var_result); + var_result = CallBuiltin(Builtins::kCloneObjectIC, context, source, smi_flags, + smi_slot, maybe_feedback_vector); SetAccumulator(var_result.value()); Dispatch(); } @@ -2633,14 +2651,14 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) { // accumulator, creating and caching the site object on-demand as per the // specification. 
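The GetTemplateObject handler below uses the same feedback-vector guard as the other rewritten handlers: the vector is held as a plain HeapObject named maybe_feedback_vector until the Undefined check has been taken, and only then CAST to a FeedbackVector. A compressed sketch of that guard; the explicit local "vector" and the label name "no_feedback" are added here for illustration (the patch casts inline at the call sites):

    TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
    Label no_feedback(this, Label::kDeferred);
    // Closures without feedback store Undefined here, so the CAST below is only
    // reachable when a real FeedbackVector is present.
    GotoIf(IsUndefined(maybe_feedback_vector), &no_feedback);
    TNode<FeedbackVector> vector = CAST(maybe_feedback_vector);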
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) { - TNode<HeapObject> feedback_vector = LoadFeedbackVector(); - Node* slot = BytecodeOperandIdx(1); + TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); + TNode<UintPtrT> slot = BytecodeOperandIdx(1); Label call_runtime(this, Label::kDeferred); - GotoIf(IsUndefined(feedback_vector), &call_runtime); + GotoIf(IsUndefined(maybe_feedback_vector), &call_runtime); TNode<Object> cached_value = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(CAST(maybe_feedback_vector), slot)); GotoIf(TaggedEqual(cached_value, SmiConstant(0)), &call_runtime); @@ -2649,8 +2667,8 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) { BIND(&call_runtime); { - Node* description = LoadConstantPoolEntryAtOperandIndex(0); - TNode<Smi> slot_smi = SmiTag(slot); + TNode<Object> description = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Smi> slot_smi = SmiTag(Signed(slot)); TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure())); TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>( @@ -2660,8 +2678,8 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) { description, shared_info, slot_smi); Label end(this); - GotoIf(IsUndefined(feedback_vector), &end); - StoreFeedbackVectorSlot(feedback_vector, slot, result); + GotoIf(IsUndefined(maybe_feedback_vector), &end); + StoreFeedbackVectorSlot(CAST(maybe_feedback_vector), slot, result); Goto(&end); Bind(&end); @@ -2675,10 +2693,10 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) { // Creates a new closure for SharedFunctionInfo at position |index| in the // constant pool and with pretenuring controlled by |flags|. IGNITION_HANDLER(CreateClosure, InterpreterAssembler) { - Node* shared = LoadConstantPoolEntryAtOperandIndex(0); - Node* flags = BytecodeOperandFlag(2); + TNode<Object> shared = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Uint32T> flags = BytecodeOperandFlag(2); TNode<Context> context = GetContext(); - Node* slot = BytecodeOperandIdx(1); + TNode<UintPtrT> slot = BytecodeOperandIdx(1); Label if_undefined(this); TNode<ClosureFeedbackCellArray> feedback_cell_array = @@ -2727,7 +2745,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) { // // Creates a new block context with the scope info constant at |index|. IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) { - Node* scope_info = LoadConstantPoolEntryAtOperandIndex(0); + TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(0)); TNode<Context> context = GetContext(); SetAccumulator(CallRuntime(Runtime::kPushBlockContext, context, scope_info)); Dispatch(); @@ -2739,7 +2757,7 @@ IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) { // and the ScopeInfo at |scope_info_idx|. IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) { TNode<Object> exception = LoadRegisterAtOperandIndex(0); - Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1); + TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(1)); TNode<Context> context = GetContext(); SetAccumulator( CallRuntime(Runtime::kPushCatchContext, context, exception, scope_info)); @@ -2750,8 +2768,8 @@ IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) { // // Creates a new context with number of |slots| for the function closure. 
IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) { - Node* scope_info_idx = BytecodeOperandIdx(0); - Node* scope_info = LoadConstantPoolEntry(scope_info_idx); + TNode<UintPtrT> scope_info_idx = BytecodeOperandIdx(0); + TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntry(scope_info_idx)); TNode<Uint32T> slots = BytecodeOperandUImm(1); TNode<Context> context = GetContext(); ConstructorBuiltinsAssembler constructor_assembler(state()); @@ -2764,8 +2782,8 @@ IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) { // // Creates a new context with number of |slots| for an eval closure. IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) { - Node* scope_info_idx = BytecodeOperandIdx(0); - Node* scope_info = LoadConstantPoolEntry(scope_info_idx); + TNode<UintPtrT> scope_info_idx = BytecodeOperandIdx(0); + TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntry(scope_info_idx)); TNode<Uint32T> slots = BytecodeOperandUImm(1); TNode<Context> context = GetContext(); ConstructorBuiltinsAssembler constructor_assembler(state()); @@ -2780,7 +2798,7 @@ IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) { // with-statement with the object in |register|. IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) { TNode<Object> object = LoadRegisterAtOperandIndex(0); - Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1); + TNode<ScopeInfo> scope_info = CAST(LoadConstantPoolEntryAtOperandIndex(1)); TNode<Context> context = GetContext(); SetAccumulator( CallRuntime(Runtime::kPushWithContext, context, object, scope_info)); @@ -2802,8 +2820,8 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) { // duplicate parameters. TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>( closure, JSFunction::kSharedFunctionInfoOffset); - Node* flags = LoadObjectField(shared_info, SharedFunctionInfo::kFlagsOffset, - MachineType::Uint32()); + TNode<Uint32T> flags = + LoadObjectField<Uint32T>(shared_info, SharedFunctionInfo::kFlagsOffset); TNode<BoolT> has_duplicate_parameters = IsSetWord32<SharedFunctionInfo::HasDuplicateParametersBit>(flags); Branch(has_duplicate_parameters, &if_duplicate_parameters, @@ -2812,7 +2830,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) { BIND(&if_not_duplicate_parameters); { ArgumentsBuiltinsAssembler constructor_assembler(state()); - Node* result = + TNode<JSObject> result = constructor_assembler.EmitFastNewSloppyArguments(context, closure); SetAccumulator(result); Dispatch(); @@ -2832,9 +2850,9 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) { // Creates a new unmapped arguments object. IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) { TNode<Context> context = GetContext(); - TNode<Object> closure = LoadRegister(Register::function_closure()); + TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure())); ArgumentsBuiltinsAssembler builtins_assembler(state()); - Node* result = + TNode<JSObject> result = builtins_assembler.EmitFastNewStrictArguments(context, closure); SetAccumulator(result); Dispatch(); @@ -2844,10 +2862,11 @@ IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) { // // Creates a new rest parameter array. 
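CreateUnmappedArguments above and CreateRestParameter below now read the closure register through a checked cast. Register::function_closure() always holds the current JSFunction, so the CAST narrows the static type (debug builds also verify the type at runtime); a sketch of the idiom as it appears in these handlers:

    TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
    // EmitFastNewStrictArguments / EmitFastNewRestParameter can then accept a
    // typed JSFunction instead of an untyped Node*.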
IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) { - TNode<Object> closure = LoadRegister(Register::function_closure()); + TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure())); TNode<Context> context = GetContext(); ArgumentsBuiltinsAssembler builtins_assembler(state()); - Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure); + TNode<JSObject> result = + builtins_assembler.EmitFastNewRestParameter(context, closure); SetAccumulator(result); Dispatch(); } @@ -2868,7 +2887,7 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) { IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) { TNode<ExternalReference> pending_message = ExternalConstant( ExternalReference::address_of_pending_message_obj(isolate())); - Node* previous_message = Load(MachineType::TaggedPointer(), pending_message); + TNode<HeapObject> previous_message = Load<HeapObject>(pending_message); TNode<Object> new_message = GetAccumulator(); StoreFullTaggedNoWriteBarrier(pending_message, new_message); SetAccumulator(previous_message); @@ -2903,8 +2922,8 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) { // // Aborts execution (via a call to the runtime function). IGNITION_HANDLER(Abort, InterpreterAssembler) { - Node* reason = BytecodeOperandIdx(0); - CallRuntime(Runtime::kAbort, NoContextConstant(), SmiTag(reason)); + TNode<UintPtrT> reason = BytecodeOperandIdx(0); + CallRuntime(Runtime::kAbort, NoContextConstant(), SmiTag(Signed(reason))); Unreachable(); } @@ -2929,7 +2948,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) { BIND(&throw_error); { - Node* name = LoadConstantPoolEntryAtOperandIndex(0); + TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(0)); CallRuntime(Runtime::kThrowAccessedUninitializedVariable, GetContext(), name); // We shouldn't ever return from a throw. @@ -2995,7 +3014,7 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) { TNode<Object> accumulator = GetAccumulator(); \ TNode<Object> result_pair = \ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \ - Node* return_value = Projection(0, result_pair); \ + TNode<Object> return_value = CAST(Projection(0, result_pair)); \ TNode<IntPtrT> original_bytecode = SmiUntag(Projection(1, result_pair)); \ MaybeDropFrames(context); \ SetAccumulator(return_value); \ @@ -3010,7 +3029,7 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK) // coverage. IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) { TNode<Object> closure = LoadRegister(Register::function_closure()); - Node* coverage_array_slot = BytecodeOperandIdxSmi(0); + TNode<Smi> coverage_array_slot = BytecodeOperandIdxSmi(0); TNode<Context> context = GetContext(); CallBuiltin(Builtins::kIncBlockCounter, context, closure, @@ -3025,11 +3044,11 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) { // map of the |receiver| if it has a usable enum cache or a fixed array // with the keys to enumerate in the accumulator. 
IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) { - TNode<Object> receiver = LoadRegisterAtOperandIndex(0); + TNode<HeapObject> receiver = CAST(LoadRegisterAtOperandIndex(0)); TNode<Context> context = GetContext(); Label if_empty(this), if_runtime(this, Label::kDeferred); - Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime); + TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime); SetAccumulator(receiver_map); Dispatch(); @@ -3060,7 +3079,7 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) { IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) { // The {enumerator} is either a Map or a FixedArray. TNode<HeapObject> enumerator = CAST(GetAccumulator()); - Node* vector_index = BytecodeOperandIdx(1); + TNode<UintPtrT> vector_index = BytecodeOperandIdx(1); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); // Check if we're using an enum cache. @@ -3091,8 +3110,8 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) { UpdateFeedback(feedback, maybe_feedback_vector, vector_index); // Construct the cache info triple. - Node* cache_type = enumerator; - Node* cache_array = enum_keys; + TNode<Map> cache_type = map_enumerator; + TNode<FixedArray> cache_array = enum_keys; TNode<Smi> cache_length = SmiTag(Signed(enum_length)); StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0); Dispatch(); @@ -3108,8 +3127,8 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) { vector_index); // Construct the cache info triple. - Node* cache_type = array_enumerator; - Node* cache_array = array_enumerator; + TNode<FixedArray> cache_type = array_enumerator; + TNode<FixedArray> cache_array = array_enumerator; TNode<Smi> cache_length = LoadFixedArrayBaseLength(array_enumerator); StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0); Dispatch(); @@ -3125,7 +3144,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) { TNode<Object> cache_type; TNode<Object> cache_array; std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2); - Node* vector_index = BytecodeOperandIdx(3); + TNode<UintPtrT> vector_index = BytecodeOperandIdx(3); TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector(); // Load the next key from the enumeration array. @@ -3195,21 +3214,22 @@ IGNITION_HANDLER(ForInStep, InterpreterAssembler) { // GetIterator <object> // -// Retrieves the object[Symbol.iterator] method and stores the result -// in the accumulator -// TODO(swapnilgaikwad): Extend the functionality of the bytecode to call -// iterator method for an object +// Retrieves the object[Symbol.iterator] method, calls it and stores +// the result in the accumulator. If the result is not a JSReceiver, throws +// SymbolIteratorInvalid runtime exception. 
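The rewritten GetIterator handler below consumes two feedback slots instead of one: operand 1 feeds the property load of the @@iterator method, operand 2 feeds the subsequent call to it, and both are Smi-tagged before being handed to the GetIteratorWithFeedback builtin. Reduced to its data flow (a sketch of the handler body that follows):

    // Slot for the LoadIC that fetches receiver[Symbol.iterator].
    TNode<IntPtrT> load_feedback_slot = Signed(BytecodeOperandIdx(1));
    // Slot for the CallIC that invokes the fetched method.
    TNode<IntPtrT> call_feedback_slot = Signed(BytecodeOperandIdx(2));
    TNode<Object> iterator =
        CallBuiltin(Builtins::kGetIteratorWithFeedback, context, receiver,
                    SmiTag(load_feedback_slot), SmiTag(call_feedback_slot),
                    feedback_vector);
    SetAccumulator(iterator);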
IGNITION_HANDLER(GetIterator, InterpreterAssembler) { TNode<Object> receiver = LoadRegisterAtOperandIndex(0); TNode<Context> context = GetContext(); TNode<HeapObject> feedback_vector = LoadFeedbackVector(); - Node* feedback_slot = BytecodeOperandIdx(1); - TNode<Smi> smi_slot = SmiTag(feedback_slot); + TNode<IntPtrT> load_feedback_slot = Signed(BytecodeOperandIdx(1)); + TNode<IntPtrT> call_feedback_slot = Signed(BytecodeOperandIdx(2)); + TNode<Smi> load_slot_smi = SmiTag(load_feedback_slot); + TNode<Smi> call_slot_smi = SmiTag(call_feedback_slot); - TNode<Object> result = + TNode<Object> iterator = CallBuiltin(Builtins::kGetIteratorWithFeedback, context, receiver, - smi_slot, feedback_vector); - SetAccumulator(result); + load_slot_smi, call_slot_smi, feedback_vector); + SetAccumulator(iterator); Dispatch(); } @@ -3249,7 +3269,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) { TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure())); TNode<Context> context = GetContext(); RegListNodePair registers = GetRegisterListAtOperandIndex(1); - Node* suspend_id = BytecodeOperandUImmSmi(3); + TNode<Smi> suspend_id = BytecodeOperandUImmSmi(3); TNode<SharedFunctionInfo> shared = CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset)); @@ -3297,10 +3317,10 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) { CAST(LoadObjectField(generator, JSGeneratorObject::kContextOffset)); SetContext(context); - Node* table_start = BytecodeOperandIdx(1); + TNode<UintPtrT> table_start = BytecodeOperandIdx(1); // TODO(leszeks): table_length is only used for a CSA_ASSERT, we don't // actually need it otherwise. - Node* table_length = BytecodeOperandUImmWord(2); + TNode<UintPtrT> table_length = BytecodeOperandUImmWord(2); // The state must be a Smi. CSA_ASSERT(this, TaggedIsSmi(state)); @@ -3350,14 +3370,15 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) { } // namespace -Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode, +Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name, + Bytecode bytecode, OperandScale operand_scale, int builtin_index, const AssemblerOptions& options) { Zone zone(isolate->allocator(), ZONE_NAME); compiler::CodeAssemblerState state( isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER, - Bytecodes::ToString(bytecode), + debug_name, FLAG_untrusted_code_mitigations ? 
PoisoningMitigationLevel::kPoisonCriticalOnly : PoisoningMitigationLevel::kDontPoison, @@ -3377,7 +3398,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode, #ifdef ENABLE_DISASSEMBLER if (FLAG_trace_ignition_codegen) { StdoutStream os; - code->Disassemble(Bytecodes::ToString(bytecode), os); + code->Disassemble(Bytecodes::ToString(bytecode), os, isolate); os << std::flush; } #endif // ENABLE_DISASSEMBLER diff --git a/chromium/v8/src/interpreter/interpreter-generator.h b/chromium/v8/src/interpreter/interpreter-generator.h index a41e89f250d..263f02ba39e 100644 --- a/chromium/v8/src/interpreter/interpreter-generator.h +++ b/chromium/v8/src/interpreter/interpreter-generator.h @@ -15,7 +15,9 @@ struct AssemblerOptions; namespace interpreter { -extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode, +extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, + const char* debug_name, + Bytecode bytecode, OperandScale operand_scale, int builtin_index, const AssemblerOptions& options); diff --git a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc index a329e7189f4..f5307762f78 100644 --- a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc +++ b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc @@ -21,8 +21,6 @@ namespace internal { namespace interpreter { using compiler::Node; -template <typename T> -using TNode = compiler::TNode<T>; class IntrinsicsGenerator { public: @@ -31,8 +29,9 @@ class IntrinsicsGenerator { zone_(assembler->zone()), assembler_(assembler) {} - Node* InvokeIntrinsic(Node* function_id, Node* context, - const InterpreterAssembler::RegListNodePair& args); + TNode<Object> InvokeIntrinsic( + TNode<Uint32T> function_id, TNode<Context> context, + const InterpreterAssembler::RegListNodePair& args); private: enum InstanceTypeCompareMode { @@ -40,17 +39,20 @@ class IntrinsicsGenerator { kInstanceTypeGreaterThanOrEqual }; - Node* IsInstanceType(Node* input, int type); - Node* CompareInstanceType(Node* map, int type, InstanceTypeCompareMode mode); - Node* IntrinsicAsStubCall(const InterpreterAssembler::RegListNodePair& args, - Node* context, Callable const& callable); - Node* IntrinsicAsBuiltinCall( - const InterpreterAssembler::RegListNodePair& args, Node* context, + TNode<Oddball> IsInstanceType(TNode<Object> input, int type); + TNode<BoolT> CompareInstanceType(TNode<HeapObject> map, int type, + InstanceTypeCompareMode mode); + TNode<Object> IntrinsicAsStubCall( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context, + Callable const& callable); + TNode<Object> IntrinsicAsBuiltinCall( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context, Builtins::Name name); - void AbortIfArgCountMismatch(int expected, compiler::TNode<Word32T> actual); + void AbortIfArgCountMismatch(int expected, TNode<Word32T> actual); -#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \ - Node* name(const InterpreterAssembler::RegListNodePair& args, Node* context); +#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \ + TNode<Object> name(const InterpreterAssembler::RegListNodePair& args, \ + TNode<Context> context); INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER) #undef DECLARE_INTRINSIC_HELPER @@ -65,21 +67,20 @@ class IntrinsicsGenerator { DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator); }; -Node* GenerateInvokeIntrinsic( - InterpreterAssembler* assembler, Node* function_id, Node* context, - const 
InterpreterAssembler::RegListNodePair& args) { +TNode<Object> GenerateInvokeIntrinsic( + InterpreterAssembler* assembler, TNode<Uint32T> function_id, + TNode<Context> context, const InterpreterAssembler::RegListNodePair& args) { IntrinsicsGenerator generator(assembler); return generator.InvokeIntrinsic(function_id, context, args); } #define __ assembler_-> -Node* IntrinsicsGenerator::InvokeIntrinsic( - Node* function_id, Node* context, +TNode<Object> IntrinsicsGenerator::InvokeIntrinsic( + TNode<Uint32T> function_id, TNode<Context> context, const InterpreterAssembler::RegListNodePair& args) { InterpreterAssembler::Label abort(assembler_), end(assembler_); - InterpreterAssembler::Variable result(assembler_, - MachineRepresentation::kTagged); + InterpreterAssembler::TVariable<Object> result(assembler_); #define MAKE_LABEL(name, lower_case, count) \ InterpreterAssembler::Label lower_case(assembler_); @@ -102,9 +103,9 @@ Node* IntrinsicsGenerator::InvokeIntrinsic( if (FLAG_debug_code && expected_arg_count >= 0) { \ AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \ } \ - Node* value = name(args, context); \ + TNode<Object> value = name(args, context); \ if (value) { \ - result.Bind(value); \ + result = value; \ __ Goto(&end); \ } \ } @@ -114,7 +115,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic( __ BIND(&abort); { __ Abort(AbortReason::kUnexpectedFunctionIDForInvokeIntrinsic); - result.Bind(__ UndefinedConstant()); + result = __ UndefinedConstant(); __ Goto(&end); } @@ -122,8 +123,8 @@ Node* IntrinsicsGenerator::InvokeIntrinsic( return result.value(); } -Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type, - InstanceTypeCompareMode mode) { +TNode<BoolT> IntrinsicsGenerator::CompareInstanceType( + TNode<HeapObject> object, int type, InstanceTypeCompareMode mode) { TNode<Uint16T> instance_type = __ LoadInstanceType(object); if (mode == kInstanceTypeEqual) { @@ -134,39 +135,42 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type, } } -Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) { +TNode<Oddball> IntrinsicsGenerator::IsInstanceType(TNode<Object> input, + int type) { TNode<Oddball> result = __ Select<Oddball>( __ TaggedIsSmi(input), [=] { return __ FalseConstant(); }, [=] { return __ SelectBooleanConstant( - CompareInstanceType(input, type, kInstanceTypeEqual)); + CompareInstanceType(__ CAST(input), type, kInstanceTypeEqual)); }); return result; } -Node* IntrinsicsGenerator::IsJSReceiver( - const InterpreterAssembler::RegListNodePair& args, Node* context) { - Node* input = __ LoadRegisterFromRegisterList(args, 0); +TNode<Object> IntrinsicsGenerator::IsJSReceiver( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { + TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0); TNode<Oddball> result = __ Select<Oddball>( __ TaggedIsSmi(input), [=] { return __ FalseConstant(); }, - [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); }); + [=] { + return __ SelectBooleanConstant(__ IsJSReceiver(__ CAST(input))); + }); return result; } -Node* IntrinsicsGenerator::IsArray( - const InterpreterAssembler::RegListNodePair& args, Node* context) { - Node* input = __ LoadRegisterFromRegisterList(args, 0); +TNode<Object> IntrinsicsGenerator::IsArray( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { + TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0); return IsInstanceType(input, JS_ARRAY_TYPE); } -Node* IntrinsicsGenerator::IsSmi( - const 
InterpreterAssembler::RegListNodePair& args, Node* context) { - Node* input = __ LoadRegisterFromRegisterList(args, 0); +TNode<Object> IntrinsicsGenerator::IsSmi( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { + TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0); return __ SelectBooleanConstant(__ TaggedIsSmi(input)); } -Node* IntrinsicsGenerator::IntrinsicAsStubCall( - const InterpreterAssembler::RegListNodePair& args, Node* context, +TNode<Object> IntrinsicsGenerator::IntrinsicAsStubCall( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context, Callable const& callable) { int param_count = callable.descriptor().GetParameterCount(); int input_count = param_count + 2; // +2 for target and context @@ -177,59 +181,60 @@ Node* IntrinsicsGenerator::IntrinsicAsStubCall( stub_args[index++] = __ LoadRegisterFromRegisterList(args, i); } stub_args[index++] = context; - return __ CallStubN(StubCallMode::kCallCodeObject, callable.descriptor(), 1, - input_count, stub_args); + return __ CAST(__ CallStubN(StubCallMode::kCallCodeObject, + callable.descriptor(), 1, input_count, + stub_args)); } -Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall( - const InterpreterAssembler::RegListNodePair& args, Node* context, +TNode<Object> IntrinsicsGenerator::IntrinsicAsBuiltinCall( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context, Builtins::Name name) { Callable callable = Builtins::CallableFor(isolate_, name); return IntrinsicAsStubCall(args, context, callable); } -Node* IntrinsicsGenerator::CopyDataProperties( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::CopyDataProperties( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsStubCall( args, context, Builtins::CallableFor(isolate(), Builtins::kCopyDataProperties)); } -Node* IntrinsicsGenerator::CreateIterResultObject( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::CreateIterResultObject( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsStubCall( args, context, Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject)); } -Node* IntrinsicsGenerator::HasProperty( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::HasProperty( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsStubCall( args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty)); } -Node* IntrinsicsGenerator::ToStringRT( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::ToStringRT( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsStubCall( args, context, Builtins::CallableFor(isolate(), Builtins::kToString)); } -Node* IntrinsicsGenerator::ToLength( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::ToLength( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsStubCall( args, context, Builtins::CallableFor(isolate(), Builtins::kToLength)); } -Node* IntrinsicsGenerator::ToObject( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::ToObject( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return 
IntrinsicAsStubCall( args, context, Builtins::CallableFor(isolate(), Builtins::kToObject)); } -Node* IntrinsicsGenerator::Call( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::Call( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { // First argument register contains the function target. - Node* function = __ LoadRegisterFromRegisterList(args, 0); + TNode<Object> function = __ LoadRegisterFromRegisterList(args, 0); // The arguments for the target function are from the second runtime call // argument. @@ -249,26 +254,25 @@ Node* IntrinsicsGenerator::Call( __ CallJSAndDispatch(function, context, target_args, ConvertReceiverMode::kAny); - return nullptr; // We never return from the CallJSAndDispatch above. + return TNode<Object>(); // We never return from the CallJSAndDispatch above. } -Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { InterpreterAssembler::Label not_receiver( assembler_, InterpreterAssembler::Label::kDeferred); InterpreterAssembler::Label done(assembler_); - InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); + InterpreterAssembler::TVariable<Object> return_value(assembler_); - Node* sync_iterator = __ LoadRegisterFromRegisterList(args, 0); + TNode<Object> sync_iterator = __ LoadRegisterFromRegisterList(args, 0); __ GotoIf(__ TaggedIsSmi(sync_iterator), ¬_receiver); - __ GotoIfNot(__ IsJSReceiver(sync_iterator), ¬_receiver); + __ GotoIfNot(__ IsJSReceiver(__ CAST(sync_iterator)), ¬_receiver); TNode<Object> const next = __ GetProperty(context, sync_iterator, factory()->next_string()); - TNode<Context> const native_context = __ LoadNativeContext(context); + TNode<NativeContext> const native_context = __ LoadNativeContext(context); TNode<Map> const map = __ CAST(__ LoadContextElement( native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX)); TNode<JSObject> const iterator = __ AllocateJSObjectFromMap(map); @@ -278,13 +282,13 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator( __ StoreObjectFieldNoWriteBarrier(iterator, JSAsyncFromSyncIterator::kNextOffset, next); - return_value.Bind(iterator); + return_value = iterator; __ Goto(&done); __ BIND(¬_receiver); { - return_value.Bind( - __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context)); + return_value = + __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context); // Unreachable due to the Throw in runtime call. 
__ Goto(&done); @@ -294,104 +298,105 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator( return return_value.value(); } -Node* IntrinsicsGenerator::CreateJSGeneratorObject( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::CreateJSGeneratorObject( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kCreateGeneratorObject); } -Node* IntrinsicsGenerator::GeneratorGetResumeMode( - const InterpreterAssembler::RegListNodePair& args, Node* context) { - Node* generator = __ LoadRegisterFromRegisterList(args, 0); +TNode<Object> IntrinsicsGenerator::GeneratorGetResumeMode( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { + TNode<JSGeneratorObject> generator = + __ CAST(__ LoadRegisterFromRegisterList(args, 0)); TNode<Object> const value = __ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset); return value; } -Node* IntrinsicsGenerator::GeneratorClose( - const InterpreterAssembler::RegListNodePair& args, Node* context) { - Node* generator = __ LoadRegisterFromRegisterList(args, 0); +TNode<Object> IntrinsicsGenerator::GeneratorClose( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { + TNode<JSGeneratorObject> generator = + __ CAST(__ LoadRegisterFromRegisterList(args, 0)); __ StoreObjectFieldNoWriteBarrier( generator, JSGeneratorObject::kContinuationOffset, __ SmiConstant(JSGeneratorObject::kGeneratorClosed)); return __ UndefinedConstant(); } -Node* IntrinsicsGenerator::GetImportMetaObject( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::GetImportMetaObject( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { TNode<Context> const module_context = __ LoadModuleContext(context); TNode<HeapObject> const module = __ CAST(__ LoadContextElement(module_context, Context::EXTENSION_INDEX)); TNode<Object> const import_meta = __ LoadObjectField(module, SourceTextModule::kImportMetaOffset); - InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - return_value.Bind(import_meta); + InterpreterAssembler::TVariable<Object> return_value(assembler_); + return_value = import_meta; InterpreterAssembler::Label end(assembler_); __ GotoIfNot(__ IsTheHole(import_meta), &end); - return_value.Bind(__ CallRuntime(Runtime::kGetImportMetaObject, context)); + return_value = __ CallRuntime(Runtime::kGetImportMetaObject, context); __ Goto(&end); __ BIND(&end); return return_value.value(); } -Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitCaught( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionAwaitCaught); } -Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitUncaught( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionAwaitUncaught); } -Node* IntrinsicsGenerator::AsyncFunctionEnter( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncFunctionEnter( + const 
InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter); } -Node* IntrinsicsGenerator::AsyncFunctionReject( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncFunctionReject( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject); } -Node* IntrinsicsGenerator::AsyncFunctionResolve( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncFunctionResolve( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve); } -Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitCaught( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorAwaitCaught); } -Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitUncaught( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorAwaitUncaught); } -Node* IntrinsicsGenerator::AsyncGeneratorReject( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncGeneratorReject( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject); } -Node* IntrinsicsGenerator::AsyncGeneratorResolve( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncGeneratorResolve( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorResolve); } -Node* IntrinsicsGenerator::AsyncGeneratorYield( - const InterpreterAssembler::RegListNodePair& args, Node* context) { +TNode<Object> IntrinsicsGenerator::AsyncGeneratorYield( + const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) { return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield); } diff --git a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.h b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.h index fd4e167ed0f..f0c22e7a592 100644 --- a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.h +++ b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.h @@ -16,9 +16,9 @@ class Node; namespace interpreter { -extern compiler::Node* GenerateInvokeIntrinsic( - InterpreterAssembler* assembler, compiler::Node* function_id, - compiler::Node* context, const InterpreterAssembler::RegListNodePair& args); +extern TNode<Object> GenerateInvokeIntrinsic( + InterpreterAssembler* assembler, TNode<Uint32T> function_id, + TNode<Context> context, const InterpreterAssembler::RegListNodePair& args); } // namespace interpreter } // namespace internal diff --git a/chromium/v8/src/interpreter/interpreter.cc b/chromium/v8/src/interpreter/interpreter.cc index 121971d3051..6c730d5a597 100644 --- a/chromium/v8/src/interpreter/interpreter.cc +++ 
b/chromium/v8/src/interpreter/interpreter.cc @@ -269,7 +269,7 @@ std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator, std::vector<FunctionLiteral*>* eager_inner_literals) { - return base::make_unique<InterpreterCompilationJob>( + return std::make_unique<InterpreterCompilationJob>( parse_info, literal, allocator, eager_inner_literals); } @@ -277,8 +277,8 @@ std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewSourcePositionCollectionJob( ParseInfo* parse_info, FunctionLiteral* literal, Handle<BytecodeArray> existing_bytecode, AccountingAllocator* allocator) { - auto job = base::make_unique<InterpreterCompilationJob>(parse_info, literal, - allocator, nullptr); + auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal, + allocator, nullptr); job->compilation_info()->SetBytecodeArray(existing_bytecode); return job; } diff --git a/chromium/v8/src/json/json-parser.cc b/chromium/v8/src/json/json-parser.cc index e49775704db..3a790c210dc 100644 --- a/chromium/v8/src/json/json-parser.cc +++ b/chromium/v8/src/json/json-parser.cc @@ -394,7 +394,8 @@ Handle<Map> ParentOfDescriptorOwner(Isolate* isolate, Handle<Map> maybe_root, DCHECK_EQ(0, maybe_root->NumberOfOwnDescriptors()); return maybe_root; } - return handle(source->FindFieldOwner(isolate, descriptor - 1), isolate); + return handle(source->FindFieldOwner(isolate, InternalIndex(descriptor - 1)), + isolate); } } // namespace @@ -461,10 +462,11 @@ Handle<Object> JsonParser<Char>::BuildJsonObject( if (property.string.is_index()) continue; Handle<String> expected; Handle<Map> target; + InternalIndex descriptor_index(descriptor); if (descriptor < feedback_descriptors) { - expected = handle( - String::cast(feedback->instance_descriptors().GetKey(descriptor)), - isolate_); + expected = handle(String::cast(feedback->instance_descriptors().GetKey( + descriptor_index)), + isolate_); } else { DisallowHeapAllocation no_gc; TransitionsAccessor transitions(isolate(), *map, &no_gc); @@ -495,7 +497,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject( Handle<Object> value = property.value; PropertyDetails details = - target->instance_descriptors().GetDetails(descriptor); + target->instance_descriptors().GetDetails(descriptor_index); Representation expected_representation = details.representation(); if (!value->FitsRepresentation(expected_representation)) { @@ -507,23 +509,24 @@ Handle<Object> JsonParser<Char>::BuildJsonObject( } Handle<FieldType> value_type = value->OptimalType(isolate(), representation); - Map::GeneralizeField(isolate(), target, descriptor, details.constness(), - representation, value_type); + Map::GeneralizeField(isolate(), target, descriptor_index, + details.constness(), representation, value_type); } else if (expected_representation.IsHeapObject() && !target->instance_descriptors() - .GetFieldType(descriptor) + .GetFieldType(descriptor_index) .NowContains(value)) { Handle<FieldType> value_type = value->OptimalType(isolate(), expected_representation); - Map::GeneralizeField(isolate(), target, descriptor, details.constness(), - expected_representation, value_type); + Map::GeneralizeField(isolate(), target, descriptor_index, + details.constness(), expected_representation, + value_type); } else if (!FLAG_unbox_double_fields && expected_representation.IsDouble() && value->IsSmi()) { new_mutable_double++; } DCHECK(target->instance_descriptors() - .GetFieldType(descriptor) + .GetFieldType(descriptor_index) 
.NowContains(value)); map = target; descriptor++; @@ -560,18 +563,21 @@ Handle<Object> JsonParser<Char>::BuildJsonObject( : reinterpret_cast<Address>( mutable_double_buffer->GetDataStartAddress()); Address filler_address = mutable_double_address; - if (IsAligned(mutable_double_address, kDoubleAlignment)) { - mutable_double_address += kTaggedSize; - } else { - filler_address += HeapNumber::kSize; + if (kTaggedSize != kDoubleSize) { + if (IsAligned(mutable_double_address, kDoubleAlignment)) { + mutable_double_address += kTaggedSize; + } else { + filler_address += HeapNumber::kSize; + } } for (int j = 0; j < i; j++) { const JsonProperty& property = property_stack[start + j]; if (property.string.is_index()) continue; + InternalIndex descriptor_index(descriptor); PropertyDetails details = - map->instance_descriptors().GetDetails(descriptor); + map->instance_descriptors().GetDetails(descriptor_index); Object value = *property.value; - FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor); + FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor_index); descriptor++; if (details.representation().IsDouble()) { @@ -619,9 +625,13 @@ Handle<Object> JsonParser<Char>::BuildJsonObject( #ifdef DEBUG Address end = reinterpret_cast<Address>(mutable_double_buffer->GetDataEndAddress()); - DCHECK_EQ(Min(filler_address, mutable_double_address), end); - DCHECK_GE(filler_address, end); - DCHECK_GE(mutable_double_address, end); + if (kTaggedSize != kDoubleSize) { + DCHECK_EQ(Min(filler_address, mutable_double_address), end); + DCHECK_GE(filler_address, end); + DCHECK_GE(mutable_double_address, end); + } else { + DCHECK_EQ(mutable_double_address, end); + } #endif mutable_double_buffer->set_length(0); } diff --git a/chromium/v8/src/json/json-stringifier.cc b/chromium/v8/src/json/json-stringifier.cc index 684bcdcf545..47d6a0ddad2 100644 --- a/chromium/v8/src/json/json-stringifier.cc +++ b/chromium/v8/src/json/json-stringifier.cc @@ -771,7 +771,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject( builder_.AppendCharacter('{'); Indent(); bool comma = false; - for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { Handle<Name> name(map->instance_descriptors().GetKey(i), isolate_); // TODO(rossberg): Should this throw? if (!name->IsString()) continue; diff --git a/chromium/v8/src/libplatform/default-foreground-task-runner.cc b/chromium/v8/src/libplatform/default-foreground-task-runner.cc index 0a31024d9a5..23942043481 100644 --- a/chromium/v8/src/libplatform/default-foreground-task-runner.cc +++ b/chromium/v8/src/libplatform/default-foreground-task-runner.cc @@ -60,6 +60,16 @@ bool DefaultForegroundTaskRunner::IdleTasksEnabled() { return idle_task_support_ == IdleTaskSupport::kEnabled; } +void DefaultForegroundTaskRunner::PostNonNestableTask( + std::unique_ptr<Task> task) { + // Default platform does not nest tasks. 
+ PostTask(std::move(task)); +} + +bool DefaultForegroundTaskRunner::NonNestableTasksEnabled() const { + return true; +} + std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue( MessageLoopBehavior wait_for_work) { base::MutexGuard guard(&lock_); diff --git a/chromium/v8/src/libplatform/default-foreground-task-runner.h b/chromium/v8/src/libplatform/default-foreground-task-runner.h index 78c0f6b6600..9ff30e39405 100644 --- a/chromium/v8/src/libplatform/default-foreground-task-runner.h +++ b/chromium/v8/src/libplatform/default-foreground-task-runner.h @@ -5,6 +5,7 @@ #ifndef V8_LIBPLATFORM_DEFAULT_FOREGROUND_TASK_RUNNER_H_ #define V8_LIBPLATFORM_DEFAULT_FOREGROUND_TASK_RUNNER_H_ +#include <memory> #include <queue> #include "include/libplatform/libplatform.h" @@ -35,14 +36,15 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner // v8::TaskRunner implementation. void PostTask(std::unique_ptr<Task> task) override; - void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds) override; void PostIdleTask(std::unique_ptr<IdleTask> task) override; - bool IdleTasksEnabled() override; + void PostNonNestableTask(std::unique_ptr<Task> task) override; + bool NonNestableTasksEnabled() const override; + private: // The same as PostTask, but the lock is already held by the caller. The // {guard} parameter should make sure that the caller is holding the lock. diff --git a/chromium/v8/src/libplatform/default-worker-threads-task-runner.cc b/chromium/v8/src/libplatform/default-worker-threads-task-runner.cc index 213e98801a0..8cae955fd16 100644 --- a/chromium/v8/src/libplatform/default-worker-threads-task-runner.cc +++ b/chromium/v8/src/libplatform/default-worker-threads-task-runner.cc @@ -15,7 +15,7 @@ DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner( time_function_(time_function), thread_pool_size_(thread_pool_size) { for (uint32_t i = 0; i < thread_pool_size; ++i) { - thread_pool_.push_back(base::make_unique<WorkerThread>(this)); + thread_pool_.push_back(std::make_unique<WorkerThread>(this)); } } diff --git a/chromium/v8/src/libplatform/default-worker-threads-task-runner.h b/chromium/v8/src/libplatform/default-worker-threads-task-runner.h index 31b6c0e8174..d761a36e1b7 100644 --- a/chromium/v8/src/libplatform/default-worker-threads-task-runner.h +++ b/chromium/v8/src/libplatform/default-worker-threads-task-runner.h @@ -5,6 +5,7 @@ #ifndef V8_LIBPLATFORM_DEFAULT_WORKER_THREADS_TASK_RUNNER_H_ #define V8_LIBPLATFORM_DEFAULT_WORKER_THREADS_TASK_RUNNER_H_ +#include <memory> #include <vector> #include "include/libplatform/libplatform-export.h" diff --git a/chromium/v8/src/libplatform/delayed-task-queue.h b/chromium/v8/src/libplatform/delayed-task-queue.h index 675e9ecb8a4..9fec948b86a 100644 --- a/chromium/v8/src/libplatform/delayed-task-queue.h +++ b/chromium/v8/src/libplatform/delayed-task-queue.h @@ -6,6 +6,7 @@ #define V8_LIBPLATFORM_DELAYED_TASK_QUEUE_H_ #include <map> +#include <memory> #include <queue> #include "include/libplatform/libplatform-export.h" diff --git a/chromium/v8/src/libplatform/task-queue.h b/chromium/v8/src/libplatform/task-queue.h index f8c76498f2b..fbad3a8adf9 100644 --- a/chromium/v8/src/libplatform/task-queue.h +++ b/chromium/v8/src/libplatform/task-queue.h @@ -5,6 +5,7 @@ #ifndef V8_LIBPLATFORM_TASK_QUEUE_H_ #define V8_LIBPLATFORM_TASK_QUEUE_H_ +#include <memory> #include <queue> #include "include/libplatform/libplatform-export.h" diff --git a/chromium/v8/src/libplatform/tracing/DEPS b/chromium/v8/src/libplatform/tracing/DEPS index 
582200e094f..7a45bba55a1 100644
--- a/chromium/v8/src/libplatform/tracing/DEPS
+++ b/chromium/v8/src/libplatform/tracing/DEPS
@@ -1,4 +1,4 @@
 include_rules = [
   "+perfetto",
-  "+third_party/perfetto/include/perfetto/base",
-]
\ No newline at end of file + "+protos/perfetto", +] diff --git a/chromium/v8/src/libplatform/tracing/json-trace-event-listener.cc b/chromium/v8/src/libplatform/tracing/json-trace-event-listener.cc index 94b74ef255a..60cc9a98a8b 100644 --- a/chromium/v8/src/libplatform/tracing/json-trace-event-listener.cc +++ b/chromium/v8/src/libplatform/tracing/json-trace-event-listener.cc @@ -7,9 +7,9 @@ #include <cmath> #include "base/trace_event/common/trace_event_common.h" -#include "perfetto/trace/chrome/chrome_trace_packet.pb.h" -#include "perfetto/trace/trace.pb.h" #include "perfetto/tracing.h" +#include "protos/perfetto/trace/chrome/chrome_trace_packet.pb.h" +#include "protos/perfetto/trace/trace.pb.h" #include "src/base/logging.h" #include "src/base/macros.h" diff --git a/chromium/v8/src/libplatform/tracing/trace-event-listener.cc b/chromium/v8/src/libplatform/tracing/trace-event-listener.cc index 8224221228b..2910d8fab23 100644 --- a/chromium/v8/src/libplatform/tracing/trace-event-listener.cc +++ b/chromium/v8/src/libplatform/tracing/trace-event-listener.cc @@ -4,7 +4,7 @@ #include "src/libplatform/tracing/trace-event-listener.h" -#include "perfetto/trace/trace.pb.h" +#include "protos/perfetto/trace/trace.pb.h" #include "src/base/logging.h" namespace v8 { diff --git a/chromium/v8/src/libplatform/tracing/trace-object.cc b/chromium/v8/src/libplatform/tracing/trace-object.cc index 6b6e0cf404d..d16104df68a 100644 --- a/chromium/v8/src/libplatform/tracing/trace-object.cc +++ b/chromium/v8/src/libplatform/tracing/trace-object.cc @@ -23,12 +23,11 @@ V8_INLINE static size_t GetAllocLength(const char* str) { // location, and then advances |*buffer| by the amount written. V8_INLINE static void CopyTraceObjectParameter(char** buffer, const char** member) { - if (*member) { - size_t length = strlen(*member) + 1; - strncpy(*buffer, *member, length); - *member = *buffer; - *buffer += length; - } + if (*member == nullptr) return; + size_t length = strlen(*member) + 1; + memcpy(*buffer, *member, length); + *member = *buffer; + *buffer += length; } void TraceObject::Initialize( diff --git a/chromium/v8/src/libplatform/tracing/tracing-controller.cc b/chromium/v8/src/libplatform/tracing/tracing-controller.cc index 3fb34366c2f..d0972f93229 100644 --- a/chromium/v8/src/libplatform/tracing/tracing-controller.cc +++ b/chromium/v8/src/libplatform/tracing/tracing-controller.cc @@ -14,9 +14,9 @@ #ifdef V8_USE_PERFETTO #include "base/trace_event/common/trace_event_common.h" -#include "perfetto/trace/chrome/chrome_trace_event.pbzero.h" -#include "perfetto/trace/trace_packet.pbzero.h" #include "perfetto/tracing.h" +#include "protos/perfetto/trace/chrome/chrome_trace_event.pbzero.h" +#include "protos/perfetto/trace/trace_packet.pbzero.h" #include "src/base/platform/platform.h" #include "src/base/platform/semaphore.h" #include "src/libplatform/tracing/json-trace-event-listener.h" @@ -280,7 +280,7 @@ void TracingController::StartTracing(TraceConfig* trace_config) { #ifdef V8_USE_PERFETTO DCHECK_NOT_NULL(output_stream_); DCHECK(output_stream_->good()); - json_listener_ = base::make_unique<JSONTraceEventListener>(output_stream_); + json_listener_ = std::make_unique<JSONTraceEventListener>(output_stream_); // TODO(petermarshall): Set other the params for the config. 
::perfetto::TraceConfig perfetto_trace_config; diff --git a/chromium/v8/src/libsampler/sampler.cc b/chromium/v8/src/libsampler/sampler.cc index e445dfc65a7..d9f59dff74a 100644 --- a/chromium/v8/src/libsampler/sampler.cc +++ b/chromium/v8/src/libsampler/sampler.cc @@ -526,7 +526,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) { #endif // USE_SIGNALS Sampler::Sampler(Isolate* isolate) - : isolate_(isolate), data_(base::make_unique<PlatformData>()) {} + : isolate_(isolate), data_(std::make_unique<PlatformData>()) {} Sampler::~Sampler() { DCHECK(!IsActive()); diff --git a/chromium/v8/src/libsampler/sampler.h b/chromium/v8/src/libsampler/sampler.h index 997b1276860..c606add82aa 100644 --- a/chromium/v8/src/libsampler/sampler.h +++ b/chromium/v8/src/libsampler/sampler.h @@ -6,6 +6,7 @@ #define V8_LIBSAMPLER_SAMPLER_H_ #include <atomic> +#include <memory> #include <unordered_map> #include "include/v8.h" diff --git a/chromium/v8/src/logging/counters-definitions.h b/chromium/v8/src/logging/counters-definitions.h index 8c808276faa..39317121524 100644 --- a/chromium/v8/src/logging/counters-definitions.h +++ b/chromium/v8/src/logging/counters-definitions.h @@ -221,6 +221,8 @@ namespace internal { MICROSECOND) \ HT(compile_script_no_cache_because_cache_too_cold, \ V8.CompileScriptMicroSeconds.NoCache.CacheTooCold, 1000000, MICROSECOND) \ + HT(compile_script_streaming_finalization, \ + V8.CompileScriptMicroSeconds.StreamingFinalization, 1000000, MICROSECOND) \ HT(compile_script_on_background, \ V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \ HT(compile_function_on_background, \ diff --git a/chromium/v8/src/logging/counters.cc b/chromium/v8/src/logging/counters.cc index ce2b1fe9c07..a6a56fac833 100644 --- a/chromium/v8/src/logging/counters.cc +++ b/chromium/v8/src/logging/counters.cc @@ -551,7 +551,7 @@ base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() { RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() { DCHECK(TracingFlags::is_runtime_stats_enabled()); std::unique_ptr<RuntimeCallStats> new_table = - base::make_unique<RuntimeCallStats>(); + std::make_unique<RuntimeCallStats>(); RuntimeCallStats* result = new_table.get(); base::MutexGuard lock(&mutex_); diff --git a/chromium/v8/src/logging/counters.h b/chromium/v8/src/logging/counters.h index 4466e0a53bc..99a3c3cf9b1 100644 --- a/chromium/v8/src/logging/counters.h +++ b/chromium/v8/src/logging/counters.h @@ -5,6 +5,8 @@ #ifndef V8_LOGGING_COUNTERS_H_ #define V8_LOGGING_COUNTERS_H_ +#include <memory> + #include "include/v8.h" #include "src/base/atomic-utils.h" #include "src/base/optional.h" @@ -1017,16 +1019,13 @@ class RuntimeCallTimer final { V(LoadIC_LoadNormalDH) \ V(LoadIC_LoadNormalFromPrototypeDH) \ V(LoadIC_NonReceiver) \ - V(LoadIC_Premonomorphic) \ V(LoadIC_SlowStub) \ V(LoadIC_StringLength) \ V(LoadIC_StringWrapperLength) \ V(StoreGlobalIC_SlowStub) \ V(StoreGlobalIC_StoreScriptContextField) \ - V(StoreGlobalIC_Premonomorphic) \ V(StoreIC_HandlerCacheHit_Accessor) \ V(StoreIC_NonReceiver) \ - V(StoreIC_Premonomorphic) \ V(StoreIC_SlowStub) \ V(StoreIC_StoreAccessorDH) \ V(StoreIC_StoreAccessorOnPrototypeDH) \ diff --git a/chromium/v8/src/logging/log-utils.cc b/chromium/v8/src/logging/log-utils.cc index 39808824029..e5c0b027faa 100644 --- a/chromium/v8/src/logging/log-utils.cc +++ b/chromium/v8/src/logging/log-utils.cc @@ -75,8 +75,7 @@ FILE* Log::Close() { } output_handle_ = nullptr; - DeleteArray(format_buffer_); - format_buffer_ = nullptr; + 
format_buffer_.reset(); is_stopped_ = false; return result; @@ -84,7 +83,7 @@ FILE* Log::Close() { Log::MessageBuilder::MessageBuilder(Log* log) : log_(log), lock_guard_(&log_->mutex_) { - DCHECK_NOT_NULL(log_->format_buffer_); + DCHECK_NOT_NULL(log_->format_buffer_.get()); } void Log::MessageBuilder::AppendString(String str, @@ -185,7 +184,7 @@ void Log::MessageBuilder::AppendSymbolNameDetails(String str, int Log::MessageBuilder::FormatStringIntoBuffer(const char* format, va_list args) { - Vector<char> buf(log_->format_buffer_, Log::kMessageBufferSize); + Vector<char> buf(log_->format_buffer_.get(), Log::kMessageBufferSize); int length = v8::internal::VSNPrintF(buf, format, args); // |length| is -1 if output was truncated. if (length == -1) length = Log::kMessageBufferSize; diff --git a/chromium/v8/src/logging/log-utils.h b/chromium/v8/src/logging/log-utils.h index bc5b09d4382..e89a449f3b4 100644 --- a/chromium/v8/src/logging/log-utils.h +++ b/chromium/v8/src/logging/log-utils.h @@ -125,7 +125,7 @@ class Log { // Buffer used for formatting log messages. This is a singleton buffer and // mutex_ should be acquired before using it. - char* format_buffer_; + std::unique_ptr<char[]> format_buffer_; Logger* logger_; diff --git a/chromium/v8/src/logging/log.cc b/chromium/v8/src/logging/log.cc index 9b86a16031e..2befcd330ab 100644 --- a/chromium/v8/src/logging/log.cc +++ b/chromium/v8/src/logging/log.cc @@ -180,9 +180,9 @@ class CodeEventLogger::NameBuffer { }; CodeEventLogger::CodeEventLogger(Isolate* isolate) - : isolate_(isolate), name_buffer_(new NameBuffer) {} + : isolate_(isolate), name_buffer_(std::make_unique<NameBuffer>()) {} -CodeEventLogger::~CodeEventLogger() { delete name_buffer_; } +CodeEventLogger::~CodeEventLogger() = default; void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, AbstractCode code, const char* comment) { @@ -477,6 +477,23 @@ void ExternalCodeEventListener::RegExpCodeCreateEvent(AbstractCode code, code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event)); } +void ExternalCodeEventListener::CodeMoveEvent(AbstractCode from, + AbstractCode to) { + CodeEvent code_event; + code_event.previous_code_start_address = + static_cast<uintptr_t>(from.InstructionStart()); + code_event.code_start_address = static_cast<uintptr_t>(to.InstructionStart()); + code_event.code_size = static_cast<size_t>(to.InstructionSize()); + code_event.function_name = isolate_->factory()->empty_string(); + code_event.script_name = isolate_->factory()->empty_string(); + code_event.script_line = 0; + code_event.script_column = 0; + code_event.code_type = v8::CodeEventType::kRelocationType; + code_event.comment = ""; + + code_event_handler_->Handle(reinterpret_cast<v8::CodeEvent*>(&code_event)); +} + // Low-level logging support. 
class LowLevelLogger : public CodeEventLogger { public: @@ -816,7 +833,7 @@ class Ticker : public sampler::Sampler { Ticker(Isolate* isolate, int interval_microseconds) : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)), sampling_thread_( - base::make_unique<SamplingThread>(this, interval_microseconds)) {} + std::make_unique<SamplingThread>(this, interval_microseconds)) {} ~Ticker() override { if (IsActive()) Stop(); @@ -910,13 +927,11 @@ void Profiler::Run() { Logger::Logger(Isolate* isolate) : isolate_(isolate), - log_events_(nullptr), is_logging_(false), - log_(nullptr), is_initialized_(false), existing_code_logger_(isolate) {} -Logger::~Logger() { delete log_; } +Logger::~Logger() = default; const LogSeparator Logger::kNext = LogSeparator::kSeparator; @@ -931,7 +946,7 @@ void Logger::RemoveCodeEventListener(CodeEventListener* listener) { void Logger::ProfilerBeginEvent() { if (!log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "profiler" << kNext << "begin" << kNext << FLAG_prof_sampling_interval; msg.WriteToLogFile(); } @@ -942,7 +957,7 @@ void Logger::StringEvent(const char* name, const char* value) { void Logger::UncheckedStringEvent(const char* name, const char* value) { if (!log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << name << kNext << value; msg.WriteToLogFile(); } @@ -953,7 +968,7 @@ void Logger::IntPtrTEvent(const char* name, intptr_t value) { void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) { if (!log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << name << kNext; msg.AppendFormatString("%" V8PRIdPTR, value); msg.WriteToLogFile(); @@ -961,14 +976,14 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) { void Logger::HandleEvent(const char* name, Address* location) { if (!log_->IsEnabled() || !FLAG_log_handles) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << name << kNext << reinterpret_cast<void*>(location); msg.WriteToLogFile(); } void Logger::ApiSecurityCheck() { if (!log_->IsEnabled() || !FLAG_log_api) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "api" << kNext << "check-security"; msg.WriteToLogFile(); } @@ -977,7 +992,7 @@ void Logger::SharedLibraryEvent(const std::string& library_path, uintptr_t start, uintptr_t end, intptr_t aslr_slide) { if (!log_->IsEnabled() || !FLAG_prof_cpp) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "shared-library" << kNext << library_path.c_str() << kNext << reinterpret_cast<void*>(start) << kNext << reinterpret_cast<void*>(end) << kNext << aslr_slide; @@ -988,7 +1003,7 @@ void Logger::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc, int fp_to_sp_delta) { if (!log_->IsEnabled()) return; Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc); - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "code-deopt" << kNext << timer_.Elapsed().InMicroseconds() << kNext << code.CodeSize() << kNext << reinterpret_cast<void*>(code.InstructionStart()); @@ -1014,14 +1029,14 @@ void Logger::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc, void Logger::CurrentTimeEvent() { if (!log_->IsEnabled()) return; DCHECK(FLAG_log_internal_timer_events); - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "current-time" << kNext << 
timer_.Elapsed().InMicroseconds(); msg.WriteToLogFile(); } void Logger::TimerEvent(Logger::StartEnd se, const char* name) { if (!log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); switch (se) { case START: msg << "timer-event-start"; @@ -1053,7 +1068,7 @@ void Logger::ApiNamedPropertyAccess(const char* tag, JSObject holder, Object property_name) { DCHECK(property_name.IsName()); if (!log_->IsEnabled() || !FLAG_log_api) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "api" << kNext << tag << kNext << holder.class_name() << kNext << Name::cast(property_name); msg.WriteToLogFile(); @@ -1062,7 +1077,7 @@ void Logger::ApiNamedPropertyAccess(const char* tag, JSObject holder, void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder, uint32_t index) { if (!log_->IsEnabled() || !FLAG_log_api) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "api" << kNext << tag << kNext << holder.class_name() << kNext << index; msg.WriteToLogFile(); @@ -1070,21 +1085,21 @@ void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder, void Logger::ApiObjectAccess(const char* tag, JSObject object) { if (!log_->IsEnabled() || !FLAG_log_api) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "api" << kNext << tag << kNext << object.class_name(); msg.WriteToLogFile(); } void Logger::ApiEntryCall(const char* name) { if (!log_->IsEnabled() || !FLAG_log_api) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "api" << kNext << name; msg.WriteToLogFile(); } void Logger::NewEvent(const char* name, void* object, size_t size) { if (!log_->IsEnabled() || !FLAG_log) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "new" << kNext << name << kNext << object << kNext << static_cast<unsigned int>(size); msg.WriteToLogFile(); @@ -1092,7 +1107,7 @@ void Logger::NewEvent(const char* name, void* object, size_t size) { void Logger::DeleteEvent(const char* name, void* object) { if (!log_->IsEnabled() || !FLAG_log) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "delete" << kNext << name << kNext << object; msg.WriteToLogFile(); } @@ -1100,7 +1115,7 @@ void Logger::DeleteEvent(const char* name, void* object) { void Logger::CallbackEventInternal(const char* prefix, Name name, Address entry_point) { if (!FLAG_log_code || !log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT] << kNext << kLogEventsNames[CodeEventListener::CALLBACK_TAG] << kNext << -2 << kNext << timer_.Elapsed().InMicroseconds() << kNext @@ -1149,7 +1164,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, AbstractCode code, const char* comment) { if (!is_listening_to_code_events()) return; if (!FLAG_log_code || !log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendCodeCreateHeader(msg, tag, code, &timer_); msg << comment; msg.WriteToLogFile(); @@ -1159,7 +1174,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, AbstractCode code, Name name) { if (!is_listening_to_code_events()) return; if (!FLAG_log_code || !log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendCodeCreateHeader(msg, tag, code, &timer_); msg << name; 
msg.WriteToLogFile(); @@ -1175,7 +1190,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, return; } - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendCodeCreateHeader(msg, tag, code, &timer_); msg << name << kNext << reinterpret_cast<void*>(shared.address()) << kNext << ComputeMarker(shared, code); @@ -1186,7 +1201,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, const wasm::WasmCode* code, wasm::WasmName name) { if (!is_listening_to_code_events()) return; if (!FLAG_log_code || !log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendCodeCreateHeader(msg, tag, AbstractCode::Kind::WASM_FUNCTION, code->instructions().begin(), code->instructions().length(), &timer_); @@ -1215,7 +1230,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, if (!is_listening_to_code_events()) return; if (!FLAG_log_code || !log_->IsEnabled()) return; { - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendCodeCreateHeader(msg, tag, code, &timer_); msg << shared.DebugName() << " " << source << ":" << line << ":" << column << kNext << reinterpret_cast<void*>(shared.address()) << kNext @@ -1250,7 +1265,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, // <function-id> is an index into the <fns> function table // <fns> is the function table encoded as a sequence of strings // S<shared-function-info-address> - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "code-source-info" << kNext << reinterpret_cast<void*>(code.InstructionStart()) << kNext << script.id() << kNext << shared.StartPosition() << kNext @@ -1307,7 +1322,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, void Logger::CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) { if (!is_listening_to_code_events()) return; if (!FLAG_log_code || !log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT] << kNext << shared.DebugName() << kNext << GetBailoutReason(shared.disable_optimization_reason()); @@ -1323,7 +1338,7 @@ void Logger::CodeMovingGCEvent() { void Logger::RegExpCodeCreateEvent(AbstractCode code, String source) { if (!is_listening_to_code_events()) return; if (!FLAG_log_code || !log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendCodeCreateHeader(msg, CodeEventListener::REG_EXP_TAG, code, &timer_); msg << source; msg.WriteToLogFile(); @@ -1373,7 +1388,7 @@ void Logger::CodeLinePosInfoRecordEvent( void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) { if (code_name == nullptr) return; // Not a code object. 
- Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << kLogEventsNames[CodeEventListener::SNAPSHOT_CODE_NAME_EVENT] << kNext << pos << kNext << code_name; msg.WriteToLogFile(); @@ -1387,7 +1402,7 @@ void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) { void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event, Address from, Address to) { if (!FLAG_log_code || !log_->IsEnabled()) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << kLogEventsNames[event] << kNext << reinterpret_cast<void*>(from) << kNext << reinterpret_cast<void*>(to); msg.WriteToLogFile(); @@ -1395,7 +1410,7 @@ void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event, void Logger::ResourceEvent(const char* name, const char* tag) { if (!log_->IsEnabled() || !FLAG_log) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << name << kNext << tag << kNext; uint32_t sec, usec; @@ -1409,7 +1424,7 @@ void Logger::ResourceEvent(const char* name, const char* tag) { void Logger::SuspectReadEvent(Name name, Object obj) { if (!log_->IsEnabled() || !FLAG_log_suspect) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); String class_name = obj.IsJSObject() ? JSObject::cast(obj).class_name() : ReadOnlyRoots(isolate_).empty_string(); msg << "suspect-read" << kNext << class_name << kNext << name; @@ -1432,7 +1447,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta, int start_position, int end_position, String function_name) { if (!log_->IsEnabled() || !FLAG_log_function_events) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendFunctionMessage(msg, reason, script_id, time_delta, start_position, end_position, &timer_); if (!function_name.is_null()) msg << function_name; @@ -1444,7 +1459,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta, const char* function_name, size_t function_name_length) { if (!log_->IsEnabled() || !FLAG_log_function_events) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); AppendFunctionMessage(msg, reason, script_id, time_delta, start_position, end_position, &timer_); if (function_name_length > 0) { @@ -1456,7 +1471,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta, void Logger::CompilationCacheEvent(const char* action, const char* cache_type, SharedFunctionInfo sfi) { if (!log_->IsEnabled() || !FLAG_log_function_events) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); int script_id = -1; if (sfi.script().IsScript()) { script_id = Script::cast(sfi.script()).id(); @@ -1470,7 +1485,7 @@ void Logger::CompilationCacheEvent(const char* action, const char* cache_type, void Logger::ScriptEvent(ScriptEventType type, int script_id) { if (!log_->IsEnabled() || !FLAG_log_function_events) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "script" << Logger::kNext; switch (type) { case ScriptEventType::kReserveId: @@ -1497,7 +1512,7 @@ void Logger::ScriptEvent(ScriptEventType type, int script_id) { void Logger::ScriptDetails(Script script) { if (!log_->IsEnabled() || !FLAG_log_function_events) return; { - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "script-details" << Logger::kNext << script.id() << Logger::kNext; if (script.name().IsString()) { msg << String::cast(script.name()); @@ -1514,7 +1529,7 @@ void 
Logger::ScriptDetails(Script script) { bool Logger::EnsureLogScriptSource(Script script) { if (!log_->IsEnabled()) return false; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); // Make sure the script is written to the log file. int script_id = script.id(); if (logged_source_code_.find(script_id) != logged_source_code_.end()) { @@ -1544,7 +1559,7 @@ void Logger::RuntimeCallTimerEvent() { RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats(); RuntimeCallCounter* counter = stats->current_counter(); if (counter == nullptr) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "active-runtime-timer" << kNext << counter->name(); msg.WriteToLogFile(); } @@ -1555,7 +1570,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) { v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) { RuntimeCallTimerEvent(); } - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << kLogEventsNames[CodeEventListener::TICK_EVENT] << kNext << reinterpret_cast<void*>(sample->pc) << kNext << timer_.Elapsed().InMicroseconds(); @@ -1577,7 +1592,7 @@ void Logger::ICEvent(const char* type, bool keyed, Map map, Object key, char old_state, char new_state, const char* modifier, const char* slow_stub_reason) { if (!log_->IsEnabled() || !FLAG_trace_ic) return; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); if (keyed) msg << "Keyed"; int line; int column; @@ -1611,7 +1626,7 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason, if (!isolate_->bootstrapper()->IsActive()) { pc = isolate_->GetAbstractPC(&line, &column); } - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds() << kNext << AsHex::Address(from.ptr()) << kNext << AsHex::Address(to.ptr()) << kNext << AsHex::Address(pc) << kNext @@ -1634,7 +1649,7 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason, void Logger::MapCreate(Map map) { if (!log_->IsEnabled() || !FLAG_trace_maps) return; DisallowHeapAllocation no_gc; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext << AsHex::Address(map.ptr()); msg.WriteToLogFile(); @@ -1643,7 +1658,7 @@ void Logger::MapCreate(Map map) { void Logger::MapDetails(Map map) { if (!log_->IsEnabled() || !FLAG_trace_maps) return; DisallowHeapAllocation no_gc; - Log::MessageBuilder msg(log_); + Log::MessageBuilder msg(log_.get()); msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext << AsHex::Address(map.ptr()) << kNext; if (FLAG_trace_maps_details) { @@ -1842,24 +1857,25 @@ bool Logger::SetUp(Isolate* isolate) { std::ostringstream log_file_name; std::ostringstream source_log_file_name; PrepareLogFileName(log_file_name, isolate, FLAG_logfile); - log_ = new Log(this, log_file_name.str().c_str()); + log_ = std::make_unique<Log>(this, log_file_name.str().c_str()); if (FLAG_perf_basic_prof) { - perf_basic_logger_.reset(new PerfBasicLogger(isolate)); + perf_basic_logger_ = std::make_unique<PerfBasicLogger>(isolate); AddCodeEventListener(perf_basic_logger_.get()); } if (FLAG_perf_prof) { - perf_jit_logger_.reset(new PerfJitLogger(isolate)); + perf_jit_logger_ = std::make_unique<PerfJitLogger>(isolate); AddCodeEventListener(perf_jit_logger_.get()); } if (FLAG_ll_prof) { - ll_logger_.reset(new LowLevelLogger(isolate, log_file_name.str().c_str())); + ll_logger_ = + 
std::make_unique<LowLevelLogger>(isolate, log_file_name.str().c_str()); AddCodeEventListener(ll_logger_.get()); } - ticker_.reset(new Ticker(isolate, FLAG_prof_sampling_interval)); + ticker_ = std::make_unique<Ticker>(isolate, FLAG_prof_sampling_interval); if (Log::InitLogAtStart()) { is_logging_ = true; @@ -1868,7 +1884,7 @@ bool Logger::SetUp(Isolate* isolate) { timer_.Start(); if (FLAG_prof_cpp) { - profiler_.reset(new Profiler(isolate)); + profiler_ = std::make_unique<Profiler>(isolate); is_logging_ = true; profiler_->Engage(); } @@ -1891,7 +1907,7 @@ void Logger::SetCodeEventHandler(uint32_t options, if (isolate_->wasm_engine() != nullptr) { isolate_->wasm_engine()->EnableCodeLogging(isolate_); } - jit_logger_.reset(new JitLogger(isolate_, event_handler)); + jit_logger_ = std::make_unique<JitLogger>(isolate_, event_handler); AddCodeEventListener(jit_logger_.get()); if (options & kJitCodeEventEnumExisting) { HandleScope scope(isolate_); @@ -2042,9 +2058,9 @@ void ExistingCodeLogger::LogCompiledFunctions() { const int wasm_module_objects_count = EnumerateWasmModuleObjects(heap, nullptr); - std::unique_ptr<Handle<WasmModuleObject>[]> module_objects( - new Handle<WasmModuleObject>[wasm_module_objects_count]); - EnumerateWasmModuleObjects(heap, module_objects.get()); + ScopedVector<Handle<WasmModuleObject>> module_objects( + wasm_module_objects_count); + EnumerateWasmModuleObjects(heap, module_objects.begin()); for (int i = 0; i < wasm_module_objects_count; ++i) { module_objects[i]->native_module()->LogWasmCodes(isolate_); } diff --git a/chromium/v8/src/logging/log.h b/chromium/v8/src/logging/log.h index 3c28222982c..69760c4c6ca 100644 --- a/chromium/v8/src/logging/log.h +++ b/chromium/v8/src/logging/log.h @@ -5,6 +5,7 @@ #ifndef V8_LOGGING_LOG_H_ #define V8_LOGGING_LOG_H_ +#include <memory> #include <set> #include <string> @@ -115,6 +116,9 @@ class Logger : public CodeEventListener { kStreamingCompile }; + explicit Logger(Isolate* isolate); + ~Logger(); + // The separator is used to write an unescaped "," into the log. static const LogSeparator kNext; @@ -273,9 +277,6 @@ class Logger : public CodeEventListener { void LogCodeObject(Object code_object); private: - explicit Logger(Isolate* isolate); - ~Logger() override; - // Emits the profiler's first message. void ProfilerBeginEvent(); @@ -314,21 +315,11 @@ class Logger : public CodeEventListener { // of samples. std::unique_ptr<Profiler> profiler_; - // An array of log events names. - const char* const* log_events_; - - // Internal implementation classes with access to - // private members. - friend class EventLog; - friend class Isolate; - friend class TimeLog; + // Internal implementation classes with access to private members. 
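Several of the logging changes in this commit follow one pattern: members that were raw pointers with hand-written delete calls (Log* log_, NameBuffer* name_buffer_, char* format_buffer_) become std::unique_ptr members, while non-owning users such as Log::MessageBuilder keep receiving a plain pointer via get(). A minimal standalone sketch of that ownership split, using stand-in class names rather than the real V8 types:

    #include <cstdio>
    #include <memory>

    // A file-like sink owned by the logger.
    class Sink {
     public:
      void Write(const char* line) { std::printf("%s\n", line); }
    };

    // Borrows the sink for the duration of one message; never owns it.
    class MessageBuilder {
     public:
      explicit MessageBuilder(Sink* sink) : sink_(sink) {}
      void Emit(const char* line) { sink_->Write(line); }
     private:
      Sink* sink_;  // non-owning
    };

    class Logger {
     public:
      Logger() : sink_(std::make_unique<Sink>()) {}
      // No hand-written destructor: unique_ptr releases the sink automatically.
      void Event(const char* line) {
        MessageBuilder msg(sink_.get());  // lend a raw pointer to the borrower
        msg.Emit(line);
      }
     private:
      std::unique_ptr<Sink> sink_;
    };

    int main() {
      Logger logger;
      logger.Event("profiler,begin");
      return 0;
    }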
friend class Profiler; - template <StateTag Tag> - friend class VMState; - friend class LoggerTestHelper; bool is_logging_; - Log* log_; + std::unique_ptr<Log> log_; std::unique_ptr<PerfBasicLogger> perf_basic_logger_; std::unique_ptr<PerfJitLogger> perf_jit_logger_; std::unique_ptr<LowLevelLogger> ll_logger_; @@ -419,7 +410,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener { virtual void LogRecordedBuffer(const wasm::WasmCode* code, const char* name, int length) = 0; - NameBuffer* name_buffer_; + std::unique_ptr<NameBuffer> name_buffer_; }; struct CodeEvent { @@ -432,6 +423,7 @@ struct CodeEvent { int script_column; CodeEventType code_type; const char* comment; + uintptr_t previous_code_start_address; }; class ExternalCodeEventListener : public CodeEventListener { @@ -457,7 +449,7 @@ class ExternalCodeEventListener : public CodeEventListener { void SetterCallbackEvent(Name name, Address entry_point) override {} void SharedFunctionInfoMoveEvent(Address from, Address to) override {} void NativeContextMoveEvent(Address from, Address to) override {} - void CodeMoveEvent(AbstractCode from, AbstractCode to) override {} + void CodeMoveEvent(AbstractCode from, AbstractCode to) override; void CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) override {} void CodeMovingGCEvent() override {} diff --git a/chromium/v8/src/numbers/OWNERS b/chromium/v8/src/numbers/OWNERS index df62d017308..882d275fe86 100644 --- a/chromium/v8/src/numbers/OWNERS +++ b/chromium/v8/src/numbers/OWNERS @@ -1,4 +1,4 @@ -clemensh@chromium.org +clemensb@chromium.org jgruber@chromium.org jkummerow@chromium.org sigurds@chromium.org diff --git a/chromium/v8/src/numbers/math-random.cc b/chromium/v8/src/numbers/math-random.cc index dee18788a7c..d45b4d0a5f1 100644 --- a/chromium/v8/src/numbers/math-random.cc +++ b/chromium/v8/src/numbers/math-random.cc @@ -16,9 +16,8 @@ namespace internal { void MathRandom::InitializeContext(Isolate* isolate, Handle<Context> native_context) { - Handle<FixedDoubleArray> cache = - Handle<FixedDoubleArray>::cast(isolate->factory()->NewFixedDoubleArray( - kCacheSize, AllocationType::kOld)); + Handle<FixedDoubleArray> cache = Handle<FixedDoubleArray>::cast( + isolate->factory()->NewFixedDoubleArray(kCacheSize)); for (int i = 0; i < kCacheSize; i++) cache->set(i, 0); native_context->set_math_random_cache(*cache); Handle<PodArray<State>> pod = diff --git a/chromium/v8/src/objects/arguments.h b/chromium/v8/src/objects/arguments.h index a306ef592aa..0a1e3e4ac98 100644 --- a/chromium/v8/src/objects/arguments.h +++ b/chromium/v8/src/objects/arguments.h @@ -16,7 +16,7 @@ namespace v8 { namespace internal { -// Superclass for all objects with instance type {JS_ARGUMENTS_TYPE} +// Superclass for all objects with instance type {JS_ARGUMENTS_OBJECT_TYPE} class JSArgumentsObject : public TorqueGeneratedJSArgumentsObject<JSArgumentsObject, JSObject> { public: @@ -25,15 +25,16 @@ class JSArgumentsObject }; // Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject. -// Note that the instance type {JS_ARGUMENTS_TYPE} does _not_ guarantee the -// below layout, the in-object properties might have transitioned to dictionary -// mode already. Only use the below layout with the specific initial maps. +// Note that the instance type {JS_ARGUMENTS_OBJECT_TYPE} does _not_ guarantee +// the below layout, the in-object properties might have transitioned to +// dictionary mode already. Only use the below layout with the specific initial +// maps. 
class JSArgumentsObjectWithLength : public JSArgumentsObject { public: // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS( JSObject::kHeaderSize, - TORQUE_GENERATED_JSARGUMENTS_OBJECT_WITH_LENGTH_FIELDS) + TORQUE_GENERATED_JS_ARGUMENTS_OBJECT_WITH_LENGTH_FIELDS) // Indices of in-object properties. static const int kLengthIndex = 0; @@ -50,7 +51,7 @@ class JSSloppyArgumentsObject : public JSArgumentsObjectWithLength { public: DEFINE_FIELD_OFFSET_CONSTANTS( JSArgumentsObjectWithLength::kSize, - TORQUE_GENERATED_JSSLOPPY_ARGUMENTS_OBJECT_FIELDS) + TORQUE_GENERATED_JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS) // Indices of in-object properties. static const int kCalleeIndex = kLengthIndex + 1; diff --git a/chromium/v8/src/objects/backing-store.cc b/chromium/v8/src/objects/backing-store.cc new file mode 100644 index 00000000000..6ba2854a291 --- /dev/null +++ b/chromium/v8/src/objects/backing-store.cc @@ -0,0 +1,654 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/objects/backing-store.h" +#include "src/execution/isolate.h" +#include "src/handles/global-handles.h" +#include "src/logging/counters.h" +#include "src/wasm/wasm-engine.h" +#include "src/wasm/wasm-limits.h" +#include "src/wasm/wasm-objects-inl.h" + +#define TRACE_BS(...) \ + do { \ + if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \ + } while (false) + +namespace v8 { +namespace internal { + +namespace { +#if V8_TARGET_ARCH_64_BIT +constexpr bool kUseGuardRegions = true; +#else +constexpr bool kUseGuardRegions = false; +#endif + +#if V8_TARGET_ARCH_MIPS64 +// MIPS64 has a user space of 2^40 bytes on most processors, +// address space limits needs to be smaller. +constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB +#elif V8_TARGET_ARCH_64_BIT +constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB +#else +constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB +#endif + +constexpr uint64_t kOneGiB = 1024 * 1024 * 1024; +constexpr uint64_t kNegativeGuardSize = 2 * kOneGiB; +constexpr uint64_t kFullGuardSize = 10 * kOneGiB; + +std::atomic<uint64_t> reserved_address_space_{0}; + +// Allocation results are reported to UMA +// +// See wasm_memory_allocation_result in counters.h +enum class AllocationStatus { + kSuccess, // Succeeded on the first try + + kSuccessAfterRetry, // Succeeded after garbage collection + + kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address + // space limit + + kOtherFailure // Failed for an unknown reason +}; + +base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) { + // Guard regions always look like this: + // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx| + // ^ buffer_start + // ^ byte_length + // ^ negative guard region ^ positive guard region + + Address start = reinterpret_cast<Address>(buffer_start); + DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit + DCHECK_EQ(0, start % AllocatePageSize()); + return base::AddressRegion(start - (2 * kOneGiB), + static_cast<size_t>(kFullGuardSize)); +} + +void RecordStatus(Isolate* isolate, AllocationStatus status) { + isolate->counters()->wasm_memory_allocation_result()->AddSample( + static_cast<int>(status)); +} + +inline void DebugCheckZero(void* start, size_t byte_length) { +#if DEBUG + // Double check memory is zero-initialized. 
+ const byte* bytes = reinterpret_cast<const byte*>(start); + for (size_t i = 0; i < byte_length; i++) { + DCHECK_EQ(0, bytes[i]); + } +#endif +} +} // namespace + +bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) { + uint64_t reservation_limit = kAddressSpaceLimit; + while (true) { + uint64_t old_count = reserved_address_space_.load(); + if (old_count > reservation_limit) return false; + if (reservation_limit - old_count < num_bytes) return false; + if (reserved_address_space_.compare_exchange_weak(old_count, + old_count + num_bytes)) { + return true; + } + } +} + +void BackingStore::ReleaseReservation(uint64_t num_bytes) { + uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes); + USE(old_reserved); + DCHECK_LE(num_bytes, old_reserved); +} + +// The backing store for a Wasm shared memory remembers all the isolates +// with which it has been shared. +struct SharedWasmMemoryData { + std::vector<Isolate*> isolates_; +}; + +void BackingStore::Clear() { + buffer_start_ = nullptr; + byte_length_ = 0; + has_guard_regions_ = false; + type_specific_data_.v8_api_array_buffer_allocator = nullptr; +} + +BackingStore::~BackingStore() { + GlobalBackingStoreRegistry::Unregister(this); + + if (buffer_start_ == nullptr) return; // nothing to deallocate + + if (is_wasm_memory_) { + DCHECK(free_on_destruct_); + TRACE_BS("BSw:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this, + buffer_start_, byte_length(), byte_capacity_); + if (is_shared_) { + // Deallocate the list of attached memory objects. + SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data(); + delete shared_data; + type_specific_data_.shared_wasm_memory_data = nullptr; + } + + // Wasm memories are always allocated through the page allocator. + auto region = + has_guard_regions_ + ? GetGuardedRegion(buffer_start_, byte_length_) + : base::AddressRegion(reinterpret_cast<Address>(buffer_start_), + byte_capacity_); + bool pages_were_freed = + region.size() == 0 /* no need to free any pages */ || + FreePages(GetPlatformPageAllocator(), + reinterpret_cast<void*>(region.begin()), region.size()); + CHECK(pages_were_freed); + BackingStore::ReleaseReservation(has_guard_regions_ ? kFullGuardSize + : byte_capacity_); + Clear(); + return; + } + if (free_on_destruct_) { + // JSArrayBuffer backing store. Deallocate through the embedder's allocator. + auto allocator = reinterpret_cast<v8::ArrayBuffer::Allocator*>( + get_v8_api_array_buffer_allocator()); + TRACE_BS("BS:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this, + buffer_start_, byte_length(), byte_capacity_); + allocator->Free(buffer_start_, byte_length_); + } + Clear(); +} + +// Allocate a backing store using the array buffer allocator from the embedder. 
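BackingStore::ReserveAddressSpace above enforces the per-process address-space budget with a lock-free compare-exchange loop over an atomic counter, and ReleaseReservation returns bytes to that budget. The standalone sketch below shows the same reservation pattern in isolation; the budget value and the names Reserve/Release are made up for illustration and are not the V8 API.

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Process-wide budget; the limit here is a made-up value for the sketch.
    static std::atomic<uint64_t> reserved_bytes{0};
    constexpr uint64_t kBudget = uint64_t{1} << 40;  // hypothetical 1 TiB

    bool Reserve(uint64_t num_bytes) {
      while (true) {
        uint64_t old_count = reserved_bytes.load();
        if (kBudget - old_count < num_bytes) return false;  // would exceed budget
        if (reserved_bytes.compare_exchange_weak(old_count,
                                                 old_count + num_bytes)) {
          return true;  // reservation recorded atomically
        }
        // Lost the race to another thread; re-read the counter and retry.
      }
    }

    void Release(uint64_t num_bytes) { reserved_bytes.fetch_sub(num_bytes); }

    int main() {
      std::printf("1 GiB reserved: %d\n", Reserve(uint64_t{1} << 30));  // fits
      std::printf("2 TiB reserved: %d\n", Reserve(uint64_t{1} << 41));  // exceeds budget
      Release(uint64_t{1} << 30);
      return 0;
    }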
+std::unique_ptr<BackingStore> BackingStore::Allocate( + Isolate* isolate, size_t byte_length, SharedFlag shared, + InitializedFlag initialized) { + void* buffer_start = nullptr; + auto allocator = isolate->array_buffer_allocator(); + CHECK_NOT_NULL(allocator); + if (byte_length != 0) { + auto counters = isolate->counters(); + int mb_length = static_cast<int>(byte_length / MB); + if (mb_length > 0) { + counters->array_buffer_big_allocations()->AddSample(mb_length); + } + if (shared == SharedFlag::kShared) { + counters->shared_array_allocations()->AddSample(mb_length); + } + auto allocate_buffer = [allocator, initialized](size_t byte_length) { + if (initialized == InitializedFlag::kUninitialized) { + return allocator->AllocateUninitialized(byte_length); + } + void* buffer_start = allocator->Allocate(byte_length); + if (buffer_start) { + // TODO(wasm): node does not implement the zero-initialization API. + // Reenable this debug check when node does implement it properly. + constexpr bool + kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true; + if ((!(kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI)) && + !FLAG_mock_arraybuffer_allocator) { + DebugCheckZero(buffer_start, byte_length); + } + } + return buffer_start; + }; + + buffer_start = isolate->heap()->AllocateExternalBackingStore( + allocate_buffer, byte_length); + + if (buffer_start == nullptr) { + // Allocation failed. + counters->array_buffer_new_size_failures()->AddSample(mb_length); + return {}; + } + } + + auto result = new BackingStore(buffer_start, // start + byte_length, // length + byte_length, // capacity + shared, // shared + false, // is_wasm_memory + true, // free_on_destruct + false); // has_guard_regions + + TRACE_BS("BS:alloc bs=%p mem=%p (length=%zu)\n", result, + result->buffer_start(), byte_length); + result->type_specific_data_.v8_api_array_buffer_allocator = allocator; + return std::unique_ptr<BackingStore>(result); +} + +// Allocate a backing store for a Wasm memory. Always use the page allocator +// and add guard regions. +std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory( + Isolate* isolate, size_t initial_pages, size_t maximum_pages, + SharedFlag shared) { + // Cannot reserve 0 pages on some OSes. + if (maximum_pages == 0) maximum_pages = 1; + + TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages); + + bool guards = kUseGuardRegions; + + // For accounting purposes, whether a GC was necessary. + bool did_retry = false; + + // A helper to try running a function up to 3 times, executing a GC + // if the first and second attempts failed. + auto gc_retry = [&](const std::function<bool()>& fn) { + for (int i = 0; i < 3; i++) { + if (fn()) return true; + // Collect garbage and retry. + did_retry = true; + // TODO(wasm): try Heap::EagerlyFreeExternalMemory() first? + isolate->heap()->MemoryPressureNotification( + MemoryPressureLevel::kCritical, true); + } + return false; + }; + + // Compute size of reserved memory. + + size_t engine_max_pages = wasm::max_mem_pages(); + size_t byte_capacity = + std::min(engine_max_pages, maximum_pages) * wasm::kWasmPageSize; + size_t reservation_size = + guards ? static_cast<size_t>(kFullGuardSize) : byte_capacity; + + //-------------------------------------------------------------------------- + // 1. Enforce maximum address space reservation per engine. 
+ //-------------------------------------------------------------------------- + auto reserve_memory_space = [&] { + return BackingStore::ReserveAddressSpace(reservation_size); + }; + + if (!gc_retry(reserve_memory_space)) { + // Crash on out-of-memory if the correctness fuzzer is running. + if (FLAG_correctness_fuzzer_suppressions) { + FATAL("could not allocate wasm memory backing store"); + } + RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure); + TRACE_BS("BSw:try failed to reserve address space\n"); + return {}; + } + + //-------------------------------------------------------------------------- + // 2. Allocate pages (inaccessible by default). + //-------------------------------------------------------------------------- + void* allocation_base = nullptr; + auto allocate_pages = [&] { + allocation_base = + AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size, + wasm::kWasmPageSize, PageAllocator::kNoAccess); + return allocation_base != nullptr; + }; + if (!gc_retry(allocate_pages)) { + // Page allocator could not reserve enough pages. + BackingStore::ReleaseReservation(reservation_size); + RecordStatus(isolate, AllocationStatus::kOtherFailure); + TRACE_BS("BSw:try failed to allocate pages\n"); + return {}; + } + + // Get a pointer to the start of the buffer, skipping negative guard region + // if necessary. + byte* buffer_start = reinterpret_cast<byte*>(allocation_base) + + (guards ? kNegativeGuardSize : 0); + + //-------------------------------------------------------------------------- + // 3. Commit the initial pages (allow read/write). + //-------------------------------------------------------------------------- + size_t byte_length = initial_pages * wasm::kWasmPageSize; + auto commit_memory = [&] { + return byte_length == 0 || + SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length, + PageAllocator::kReadWrite); + }; + if (!gc_retry(commit_memory)) { + // SetPermissions put us over the process memory limit. + V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()"); + TRACE_BS("BSw:try failed to set permissions\n"); + } + + DebugCheckZero(buffer_start, byte_length); // touch the bytes. + + RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry + : AllocationStatus::kSuccess); + + auto result = new BackingStore(buffer_start, // start + byte_length, // length + byte_capacity, // capacity + shared, // shared + true, // is_wasm_memory + true, // free_on_destruct + guards); // has_guard_regions + + TRACE_BS("BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu)\n", result, + result->buffer_start(), byte_length, byte_capacity); + + // Shared Wasm memories need an anchor for the memory object list. + if (shared == SharedFlag::kShared) { + result->type_specific_data_.shared_wasm_memory_data = + new SharedWasmMemoryData(); + } + + return std::unique_ptr<BackingStore>(result); +} + +// Allocate a backing store for a Wasm memory. Always use the page allocator +// and add guard regions. +std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory( + Isolate* isolate, size_t initial_pages, size_t maximum_pages, + SharedFlag shared) { + // Wasm pages must be a multiple of the allocation page size. + DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize()); + + // Enforce engine limitation on the maximum number of pages. 
+ if (initial_pages > wasm::max_mem_pages()) return nullptr; + + auto backing_store = + TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared); + if (!backing_store && maximum_pages > initial_pages) { + // If reserving {maximum_pages} failed, try with maximum = initial. + backing_store = + TryAllocateWasmMemory(isolate, initial_pages, initial_pages, shared); + } + return backing_store; +} + +std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate, + size_t new_pages) { + DCHECK_GE(new_pages * wasm::kWasmPageSize, byte_length_); + // Note that we could allocate uninitialized to save initialization cost here, + // but since Wasm memories are allocated by the page allocator, the zeroing + // cost is already built-in. + // TODO(titzer): should we use a suitable maximum here? + auto new_backing_store = BackingStore::AllocateWasmMemory( + isolate, new_pages, new_pages, + is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared); + + if (!new_backing_store || + new_backing_store->has_guard_regions() != has_guard_regions_) { + return {}; + } + + if (byte_length_ > 0) { + memcpy(new_backing_store->buffer_start(), buffer_start_, byte_length_); + } + + return new_backing_store; +} + +// Try to grow the size of a wasm memory in place, without realloc + copy. +bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages, + size_t max_pages) { + DCHECK(is_wasm_memory_); + max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize); + + if (delta_pages == 0) return true; // degenerate grow. + if (delta_pages > max_pages) return false; // would never work. + + // Do a compare-exchange loop, because we also need to adjust page + // permissions. Note that multiple racing grows both try to set page + // permissions for the entire range (to be RW), so the operating system + // should deal with that raciness. We know we succeeded when we can + // compare/swap the old length with the new length. + size_t old_length = 0; + size_t new_length = 0; + while (true) { + old_length = byte_length_.load(std::memory_order_acquire); + size_t current_pages = old_length / wasm::kWasmPageSize; + + // Check if we have exceed the supplied maximum. + if (current_pages > (max_pages - delta_pages)) return false; + + new_length = (current_pages + delta_pages) * wasm::kWasmPageSize; + + // Try to adjust the permissions on the memory. + if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_, + new_length, PageAllocator::kReadWrite)) { + return false; + } + if (byte_length_.compare_exchange_weak(old_length, new_length, + std::memory_order_acq_rel)) { + // Successfully updated both the length and permissions. + break; + } + } + + if (!is_shared_) { + // Only do per-isolate accounting for non-shared backing stores. + reinterpret_cast<v8::Isolate*>(isolate) + ->AdjustAmountOfExternalAllocatedMemory(new_length - old_length); + } + return true; +} + +void BackingStore::AttachSharedWasmMemoryObject( + Isolate* isolate, Handle<WasmMemoryObject> memory_object) { + DCHECK(is_wasm_memory_); + DCHECK(is_shared_); + // We need to take the global registry lock for this operation. 
+ GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this, + memory_object); +} + +void BackingStore::BroadcastSharedWasmMemoryGrow( + Isolate* isolate, std::shared_ptr<BackingStore> backing_store, + size_t new_pages) { + GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow( + isolate, backing_store, new_pages); +} + +void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) { + GlobalBackingStoreRegistry::Purge(isolate); +} + +void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) { + GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate); +} + +std::unique_ptr<BackingStore> BackingStore::WrapAllocation( + Isolate* isolate, void* allocation_base, size_t allocation_length, + SharedFlag shared, bool free_on_destruct) { + auto result = + new BackingStore(allocation_base, allocation_length, allocation_length, + shared, false, free_on_destruct, false); + result->type_specific_data_.v8_api_array_buffer_allocator = + isolate->array_buffer_allocator(); + TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result, + result->buffer_start(), result->byte_length()); + return std::unique_ptr<BackingStore>(result); +} + +std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore( + SharedFlag shared) { + auto result = new BackingStore(nullptr, // start + 0, // length + 0, // capacity + shared, // shared + false, // is_wasm_memory + false, // free_on_destruct + false); // has_guard_regions + + return std::unique_ptr<BackingStore>(result); +} + +void* BackingStore::get_v8_api_array_buffer_allocator() { + CHECK(!is_wasm_memory_); + auto array_buffer_allocator = + type_specific_data_.v8_api_array_buffer_allocator; + CHECK_NOT_NULL(array_buffer_allocator); + return array_buffer_allocator; +} + +SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() { + CHECK(is_wasm_memory_ && is_shared_); + auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data; + CHECK(shared_wasm_memory_data); + return shared_wasm_memory_data; +} + +namespace { +// Implementation details of GlobalBackingStoreRegistry. +struct GlobalBackingStoreRegistryImpl { + GlobalBackingStoreRegistryImpl() {} + base::Mutex mutex_; + std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_; +}; +base::LazyInstance<GlobalBackingStoreRegistryImpl>::type global_registry_impl_ = + LAZY_INSTANCE_INITIALIZER; +inline GlobalBackingStoreRegistryImpl* impl() { + return global_registry_impl_.Pointer(); +} +} // namespace + +void GlobalBackingStoreRegistry::Register( + std::shared_ptr<BackingStore> backing_store) { + if (!backing_store || !backing_store->buffer_start()) return; + + if (!backing_store->free_on_destruct()) { + // If the backing store buffer is managed by the embedder, + // then we don't have to guarantee that there is single unique + // BackingStore per buffer_start() because the destructor of + // of the BackingStore will be a no-op in that case. 
+ return; + } + + base::MutexGuard scope_lock(&impl()->mutex_); + if (backing_store->globally_registered_) return; + TRACE_BS("BS:reg bs=%p mem=%p (length=%zu, capacity=%zu)\n", + backing_store.get(), backing_store->buffer_start(), + backing_store->byte_length(), backing_store->byte_capacity()); + std::weak_ptr<BackingStore> weak = backing_store; + auto result = impl()->map_.insert({backing_store->buffer_start(), weak}); + CHECK(result.second); + backing_store->globally_registered_ = true; +} + +void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) { + if (!backing_store->globally_registered_) return; + + DCHECK_NOT_NULL(backing_store->buffer_start()); + + base::MutexGuard scope_lock(&impl()->mutex_); + const auto& result = impl()->map_.find(backing_store->buffer_start()); + if (result != impl()->map_.end()) { + DCHECK(!result->second.lock()); + impl()->map_.erase(result); + } + backing_store->globally_registered_ = false; +} + +std::shared_ptr<BackingStore> GlobalBackingStoreRegistry::Lookup( + void* buffer_start, size_t length) { + base::MutexGuard scope_lock(&impl()->mutex_); + TRACE_BS("BS:lookup mem=%p (%zu bytes)\n", buffer_start, length); + const auto& result = impl()->map_.find(buffer_start); + if (result == impl()->map_.end()) { + return std::shared_ptr<BackingStore>(); + } + auto backing_store = result->second.lock(); + CHECK_EQ(buffer_start, backing_store->buffer_start()); + if (backing_store->is_wasm_memory()) { + // Grow calls to shared WebAssembly threads can be triggered from different + // workers, length equality cannot be guaranteed here. + CHECK_LE(length, backing_store->byte_length()); + } else { + CHECK_EQ(length, backing_store->byte_length()); + } + return backing_store; +} + +void GlobalBackingStoreRegistry::Purge(Isolate* isolate) { + // We need to keep a reference to all backing stores that are inspected + // in the purging loop below. Otherwise, we might get a deadlock + // if the temporary backing store reference created in the loop is + // the last reference. In that case the destructor of the backing store + // may try to take the &impl()->mutex_ in order to unregister itself. + std::vector<std::shared_ptr<BackingStore>> prevent_destruction_under_lock; + base::MutexGuard scope_lock(&impl()->mutex_); + // Purge all entries in the map that refer to the given isolate. + for (auto& entry : impl()->map_) { + auto backing_store = entry.second.lock(); + prevent_destruction_under_lock.emplace_back(backing_store); + if (!backing_store) continue; // skip entries where weak ptr is null + if (!backing_store->is_wasm_memory()) continue; // skip non-wasm memory + if (!backing_store->is_shared()) continue; // skip non-shared memory + SharedWasmMemoryData* shared_data = + backing_store->get_shared_wasm_memory_data(); + // Remove this isolate from the isolates list. + auto& isolates = shared_data->isolates_; + for (size_t i = 0; i < isolates.size(); i++) { + if (isolates[i] == isolate) isolates[i] = nullptr; + } + } +} + +void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject( + Isolate* isolate, BackingStore* backing_store, + Handle<WasmMemoryObject> memory_object) { + // Add to the weak array list of shared memory objects in the isolate. + isolate->AddSharedWasmMemory(memory_object); + + // Add the isolate to the list of isolates sharing this backing store. 
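GlobalBackingStoreRegistry above keeps a mutex-guarded map from buffer_start to std::weak_ptr<BackingStore>, so the registry itself does not keep a backing store alive; entries are removed again by Unregister when the store is destroyed. A self-contained sketch of that weak-pointer registry pattern, with hypothetical types in place of the V8 classes, where a dead entry simply yields a null result:

    #include <cstdio>
    #include <map>
    #include <memory>
    #include <mutex>

    // A payload standing in for a backing store; only the address matters here.
    struct Block {
      explicit Block(void* start) : start(start) {}
      void* start;
    };

    // Registry from buffer address to a weak reference, guarded by a mutex.
    class Registry {
     public:
      void Register(const std::shared_ptr<Block>& block) {
        std::lock_guard<std::mutex> guard(mutex_);
        map_[block->start] = block;  // store a weak_ptr, not an owning reference
      }
      std::shared_ptr<Block> Lookup(void* start) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = map_.find(start);
        if (it == map_.end()) return nullptr;
        return it->second.lock();  // null if the block has already died
      }
     private:
      std::mutex mutex_;
      std::map<void*, std::weak_ptr<Block>> map_;
    };

    int main() {
      Registry registry;
      int buffer = 0;
      auto block = std::make_shared<Block>(&buffer);
      registry.Register(block);
      std::printf("alive: %d\n", registry.Lookup(&buffer) != nullptr);        // 1
      block.reset();
      std::printf("after reset: %d\n", registry.Lookup(&buffer) != nullptr);  // 0
      return 0;
    }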
+ base::MutexGuard scope_lock(&impl()->mutex_); + SharedWasmMemoryData* shared_data = + backing_store->get_shared_wasm_memory_data(); + auto& isolates = shared_data->isolates_; + int free_entry = -1; + for (size_t i = 0; i < isolates.size(); i++) { + if (isolates[i] == isolate) return; + if (isolates[i] == nullptr) free_entry = static_cast<int>(i); + } + if (free_entry >= 0) + isolates[free_entry] = isolate; + else + isolates.push_back(isolate); +} + +void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow( + Isolate* isolate, std::shared_ptr<BackingStore> backing_store, + size_t new_pages) { + { + // The global lock protects the list of isolates per backing store. + base::MutexGuard scope_lock(&impl()->mutex_); + SharedWasmMemoryData* shared_data = + backing_store->get_shared_wasm_memory_data(); + for (Isolate* other : shared_data->isolates_) { + if (other && other != isolate) { + other->stack_guard()->RequestGrowSharedMemory(); + } + } + } + // Update memory objects in this isolate. + UpdateSharedWasmMemoryObjects(isolate); +} + +void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects( + Isolate* isolate) { + HandleScope scope(isolate); + Handle<WeakArrayList> shared_wasm_memories = + isolate->factory()->shared_wasm_memories(); + + for (int i = 0; i < shared_wasm_memories->length(); i++) { + HeapObject obj; + if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue; + + Handle<WasmMemoryObject> memory_object(WasmMemoryObject::cast(obj), + isolate); + Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate); + std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore(); + + if (old_buffer->byte_length() != backing_store->byte_length()) { + Handle<JSArrayBuffer> new_buffer = + isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store)); + memory_object->update_instances(isolate, new_buffer); + } + } +} + +} // namespace internal +} // namespace v8 + +#undef TRACE_BS diff --git a/chromium/v8/src/objects/backing-store.h b/chromium/v8/src/objects/backing-store.h new file mode 100644 index 00000000000..2c6ffb28daf --- /dev/null +++ b/chromium/v8/src/objects/backing-store.h @@ -0,0 +1,206 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_BACKING_STORE_H_ +#define V8_OBJECTS_BACKING_STORE_H_ + +#include <memory> + +#include "include/v8-internal.h" +#include "src/handles/handles.h" + +namespace v8 { +namespace internal { + +class Isolate; +class WasmMemoryObject; + +// Whether the backing store is shared or not. +enum class SharedFlag : uint8_t { kNotShared, kShared }; + +// Whether the backing store memory is initialied to zero or not. +enum class InitializedFlag : uint8_t { kUninitialized, kZeroInitialized }; + +// Internal information for shared wasm memories. E.g. contains +// a list of all memory objects (across all isolates) that share this +// backing store. +struct SharedWasmMemoryData; + +// The {BackingStore} data structure stores all the low-level details about the +// backing store of an array buffer or Wasm memory, including its base address +// and length, whether it is shared, provided by the embedder, has guard +// regions, etc. Instances of this classes *own* the underlying memory +// when they are created through one of the {Allocate()} methods below, +// and the destructor frees the memory (and page allocation if necessary). 
+// Backing stores can also *wrap* embedder-allocated memory. In this case, +// they do not own the memory, and upon destruction, they do not deallocate it. +class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase { + public: + ~BackingStore(); + + // Allocate an array buffer backing store using the default method, + // which currently is the embedder-provided array buffer allocator. + static std::unique_ptr<BackingStore> Allocate(Isolate* isolate, + size_t byte_length, + SharedFlag shared, + InitializedFlag initialized); + + // Allocate the backing store for a Wasm memory. + static std::unique_ptr<BackingStore> AllocateWasmMemory(Isolate* isolate, + size_t initial_pages, + size_t maximum_pages, + SharedFlag shared); + + // Create a backing store that wraps existing allocated memory. + // If {free_on_destruct} is {true}, the memory will be freed using the + // ArrayBufferAllocator::Free() callback when this backing store is + // destructed. Otherwise destructing the backing store will do nothing + // to the allocated memory. + static std::unique_ptr<BackingStore> WrapAllocation(Isolate* isolate, + void* allocation_base, + size_t allocation_length, + SharedFlag shared, + bool free_on_destruct); + + // Create an empty backing store. + static std::unique_ptr<BackingStore> EmptyBackingStore(SharedFlag shared); + + // Accessors. + void* buffer_start() const { return buffer_start_; } + size_t byte_length() const { + return byte_length_.load(std::memory_order_relaxed); + } + size_t byte_capacity() const { return byte_capacity_; } + bool is_shared() const { return is_shared_; } + bool is_wasm_memory() const { return is_wasm_memory_; } + bool has_guard_regions() const { return has_guard_regions_; } + bool free_on_destruct() const { return free_on_destruct_; } + + // Attempt to grow this backing store in place. + bool GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages, + size_t max_pages); + + // Allocate a new, larger, backing store for this Wasm memory and copy the + // contents of this backing store into it. + std::unique_ptr<BackingStore> CopyWasmMemory(Isolate* isolate, + size_t new_pages); + + // Attach the given memory object to this backing store. The memory object + // will be updated if this backing store is grown. + void AttachSharedWasmMemoryObject(Isolate* isolate, + Handle<WasmMemoryObject> memory_object); + + // Send asynchronous updates to attached memory objects in other isolates + // after the backing store has been grown. Memory objects in this + // isolate are updated synchronously. + static void BroadcastSharedWasmMemoryGrow(Isolate* isolate, + std::shared_ptr<BackingStore>, + size_t new_pages); + + // TODO(wasm): address space limitations should be enforced in page alloc. + // These methods enforce a limit on the total amount of address space, + // which is used for both backing stores and wasm memory. + static bool ReserveAddressSpace(uint64_t num_bytes); + static void ReleaseReservation(uint64_t num_bytes); + + // Remove all memory objects in the given isolate that refer to this + // backing store. + static void RemoveSharedWasmMemoryObjects(Isolate* isolate); + + // Update all shared memory objects in this isolate (after a grow operation). 
+ static void UpdateSharedWasmMemoryObjects(Isolate* isolate); + + private: + friend class GlobalBackingStoreRegistry; + + BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity, + SharedFlag shared, bool is_wasm_memory, bool free_on_destruct, + bool has_guard_regions) + : buffer_start_(buffer_start), + byte_length_(byte_length), + byte_capacity_(byte_capacity), + is_shared_(shared == SharedFlag::kShared), + is_wasm_memory_(is_wasm_memory), + free_on_destruct_(free_on_destruct), + has_guard_regions_(has_guard_regions), + globally_registered_(false) { + type_specific_data_.v8_api_array_buffer_allocator = nullptr; + } + + void* buffer_start_ = nullptr; + std::atomic<size_t> byte_length_{0}; + size_t byte_capacity_ = 0; + bool is_shared_ : 1; + bool is_wasm_memory_ : 1; + bool free_on_destruct_ : 1; + bool has_guard_regions_ : 1; + bool globally_registered_ : 1; + + union { + // If this backing store was allocated through the ArrayBufferAllocator API, + // this is a direct pointer to the API object for freeing the backing + // store. + // Note: we use {void*} here because we cannot forward-declare an inner + // class from the API. + void* v8_api_array_buffer_allocator; + + // For shared Wasm memories, this is a list of all the attached memory + // objects, which is needed to grow shared backing stores. + SharedWasmMemoryData* shared_wasm_memory_data; + } type_specific_data_; + + // Accessors for type-specific data. + void* get_v8_api_array_buffer_allocator(); + SharedWasmMemoryData* get_shared_wasm_memory_data(); + + void Clear(); // Internally clears fields after deallocation. + static std::unique_ptr<BackingStore> TryAllocateWasmMemory( + Isolate* isolate, size_t initial_pages, size_t maximum_pages, + SharedFlag shared); + + DISALLOW_COPY_AND_ASSIGN(BackingStore); +}; + +// A global, per-process mapping from buffer addresses to backing stores. +// This is generally only used for dealing with an embedder that has not +// migrated to the new API which should use proper pointers to manage +// backing stores. +class GlobalBackingStoreRegistry { + public: + // Register a backing store in the global registry. A mapping from the + // {buffer_start} to the backing store object will be added. The backing + // store will automatically unregister itself upon destruction. + static void Register(std::shared_ptr<BackingStore> backing_store); + + // Look up a backing store based on the {buffer_start} pointer. + static std::shared_ptr<BackingStore> Lookup(void* buffer_start, + size_t length); + + private: + friend class BackingStore; + // Unregister a backing store in the global registry. + static void Unregister(BackingStore* backing_store); + + // Adds the given memory object to the backing store's weak list + // of memory objects (under the registry lock). + static void AddSharedWasmMemoryObject(Isolate* isolate, + BackingStore* backing_store, + Handle<WasmMemoryObject> memory_object); + + // Purge any shared wasm memory lists that refer to this isolate. + static void Purge(Isolate* isolate); + + // Broadcast updates to all attached memory objects. + static void BroadcastSharedWasmMemoryGrow( + Isolate* isolate, std::shared_ptr<BackingStore> backing_store, + size_t new_pages); + + // Update all shared memory objects in the given isolate. 
+ static void UpdateSharedWasmMemoryObjects(Isolate* isolate); +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_BACKING_STORE_H_ diff --git a/chromium/v8/src/objects/bigint.cc b/chromium/v8/src/objects/bigint.cc index 2905bb44c6f..6cc43a78e77 100644 --- a/chromium/v8/src/objects/bigint.cc +++ b/chromium/v8/src/objects/bigint.cc @@ -1981,14 +1981,13 @@ void BigInt::SerializeDigits(uint8_t* storage) { // The serialization format MUST NOT CHANGE without updating the format // version in value-serializer.cc! MaybeHandle<BigInt> BigInt::FromSerializedDigits( - Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage, - AllocationType allocation) { + Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage) { int bytelength = LengthBits::decode(bitfield); DCHECK(digits_storage.length() == bytelength); bool sign = SignBits::decode(bitfield); int length = (bytelength + kDigitSize - 1) / kDigitSize; // Round up. Handle<MutableBigInt> result = - MutableBigInt::Cast(isolate->factory()->NewBigInt(length, allocation)); + MutableBigInt::Cast(isolate->factory()->NewBigInt(length)); result->initialize_bitfield(sign, length); void* digits = reinterpret_cast<void*>(result->ptr() + kDigitsOffset - kHeapObjectTag); diff --git a/chromium/v8/src/objects/bigint.h b/chromium/v8/src/objects/bigint.h index ca80547230f..f50e3bcf04d 100644 --- a/chromium/v8/src/objects/bigint.h +++ b/chromium/v8/src/objects/bigint.h @@ -6,8 +6,8 @@ #define V8_OBJECTS_BIGINT_H_ #include "src/common/globals.h" -#include "src/objects/heap-object.h" #include "src/objects/objects.h" +#include "src/objects/primitive-heap-object.h" #include "src/utils/utils.h" // Has to be the last include (doesn't have include guards): @@ -28,7 +28,7 @@ class ValueSerializer; // BigIntBase is just the raw data object underlying a BigInt. Use with care! // Most code should be using BigInts instead. -class BigIntBase : public HeapObject { +class BigIntBase : public PrimitiveHeapObject { public: inline int length() const { int32_t bitfield = RELAXED_READ_INT32_FIELD(*this, kBitfieldOffset); @@ -69,7 +69,7 @@ class BigIntBase : public HeapObject { V(kHeaderSize, 0) \ V(kDigitsOffset, 0) - DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, BIGINT_FIELDS) + DEFINE_FIELD_OFFSET_CONSTANTS(PrimitiveHeapObject::kHeaderSize, BIGINT_FIELDS) #undef BIGINT_FIELDS static constexpr bool HasOptionalPadding() { @@ -105,7 +105,7 @@ class BigIntBase : public HeapObject { // Only serves to make macros happy; other code should use IsBigInt. bool IsBigIntBase() const { return true; } - OBJECT_CONSTRUCTORS(BigIntBase, HeapObject); + OBJECT_CONSTRUCTORS(BigIntBase, PrimitiveHeapObject); }; class FreshlyAllocatedBigInt : public BigIntBase { @@ -263,8 +263,8 @@ class BigInt : public BigIntBase { // {DigitsByteLengthForBitfield(GetBitfieldForSerialization())}. void SerializeDigits(uint8_t* storage); V8_WARN_UNUSED_RESULT static MaybeHandle<BigInt> FromSerializedDigits( - Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage, - AllocationType allocation); + Isolate* isolate, uint32_t bitfield, + Vector<const uint8_t> digits_storage); OBJECT_CONSTRUCTORS(BigInt, BigIntBase); }; diff --git a/chromium/v8/src/objects/code.cc b/chromium/v8/src/objects/code.cc index b416df8878a..a477a7da26f 100644 --- a/chromium/v8/src/objects/code.cc +++ b/chromium/v8/src/objects/code.cc @@ -101,7 +101,6 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) { // Unbox handles and relocate. 
Assembler* origin = desc.origin; - AllowDeferredHandleDereference embedding_raw_address; const int mode_mask = RelocInfo::PostCodegenRelocationMask(); for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) { RelocInfo::Mode mode = it.rinfo()->rmode(); @@ -670,8 +669,8 @@ inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code code, } // namespace -void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) { - Isolate* isolate = GetIsolate(); +void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate, + Address current_pc) { os << "kind = " << Kind2String(kind()) << "\n"; if (name == nullptr) { name = GetName(isolate); @@ -683,7 +682,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) { os << "stack_slots = " << stack_slots() << "\n"; } os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n"; - os << "address = " << static_cast<const void*>(this) << "\n\n"; + os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n"; if (is_off_heap_trampoline()) { int trampoline_size = raw_instruction_size(); @@ -991,8 +990,7 @@ Handle<DependentCode> DependentCode::EnsureSpace( int capacity = kCodesStartIndex + DependentCode::Grow(entries->count()); int grow_by = capacity - entries->length(); return Handle<DependentCode>::cast( - isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by, - AllocationType::kOld)); + isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by)); } bool DependentCode::Compact() { diff --git a/chromium/v8/src/objects/code.h b/chromium/v8/src/objects/code.h index 6a5ac9f31a8..6f8c378093a 100644 --- a/chromium/v8/src/objects/code.h +++ b/chromium/v8/src/objects/code.h @@ -61,6 +61,7 @@ class Code : public HeapObject { #ifdef ENABLE_DISASSEMBLER const char* GetName(Isolate* isolate) const; V8_EXPORT_PRIVATE void Disassemble(const char* name, std::ostream& os, + Isolate* isolate, Address current_pc = kNullAddress); #endif diff --git a/chromium/v8/src/objects/contexts-inl.h b/chromium/v8/src/objects/contexts-inl.h index 0c566dd081a..669e98591fb 100644 --- a/chromium/v8/src/objects/contexts-inl.h +++ b/chromium/v8/src/objects/contexts-inl.h @@ -13,6 +13,7 @@ #include "src/objects/js-objects-inl.h" #include "src/objects/map-inl.h" #include "src/objects/objects-inl.h" +#include "src/objects/osr-optimized-code-cache-inl.h" #include "src/objects/regexp-match-info.h" #include "src/objects/scope-info.h" #include "src/objects/shared-function-info.h" @@ -47,10 +48,29 @@ Context ScriptContextTable::get_context(int i) const { OBJECT_CONSTRUCTORS_IMPL(Context, HeapObject) NEVER_READ_ONLY_SPACE_IMPL(Context) CAST_ACCESSOR(Context) -SMI_ACCESSORS(Context, length, kLengthOffset) + +SMI_ACCESSORS(Context, length_and_extension_flag, kLengthOffset) +SYNCHRONIZED_SMI_ACCESSORS(Context, length_and_extension_flag, kLengthOffset) CAST_ACCESSOR(NativeContext) +int Context::length() const { + return LengthField::decode(length_and_extension_flag()); +} + +int Context::synchronized_length() const { + return LengthField::decode(synchronized_length_and_extension_flag()); +} + +void Context::initialize_length_and_extension_bit(int len, + Context::HasExtension flag) { + DCHECK(LengthField::is_valid(len)); + int value = 0; + value = LengthField::update(value, len); + value = HasExtensionField::update(value, flag == Context::HasExtension::kYes); + set_length_and_extension_flag(value); +} + Object Context::get(int index) const { Isolate* isolate = GetIsolateForPtrCompr(*this); return 
get(isolate, index); @@ -94,11 +114,20 @@ void Context::set_previous(Context context) { set(PREVIOUS_INDEX, context); } Object Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); } -bool Context::has_extension() { return !extension().IsTheHole(); } +bool Context::has_extension() { + return static_cast<bool>( + HasExtensionField::decode(length_and_extension_flag())) && + !extension().IsTheHole(); +} + HeapObject Context::extension() { return HeapObject::cast(get(EXTENSION_INDEX)); } -void Context::set_extension(HeapObject object) { set(EXTENSION_INDEX, object); } +void Context::set_extension(HeapObject object) { + set(EXTENSION_INDEX, object); + synchronized_set_length_and_extension_flag( + HasExtensionField::update(length_and_extension_flag(), true)); +} NativeContext Context::native_context() const { Object result = get(NATIVE_CONTEXT_INDEX); @@ -197,7 +226,7 @@ int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind, base = IsAsyncFunction(kind) ? ASYNC_GENERATOR_FUNCTION_MAP_INDEX : GENERATOR_FUNCTION_MAP_INDEX; - } else if (IsAsyncFunction(kind)) { + } else if (IsAsyncFunction(kind) || IsAsyncModule(kind)) { CHECK_FOLLOWS4(ASYNC_FUNCTION_MAP_INDEX, ASYNC_FUNCTION_WITH_NAME_MAP_INDEX, ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX); @@ -252,6 +281,10 @@ void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) { reinterpret_cast<Address>(microtask_queue)); } +OSROptimizedCodeCache NativeContext::GetOSROptimizedCodeCache() { + return OSROptimizedCodeCache::cast(osr_code_cache()); +} + OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context) } // namespace internal diff --git a/chromium/v8/src/objects/contexts.cc b/chromium/v8/src/objects/contexts.cc index 74fb4477b18..9dbba06a4d4 100644 --- a/chromium/v8/src/objects/contexts.cc +++ b/chromium/v8/src/objects/contexts.cc @@ -39,12 +39,14 @@ Handle<ScriptContextTable> ScriptContextTable::Extend( bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table, String name, LookupResult* result) { DisallowHeapAllocation no_gc; + // Static variables cannot be in script contexts. + IsStaticFlag is_static_flag; for (int i = 0; i < table.used(); i++) { Context context = table.get_context(i); DCHECK(context.IsScriptContext()); int slot_index = ScopeInfo::ContextSlotIndex( context.scope_info(), name, &result->mode, &result->init_flag, - &result->maybe_assigned_flag); + &result->maybe_assigned_flag, &is_static_flag); if (slot_index >= 0) { result->context_index = i; @@ -129,10 +131,6 @@ JSGlobalProxy Context::global_proxy() { return native_context().global_proxy_object(); } -void Context::set_global_proxy(JSGlobalProxy object) { - native_context().set_global_proxy_object(object); -} - /** * Lookups a property in an object environment, taking the unscopables into * account. This is used For HasBinding spec algorithms for ObjectEnvironment. 
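The contexts-inl.h hunks above replace Context's plain length field with a combined value that packs the length and a has-extension bit into one small integer. The snippet below is a standalone sketch of that bit-packing idea using plain shifts and masks; the constants and helper names (kLengthBits, EncodeLengthAndFlag, DecodeLength, DecodeHasExtension) are invented for illustration and are not the BitField definitions used in the patch.

#include <cassert>
#include <cstdint>

// Standalone sketch: pack a context length and a has-extension bit into one
// integer small enough to stay a tagged small integer. Not V8 code.
constexpr int kLengthBits = 29;                          // low bits hold the length
constexpr int32_t kMaxLength = (1 << kLengthBits) - 1;
constexpr int32_t kHasExtensionBit = 1 << kLengthBits;   // next bit is the flag

inline int32_t EncodeLengthAndFlag(int32_t length, bool has_extension) {
  assert(length >= 0 && length <= kMaxLength);
  return length | (has_extension ? kHasExtensionBit : 0);
}

inline int32_t DecodeLength(int32_t packed) { return packed & kMaxLength; }

inline bool DecodeHasExtension(int32_t packed) {
  return (packed & kHasExtensionBit) != 0;
}

// Usage: int32_t packed = EncodeLengthAndFlag(16, /*has_extension=*/true);
//        DecodeLength(packed) yields 16; DecodeHasExtension(packed) yields true.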
@@ -175,7 +173,6 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name, Isolate* isolate = context->GetIsolate(); bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0; - bool failed_whitelist = false; *index = kNotFound; *attributes = ABSENT; *init_flag = kCreatedInitialized; @@ -287,8 +284,10 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name, VariableMode mode; InitializationFlag flag; MaybeAssignedFlag maybe_assigned_flag; - int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode, - &flag, &maybe_assigned_flag); + IsStaticFlag is_static_flag; + int slot_index = + ScopeInfo::ContextSlotIndex(scope_info, *name, &mode, &flag, + &maybe_assigned_flag, &is_static_flag); DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); if (slot_index >= 0) { if (FLAG_trace_contexts) { @@ -357,6 +356,17 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name, return extension; } } + + // Check blacklist. Names that are listed, cannot be resolved further. + Object blacklist = context->get(BLACK_LIST_INDEX); + if (blacklist.IsStringSet() && + StringSet::cast(blacklist).Has(isolate, name)) { + if (FLAG_trace_contexts) { + PrintF(" - name is blacklisted. Aborting.\n"); + } + break; + } + // Check the original context, but do not follow its context chain. Object obj = context->get(WRAPPED_CONTEXT_INDEX); if (obj.IsContext()) { @@ -366,26 +376,12 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name, attributes, init_flag, variable_mode); if (!result.is_null()) return result; } - // Check whitelist. Names that do not pass whitelist shall only resolve - // to with, script or native contexts up the context chain. - obj = context->get(WHITE_LIST_INDEX); - if (obj.IsStringSet()) { - failed_whitelist = - failed_whitelist || !StringSet::cast(obj).Has(isolate, name); - } } // 3. Prepare to continue with the previous (next outermost) context. if (context->IsNativeContext()) break; - do { - context = Handle<Context>(context->previous(), isolate); - // If we come across a whitelist context, and the name is not - // whitelisted, then only consider with, script, module or native - // contexts. - } while (failed_whitelist && !context->IsScriptContext() && - !context->IsNativeContext() && !context->IsWithContext() && - !context->IsModuleContext()); + context = Handle<Context>(context->previous(), isolate); } while (follow_context_chain); if (FLAG_trace_contexts) { diff --git a/chromium/v8/src/objects/contexts.h b/chromium/v8/src/objects/contexts.h index a7b60ff7b95..7fa988be070 100644 --- a/chromium/v8/src/objects/contexts.h +++ b/chromium/v8/src/objects/contexts.h @@ -7,6 +7,7 @@ #include "src/objects/fixed-array.h" #include "src/objects/function-kind.h" +#include "src/objects/osr-optimized-code-cache.h" #include "torque-generated/field-offsets-tq.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -37,21 +38,23 @@ enum ContextLookupFlags { // must always be allocated via Heap::AllocateContext() or // Factory::NewContext. 
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \ - V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \ - V(MAKE_ERROR_INDEX, JSFunction, make_error) \ - V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \ - V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \ - V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \ - V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \ - V(OBJECT_CREATE, JSFunction, object_create) \ - V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \ - V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \ - V(MATH_FLOOR_INDEX, JSFunction, math_floor) \ - V(MATH_POW_INDEX, JSFunction, math_pow) \ - V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \ - promise_internal_constructor) \ - V(IS_PROMISE_INDEX, JSFunction, is_promise) \ +#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \ + V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \ + V(ASYNC_MODULE_EVALUATE_INTERNAL, JSFunction, \ + async_module_evaluate_internal) \ + V(MAKE_ERROR_INDEX, JSFunction, make_error) \ + V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \ + V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \ + V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \ + V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \ + V(OBJECT_CREATE, JSFunction, object_create) \ + V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \ + V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \ + V(MATH_FLOOR_INDEX, JSFunction, math_floor) \ + V(MATH_POW_INDEX, JSFunction, math_pow) \ + V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \ + promise_internal_constructor) \ + V(IS_PROMISE_INDEX, JSFunction, is_promise) \ V(PROMISE_THEN_INDEX, JSFunction, promise_then) #define NATIVE_CONTEXT_FIELDS(V) \ @@ -104,6 +107,8 @@ enum ContextLookupFlags { V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \ call_as_constructor_delegate) \ V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \ + V(CALL_ASYNC_MODULE_FULFILLED, JSFunction, call_async_module_fulfilled) \ + V(CALL_ASYNC_MODULE_REJECTED, JSFunction, call_async_module_rejected) \ V(CALLSITE_FUNCTION_INDEX, JSFunction, callsite_function) \ V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \ @@ -159,6 +164,11 @@ enum ContextLookupFlags { V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \ intl_number_format_function) \ V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \ + V(INTL_LIST_FORMAT_FUNCTION_INDEX, JSFunction, intl_list_format_function) \ + V(INTL_PLURAL_RULES_FUNCTION_INDEX, JSFunction, intl_plural_rules_function) \ + V(INTL_RELATIVE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \ + intl_relative_time_format_function) \ + V(INTL_SEGMENTER_FUNCTION_INDEX, JSFunction, intl_segmenter_function) \ V(INTL_SEGMENT_ITERATOR_MAP_INDEX, Map, intl_segment_iterator_map) \ V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \ V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \ @@ -233,6 +243,7 @@ enum ContextLookupFlags { V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \ V(REGEXP_REPLACE_FUNCTION_INDEX, JSFunction, regexp_replace_function) \ V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \ + V(REGEXP_RESULT_INDICES_MAP_INDEX, Map, regexp_result_indices_map) \ V(REGEXP_SEARCH_FUNCTION_INDEX, JSFunction, regexp_search_function) \ V(REGEXP_SPLIT_FUNCTION_INDEX, JSFunction, regexp_split_function) \ V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \ @@ -345,6 +356,7 @@ enum 
ContextLookupFlags { V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \ V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \ V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \ + V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache) \ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) // A table of all script contexts. Every loaded top-level script with top-level @@ -443,9 +455,19 @@ class Context : public HeapObject { DECL_CAST(Context) + enum class HasExtension { kYes, kNo }; + // [length]: length of the context. V8_INLINE int length() const; - V8_INLINE void set_length(int value); + V8_INLINE int synchronized_length() const; + V8_INLINE void initialize_length_and_extension_bit( + int len, HasExtension flag = HasExtension::kNo); + + // We use the 30th bit. Otherwise if we set the 31st bit, + // the number would be pottentially bigger than an SMI. + // Any DCHECK(Smi::IsValue(...)) would fail. + using LengthField = BitField<int, 0, kSmiValueSize - 2>; + using HasExtensionField = BitField<int, kSmiValueSize - 2, 1>; // Setter and getter for elements. V8_INLINE Object get(int index) const; @@ -458,18 +480,18 @@ class Context : public HeapObject { TORQUE_GENERATED_CONTEXT_FIELDS) // TODO(v8:8989): [torque] Support marker constants. /* TODO(ishell): remove this fixedArray-like header size. */ - static const int kHeaderSize = kScopeInfoOffset; + static const int kFixedArrayLikeHeaderSize = kScopeInfoOffset; static const int kStartOfTaggedFieldsOffset = kScopeInfoOffset; /* Header size. */ \ /* TODO(ishell): use this as header size once MIN_CONTEXT_SLOTS */ \ /* is removed in favour of offset-based access to common fields. */ \ - static const int kTodoHeaderSize = kSize; + static const int kTodoHeaderSize = kHeaderSize; // Garbage collection support. V8_INLINE static constexpr int SizeFor(int length) { // TODO(ishell): switch to kTodoHeaderSize based approach once we no longer // reference common Context fields via index - return kHeaderSize + length * kTaggedSize; + return kFixedArrayLikeHeaderSize + length * kTaggedSize; } // Code Generation support. @@ -517,7 +539,7 @@ class Context : public HeapObject { // These slots hold values in debug evaluate contexts. WRAPPED_CONTEXT_INDEX = MIN_CONTEXT_SLOTS, - WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1 + BLACK_LIST_INDEX = MIN_CONTEXT_SLOTS + 1 }; // A region of native context entries containing maps for functions created @@ -558,7 +580,6 @@ class Context : public HeapObject { // Returns a JSGlobalProxy object or null. V8_EXPORT_PRIVATE JSGlobalProxy global_proxy(); - void set_global_proxy(JSGlobalProxy global); // Get the JSGlobalObject object. 
V8_EXPORT_PRIVATE JSGlobalObject global_object(); @@ -652,6 +673,8 @@ class Context : public HeapObject { #endif OBJECT_CONSTRUCTORS(Context, HeapObject); + DECL_INT_ACCESSORS(length_and_extension_flag) + DECL_SYNCHRONIZED_INT_ACCESSORS(length_and_extension_flag) }; class NativeContext : public Context { @@ -696,6 +719,8 @@ class NativeContext : public Context { void SetDeoptimizedCodeListHead(Object head); Object DeoptimizedCodeListHead(); + inline OSROptimizedCodeCache GetOSROptimizedCodeCache(); + void ResetErrorsThrown(); void IncrementErrorsThrown(); int GetErrorsThrown(); diff --git a/chromium/v8/src/objects/data-handler.h b/chromium/v8/src/objects/data-handler.h index 667b19b3d45..c9c0cf4cbcf 100644 --- a/chromium/v8/src/objects/data-handler.h +++ b/chromium/v8/src/objects/data-handler.h @@ -41,7 +41,7 @@ class DataHandler : public Struct { static const int kSizeWithData0 = kData1Offset; static const int kSizeWithData1 = kData2Offset; static const int kSizeWithData2 = kData3Offset; - static const int kSizeWithData3 = kSize; + static const int kSizeWithData3 = kHeaderSize; DECL_CAST(DataHandler) diff --git a/chromium/v8/src/objects/debug-objects-inl.h b/chromium/v8/src/objects/debug-objects-inl.h index 273f710c3b6..8189481394b 100644 --- a/chromium/v8/src/objects/debug-objects-inl.h +++ b/chromium/v8/src/objects/debug-objects-inl.h @@ -21,24 +21,16 @@ namespace internal { OBJECT_CONSTRUCTORS_IMPL(BreakPoint, Tuple2) OBJECT_CONSTRUCTORS_IMPL(BreakPointInfo, Tuple2) OBJECT_CONSTRUCTORS_IMPL(CoverageInfo, FixedArray) -OBJECT_CONSTRUCTORS_IMPL(DebugInfo, Struct) +TQ_OBJECT_CONSTRUCTORS_IMPL(DebugInfo) NEVER_READ_ONLY_SPACE_IMPL(DebugInfo) CAST_ACCESSOR(BreakPointInfo) -CAST_ACCESSOR(DebugInfo) CAST_ACCESSOR(CoverageInfo) CAST_ACCESSOR(BreakPoint) -SMI_ACCESSORS(DebugInfo, flags, kFlagsOffset) -ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoOffset) -SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsOffset) -ACCESSORS(DebugInfo, script, Object, kScriptOffset) -ACCESSORS(DebugInfo, original_bytecode_array, Object, - kOriginalBytecodeArrayOffset) -ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset) -ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsOffset) -ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset) +TQ_SMI_ACCESSORS(DebugInfo, flags) +TQ_SMI_ACCESSORS(DebugInfo, debugger_hints) BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, side_effect_state, DebugInfo::SideEffectStateBits) diff --git a/chromium/v8/src/objects/debug-objects.h b/chromium/v8/src/objects/debug-objects.h index 243caaa5268..39f42c11688 100644 --- a/chromium/v8/src/objects/debug-objects.h +++ b/chromium/v8/src/objects/debug-objects.h @@ -5,6 +5,8 @@ #ifndef V8_OBJECTS_DEBUG_OBJECTS_H_ #define V8_OBJECTS_DEBUG_OBJECTS_H_ +#include <memory> + #include "src/objects/fixed-array.h" #include "src/objects/objects.h" #include "src/objects/struct.h" @@ -20,7 +22,7 @@ class BytecodeArray; // The DebugInfo class holds additional information for a function being // debugged. -class DebugInfo : public Struct { +class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> { public: NEVER_READ_ONLY_SPACE enum Flag { @@ -38,15 +40,9 @@ class DebugInfo : public Struct { // A bitfield that lists uses of the current instance. DECL_INT_ACCESSORS(flags) - // The shared function info for the source being debugged. - DECL_ACCESSORS(shared, SharedFunctionInfo) - // Bit field containing various information collected for debugging. 
DECL_INT_ACCESSORS(debugger_hints) - // Script field from shared function info. - DECL_ACCESSORS(script, Object) - // DebugInfo can be detached from the SharedFunctionInfo iff it is empty. bool IsEmpty() const; @@ -83,17 +79,6 @@ class DebugInfo : public Struct { void ClearBreakAtEntry(); bool BreakAtEntry() const; - // The original uninstrumented bytecode array for functions with break - // points - the instrumented bytecode is held in the shared function info. - DECL_ACCESSORS(original_bytecode_array, Object) - - // The debug instrumented bytecode array for functions with break points - // - also pointed to by the shared function info. - DECL_ACCESSORS(debug_bytecode_array, Object) - - // Fixed array holding status information for each active break point. - DECL_ACCESSORS(break_points, FixedArray) - // Check if there is a break point at a source position. bool HasBreakPoint(Isolate* isolate, int source_position); // Attempt to clear a break point. Return true if successful. @@ -160,17 +145,9 @@ class DebugInfo : public Struct { // Clears all fields related to block coverage. void ClearCoverageInfo(Isolate* isolate); - DECL_ACCESSORS(coverage_info, Object) - - DECL_CAST(DebugInfo) // Dispatched behavior. DECL_PRINTER(DebugInfo) - DECL_VERIFIER(DebugInfo) - - // Layout description. - DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, - TORQUE_GENERATED_DEBUG_INFO_FIELDS) static const int kEstimatedNofBreakPointsInFunction = 4; @@ -178,7 +155,7 @@ class DebugInfo : public Struct { // Get the break point info object for a source position. Object GetBreakPointInfo(Isolate* isolate, int source_position); - OBJECT_CONSTRUCTORS(DebugInfo, Struct); + TQ_OBJECT_CONSTRUCTORS(DebugInfo) }; // The BreakPointInfo class holds information for break points set in a diff --git a/chromium/v8/src/objects/descriptor-array-inl.h b/chromium/v8/src/objects/descriptor-array-inl.h index e2805d795a0..5ea14c1e600 100644 --- a/chromium/v8/src/objects/descriptor-array-inl.h +++ b/chromium/v8/src/objects/descriptor-array-inl.h @@ -58,33 +58,35 @@ void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) { set_enum_cache(array.enum_cache()); } -int DescriptorArray::Search(Name name, int valid_descriptors) { +InternalIndex DescriptorArray::Search(Name name, int valid_descriptors) { DCHECK(name.IsUniqueName()); - return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, - nullptr); + return InternalIndex( + internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, nullptr)); } -int DescriptorArray::Search(Name name, Map map) { +InternalIndex DescriptorArray::Search(Name name, Map map) { DCHECK(name.IsUniqueName()); int number_of_own_descriptors = map.NumberOfOwnDescriptors(); - if (number_of_own_descriptors == 0) return kNotFound; + if (number_of_own_descriptors == 0) return InternalIndex::NotFound(); return Search(name, number_of_own_descriptors); } -int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) { +InternalIndex DescriptorArray::SearchWithCache(Isolate* isolate, Name name, + Map map) { DCHECK(name.IsUniqueName()); int number_of_own_descriptors = map.NumberOfOwnDescriptors(); - if (number_of_own_descriptors == 0) return kNotFound; + if (number_of_own_descriptors == 0) return InternalIndex::NotFound(); DescriptorLookupCache* cache = isolate->descriptor_lookup_cache(); int number = cache->Lookup(map, name); if (number == DescriptorLookupCache::kAbsent) { - number = Search(name, number_of_own_descriptors); + InternalIndex result = Search(name, number_of_own_descriptors); + 
number = result.is_found() ? result.as_int() : DescriptorArray::kNotFound; cache->Update(map, name, number); } - - return number; + if (number == DescriptorArray::kNotFound) return InternalIndex::NotFound(); + return InternalIndex(number); } ObjectSlot DescriptorArray::GetFirstPointerSlot() { @@ -102,26 +104,27 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) { return RawField(OffsetOfDescriptorAt(descriptor)); } -Name DescriptorArray::GetKey(int descriptor_number) const { +Name DescriptorArray::GetKey(InternalIndex descriptor_number) const { Isolate* isolate = GetIsolateForPtrCompr(*this); return GetKey(isolate, descriptor_number); } -Name DescriptorArray::GetKey(Isolate* isolate, int descriptor_number) const { - DCHECK_LT(descriptor_number, number_of_descriptors()); - int entry_offset = OffsetOfDescriptorAt(descriptor_number); +Name DescriptorArray::GetKey(Isolate* isolate, + InternalIndex descriptor_number) const { + DCHECK_LT(descriptor_number.as_int(), number_of_descriptors()); + int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int()); return Name::cast(EntryKeyField::Relaxed_Load(isolate, *this, entry_offset)); } -void DescriptorArray::SetKey(int descriptor_number, Name key) { - DCHECK_LT(descriptor_number, number_of_descriptors()); - int entry_offset = OffsetOfDescriptorAt(descriptor_number); +void DescriptorArray::SetKey(InternalIndex descriptor_number, Name key) { + DCHECK_LT(descriptor_number.as_int(), number_of_descriptors()); + int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int()); EntryKeyField::Relaxed_Store(*this, entry_offset, key); WRITE_BARRIER(*this, entry_offset + kEntryKeyOffset, key); } int DescriptorArray::GetSortedKeyIndex(int descriptor_number) { - return GetDetails(descriptor_number).pointer(); + return GetDetails(InternalIndex(descriptor_number)).pointer(); } Name DescriptorArray::GetSortedKey(int descriptor_number) { @@ -130,81 +133,83 @@ Name DescriptorArray::GetSortedKey(int descriptor_number) { } Name DescriptorArray::GetSortedKey(Isolate* isolate, int descriptor_number) { - return GetKey(isolate, GetSortedKeyIndex(descriptor_number)); + return GetKey(isolate, InternalIndex(GetSortedKeyIndex(descriptor_number))); } void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) { - PropertyDetails details = GetDetails(descriptor_number); - SetDetails(descriptor_number, details.set_pointer(pointer)); + PropertyDetails details = GetDetails(InternalIndex(descriptor_number)); + SetDetails(InternalIndex(descriptor_number), details.set_pointer(pointer)); } -Object DescriptorArray::GetStrongValue(int descriptor_number) { +Object DescriptorArray::GetStrongValue(InternalIndex descriptor_number) { Isolate* isolate = GetIsolateForPtrCompr(*this); return GetStrongValue(isolate, descriptor_number); } Object DescriptorArray::GetStrongValue(Isolate* isolate, - int descriptor_number) { + InternalIndex descriptor_number) { return GetValue(isolate, descriptor_number).cast<Object>(); } -void DescriptorArray::SetValue(int descriptor_number, MaybeObject value) { - DCHECK_LT(descriptor_number, number_of_descriptors()); - int entry_offset = OffsetOfDescriptorAt(descriptor_number); +void DescriptorArray::SetValue(InternalIndex descriptor_number, + MaybeObject value) { + DCHECK_LT(descriptor_number.as_int(), number_of_descriptors()); + int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int()); EntryValueField::Relaxed_Store(*this, entry_offset, value); WEAK_WRITE_BARRIER(*this, entry_offset + kEntryValueOffset, value); } 
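The descriptor-array hunks replace raw integer descriptor numbers and kNotFound/kMaxUInt32 sentinels with a typed InternalIndex. The sketch below is an illustrative reimplementation of that pattern, mirroring only the accessors the hunks actually call (NotFound, is_found, is_not_found, as_int, as_uint32); it is not the real src/objects/internal-index.h.

#include <cassert>
#include <cstdint>
#include <limits>

// Standalone sketch of a strongly typed array index with an explicit
// "not found" state, mirroring how the hunks above use InternalIndex.
class SketchIndex {
 public:
  explicit SketchIndex(uint32_t raw) : raw_(raw) {}

  static SketchIndex NotFound() { return SketchIndex(kNotFound); }

  bool is_found() const { return raw_ != kNotFound; }
  bool is_not_found() const { return raw_ == kNotFound; }

  int as_int() const {
    assert(is_found());
    return static_cast<int>(raw_);
  }
  uint32_t as_uint32() const {
    assert(is_found());
    return raw_;
  }

 private:
  static constexpr uint32_t kNotFound = std::numeric_limits<uint32_t>::max();
  uint32_t raw_;
};

// Usage sketch: instead of `if (entry == kMaxUInt32) continue;` a caller
// writes `if (entry.is_not_found()) continue;` and converts explicitly with
// entry.as_int() where a raw index is required.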
-MaybeObject DescriptorArray::GetValue(int descriptor_number) { +MaybeObject DescriptorArray::GetValue(InternalIndex descriptor_number) { Isolate* isolate = GetIsolateForPtrCompr(*this); return GetValue(isolate, descriptor_number); } -MaybeObject DescriptorArray::GetValue(Isolate* isolate, int descriptor_number) { - DCHECK_LT(descriptor_number, number_of_descriptors()); - int entry_offset = OffsetOfDescriptorAt(descriptor_number); +MaybeObject DescriptorArray::GetValue(Isolate* isolate, + InternalIndex descriptor_number) { + DCHECK_LT(descriptor_number.as_int(), number_of_descriptors()); + int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int()); return EntryValueField::Relaxed_Load(isolate, *this, entry_offset); } -PropertyDetails DescriptorArray::GetDetails(int descriptor_number) { - DCHECK_LT(descriptor_number, number_of_descriptors()); - int entry_offset = OffsetOfDescriptorAt(descriptor_number); +PropertyDetails DescriptorArray::GetDetails(InternalIndex descriptor_number) { + DCHECK_LT(descriptor_number.as_int(), number_of_descriptors()); + int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int()); Smi details = EntryDetailsField::Relaxed_Load(*this, entry_offset); return PropertyDetails(details); } -void DescriptorArray::SetDetails(int descriptor_number, +void DescriptorArray::SetDetails(InternalIndex descriptor_number, PropertyDetails details) { - DCHECK_LT(descriptor_number, number_of_descriptors()); - int entry_offset = OffsetOfDescriptorAt(descriptor_number); + DCHECK_LT(descriptor_number.as_int(), number_of_descriptors()); + int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int()); EntryDetailsField::Relaxed_Store(*this, entry_offset, details.AsSmi()); } -int DescriptorArray::GetFieldIndex(int descriptor_number) { +int DescriptorArray::GetFieldIndex(InternalIndex descriptor_number) { DCHECK_EQ(GetDetails(descriptor_number).location(), kField); return GetDetails(descriptor_number).field_index(); } -FieldType DescriptorArray::GetFieldType(int descriptor_number) { +FieldType DescriptorArray::GetFieldType(InternalIndex descriptor_number) { Isolate* isolate = GetIsolateForPtrCompr(*this); return GetFieldType(isolate, descriptor_number); } FieldType DescriptorArray::GetFieldType(Isolate* isolate, - int descriptor_number) { + InternalIndex descriptor_number) { DCHECK_EQ(GetDetails(descriptor_number).location(), kField); MaybeObject wrapped_type = GetValue(isolate, descriptor_number); return Map::UnwrapFieldType(wrapped_type); } -void DescriptorArray::Set(int descriptor_number, Name key, MaybeObject value, - PropertyDetails details) { +void DescriptorArray::Set(InternalIndex descriptor_number, Name key, + MaybeObject value, PropertyDetails details) { SetKey(descriptor_number, key); SetDetails(descriptor_number, details); SetValue(descriptor_number, value); } -void DescriptorArray::Set(int descriptor_number, Descriptor* desc) { +void DescriptorArray::Set(InternalIndex descriptor_number, Descriptor* desc) { Name key = *desc->GetKey(); MaybeObject value = *desc->GetValue(); Set(descriptor_number, key, value, desc->GetDetails()); @@ -215,7 +220,7 @@ void DescriptorArray::Append(Descriptor* desc) { int descriptor_number = number_of_descriptors(); DCHECK_LE(descriptor_number + 1, number_of_all_descriptors()); set_number_of_descriptors(descriptor_number + 1); - Set(descriptor_number, desc); + Set(InternalIndex(descriptor_number), desc); uint32_t hash = desc->GetKey()->Hash(); diff --git a/chromium/v8/src/objects/descriptor-array.h 
b/chromium/v8/src/objects/descriptor-array.h index 0f17cd22eac..73b94b7cfa2 100644 --- a/chromium/v8/src/objects/descriptor-array.h +++ b/chromium/v8/src/objects/descriptor-array.h @@ -6,6 +6,8 @@ #define V8_OBJECTS_DESCRIPTOR_ARRAY_H_ #include "src/objects/fixed-array.h" +// TODO(jkummerow): Consider forward-declaring instead. +#include "src/objects/internal-index.h" #include "src/objects/objects.h" #include "src/objects/struct.h" #include "src/utils/utils.h" @@ -62,27 +64,29 @@ class DescriptorArray : public HeapObject { Handle<FixedArray> indices); // Accessors for fetching instance descriptor at descriptor number. - inline Name GetKey(int descriptor_number) const; - inline Name GetKey(Isolate* isolate, int descriptor_number) const; - inline Object GetStrongValue(int descriptor_number); - inline Object GetStrongValue(Isolate* isolate, int descriptor_number); - inline MaybeObject GetValue(int descriptor_number); - inline MaybeObject GetValue(Isolate* isolate, int descriptor_number); - inline PropertyDetails GetDetails(int descriptor_number); - inline int GetFieldIndex(int descriptor_number); - inline FieldType GetFieldType(int descriptor_number); - inline FieldType GetFieldType(Isolate* isolate, int descriptor_number); + inline Name GetKey(InternalIndex descriptor_number) const; + inline Name GetKey(Isolate* isolate, InternalIndex descriptor_number) const; + inline Object GetStrongValue(InternalIndex descriptor_number); + inline Object GetStrongValue(Isolate* isolate, + InternalIndex descriptor_number); + inline MaybeObject GetValue(InternalIndex descriptor_number); + inline MaybeObject GetValue(Isolate* isolate, + InternalIndex descriptor_number); + inline PropertyDetails GetDetails(InternalIndex descriptor_number); + inline int GetFieldIndex(InternalIndex descriptor_number); + inline FieldType GetFieldType(InternalIndex descriptor_number); + inline FieldType GetFieldType(Isolate* isolate, + InternalIndex descriptor_number); inline Name GetSortedKey(int descriptor_number); inline Name GetSortedKey(Isolate* isolate, int descriptor_number); inline int GetSortedKeyIndex(int descriptor_number); - inline void SetSortedKey(int pointer, int descriptor_number); // Accessor for complete descriptor. - inline void Set(int descriptor_number, Descriptor* desc); - inline void Set(int descriptor_number, Name key, MaybeObject value, + inline void Set(InternalIndex descriptor_number, Descriptor* desc); + inline void Set(InternalIndex descriptor_number, Name key, MaybeObject value, PropertyDetails details); - void Replace(int descriptor_number, Descriptor* descriptor); + void Replace(InternalIndex descriptor_number, Descriptor* descriptor); // Generalizes constness, representation and field type of all field // descriptors. @@ -109,20 +113,20 @@ class DescriptorArray : public HeapObject { void Sort(); // Search the instance descriptors for given name. - V8_INLINE int Search(Name name, int number_of_own_descriptors); - V8_INLINE int Search(Name name, Map map); + V8_INLINE InternalIndex Search(Name name, int number_of_own_descriptors); + V8_INLINE InternalIndex Search(Name name, Map map); // As the above, but uses DescriptorLookupCache and updates it when // necessary. - V8_INLINE int SearchWithCache(Isolate* isolate, Name name, Map map); + V8_INLINE InternalIndex SearchWithCache(Isolate* isolate, Name name, Map map); bool IsEqualUpTo(DescriptorArray desc, int nof_descriptors); // Allocates a DescriptorArray, but returns the singleton // empty descriptor array object if number_of_descriptors is 0. 
- V8_EXPORT_PRIVATE static Handle<DescriptorArray> Allocate( - Isolate* isolate, int nof_descriptors, int slack, - AllocationType allocation = AllocationType::kYoung); + V8_EXPORT_PRIVATE static Handle<DescriptorArray> Allocate(Isolate* isolate, + int nof_descriptors, + int slack); void Initialize(EnumCache enum_cache, HeapObject undefined_value, int nof_descriptors, int slack); @@ -176,7 +180,7 @@ class DescriptorArray : public HeapObject { // Print all the descriptors. void PrintDescriptors(std::ostream& os); - void PrintDescriptorDetails(std::ostream& os, int descriptor, + void PrintDescriptorDetails(std::ostream& os, InternalIndex descriptor, PropertyDetails::PrintMode mode); DECL_PRINTER(DescriptorArray) @@ -210,13 +214,16 @@ class DescriptorArray : public HeapObject { private: DECL_INT16_ACCESSORS(filler16bits) - inline void SetKey(int descriptor_number, Name key); - inline void SetValue(int descriptor_number, MaybeObject value); - inline void SetDetails(int descriptor_number, PropertyDetails details); + inline void SetKey(InternalIndex descriptor_number, Name key); + inline void SetValue(InternalIndex descriptor_number, MaybeObject value); + inline void SetDetails(InternalIndex descriptor_number, + PropertyDetails details); // Transfer a complete descriptor from the src descriptor array to this // descriptor array. - void CopyFrom(int index, DescriptorArray src); + void CopyFrom(InternalIndex index, DescriptorArray src); + + inline void SetSortedKey(int pointer, int descriptor_number); // Swap first and second descriptor. inline void SwapSortedKeys(int first, int second); diff --git a/chromium/v8/src/objects/elements.cc b/chromium/v8/src/objects/elements.cc index 6e5648d2f4d..686f1a9b1ae 100644 --- a/chromium/v8/src/objects/elements.cc +++ b/chromium/v8/src/objects/elements.cc @@ -8,6 +8,7 @@ #include "src/execution/arguments.h" #include "src/execution/frames.h" #include "src/execution/isolate-inl.h" +#include "src/execution/protectors-inl.h" #include "src/heap/factory.h" #include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize. #include "src/heap/heap-write-barrier-inl.h" @@ -509,11 +510,11 @@ Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate, Handle<JSObject> receiver, // that take an entry (instead of an index) as an argument. class InternalElementsAccessor : public ElementsAccessor { public: - uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder, - FixedArrayBase backing_store, - uint32_t index) override = 0; + InternalIndex GetEntryForIndex(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index) override = 0; - PropertyDetails GetDetails(JSObject holder, uint32_t entry) override = 0; + PropertyDetails GetDetails(JSObject holder, InternalIndex entry) override = 0; }; // Base class for element handler implementations. 
Contains the @@ -594,16 +595,17 @@ class ElementsAccessorBase : public InternalElementsAccessor { FixedArrayBase backing_store, PropertyFilter filter = ALL_PROPERTIES) { return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index, - filter) != kMaxUInt32; + filter) + .is_found(); } - bool HasEntry(JSObject holder, uint32_t entry) final { + bool HasEntry(JSObject holder, InternalIndex entry) final { return Subclass::HasEntryImpl(holder.GetIsolate(), holder.elements(), entry); } static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store, - uint32_t entry) { + InternalIndex entry) { UNIMPLEMENTED(); } @@ -615,33 +617,33 @@ class ElementsAccessorBase : public InternalElementsAccessor { return false; } - Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final { + Handle<Object> Get(Handle<JSObject> holder, InternalIndex entry) final { return Subclass::GetInternalImpl(holder, entry); } static Handle<Object> GetInternalImpl(Handle<JSObject> holder, - uint32_t entry) { + InternalIndex entry) { return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry); } static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store, - uint32_t entry) { + InternalIndex entry) { uint32_t index = GetIndexForEntryImpl(backing_store, entry); return handle(BackingStore::cast(backing_store).get(index), isolate); } - void Set(Handle<JSObject> holder, uint32_t entry, Object value) final { + void Set(Handle<JSObject> holder, InternalIndex entry, Object value) final { Subclass::SetImpl(holder, entry, value); } void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store, - uint32_t entry, Handle<Object> value, + InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) final { Subclass::ReconfigureImpl(object, store, entry, value, attributes); } static void ReconfigureImpl(Handle<JSObject> object, - Handle<FixedArrayBase> store, uint32_t entry, + Handle<FixedArrayBase> store, InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) { UNREACHABLE(); @@ -917,7 +919,7 @@ class ElementsAccessorBase : public InternalElementsAccessor { return true; } - void Delete(Handle<JSObject> obj, uint32_t entry) final { + void Delete(Handle<JSObject> obj, InternalIndex entry) final { Subclass::DeleteImpl(obj, entry); } @@ -1024,9 +1026,9 @@ class ElementsAccessorBase : public InternalElementsAccessor { if (!key->ToUint32(&index)) continue; DCHECK_EQ(object->GetElementsKind(), original_elements_kind); - uint32_t entry = Subclass::GetEntryForIndexImpl( + InternalIndex entry = Subclass::GetEntryForIndexImpl( isolate, *object, object->elements(), index, filter); - if (entry == kMaxUInt32) continue; + if (entry.is_not_found()) continue; PropertyDetails details = Subclass::GetDetailsImpl(*object, entry); Handle<Object> value; @@ -1053,9 +1055,9 @@ class ElementsAccessorBase : public InternalElementsAccessor { InternalElementsAccessor* accessor = reinterpret_cast<InternalElementsAccessor*>( object->GetElementsAccessor()); - uint32_t entry = accessor->GetEntryForIndex(isolate, *object, - object->elements(), index); - if (entry == kMaxUInt32) continue; + InternalIndex entry = accessor->GetEntryForIndex( + isolate, *object, object->elements(), index); + if (entry.is_not_found()) continue; PropertyDetails details = accessor->GetDetails(*object, entry); if (!details.IsEnumerable()) continue; } @@ -1280,43 +1282,44 @@ class ElementsAccessorBase : public InternalElementsAccessor { void Reverse(JSObject receiver) final { 
Subclass::ReverseImpl(receiver); } static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store, - uint32_t entry) { - return entry; + InternalIndex entry) { + return entry.as_uint32(); } - static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, - FixedArrayBase backing_store, - uint32_t index, PropertyFilter filter) { + static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index, + PropertyFilter filter) { DCHECK(IsFastElementsKind(kind()) || IsAnyNonextensibleElementsKind(kind())); uint32_t length = Subclass::GetMaxIndex(holder, backing_store); if (IsHoleyElementsKindForRead(kind())) { return index < length && !BackingStore::cast(backing_store) .is_the_hole(isolate, index) - ? index - : kMaxUInt32; + ? InternalIndex(index) + : InternalIndex::NotFound(); } else { - return index < length ? index : kMaxUInt32; + return index < length ? InternalIndex(index) : InternalIndex::NotFound(); } } - uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder, - FixedArrayBase backing_store, - uint32_t index) final { + InternalIndex GetEntryForIndex(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index) final { return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index, ALL_PROPERTIES); } static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store, - uint32_t entry) { + InternalIndex entry) { return PropertyDetails(kData, NONE, PropertyCellType::kNoCell); } - static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) { return PropertyDetails(kData, NONE, PropertyCellType::kNoCell); } - PropertyDetails GetDetails(JSObject holder, uint32_t entry) final { + PropertyDetails GetDetails(JSObject holder, InternalIndex entry) final { return Subclass::GetDetailsImpl(holder, entry); } @@ -1419,10 +1422,11 @@ class DictionaryElementsAccessor UNREACHABLE(); } - static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { + static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) { Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements()), obj->GetIsolate()); - dict = NumberDictionary::DeleteEntry(obj->GetIsolate(), dict, entry); + dict = + NumberDictionary::DeleteEntry(obj->GetIsolate(), dict, entry.as_int()); obj->set_elements(*dict); } @@ -1441,38 +1445,38 @@ class DictionaryElementsAccessor return false; } - static Object GetRaw(FixedArrayBase store, uint32_t entry) { + static Object GetRaw(FixedArrayBase store, InternalIndex entry) { NumberDictionary backing_store = NumberDictionary::cast(store); - return backing_store.ValueAt(entry); + return backing_store.ValueAt(entry.as_int()); } static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store, - uint32_t entry) { + InternalIndex entry) { return handle(GetRaw(backing_store, entry), isolate); } - static inline void SetImpl(Handle<JSObject> holder, uint32_t entry, + static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry, Object value) { SetImpl(holder->elements(), entry, value); } - static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry, Object value) { - NumberDictionary::cast(backing_store).ValueAtPut(entry, value); + NumberDictionary::cast(backing_store).ValueAtPut(entry.as_int(), value); } static void ReconfigureImpl(Handle<JSObject> object, - Handle<FixedArrayBase> store, 
uint32_t entry, + Handle<FixedArrayBase> store, InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) { NumberDictionary dictionary = NumberDictionary::cast(*store); if (attributes != NONE) object->RequireSlowElements(dictionary); - dictionary.ValueAtPut(entry, *value); - PropertyDetails details = dictionary.DetailsAt(entry); + dictionary.ValueAtPut(entry.as_int(), *value); + PropertyDetails details = dictionary.DetailsAt(entry.as_int()); details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell, details.dictionary_index()); - dictionary.DetailsAtPut(object->GetIsolate(), entry, details); + dictionary.DetailsAtPut(object->GetIsolate(), entry.as_int(), details); } static void AddImpl(Handle<JSObject> object, uint32_t index, @@ -1493,43 +1497,47 @@ class DictionaryElementsAccessor } static bool HasEntryImpl(Isolate* isolate, FixedArrayBase store, - uint32_t entry) { + InternalIndex entry) { DisallowHeapAllocation no_gc; NumberDictionary dict = NumberDictionary::cast(store); - Object index = dict.KeyAt(entry); + Object index = dict.KeyAt(entry.as_int()); return !index.IsTheHole(isolate); } - static uint32_t GetIndexForEntryImpl(FixedArrayBase store, uint32_t entry) { + static uint32_t GetIndexForEntryImpl(FixedArrayBase store, + InternalIndex entry) { DisallowHeapAllocation no_gc; NumberDictionary dict = NumberDictionary::cast(store); uint32_t result = 0; - CHECK(dict.KeyAt(entry).ToArrayIndex(&result)); + CHECK(dict.KeyAt(entry.as_int()).ToArrayIndex(&result)); return result; } - static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, - FixedArrayBase store, uint32_t index, - PropertyFilter filter) { + static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase store, + uint32_t index, + PropertyFilter filter) { DisallowHeapAllocation no_gc; NumberDictionary dictionary = NumberDictionary::cast(store); int entry = dictionary.FindEntry(isolate, index); - if (entry == NumberDictionary::kNotFound) return kMaxUInt32; + if (entry == NumberDictionary::kNotFound) { + return InternalIndex::NotFound(); + } if (filter != ALL_PROPERTIES) { PropertyDetails details = dictionary.DetailsAt(entry); PropertyAttributes attr = details.attributes(); - if ((attr & filter) != 0) return kMaxUInt32; + if ((attr & filter) != 0) return InternalIndex::NotFound(); } - return static_cast<uint32_t>(entry); + return InternalIndex(entry); } - static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) { return GetDetailsImpl(holder.elements(), entry); } static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store, - uint32_t entry) { - return NumberDictionary::cast(backing_store).DetailsAt(entry); + InternalIndex entry) { + return NumberDictionary::cast(backing_store).DetailsAt(entry.as_int()); } static uint32_t FilterKey(Handle<NumberDictionary> dictionary, int entry, @@ -1688,7 +1696,8 @@ class DictionaryElementsAccessor continue; } - PropertyDetails details = GetDetailsImpl(*dictionary, entry); + PropertyDetails details = + GetDetailsImpl(*dictionary, InternalIndex(entry)); switch (details.kind()) { case kData: { Object element_k = dictionary->ValueAt(entry); @@ -1757,7 +1766,8 @@ class DictionaryElementsAccessor int entry = dictionary->FindEntry(isolate, k); if (entry == NumberDictionary::kNotFound) continue; - PropertyDetails details = GetDetailsImpl(*dictionary, entry); + PropertyDetails details = + GetDetailsImpl(*dictionary, 
InternalIndex(entry)); switch (details.kind()) { case kData: { Object element_k = dictionary->ValueAt(entry); @@ -1863,7 +1873,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { if (BackingStore::cast(*store).is_the_hole(isolate, i)) continue; } max_number_key = i; - Handle<Object> value = Subclass::GetImpl(isolate, *store, i); + Handle<Object> value = + Subclass::GetImpl(isolate, *store, InternalIndex(i)); dictionary = NumberDictionary::Add(isolate, dictionary, i, value, details); j++; @@ -1971,11 +1982,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { } static void ReconfigureImpl(Handle<JSObject> object, - Handle<FixedArrayBase> store, uint32_t entry, + Handle<FixedArrayBase> store, InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) { Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object); - entry = dictionary->FindEntry(object->GetIsolate(), entry); + entry = InternalIndex( + dictionary->FindEntry(object->GetIsolate(), entry.as_uint32())); DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry, value, attributes); } @@ -2000,10 +2012,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { JSObject::EnsureWritableFastElements(object); } } - Subclass::SetImpl(object, index, *value); + Subclass::SetImpl(object, InternalIndex(index), *value); } - static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { + static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) { ElementsKind kind = KindTraits::Kind; if (IsFastPackedElementsKind(kind) || kind == PACKED_NONEXTENSIBLE_ELEMENTS) { @@ -2013,12 +2025,14 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { IsNonextensibleElementsKind(kind)) { JSObject::EnsureWritableFastElements(obj); } - DeleteCommon(obj, entry, handle(obj->elements(), obj->GetIsolate())); + DeleteCommon(obj, entry.as_uint32(), + handle(obj->elements(), obj->GetIsolate())); } static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store, - uint32_t entry) { - return !BackingStore::cast(backing_store).is_the_hole(isolate, entry); + InternalIndex entry) { + return !BackingStore::cast(backing_store) + .is_the_hole(isolate, entry.as_int()); } static uint32_t NumberOfElementsImpl(JSObject receiver, @@ -2028,7 +2042,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { Isolate* isolate = receiver.GetIsolate(); uint32_t count = 0; for (uint32_t i = 0; i < max_index; i++) { - if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++; + if (Subclass::HasEntryImpl(isolate, backing_store, InternalIndex(i))) { + count++; + } } return count; } @@ -2041,9 +2057,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements); for (uint32_t i = 0; i < length; i++) { if (IsFastPackedElementsKind(KindTraits::Kind) || - HasEntryImpl(isolate, *elements, i)) { + HasEntryImpl(isolate, *elements, InternalIndex(i))) { RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey( - Subclass::GetImpl(isolate, *elements, i), convert)); + Subclass::GetImpl(isolate, *elements, InternalIndex(i)), convert)); } } return ExceptionStatus::kSuccess; @@ -2157,7 +2173,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { DCHECK_LE(end, Subclass::GetCapacityImpl(*receiver, receiver->elements())); for (uint32_t index = start; index < end; ++index) { - 
Subclass::SetImpl(receiver, index, *obj_value); + Subclass::SetImpl(receiver, InternalIndex(index), *obj_value); } return *receiver; } @@ -2311,9 +2327,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { Handle<FixedArray> result = isolate->factory()->NewFixedArray(length); Handle<FixedArrayBase> elements(object->elements(), isolate); for (uint32_t i = 0; i < length; i++) { - if (!Subclass::HasElementImpl(isolate, *object, i, *elements)) continue; + InternalIndex entry(i); + if (!Subclass::HasEntryImpl(isolate, *elements, entry)) continue; Handle<Object> value; - value = Subclass::GetImpl(isolate, *elements, i); + value = Subclass::GetImpl(isolate, *elements, entry); if (value->IsName()) { value = isolate->factory()->InternalizeName(Handle<Name>::cast(value)); } @@ -2336,7 +2353,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { int new_length = length - 1; int remove_index = remove_position == AT_START ? 0 : new_length; Handle<Object> result = - Subclass::GetImpl(isolate, *backing_store, remove_index); + Subclass::GetImpl(isolate, *backing_store, InternalIndex(remove_index)); if (remove_position == AT_START) { Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length, 0, 0); @@ -2396,7 +2413,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { for (uint32_t i = 0; i < copy_size; i++) { Object argument = (*args)[src_index + i]; DCHECK(!argument.IsTheHole()); - Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode); + Subclass::SetImpl(raw_backing_store, InternalIndex(dst_index + i), + argument, mode); } } }; @@ -2405,22 +2423,22 @@ template <typename Subclass, typename KindTraits> class FastSmiOrObjectElementsAccessor : public FastElementsAccessor<Subclass, KindTraits> { public: - static inline void SetImpl(Handle<JSObject> holder, uint32_t entry, + static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry, Object value) { SetImpl(holder->elements(), entry, value); } - static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry, Object value) { - FixedArray::cast(backing_store).set(entry, value); + FixedArray::cast(backing_store).set(entry.as_int(), value); } - static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry, Object value, WriteBarrierMode mode) { - FixedArray::cast(backing_store).set(entry, value, mode); + FixedArray::cast(backing_store).set(entry.as_int(), value, mode); } - static Object GetRaw(FixedArray backing_store, uint32_t entry) { + static Object GetRaw(FixedArray backing_store, InternalIndex entry) { uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry); return backing_store.get(index); } @@ -2488,8 +2506,9 @@ class FastSmiOrObjectElementsAccessor isolate); uint32_t length = elements->length(); for (uint32_t index = 0; index < length; ++index) { - if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue; - Handle<Object> value = Subclass::GetImpl(isolate, *elements, index); + InternalIndex entry(index); + if (!Subclass::HasEntryImpl(isolate, *elements, entry)) continue; + Handle<Object> value = Subclass::GetImpl(isolate, *elements, entry); value = MakeEntryPair(isolate, index, value); values_or_entries->set(count++, *value); } @@ -2499,8 +2518,9 @@ class FastSmiOrObjectElementsAccessor FixedArray elements = 
FixedArray::cast(object->elements()); uint32_t length = elements.length(); for (uint32_t index = 0; index < length; ++index) { - if (!Subclass::HasEntryImpl(isolate, elements, index)) continue; - Object value = GetRaw(elements, index); + InternalIndex entry(index); + if (!Subclass::HasEntryImpl(isolate, elements, entry)) continue; + Object value = GetRaw(elements, entry); values_or_entries->set(count++, value); } } @@ -2641,7 +2661,7 @@ class FastSealedObjectElementsAccessor UNREACHABLE(); } - static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { + static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) { UNREACHABLE(); } @@ -2733,17 +2753,17 @@ class FastFrozenObjectElementsAccessor public: using BackingStore = typename KindTraits::BackingStore; - static inline void SetImpl(Handle<JSObject> holder, uint32_t entry, + static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry, Object value) { UNREACHABLE(); } - static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry, Object value) { UNREACHABLE(); } - static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry, Object value, WriteBarrierMode mode) { UNREACHABLE(); } @@ -2753,7 +2773,7 @@ class FastFrozenObjectElementsAccessor UNREACHABLE(); } - static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { + static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) { UNREACHABLE(); } @@ -2787,7 +2807,7 @@ class FastFrozenObjectElementsAccessor } static void ReconfigureImpl(Handle<JSObject> object, - Handle<FixedArrayBase> store, uint32_t entry, + Handle<FixedArrayBase> store, InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) { UNREACHABLE(); @@ -2816,24 +2836,24 @@ class FastDoubleElementsAccessor : public FastElementsAccessor<Subclass, KindTraits> { public: static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store, - uint32_t entry) { - return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry, - isolate); + InternalIndex entry) { + return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), + entry.as_int(), isolate); } - static inline void SetImpl(Handle<JSObject> holder, uint32_t entry, + static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry, Object value) { SetImpl(holder->elements(), entry, value); } - static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry, Object value) { - FixedDoubleArray::cast(backing_store).set(entry, value.Number()); + FixedDoubleArray::cast(backing_store).set(entry.as_int(), value.Number()); } - static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry, + static inline void SetImpl(FixedArrayBase backing_store, InternalIndex entry, Object value, WriteBarrierMode mode) { - FixedDoubleArray::cast(backing_store).set(entry, value.Number()); + FixedDoubleArray::cast(backing_store).set(entry.as_int(), value.Number()); } static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from, @@ -2890,8 +2910,9 @@ class FastDoubleElementsAccessor int count = 0; uint32_t length = elements->length(); for (uint32_t index = 0; index < length; ++index) { - if (!Subclass::HasEntryImpl(isolate, *elements, index)) continue; - Handle<Object> value = Subclass::GetImpl(isolate, *elements, index); + InternalIndex 
entry(index); + if (!Subclass::HasEntryImpl(isolate, *elements, entry)) continue; + Handle<Object> value = Subclass::GetImpl(isolate, *elements, entry); if (get_entries) { value = MakeEntryPair(isolate, index, value); } @@ -2988,11 +3009,12 @@ class TypedElementsAccessor // Conversion of scalar value to handlified object. static Handle<Object> ToHandle(Isolate* isolate, ElementType value); - static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) { + static void SetImpl(Handle<JSObject> holder, InternalIndex entry, + Object value) { Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder); - DCHECK_LE(entry, typed_array->length()); - SetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry, - FromObject(value)); + DCHECK_LE(entry.raw_value(), typed_array->length()); + SetImpl(static_cast<ElementType*>(typed_array->DataPtr()), + entry.raw_value(), FromObject(value)); } static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) { @@ -3019,18 +3041,18 @@ class TypedElementsAccessor } static Handle<Object> GetInternalImpl(Handle<JSObject> holder, - uint32_t entry) { + InternalIndex entry) { Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder); Isolate* isolate = typed_array->GetIsolate(); - DCHECK_LE(entry, typed_array->length()); + DCHECK_LE(entry.raw_value(), typed_array->length()); DCHECK(!typed_array->WasDetached()); - ElementType elem = - GetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry); + ElementType elem = GetImpl( + static_cast<ElementType*>(typed_array->DataPtr()), entry.raw_value()); return ToHandle(isolate, elem); } static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store, - uint32_t entry) { + InternalIndex entry) { UNREACHABLE(); } @@ -3059,12 +3081,12 @@ class TypedElementsAccessor return result; } - static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) { return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell); } static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store, - uint32_t entry) { + InternalIndex entry) { return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell); } @@ -3085,21 +3107,22 @@ class TypedElementsAccessor UNREACHABLE(); } - static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { + static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) { UNREACHABLE(); } static uint32_t GetIndexForEntryImpl(FixedArrayBase backing_store, - uint32_t entry) { - return entry; + InternalIndex entry) { + return entry.as_uint32(); } - static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, - FixedArrayBase backing_store, - uint32_t index, PropertyFilter filter) { + static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index, + PropertyFilter filter) { return index < AccessorClass::GetCapacityImpl(holder, backing_store) - ? index - : kMaxUInt32; + ? 
InternalIndex(index) + : InternalIndex::NotFound(); } static uint32_t GetCapacityImpl(JSObject holder, @@ -3122,7 +3145,8 @@ class TypedElementsAccessor Handle<FixedArrayBase> elements(receiver->elements(), isolate); uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements); for (uint32_t i = 0; i < length; i++) { - Handle<Object> value = AccessorClass::GetInternalImpl(receiver, i); + Handle<Object> value = + AccessorClass::GetInternalImpl(receiver, InternalIndex(i)); RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(value, convert)); } return ExceptionStatus::kSuccess; @@ -3137,7 +3161,8 @@ class TypedElementsAccessor Handle<FixedArrayBase> elements(object->elements(), isolate); uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements); for (uint32_t index = 0; index < length; ++index) { - Handle<Object> value = AccessorClass::GetInternalImpl(object, index); + Handle<Object> value = + AccessorClass::GetInternalImpl(object, InternalIndex(index)); if (get_entries) { value = MakeEntryPair(isolate, index, value); } @@ -3361,7 +3386,8 @@ class TypedElementsAccessor Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object); Handle<FixedArray> result = isolate->factory()->NewFixedArray(length); for (uint32_t i = 0; i < length; i++) { - Handle<Object> value = AccessorClass::GetInternalImpl(typed_array, i); + Handle<Object> value = + AccessorClass::GetInternalImpl(typed_array, InternalIndex(i)); result->set(i, *value); } return result; @@ -3499,7 +3525,7 @@ class TypedElementsAccessor return true; } - return !isolate->IsNoElementsProtectorIntact(context); + return !Protectors::IsNoElementsIntact(isolate); } static bool TryCopyElementsFastNumber(Context context, JSArray source, @@ -3539,18 +3565,18 @@ class TypedElementsAccessor if (kind == PACKED_SMI_ELEMENTS) { FixedArray source_store = FixedArray::cast(source.elements()); - for (uint32_t i = 0; i < length; i++) { - Object elem = source_store.get(i); + for (size_t i = 0; i < length; i++) { + Object elem = source_store.get(static_cast<int>(i)); SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem))); } return true; } else if (kind == HOLEY_SMI_ELEMENTS) { FixedArray source_store = FixedArray::cast(source.elements()); - for (uint32_t i = 0; i < length; i++) { - if (source_store.is_the_hole(isolate, i)) { + for (size_t i = 0; i < length; i++) { + if (source_store.is_the_hole(isolate, static_cast<int>(i))) { SetImpl(dest_data, i, FromObject(undefined)); } else { - Object elem = source_store.get(i); + Object elem = source_store.get(static_cast<int>(i)); SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem))); } } @@ -3560,20 +3586,20 @@ class TypedElementsAccessor // unboxing the double here by using get_scalar. FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements()); - for (uint32_t i = 0; i < length; i++) { + for (size_t i = 0; i < length; i++) { // Use the from_double conversion for this specific TypedArray type, // rather than relying on C++ to convert elem. 
- double elem = source_store.get_scalar(i); + double elem = source_store.get_scalar(static_cast<int>(i)); SetImpl(dest_data, i, FromScalar(elem)); } return true; } else if (kind == HOLEY_DOUBLE_ELEMENTS) { FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements()); - for (uint32_t i = 0; i < length; i++) { - if (source_store.is_the_hole(i)) { + for (size_t i = 0; i < length; i++) { + if (source_store.is_the_hole(static_cast<int>(i))) { SetImpl(dest_data, i, FromObject(undefined)); } else { - double elem = source_store.get_scalar(i); + double elem = source_store.get_scalar(static_cast<int>(i)); SetImpl(dest_data, i, FromScalar(elem)); } } @@ -3588,7 +3614,8 @@ class TypedElementsAccessor Isolate* isolate = destination->GetIsolate(); for (size_t i = 0; i < length; i++) { Handle<Object> elem; - if (i <= kMaxUInt32) { + // TODO(4153): This if-branch will subsume its else-branch. + if (i <= JSArray::kMaxArrayIndex) { LookupIterator it(isolate, source, static_cast<uint32_t>(i)); ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, Object::GetProperty(&it)); @@ -3619,8 +3646,7 @@ class TypedElementsAccessor } // The spec says we store the length, then get each element, so we don't // need to check changes to length. - // TODO(bmeurer, v8:4153): Remove this static_cast. - SetImpl(destination, static_cast<uint32_t>(offset + i), *elem); + SetImpl(destination, InternalIndex(offset + i), *elem); } return *isolate->factory()->undefined_value(); } @@ -3893,14 +3919,14 @@ class SloppyArgumentsElementsAccessor } static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase parameters, - uint32_t entry) { + InternalIndex entry) { Handle<SloppyArgumentsElements> elements( SloppyArgumentsElements::cast(parameters), isolate); uint32_t length = elements->parameter_map_length(); - if (entry < length) { + if (entry.as_uint32() < length) { // Read context mapped entry. DisallowHeapAllocation no_gc; - Object probe = elements->get_mapped_entry(entry); + Object probe = elements->get_mapped_entry(entry.as_uint32()); DCHECK(!probe.IsTheHole(isolate)); Context context = elements->context(); int context_entry = Smi::ToInt(probe); @@ -3909,7 +3935,7 @@ class SloppyArgumentsElementsAccessor } else { // Entry is not context mapped, defer to the arguments. Handle<Object> result = ArgumentsAccessor::GetImpl( - isolate, elements->arguments(), entry - length); + isolate, elements->arguments(), entry.adjust_down(length)); return Subclass::ConvertArgumentsStoreResult(isolate, elements, result); } } @@ -3924,19 +3950,19 @@ class SloppyArgumentsElementsAccessor UNREACHABLE(); } - static inline void SetImpl(Handle<JSObject> holder, uint32_t entry, + static inline void SetImpl(Handle<JSObject> holder, InternalIndex entry, Object value) { SetImpl(holder->elements(), entry, value); } - static inline void SetImpl(FixedArrayBase store, uint32_t entry, + static inline void SetImpl(FixedArrayBase store, InternalIndex entry, Object value) { SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store); uint32_t length = elements.parameter_map_length(); - if (entry < length) { + if (entry.as_uint32() < length) { // Store context mapped entry. DisallowHeapAllocation no_gc; - Object probe = elements.get_mapped_entry(entry); + Object probe = elements.get_mapped_entry(entry.as_uint32()); DCHECK(!probe.IsTheHole()); Context context = elements.context(); int context_entry = Smi::ToInt(probe); @@ -3945,7 +3971,8 @@ class SloppyArgumentsElementsAccessor } else { // Entry is not context mapped defer to arguments. 
FixedArray arguments = elements.arguments(); - Object current = ArgumentsAccessor::GetRaw(arguments, entry - length); + Object current = + ArgumentsAccessor::GetRaw(arguments, entry.adjust_down(length)); if (current.IsAliasedArgumentsEntry()) { AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(current); Context context = elements.context(); @@ -3953,7 +3980,7 @@ class SloppyArgumentsElementsAccessor DCHECK(!context.get(context_entry).IsTheHole()); context.set(context_entry, value); } else { - ArgumentsAccessor::SetImpl(arguments, entry - length, value); + ArgumentsAccessor::SetImpl(arguments, entry.adjust_down(length), value); } } } @@ -3989,8 +4016,8 @@ class SloppyArgumentsElementsAccessor FixedArrayBase arguments = elements.arguments(); uint32_t nof_elements = 0; uint32_t length = elements.parameter_map_length(); - for (uint32_t entry = 0; entry < length; entry++) { - if (HasParameterMapArg(isolate, elements, entry)) nof_elements++; + for (uint32_t index = 0; index < length; index++) { + if (HasParameterMapArg(isolate, elements, index)) nof_elements++; } return nof_elements + ArgumentsAccessor::NumberOfElementsImpl(receiver, arguments); @@ -4002,7 +4029,8 @@ class SloppyArgumentsElementsAccessor Isolate* isolate = accumulator->isolate(); Handle<FixedArrayBase> elements(receiver->elements(), isolate); uint32_t length = GetCapacityImpl(*receiver, *elements); - for (uint32_t entry = 0; entry < length; entry++) { + for (uint32_t index = 0; index < length; index++) { + InternalIndex entry(index); if (!HasEntryImpl(isolate, *elements, entry)) continue; Handle<Object> value = GetImpl(isolate, *elements, entry); RETURN_FAILURE_IF_NOT_SUCCESSFUL(accumulator->AddKey(value, convert)); @@ -4011,15 +4039,16 @@ class SloppyArgumentsElementsAccessor } static bool HasEntryImpl(Isolate* isolate, FixedArrayBase parameters, - uint32_t entry) { + InternalIndex entry) { SloppyArgumentsElements elements = SloppyArgumentsElements::cast(parameters); uint32_t length = elements.parameter_map_length(); - if (entry < length) { - return HasParameterMapArg(isolate, elements, entry); + if (entry.as_uint32() < length) { + return HasParameterMapArg(isolate, elements, entry.as_uint32()); } FixedArrayBase arguments = elements.arguments(); - return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length); + return ArgumentsAccessor::HasEntryImpl(isolate, arguments, + entry.adjust_down(length)); } static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) { @@ -4030,39 +4059,45 @@ class SloppyArgumentsElementsAccessor } static uint32_t GetIndexForEntryImpl(FixedArrayBase parameters, - uint32_t entry) { + InternalIndex entry) { SloppyArgumentsElements elements = SloppyArgumentsElements::cast(parameters); uint32_t length = elements.parameter_map_length(); - if (entry < length) return entry; + uint32_t index = entry.as_uint32(); + if (index < length) return index; FixedArray arguments = elements.arguments(); - return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length); + return ArgumentsAccessor::GetIndexForEntryImpl(arguments, + entry.adjust_down(length)); } - static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, - FixedArrayBase parameters, - uint32_t index, PropertyFilter filter) { + static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase parameters, + uint32_t index, + PropertyFilter filter) { SloppyArgumentsElements elements = SloppyArgumentsElements::cast(parameters); - if (HasParameterMapArg(isolate, elements, 
index)) return index; + if (HasParameterMapArg(isolate, elements, index)) { + return InternalIndex(index); + } FixedArray arguments = elements.arguments(); - uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl( + InternalIndex entry = ArgumentsAccessor::GetEntryForIndexImpl( isolate, holder, arguments, index, filter); - if (entry == kMaxUInt32) return kMaxUInt32; + if (entry.is_not_found()) return entry; // Arguments entries could overlap with the dictionary entries, hence offset // them by the number of context mapped entries. - return elements.parameter_map_length() + entry; + return entry.adjust_up(elements.parameter_map_length()); } - static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) { SloppyArgumentsElements elements = SloppyArgumentsElements::cast(holder.elements()); uint32_t length = elements.parameter_map_length(); - if (entry < length) { + if (entry.as_uint32() < length) { return PropertyDetails(kData, NONE, PropertyCellType::kNoCell); } FixedArray arguments = elements.arguments(); - return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length); + return ArgumentsAccessor::GetDetailsImpl(arguments, + entry.adjust_down(length)); } static bool HasParameterMapArg(Isolate* isolate, @@ -4073,26 +4108,26 @@ class SloppyArgumentsElementsAccessor return !elements.get_mapped_entry(index).IsTheHole(isolate); } - static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { + static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) { Handle<SloppyArgumentsElements> elements( SloppyArgumentsElements::cast(obj->elements()), obj->GetIsolate()); uint32_t length = elements->parameter_map_length(); - uint32_t delete_or_entry = entry; - if (entry < length) { - delete_or_entry = kMaxUInt32; + InternalIndex delete_or_entry = entry; + if (entry.as_uint32() < length) { + delete_or_entry = InternalIndex::NotFound(); } Subclass::SloppyDeleteImpl(obj, elements, delete_or_entry); // SloppyDeleteImpl allocates a new dictionary elements store. For making // heap verification happy we postpone clearing out the mapped entry. - if (entry < length) { - elements->set_mapped_entry(entry, + if (entry.as_uint32() < length) { + elements->set_mapped_entry(entry.as_uint32(), obj->GetReadOnlyRoots().the_hole_value()); } } static void SloppyDeleteImpl(Handle<JSObject> obj, Handle<SloppyArgumentsElements> elements, - uint32_t entry) { + InternalIndex entry) { // Implemented in subclasses. UNREACHABLE(); } @@ -4152,9 +4187,9 @@ class SloppyArgumentsElementsAccessor for (uint32_t k = start_from; k < length; ++k) { DCHECK_EQ(object->map(), *original_map); - uint32_t entry = + InternalIndex entry = GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES); - if (entry == kMaxUInt32) { + if (entry.is_not_found()) { if (search_for_hole) return Just(true); continue; } @@ -4193,9 +4228,9 @@ class SloppyArgumentsElementsAccessor for (uint32_t k = start_from; k < length; ++k) { DCHECK_EQ(object->map(), *original_map); - uint32_t entry = + InternalIndex entry = GetEntryForIndexImpl(isolate, *object, *elements, k, ALL_PROPERTIES); - if (entry == kMaxUInt32) { + if (entry.is_not_found()) { continue; } @@ -4246,14 +4281,15 @@ class SlowSloppyArgumentsElementsAccessor } static void SloppyDeleteImpl(Handle<JSObject> obj, Handle<SloppyArgumentsElements> elements, - uint32_t entry) { + InternalIndex entry) { // No need to delete a context mapped entry from the arguments elements. 
- if (entry == kMaxUInt32) return; + if (entry.is_not_found()) return; Isolate* isolate = obj->GetIsolate(); Handle<NumberDictionary> dict(NumberDictionary::cast(elements->arguments()), isolate); - int length = elements->parameter_map_length(); - dict = NumberDictionary::DeleteEntry(isolate, dict, entry - length); + uint32_t length = elements->parameter_map_length(); + dict = NumberDictionary::DeleteEntry(isolate, dict, + entry.as_uint32() - length); elements->set_arguments(*dict); } static void AddImpl(Handle<JSObject> object, uint32_t index, @@ -4278,15 +4314,15 @@ class SlowSloppyArgumentsElementsAccessor } static void ReconfigureImpl(Handle<JSObject> object, - Handle<FixedArrayBase> store, uint32_t entry, + Handle<FixedArrayBase> store, InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) { Isolate* isolate = object->GetIsolate(); Handle<SloppyArgumentsElements> elements = Handle<SloppyArgumentsElements>::cast(store); uint32_t length = elements->parameter_map_length(); - if (entry < length) { - Object probe = elements->get_mapped_entry(entry); + if (entry.as_uint32() < length) { + Object probe = elements->get_mapped_entry(entry.as_uint32()); DCHECK(!probe.IsTheHole(isolate)); Context context = elements->context(); int context_entry = Smi::ToInt(probe); @@ -4294,7 +4330,7 @@ class SlowSloppyArgumentsElementsAccessor context.set(context_entry, *value); // Redefining attributes of an aliased element destroys fast aliasing. - elements->set_mapped_entry(entry, + elements->set_mapped_entry(entry.as_uint32(), ReadOnlyRoots(isolate).the_hole_value()); // For elements that are still writable we re-establish slow aliasing. if ((attributes & READ_ONLY) == 0) { @@ -4304,8 +4340,8 @@ class SlowSloppyArgumentsElementsAccessor PropertyDetails details(kData, attributes, PropertyCellType::kNoCell); Handle<NumberDictionary> arguments( NumberDictionary::cast(elements->arguments()), isolate); - arguments = - NumberDictionary::Add(isolate, arguments, entry, value, details); + arguments = NumberDictionary::Add(isolate, arguments, entry.as_uint32(), + value, details); // If the attributes were NONE, we would have called set rather than // reconfigure. DCHECK_NE(NONE, attributes); @@ -4314,7 +4350,7 @@ class SlowSloppyArgumentsElementsAccessor } else { Handle<FixedArrayBase> arguments(elements->arguments(), isolate); DictionaryElementsAccessor::ReconfigureImpl( - object, arguments, entry - length, value, attributes); + object, arguments, entry.adjust_down(length), value, attributes); } } }; @@ -4346,23 +4382,25 @@ class FastSloppyArgumentsElementsAccessor static Handle<NumberDictionary> NormalizeArgumentsElements( Handle<JSObject> object, Handle<SloppyArgumentsElements> elements, - uint32_t* entry) { + InternalIndex* entry) { Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object); elements->set_arguments(*dictionary); // kMaxUInt32 indicates that a context mapped element got deleted. In this // case we only normalize the elements (aka. migrate to SLOW_SLOPPY). 
- if (*entry == kMaxUInt32) return dictionary; + if (entry->is_not_found()) return dictionary; uint32_t length = elements->parameter_map_length(); - if (*entry >= length) { + if (entry->as_uint32() >= length) { *entry = - dictionary->FindEntry(object->GetIsolate(), *entry - length) + length; + InternalIndex(dictionary->FindEntry(object->GetIsolate(), + entry->as_uint32() - length) + + length); } return dictionary; } static void SloppyDeleteImpl(Handle<JSObject> obj, Handle<SloppyArgumentsElements> elements, - uint32_t entry) { + InternalIndex entry) { // Always normalize element on deleting an entry. NormalizeArgumentsElements(obj, elements, &entry); SlowSloppyArgumentsElementsAccessor::SloppyDeleteImpl(obj, elements, entry); @@ -4386,11 +4424,12 @@ class FastSloppyArgumentsElementsAccessor // index to entry explicitly since the slot still contains the hole, so the // current EntryForIndex would indicate that it is "absent" by returning // kMaxUInt32. - FastHoleyObjectElementsAccessor::SetImpl(arguments, index, *value); + FastHoleyObjectElementsAccessor::SetImpl(arguments, InternalIndex(index), + *value); } static void ReconfigureImpl(Handle<JSObject> object, - Handle<FixedArrayBase> store, uint32_t entry, + Handle<FixedArrayBase> store, InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) { DCHECK_EQ(object->elements(), *store); @@ -4443,63 +4482,67 @@ class StringWrapperElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { public: static Handle<Object> GetInternalImpl(Handle<JSObject> holder, - uint32_t entry) { + InternalIndex entry) { return GetImpl(holder, entry); } - static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) { + static Handle<Object> GetImpl(Handle<JSObject> holder, InternalIndex entry) { Isolate* isolate = holder->GetIsolate(); Handle<String> string(GetString(*holder), isolate); uint32_t length = static_cast<uint32_t>(string->length()); - if (entry < length) { + if (entry.as_uint32() < length) { return isolate->factory()->LookupSingleCharacterStringFromCode( - String::Flatten(isolate, string)->Get(entry)); + String::Flatten(isolate, string)->Get(entry.as_int())); } return BackingStoreAccessor::GetImpl(isolate, holder->elements(), - entry - length); + entry.adjust_down(length)); } static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase elements, - uint32_t entry) { + InternalIndex entry) { UNREACHABLE(); } - static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) { + static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) { uint32_t length = static_cast<uint32_t>(GetString(holder).length()); - if (entry < length) { + if (entry.as_uint32() < length) { PropertyAttributes attributes = static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE); return PropertyDetails(kData, attributes, PropertyCellType::kNoCell); } - return BackingStoreAccessor::GetDetailsImpl(holder, entry - length); + return BackingStoreAccessor::GetDetailsImpl(holder, + entry.adjust_down(length)); } - static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder, - FixedArrayBase backing_store, - uint32_t index, PropertyFilter filter) { + static InternalIndex GetEntryForIndexImpl(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index, + PropertyFilter filter) { uint32_t length = static_cast<uint32_t>(GetString(holder).length()); - if (index < length) return index; - uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl( - isolate, holder, backing_store, 
index, filter); - if (backing_store_entry == kMaxUInt32) return kMaxUInt32; - DCHECK(backing_store_entry < kMaxUInt32 - length); - return backing_store_entry + length; + if (index < length) return InternalIndex(index); + InternalIndex backing_store_entry = + BackingStoreAccessor::GetEntryForIndexImpl( + isolate, holder, backing_store, index, filter); + if (backing_store_entry.is_not_found()) return backing_store_entry; + return backing_store_entry.adjust_up(length); } - static void DeleteImpl(Handle<JSObject> holder, uint32_t entry) { + static void DeleteImpl(Handle<JSObject> holder, InternalIndex entry) { uint32_t length = static_cast<uint32_t>(GetString(*holder).length()); - if (entry < length) { + if (entry.as_uint32() < length) { return; // String contents can't be deleted. } - BackingStoreAccessor::DeleteImpl(holder, entry - length); + BackingStoreAccessor::DeleteImpl(holder, entry.adjust_down(length)); } - static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) { + static void SetImpl(Handle<JSObject> holder, InternalIndex entry, + Object value) { uint32_t length = static_cast<uint32_t>(GetString(*holder).length()); - if (entry < length) { + if (entry.as_uint32() < length) { return; // String contents are read-only. } - BackingStoreAccessor::SetImpl(holder->elements(), entry - length, value); + BackingStoreAccessor::SetImpl(holder->elements(), entry.adjust_down(length), + value); } static void AddImpl(Handle<JSObject> object, uint32_t index, @@ -4519,15 +4562,15 @@ class StringWrapperElementsAccessor } static void ReconfigureImpl(Handle<JSObject> object, - Handle<FixedArrayBase> store, uint32_t entry, + Handle<FixedArrayBase> store, InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) { uint32_t length = static_cast<uint32_t>(GetString(*object).length()); - if (entry < length) { + if (entry.as_uint32() < length) { return; // String contents can't be reconfigured. } - BackingStoreAccessor::ReconfigureImpl(object, store, entry - length, value, - attributes); + BackingStoreAccessor::ReconfigureImpl( + object, store, entry.adjust_down(length), value, attributes); } V8_WARN_UNUSED_RESULT static ExceptionStatus AddElementsToKeyAccumulatorImpl( diff --git a/chromium/v8/src/objects/elements.h b/chromium/v8/src/objects/elements.h index b7fcd907a37..219a9ad73a0 100644 --- a/chromium/v8/src/objects/elements.h +++ b/chromium/v8/src/objects/elements.h @@ -6,6 +6,7 @@ #define V8_OBJECTS_ELEMENTS_H_ #include "src/objects/elements-kind.h" +#include "src/objects/internal-index.h" #include "src/objects/keys.h" #include "src/objects/objects.h" @@ -50,11 +51,9 @@ class ElementsAccessor { // Note: this is currently not implemented for string wrapper and // typed array elements. - virtual bool HasEntry(JSObject holder, uint32_t entry) = 0; + virtual bool HasEntry(JSObject holder, InternalIndex entry) = 0; - // TODO(cbruni): HasEntry and Get should not be exposed publicly with the - // entry parameter. 
- virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0; + virtual Handle<Object> Get(Handle<JSObject> holder, InternalIndex entry) = 0; virtual bool HasAccessors(JSObject holder) = 0; virtual uint32_t NumberOfElements(JSObject holder) = 0; @@ -105,7 +104,8 @@ class ElementsAccessor { static void InitializeOncePerProcess(); static void TearDown(); - virtual void Set(Handle<JSObject> holder, uint32_t entry, Object value) = 0; + virtual void Set(Handle<JSObject> holder, InternalIndex entry, + Object value) = 0; virtual void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value, PropertyAttributes attributes, @@ -178,18 +178,18 @@ class ElementsAccessor { // indices are equivalent to entries. In the NumberDictionary // ElementsAccessor, entries are mapped to an index using the KeyAt method on // the NumberDictionary. - virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder, - FixedArrayBase backing_store, - uint32_t index) = 0; + virtual InternalIndex GetEntryForIndex(Isolate* isolate, JSObject holder, + FixedArrayBase backing_store, + uint32_t index) = 0; - virtual PropertyDetails GetDetails(JSObject holder, uint32_t entry) = 0; + virtual PropertyDetails GetDetails(JSObject holder, InternalIndex entry) = 0; virtual void Reconfigure(Handle<JSObject> object, - Handle<FixedArrayBase> backing_store, uint32_t entry, - Handle<Object> value, + Handle<FixedArrayBase> backing_store, + InternalIndex entry, Handle<Object> value, PropertyAttributes attributes) = 0; // Deletes an element in an object. - virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0; + virtual void Delete(Handle<JSObject> holder, InternalIndex entry) = 0; // NOTE: this method violates the handlified function signature convention: // raw pointer parameter |source_holder| in the function that allocates. diff --git a/chromium/v8/src/objects/feedback-cell-inl.h b/chromium/v8/src/objects/feedback-cell-inl.h index e06cfce7de1..188666d4626 100644 --- a/chromium/v8/src/objects/feedback-cell-inl.h +++ b/chromium/v8/src/objects/feedback-cell-inl.h @@ -17,12 +17,7 @@ namespace v8 { namespace internal { -OBJECT_CONSTRUCTORS_IMPL(FeedbackCell, Struct) - -CAST_ACCESSOR(FeedbackCell) - -ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset) -INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset) +TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackCell) void FeedbackCell::clear_padding() { if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return; diff --git a/chromium/v8/src/objects/feedback-cell.h b/chromium/v8/src/objects/feedback-cell.h index 3c085f72d9a..669efaeaeca 100644 --- a/chromium/v8/src/objects/feedback-cell.h +++ b/chromium/v8/src/objects/feedback-cell.h @@ -18,7 +18,7 @@ namespace internal { // number of closures created for a certain function per native // context. There's at most one FeedbackCell for each function in // a native context. -class FeedbackCell : public Struct { +class FeedbackCell : public TorqueGeneratedFeedbackCell<FeedbackCell, Struct> { public: static int GetInitialInterruptBudget() { if (FLAG_lazy_feedback_allocation) { @@ -27,19 +27,8 @@ class FeedbackCell : public Struct { return FLAG_interrupt_budget; } - // [value]: value of the cell. - DECL_ACCESSORS(value, HeapObject) - DECL_INT32_ACCESSORS(interrupt_budget) - - DECL_CAST(FeedbackCell) - // Dispatched behavior. DECL_PRINTER(FeedbackCell) - DECL_VERIFIER(FeedbackCell) - - // Layout description. 
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - TORQUE_GENERATED_FEEDBACK_CELL_FIELDS) static const int kUnalignedSize = kSize; static const int kAlignedSize = RoundUp<kObjectAlignment>(int{kSize}); @@ -50,7 +39,7 @@ class FeedbackCell : public Struct { using BodyDescriptor = FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>; - OBJECT_CONSTRUCTORS(FeedbackCell, Struct); + TQ_OBJECT_CONSTRUCTORS(FeedbackCell) }; } // namespace internal diff --git a/chromium/v8/src/objects/feedback-vector-inl.h b/chromium/v8/src/objects/feedback-vector-inl.h index 9cdc03b5c24..024b92165de 100644 --- a/chromium/v8/src/objects/feedback-vector-inl.h +++ b/chromium/v8/src/objects/feedback-vector-inl.h @@ -286,10 +286,6 @@ Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) { return isolate->factory()->megamorphic_symbol(); } -Handle<Symbol> FeedbackVector::PremonomorphicSentinel(Isolate* isolate) { - return isolate->factory()->premonomorphic_symbol(); -} - Symbol FeedbackVector::RawUninitializedSentinel(Isolate* isolate) { return ReadOnlyRoots(isolate).uninitialized_symbol(); } diff --git a/chromium/v8/src/objects/feedback-vector.cc b/chromium/v8/src/objects/feedback-vector.cc index 2fbc48a95ed..4fe75ab325e 100644 --- a/chromium/v8/src/objects/feedback-vector.cc +++ b/chromium/v8/src/objects/feedback-vector.cc @@ -52,7 +52,6 @@ static bool IsPropertyNameFeedback(MaybeObject feedback) { Symbol symbol = Symbol::cast(heap_object); ReadOnlyRoots roots = symbol.GetReadOnlyRoots(); return symbol != roots.uninitialized_symbol() && - symbol != roots.premonomorphic_symbol() && symbol != roots.megamorphic_symbol(); } @@ -233,8 +232,8 @@ Handle<FeedbackVector> FeedbackVector::New( const int slot_count = shared->feedback_metadata().slot_count(); - Handle<FeedbackVector> vector = factory->NewFeedbackVector( - shared, closure_feedback_cell_array, AllocationType::kOld); + Handle<FeedbackVector> vector = + factory->NewFeedbackVector(shared, closure_feedback_cell_array); DCHECK_EQ(vector->length(), slot_count); @@ -524,12 +523,6 @@ bool FeedbackNexus::Clear() { return feedback_updated; } -void FeedbackNexus::ConfigurePremonomorphic(Handle<Map> receiver_map) { - SetFeedback(*FeedbackVector::PremonomorphicSentinel(GetIsolate()), - SKIP_WRITE_BARRIER); - SetFeedbackExtra(HeapObjectReference::Weak(*receiver_map)); -} - bool FeedbackNexus::ConfigureMegamorphic() { DisallowHeapAllocation no_gc; Isolate* isolate = GetIsolate(); @@ -585,13 +578,6 @@ InlineCacheState FeedbackNexus::ic_state() const { case FeedbackSlotKind::kLoadGlobalInsideTypeof: { if (feedback->IsSmi()) return MONOMORPHIC; - if (feedback == MaybeObject::FromObject( - *FeedbackVector::PremonomorphicSentinel(isolate))) { - DCHECK(kind() == FeedbackSlotKind::kStoreGlobalSloppy || - kind() == FeedbackSlotKind::kStoreGlobalStrict); - return PREMONOMORPHIC; - } - DCHECK(feedback->IsWeakOrCleared()); MaybeObject extra = GetFeedbackExtra(); if (!feedback->IsCleared() || @@ -619,10 +605,6 @@ InlineCacheState FeedbackNexus::ic_state() const { *FeedbackVector::MegamorphicSentinel(isolate))) { return MEGAMORPHIC; } - if (feedback == MaybeObject::FromObject( - *FeedbackVector::PremonomorphicSentinel(isolate))) { - return PREMONOMORPHIC; - } if (feedback->IsWeakOrCleared()) { // Don't check if the map is cleared. 
return MONOMORPHIC; @@ -974,14 +956,6 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const { Map map = Map::cast(heap_object); maps->push_back(handle(map, isolate)); return 1; - } else if (feedback->GetHeapObjectIfStrong(&heap_object) && - heap_object == - heap_object.GetReadOnlyRoots().premonomorphic_symbol()) { - if (GetFeedbackExtra()->GetHeapObjectIfWeak(&heap_object)) { - Map map = Map::cast(heap_object); - maps->push_back(handle(map, isolate)); - return 1; - } } return 0; @@ -1203,9 +1177,11 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const { handler = handle(Code::cast(data_handler->smi_handler()), vector().GetIsolate()); } else if (maybe_code_handler.object()->IsSmi()) { - // Skip proxy handlers. - DCHECK_EQ(*(maybe_code_handler.object()), - *StoreHandler::StoreProxy(GetIsolate())); + // Skip proxy handlers and the slow handler. + DCHECK(*(maybe_code_handler.object()) == + *StoreHandler::StoreProxy(GetIsolate()) || + *(maybe_code_handler.object()) == + *StoreHandler::StoreSlow(GetIsolate())); continue; } else { // Element store without prototype chain check. diff --git a/chromium/v8/src/objects/feedback-vector.h b/chromium/v8/src/objects/feedback-vector.h index af03bb4130c..1c34266dc83 100644 --- a/chromium/v8/src/objects/feedback-vector.h +++ b/chromium/v8/src/objects/feedback-vector.h @@ -305,9 +305,6 @@ class FeedbackVector : public HeapObject { // The object that indicates a megamorphic state. static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate); - // The object that indicates a premonomorphic state. - static inline Handle<Symbol> PremonomorphicSentinel(Isolate* isolate); - // A raw version of the uninitialized sentinel that's safe to read during // garbage collection (e.g., for patching the cache). static inline Symbol RawUninitializedSentinel(Isolate* isolate); @@ -567,7 +564,7 @@ class FeedbackMetadata : public HeapObject { // possibly be confused with a pointer. // NOLINTNEXTLINE(runtime/references) (false positive) STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag); -STATIC_ASSERT(Name::kEmptyHashField == 0x3); +STATIC_ASSERT(Name::kEmptyHashField == 0x7); // Verify that a set hash field will not look like a tagged object. STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag); @@ -657,13 +654,12 @@ class V8_EXPORT_PRIVATE FeedbackNexus final { bool IsCleared() const { InlineCacheState state = ic_state(); - return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC; + return !FLAG_use_ic || state == UNINITIALIZED; } // Clear() returns true if the state of the underlying vector was changed. bool Clear(); void ConfigureUninitialized(); - void ConfigurePremonomorphic(Handle<Map> receiver_map); // ConfigureMegamorphic() returns true if the state of the underlying vector // was changed. Extra feedback is cleared if the 0 parameter version is used. bool ConfigureMegamorphic(); diff --git a/chromium/v8/src/objects/field-index-inl.h b/chromium/v8/src/objects/field-index-inl.h index 997cd68c32b..93ffc59c724 100644 --- a/chromium/v8/src/objects/field-index-inl.h +++ b/chromium/v8/src/objects/field-index-inl.h @@ -60,13 +60,13 @@ int FieldIndex::GetLoadByFieldIndex() const { return is_double() ? 
(result | 1) : result; } -FieldIndex FieldIndex::ForDescriptor(Map map, int descriptor_index) { +FieldIndex FieldIndex::ForDescriptor(Map map, InternalIndex descriptor_index) { Isolate* isolate = GetIsolateForPtrCompr(map); return ForDescriptor(isolate, map, descriptor_index); } FieldIndex FieldIndex::ForDescriptor(Isolate* isolate, Map map, - int descriptor_index) { + InternalIndex descriptor_index) { PropertyDetails details = map.instance_descriptors(isolate).GetDetails(descriptor_index); int field_index = details.field_index(); diff --git a/chromium/v8/src/objects/field-index.h b/chromium/v8/src/objects/field-index.h index 4fae87774d4..fbde0bc6091 100644 --- a/chromium/v8/src/objects/field-index.h +++ b/chromium/v8/src/objects/field-index.h @@ -5,6 +5,8 @@ #ifndef V8_OBJECTS_FIELD_INDEX_H_ #define V8_OBJECTS_FIELD_INDEX_H_ +// TODO(jkummerow): Consider forward-declaring instead. +#include "src/objects/internal-index.h" #include "src/objects/property-details.h" #include "src/utils/utils.h" @@ -27,9 +29,10 @@ class FieldIndex final { Map map, int index, Representation representation = Representation::Tagged()); static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding); - static inline FieldIndex ForDescriptor(Map map, int descriptor_index); + static inline FieldIndex ForDescriptor(Map map, + InternalIndex descriptor_index); static inline FieldIndex ForDescriptor(Isolate* isolate, Map map, - int descriptor_index); + InternalIndex descriptor_index); inline int GetLoadByFieldIndex() const; diff --git a/chromium/v8/src/objects/fixed-array-inl.h b/chromium/v8/src/objects/fixed-array-inl.h index 79c29a6eeba..9701f8ef095 100644 --- a/chromium/v8/src/objects/fixed-array-inl.h +++ b/chromium/v8/src/objects/fixed-array-inl.h @@ -240,7 +240,7 @@ int BinarySearch(T* array, Name name, int valid_entries, for (; low <= limit; ++low) { int sort_index = array->GetSortedKeyIndex(low); - Name entry = array->GetKey(sort_index); + Name entry = array->GetKey(InternalIndex(sort_index)); uint32_t current_hash = entry.hash_field(); if (current_hash != hash) { if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) { @@ -272,7 +272,7 @@ int LinearSearch(T* array, Name name, int valid_entries, int len = array->number_of_entries(); for (int number = 0; number < len; number++) { int sorted_index = array->GetSortedKeyIndex(number); - Name entry = array->GetKey(sorted_index); + Name entry = array->GetKey(InternalIndex(sorted_index)); uint32_t current_hash = entry.hash_field(); if (current_hash > hash) { *out_insertion_index = sorted_index; @@ -286,7 +286,7 @@ int LinearSearch(T* array, Name name, int valid_entries, DCHECK_LE(valid_entries, array->number_of_entries()); DCHECK_NULL(out_insertion_index); // Not supported here. for (int number = 0; number < valid_entries; number++) { - if (array->GetKey(number) == name) return number; + if (array->GetKey(InternalIndex(number)) == name) return number; } return T::kNotFound; } diff --git a/chromium/v8/src/objects/fixed-array.h b/chromium/v8/src/objects/fixed-array.h index 40290797f71..b9d644b4923 100644 --- a/chromium/v8/src/objects/fixed-array.h +++ b/chromium/v8/src/objects/fixed-array.h @@ -86,14 +86,14 @@ class FixedArrayBase : public HeapObject { V8_EXPORT_PRIVATE bool IsCowArray() const; -// Maximal allowed size, in bytes, of a single FixedArrayBase. -// Prevents overflowing size computations, as well as extreme memory -// consumption. 
-#ifdef V8_HOST_ARCH_32_BIT - static const int kMaxSize = 512 * MB; -#else - static const int kMaxSize = 1024 * MB; -#endif // V8_HOST_ARCH_32_BIT + // Maximal allowed size, in bytes, of a single FixedArrayBase. + // Prevents overflowing size computations, as well as extreme memory + // consumption. It's either (512Mb - kTaggedSize) or (1024Mb - kTaggedSize). + // -kTaggedSize is here to ensure that this max size always fits into Smi + // which is necessary for being able to create a free space filler for the + // whole array of kMaxSize. + static const int kMaxSize = 128 * kTaggedSize * MB - kTaggedSize; + STATIC_ASSERT(Smi::IsValid(kMaxSize)); // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, @@ -120,7 +120,7 @@ class FixedArray : public FixedArrayBase { // Return a grown copy if the index is bigger than the array's length. V8_EXPORT_PRIVATE static Handle<FixedArray> SetAndGrow( Isolate* isolate, Handle<FixedArray> array, int index, - Handle<Object> value, AllocationType allocation = AllocationType::kYoung); + Handle<Object> value); // Setter that uses write barrier. inline void set(int index, Object value); @@ -303,7 +303,6 @@ class WeakFixedArray : public HeapObject { DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, TORQUE_GENERATED_WEAK_FIXED_ARRAY_FIELDS) - static constexpr int kHeaderSize = kSize; static const int kMaxLength = (FixedArray::kMaxSize - kHeaderSize) / kTaggedSize; @@ -339,6 +338,12 @@ class WeakArrayList : public HeapObject { Isolate* isolate, Handle<WeakArrayList> array, const MaybeObjectHandle& value); + // A version that adds to elements. This ensures that the elements are + // inserted atomically w.r.t GC. + V8_EXPORT_PRIVATE static Handle<WeakArrayList> AddToEnd( + Isolate* isolate, Handle<WeakArrayList> array, + const MaybeObjectHandle& value1, const MaybeObjectHandle& value2); + inline MaybeObject Get(int index) const; inline MaybeObject Get(Isolate* isolate, int index) const; diff --git a/chromium/v8/src/objects/function-kind.h b/chromium/v8/src/objects/function-kind.h index 8e9c68e426c..9b0de76126b 100644 --- a/chromium/v8/src/objects/function-kind.h +++ b/chromium/v8/src/objects/function-kind.h @@ -14,6 +14,7 @@ enum FunctionKind : uint8_t { // BEGIN constructable functions kNormalFunction, kModule, + kAsyncModule, // BEGIN class constructors // BEGIN base constructors kBaseConstructor, @@ -61,7 +62,11 @@ inline bool IsArrowFunction(FunctionKind kind) { } inline bool IsModule(FunctionKind kind) { - return kind == FunctionKind::kModule; + return IsInRange(kind, FunctionKind::kModule, FunctionKind::kAsyncModule); +} + +inline bool IsAsyncModule(FunctionKind kind) { + return kind == FunctionKind::kAsyncModule; } inline bool IsAsyncGeneratorFunction(FunctionKind kind) { @@ -163,6 +168,8 @@ inline const char* FunctionKind2String(FunctionKind kind) { return "AsyncFunction"; case FunctionKind::kModule: return "Module"; + case FunctionKind::kAsyncModule: + return "AsyncModule"; case FunctionKind::kClassMembersInitializerFunction: return "ClassMembersInitializerFunction"; case FunctionKind::kDefaultBaseConstructor: diff --git a/chromium/v8/src/objects/heap-number-inl.h b/chromium/v8/src/objects/heap-number-inl.h index 78e65ca2313..546b16e93d8 100644 --- a/chromium/v8/src/objects/heap-number-inl.h +++ b/chromium/v8/src/objects/heap-number-inl.h @@ -7,8 +7,8 @@ #include "src/objects/heap-number.h" -#include "src/objects/heap-object-inl.h" #include "src/objects/objects-inl.h" +#include "src/objects/primitive-heap-object-inl.h" // Has to 
be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -16,7 +16,7 @@ namespace v8 { namespace internal { -OBJECT_CONSTRUCTORS_IMPL(HeapNumber, HeapObject) +OBJECT_CONSTRUCTORS_IMPL(HeapNumber, PrimitiveHeapObject) CAST_ACCESSOR(HeapNumber) diff --git a/chromium/v8/src/objects/heap-number.h b/chromium/v8/src/objects/heap-number.h index 9063f3d22c9..0982cc232ea 100644 --- a/chromium/v8/src/objects/heap-number.h +++ b/chromium/v8/src/objects/heap-number.h @@ -5,7 +5,7 @@ #ifndef V8_OBJECTS_HEAP_NUMBER_H_ #define V8_OBJECTS_HEAP_NUMBER_H_ -#include "src/objects/heap-object.h" +#include "src/objects/primitive-heap-object.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -15,7 +15,7 @@ namespace internal { // The HeapNumber class describes heap allocated numbers that cannot be // represented in a Smi (small integer). -class HeapNumber : public HeapObject { +class HeapNumber : public PrimitiveHeapObject { public: // [value]: number value. inline double value() const; @@ -28,7 +28,7 @@ class HeapNumber : public HeapObject { inline int get_sign(); // Layout description. - static const int kValueOffset = HeapObject::kHeaderSize; + static const int kValueOffset = PrimitiveHeapObject::kHeaderSize; // IEEE doubles are two 32 bit words. The first is just mantissa, the second // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit // words within double numbers are endian dependent and they are set @@ -59,7 +59,7 @@ class HeapNumber : public HeapObject { DECL_CAST(HeapNumber) V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os); - OBJECT_CONSTRUCTORS(HeapNumber, HeapObject); + OBJECT_CONSTRUCTORS(HeapNumber, PrimitiveHeapObject); }; } // namespace internal diff --git a/chromium/v8/src/objects/instance-type.h b/chromium/v8/src/objects/instance-type.h index 9a855de95bc..f9931972049 100644 --- a/chromium/v8/src/objects/instance-type.h +++ b/chromium/v8/src/objects/instance-type.h @@ -80,8 +80,8 @@ static inline bool IsShortcutCandidate(int type) { enum InstanceType : uint16_t { // String types. - INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag | - kInternalizedTag, // FIRST_PRIMITIVE_TYPE + INTERNALIZED_STRING_TYPE = + kTwoByteStringTag | kSeqStringTag | kInternalizedTag, ONE_BYTE_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag | kInternalizedTag, EXTERNAL_INTERNALIZED_STRING_TYPE = @@ -116,262 +116,41 @@ enum InstanceType : uint16_t { THIN_ONE_BYTE_STRING_TYPE = kOneByteStringTag | kThinStringTag | kNotInternalizedTag, - // Non-string names - SYMBOL_TYPE = - 1 + (kIsNotInternalizedMask | kUncachedExternalStringMask | - kStringEncodingMask | - kStringRepresentationMask), // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE - - // Other primitives (cannot contain non-map-word pointers to heap objects). - HEAP_NUMBER_TYPE, - BIGINT_TYPE, - ODDBALL_TYPE, // LAST_PRIMITIVE_TYPE - - // Objects allocated in their own spaces (never in new space). - MAP_TYPE, - CODE_TYPE, - - // "Data", objects that cannot contain non-map-word pointers to heap - // objects. - FOREIGN_TYPE, - BYTE_ARRAY_TYPE, - BYTECODE_ARRAY_TYPE, - FREE_SPACE_TYPE, - FIXED_DOUBLE_ARRAY_TYPE, - FEEDBACK_METADATA_TYPE, - FILLER_TYPE, // LAST_DATA_TYPE - - // Structs. 
- ACCESS_CHECK_INFO_TYPE, - ACCESSOR_INFO_TYPE, - ACCESSOR_PAIR_TYPE, - ALIASED_ARGUMENTS_ENTRY_TYPE, - ALLOCATION_MEMENTO_TYPE, - ARRAY_BOILERPLATE_DESCRIPTION_TYPE, - ASM_WASM_DATA_TYPE, - ASYNC_GENERATOR_REQUEST_TYPE, - CLASS_POSITIONS_TYPE, - DEBUG_INFO_TYPE, - ENUM_CACHE_TYPE, - FUNCTION_TEMPLATE_INFO_TYPE, - FUNCTION_TEMPLATE_RARE_DATA_TYPE, - INTERCEPTOR_INFO_TYPE, - INTERPRETER_DATA_TYPE, - OBJECT_TEMPLATE_INFO_TYPE, - PROMISE_CAPABILITY_TYPE, - PROMISE_REACTION_TYPE, - PROTOTYPE_INFO_TYPE, - SCRIPT_TYPE, - SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE, - SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, - STACK_FRAME_INFO_TYPE, - STACK_TRACE_FRAME_TYPE, - TEMPLATE_OBJECT_DESCRIPTION_TYPE, - TUPLE2_TYPE, - TUPLE3_TYPE, - WASM_CAPI_FUNCTION_DATA_TYPE, - WASM_DEBUG_INFO_TYPE, - WASM_EXCEPTION_TAG_TYPE, - WASM_EXPORTED_FUNCTION_DATA_TYPE, - WASM_INDIRECT_FUNCTION_TABLE_TYPE, - WASM_JS_FUNCTION_DATA_TYPE, - - CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE - CALLBACK_TASK_TYPE, - PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, - PROMISE_REJECT_REACTION_JOB_TASK_TYPE, - PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE - -#define MAKE_TORQUE_INSTANCE_TYPE(V) V, - TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_INSTANCE_TYPE) +// Most instance types are defined in Torque, with the exception of the string +// types above. They are ordered by inheritance hierarchy so that we can easily +// use range checks to determine whether an object is an instance of a subclass +// of any type. There are a few more constraints specified in the Torque type +// definitions: +// - Some instance types are exposed in v8.h, so they are locked to specific +// values to not unnecessarily change the ABI. +// - JSSpecialObject and JSCustomElementsObject are aligned with the beginning +// of the JSObject range, so that we can use a larger range check from +// FIRST_JS_RECEIVER_TYPE to the end of those ranges and include JSProxy too. +// - JSFunction is last, meaning we can use a single inequality check to +// determine whether an instance type is within the range for any class in the +// inheritance hierarchy of JSFunction. This includes commonly-checked classes +// JSObject and JSReceiver. +#define MAKE_TORQUE_INSTANCE_TYPE(TYPE, value) TYPE = value, + TORQUE_ASSIGNED_INSTANCE_TYPES(MAKE_TORQUE_INSTANCE_TYPE) #undef MAKE_TORQUE_INSTANCE_TYPE - // Modules - SOURCE_TEXT_MODULE_TYPE, // FIRST_MODULE_TYPE - SYNTHETIC_MODULE_TYPE, // LAST_MODULE_TYPE - - ALLOCATION_SITE_TYPE, - EMBEDDER_DATA_ARRAY_TYPE, - // FixedArrays. - FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE - OBJECT_BOILERPLATE_DESCRIPTION_TYPE, - CLOSURE_FEEDBACK_CELL_ARRAY_TYPE, - HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE - ORDERED_HASH_MAP_TYPE, - ORDERED_HASH_SET_TYPE, - ORDERED_NAME_DICTIONARY_TYPE, - NAME_DICTIONARY_TYPE, - GLOBAL_DICTIONARY_TYPE, - NUMBER_DICTIONARY_TYPE, - SIMPLE_NUMBER_DICTIONARY_TYPE, - STRING_TABLE_TYPE, - EPHEMERON_HASH_TABLE_TYPE, // LAST_HASH_TABLE_TYPE - SCOPE_INFO_TYPE, - SCRIPT_CONTEXT_TABLE_TYPE, // LAST_FIXED_ARRAY_TYPE, - - // Contexts. - AWAIT_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE - BLOCK_CONTEXT_TYPE, - CATCH_CONTEXT_TYPE, - DEBUG_EVALUATE_CONTEXT_TYPE, - EVAL_CONTEXT_TYPE, - FUNCTION_CONTEXT_TYPE, - MODULE_CONTEXT_TYPE, - NATIVE_CONTEXT_TYPE, - SCRIPT_CONTEXT_TYPE, - WITH_CONTEXT_TYPE, // LAST_CONTEXT_TYPE - - WEAK_FIXED_ARRAY_TYPE, // FIRST_WEAK_FIXED_ARRAY_TYPE - TRANSITION_ARRAY_TYPE, // LAST_WEAK_FIXED_ARRAY_TYPE - - // Misc. 
- CALL_HANDLER_INFO_TYPE, - CELL_TYPE, - CODE_DATA_CONTAINER_TYPE, - DESCRIPTOR_ARRAY_TYPE, - FEEDBACK_CELL_TYPE, - FEEDBACK_VECTOR_TYPE, - LOAD_HANDLER_TYPE, - PREPARSE_DATA_TYPE, - PROPERTY_ARRAY_TYPE, - PROPERTY_CELL_TYPE, - SHARED_FUNCTION_INFO_TYPE, - SMALL_ORDERED_HASH_MAP_TYPE, - SMALL_ORDERED_HASH_SET_TYPE, - SMALL_ORDERED_NAME_DICTIONARY_TYPE, - STORE_HANDLER_TYPE, - UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE, - UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE, - WEAK_ARRAY_LIST_TYPE, - WEAK_CELL_TYPE, - - // All the following types are subtypes of JSReceiver, which corresponds to - // objects in the JS sense. The first and the last type in this range are - // the two forms of function. This organization enables using the same - // compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range. - // Some of the following instance types are exposed in v8.h, so to not - // unnecessarily change the ABI when we introduce new instance types in the - // future, we leave some space between instance types. - JS_PROXY_TYPE = 0x0400, // FIRST_JS_RECEIVER_TYPE - JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE - JS_GLOBAL_PROXY_TYPE, - JS_MODULE_NAMESPACE_TYPE, - // Like JS_API_OBJECT_TYPE, but requires access checks and/or has - // interceptors. - JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE - JS_PRIMITIVE_WRAPPER_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER - // Like JS_OBJECT_TYPE, but created from API function. - JS_API_OBJECT_TYPE = 0x0420, - JS_OBJECT_TYPE, - JS_ARGUMENTS_TYPE, - JS_ARRAY_BUFFER_TYPE, - JS_ARRAY_ITERATOR_TYPE, - JS_ARRAY_TYPE, - JS_ASYNC_FROM_SYNC_ITERATOR_TYPE, - JS_ASYNC_FUNCTION_OBJECT_TYPE, - JS_ASYNC_GENERATOR_OBJECT_TYPE, - JS_CONTEXT_EXTENSION_OBJECT_TYPE, - JS_DATE_TYPE, - JS_ERROR_TYPE, - JS_GENERATOR_OBJECT_TYPE, - JS_MAP_TYPE, - JS_MAP_KEY_ITERATOR_TYPE, - JS_MAP_KEY_VALUE_ITERATOR_TYPE, - JS_MAP_VALUE_ITERATOR_TYPE, - JS_MESSAGE_OBJECT_TYPE, - JS_PROMISE_TYPE, - JS_REGEXP_TYPE, - JS_REGEXP_STRING_ITERATOR_TYPE, - JS_SET_TYPE, - JS_SET_KEY_VALUE_ITERATOR_TYPE, - JS_SET_VALUE_ITERATOR_TYPE, - JS_STRING_ITERATOR_TYPE, - JS_WEAK_REF_TYPE, - JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE, - JS_FINALIZATION_GROUP_TYPE, - JS_WEAK_MAP_TYPE, - JS_WEAK_SET_TYPE, - - JS_TYPED_ARRAY_TYPE, - JS_DATA_VIEW_TYPE, - -#ifdef V8_INTL_SUPPORT - JS_INTL_V8_BREAK_ITERATOR_TYPE, - JS_INTL_COLLATOR_TYPE, - JS_INTL_DATE_TIME_FORMAT_TYPE, - JS_INTL_LIST_FORMAT_TYPE, - JS_INTL_LOCALE_TYPE, - JS_INTL_NUMBER_FORMAT_TYPE, - JS_INTL_PLURAL_RULES_TYPE, - JS_INTL_RELATIVE_TIME_FORMAT_TYPE, - JS_INTL_SEGMENT_ITERATOR_TYPE, - JS_INTL_SEGMENTER_TYPE, -#endif // V8_INTL_SUPPORT - - WASM_EXCEPTION_TYPE, - WASM_GLOBAL_TYPE, - WASM_INSTANCE_TYPE, - WASM_MEMORY_TYPE, - WASM_MODULE_TYPE, - WASM_TABLE_TYPE, - JS_BOUND_FUNCTION_TYPE, - JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE - // Pseudo-types - FIRST_TYPE = 0x0, - LAST_TYPE = JS_FUNCTION_TYPE, - FIRST_STRING_TYPE = FIRST_TYPE, - FIRST_NAME_TYPE = FIRST_STRING_TYPE, - LAST_NAME_TYPE = SYMBOL_TYPE, FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE, LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE, FIRST_NONSTRING_TYPE = SYMBOL_TYPE, - FIRST_PRIMITIVE_TYPE = FIRST_NAME_TYPE, - LAST_PRIMITIVE_TYPE = ODDBALL_TYPE, - FIRST_FUNCTION_TYPE = JS_BOUND_FUNCTION_TYPE, - LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE, - // Boundaries for testing if given HeapObject is a subclass of FixedArray. 
- FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE, - LAST_FIXED_ARRAY_TYPE = SCRIPT_CONTEXT_TABLE_TYPE, - // Boundaries for testing if given HeapObject is a subclass of HashTable - FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE, - LAST_HASH_TABLE_TYPE = EPHEMERON_HASH_TABLE_TYPE, - // Boundaries for testing if given HeapObject is a subclass of WeakFixedArray. - FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE, - LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE, - // Boundaries for testing if given HeapObject is a Context - FIRST_CONTEXT_TYPE = AWAIT_CONTEXT_TYPE, - LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE, - // Boundaries for testing if given HeapObject is a subclass of Microtask. - FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE, - LAST_MICROTASK_TYPE = PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, - // Boundaries of module record types - FIRST_MODULE_TYPE = SOURCE_TEXT_MODULE_TYPE, - LAST_MODULE_TYPE = SYNTHETIC_MODULE_TYPE, - // Boundary for promotion to old space. - LAST_DATA_TYPE = FILLER_TYPE, - // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy). - // Note that there is no range for JSObject or JSProxy, since their subtypes - // are not continuous in this enum! The enum ranges instead reflect the - // external class names, where proxies are treated as either ordinary objects, - // or functions. - FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE, - LAST_JS_RECEIVER_TYPE = LAST_TYPE, - // Boundaries for testing the types represented as JSObject - FIRST_JS_OBJECT_TYPE = JS_GLOBAL_OBJECT_TYPE, - LAST_JS_OBJECT_TYPE = LAST_TYPE, // Boundary for testing JSReceivers that need special property lookup handling - LAST_SPECIAL_RECEIVER_TYPE = JS_SPECIAL_API_OBJECT_TYPE, + LAST_SPECIAL_RECEIVER_TYPE = LAST_JS_SPECIAL_OBJECT_TYPE, // Boundary case for testing JSReceivers that may have elements while having // an empty fixed array as elements backing store. This is true for string // wrappers. - LAST_CUSTOM_ELEMENTS_RECEIVER = JS_PRIMITIVE_WRAPPER_TYPE, - - FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE, - LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE, - - FIRST_MAP_ITERATOR_TYPE = JS_MAP_KEY_ITERATOR_TYPE, - LAST_MAP_ITERATOR_TYPE = JS_MAP_VALUE_ITERATOR_TYPE, + LAST_CUSTOM_ELEMENTS_RECEIVER = LAST_JS_CUSTOM_ELEMENTS_OBJECT_TYPE, + + // Convenient names for things where the generated name is awkward: + FIRST_TYPE = FIRST_HEAP_OBJECT_TYPE, + LAST_TYPE = LAST_HEAP_OBJECT_TYPE, + FIRST_FUNCTION_TYPE = FIRST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE, + LAST_FUNCTION_TYPE = LAST_JS_FUNCTION_OR_BOUND_FUNCTION_TYPE, + BIGINT_TYPE = BIG_INT_BASE_TYPE, }; // This constant is defined outside of the InstanceType enum because the @@ -389,6 +168,40 @@ STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType); STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType); STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType); +// Verify that string types are all less than other types. +#define CHECK_STRING_RANGE(TYPE, ...) \ + STATIC_ASSERT(TYPE < FIRST_NONSTRING_TYPE); +STRING_TYPE_LIST(CHECK_STRING_RANGE) +#undef CHECK_STRING_RANGE +#define CHECK_NONSTRING_RANGE(TYPE) STATIC_ASSERT(TYPE >= FIRST_NONSTRING_TYPE); +TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(CHECK_NONSTRING_RANGE) +#undef CHECK_NONSTRING_RANGE + +// Two ranges don't cleanly follow the inheritance hierarchy. Here we ensure +// that only expected types fall within these ranges. +// - From FIRST_JS_RECEIVER_TYPE to LAST_SPECIAL_RECEIVER_TYPE should correspond +// to the union type JSProxy | JSSpecialObject. 
+// - From FIRST_JS_RECEIVER_TYPE to LAST_CUSTOM_ELEMENTS_RECEIVER should +// correspond to the union type JSProxy | JSCustomElementsObject. +// Note in particular that these ranges include all subclasses of JSReceiver +// that are not also subclasses of JSObject (currently only JSProxy). +#define CHECK_INSTANCE_TYPE(TYPE) \ + STATIC_ASSERT((TYPE >= FIRST_JS_RECEIVER_TYPE && \ + TYPE <= LAST_SPECIAL_RECEIVER_TYPE) == \ + (TYPE == JS_PROXY_TYPE || TYPE == JS_GLOBAL_OBJECT_TYPE || \ + TYPE == JS_GLOBAL_PROXY_TYPE || \ + TYPE == JS_MODULE_NAMESPACE_TYPE || \ + TYPE == JS_SPECIAL_API_OBJECT_TYPE)); \ + STATIC_ASSERT((TYPE >= FIRST_JS_RECEIVER_TYPE && \ + TYPE <= LAST_CUSTOM_ELEMENTS_RECEIVER) == \ + (TYPE == JS_PROXY_TYPE || TYPE == JS_GLOBAL_OBJECT_TYPE || \ + TYPE == JS_GLOBAL_PROXY_TYPE || \ + TYPE == JS_MODULE_NAMESPACE_TYPE || \ + TYPE == JS_SPECIAL_API_OBJECT_TYPE || \ + TYPE == JS_PRIMITIVE_WRAPPER_TYPE)); +TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(CHECK_INSTANCE_TYPE) +#undef CHECK_INSTANCE_TYPE + // Make sure it doesn't matter whether we sign-extend or zero-extend these // values, because Torque treats InstanceType as signed. STATIC_ASSERT(LAST_TYPE < 1 << 15); @@ -424,8 +237,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, V(FreeSpace, FREE_SPACE_TYPE) \ V(GlobalDictionary, GLOBAL_DICTIONARY_TYPE) \ V(HeapNumber, HEAP_NUMBER_TYPE) \ - V(JSArgumentsObject, JS_ARGUMENTS_TYPE) \ - V(JSArgumentsObjectWithLength, JS_ARGUMENTS_TYPE) \ + V(JSArgumentsObject, JS_ARGUMENTS_OBJECT_TYPE) \ + V(JSArgumentsObjectWithLength, JS_ARGUMENTS_OBJECT_TYPE) \ V(JSArray, JS_ARRAY_TYPE) \ V(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE) \ V(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE) \ @@ -449,9 +262,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, V(JSPrimitiveWrapper, JS_PRIMITIVE_WRAPPER_TYPE) \ V(JSPromise, JS_PROMISE_TYPE) \ V(JSProxy, JS_PROXY_TYPE) \ - V(JSRegExp, JS_REGEXP_TYPE) \ + V(JSRegExp, JS_REG_EXP_TYPE) \ V(JSRegExpResult, JS_ARRAY_TYPE) \ - V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE) \ + V(JSRegExpResultIndices, JS_ARRAY_TYPE) \ + V(JSRegExpStringIterator, JS_REG_EXP_STRING_ITERATOR_TYPE) \ V(JSSet, JS_SET_TYPE) \ V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \ V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \ @@ -487,28 +301,28 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, V(UncompiledDataWithoutPreparseData, \ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \ V(UncompiledDataWithPreparseData, UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \ - V(WasmExceptionObject, WASM_EXCEPTION_TYPE) \ - V(WasmGlobalObject, WASM_GLOBAL_TYPE) \ - V(WasmInstanceObject, WASM_INSTANCE_TYPE) \ - V(WasmMemoryObject, WASM_MEMORY_TYPE) \ - V(WasmModuleObject, WASM_MODULE_TYPE) \ - V(WasmTableObject, WASM_TABLE_TYPE) \ + V(WasmExceptionObject, WASM_EXCEPTION_OBJECT_TYPE) \ + V(WasmGlobalObject, WASM_GLOBAL_OBJECT_TYPE) \ + V(WasmInstanceObject, WASM_INSTANCE_OBJECT_TYPE) \ + V(WasmMemoryObject, WASM_MEMORY_OBJECT_TYPE) \ + V(WasmModuleObject, WASM_MODULE_OBJECT_TYPE) \ + V(WasmTableObject, WASM_TABLE_OBJECT_TYPE) \ V(WeakArrayList, WEAK_ARRAY_LIST_TYPE) \ V(WeakCell, WEAK_CELL_TYPE) #ifdef V8_INTL_SUPPORT -#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \ - INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \ - V(JSV8BreakIterator, JS_INTL_V8_BREAK_ITERATOR_TYPE) \ - V(JSCollator, JS_INTL_COLLATOR_TYPE) \ - V(JSDateTimeFormat, JS_INTL_DATE_TIME_FORMAT_TYPE) \ - V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \ - V(JSLocale, JS_INTL_LOCALE_TYPE) \ - V(JSNumberFormat, JS_INTL_NUMBER_FORMAT_TYPE) \ - 
V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \ - V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \ - V(JSSegmentIterator, JS_INTL_SEGMENT_ITERATOR_TYPE) \ - V(JSSegmenter, JS_INTL_SEGMENTER_TYPE) +#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \ + INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \ + V(JSV8BreakIterator, JS_V8_BREAK_ITERATOR_TYPE) \ + V(JSCollator, JS_COLLATOR_TYPE) \ + V(JSDateTimeFormat, JS_DATE_TIME_FORMAT_TYPE) \ + V(JSListFormat, JS_LIST_FORMAT_TYPE) \ + V(JSLocale, JS_LOCALE_TYPE) \ + V(JSNumberFormat, JS_NUMBER_FORMAT_TYPE) \ + V(JSPluralRules, JS_PLURAL_RULES_TYPE) \ + V(JSRelativeTimeFormat, JS_RELATIVE_TIME_FORMAT_TYPE) \ + V(JSSegmentIterator, JS_SEGMENT_ITERATOR_TYPE) \ + V(JSSegmenter, JS_SEGMENTER_TYPE) #else @@ -516,16 +330,23 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, #endif // V8_INTL_SUPPORT -#define INSTANCE_TYPE_CHECKERS_RANGE(V) \ - V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \ - V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \ - V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \ - V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \ - V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \ - V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \ - V(Module, FIRST_MODULE_TYPE, LAST_MODULE_TYPE) \ - V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE) \ - V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE) \ +#define INSTANCE_TYPE_CHECKERS_RANGE(V) \ + V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \ + V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \ + V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \ + V(JSCustomElementsObject, FIRST_JS_CUSTOM_ELEMENTS_OBJECT_TYPE, \ + LAST_JS_CUSTOM_ELEMENTS_OBJECT_TYPE) \ + V(JSFunctionOrBoundFunction, FIRST_FUNCTION_TYPE, LAST_FUNCTION_TYPE) \ + V(JSMapIterator, FIRST_JS_MAP_ITERATOR_TYPE, LAST_JS_MAP_ITERATOR_TYPE) \ + V(JSSetIterator, FIRST_JS_SET_ITERATOR_TYPE, LAST_JS_SET_ITERATOR_TYPE) \ + V(JSSpecialObject, FIRST_JS_SPECIAL_OBJECT_TYPE, \ + LAST_JS_SPECIAL_OBJECT_TYPE) \ + V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \ + V(Module, FIRST_MODULE_TYPE, LAST_MODULE_TYPE) \ + V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE) \ + V(PrimitiveHeapObject, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE, \ + LAST_PRIMITIVE_HEAP_OBJECT_TYPE) \ + V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE) \ V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE) #define INSTANCE_TYPE_CHECKERS_CUSTOM(V) \ diff --git a/chromium/v8/src/objects/internal-index.h b/chromium/v8/src/objects/internal-index.h new file mode 100644 index 00000000000..ce7378a9017 --- /dev/null +++ b/chromium/v8/src/objects/internal-index.h @@ -0,0 +1,79 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_INTERNAL_INDEX_H_ +#define V8_OBJECTS_INTERNAL_INDEX_H_ + +#include <stdint.h> + +#include <limits> + +#include "src/base/logging.h" + +namespace v8 { +namespace internal { + +// Simple wrapper around an entry (which is notably different from "index" for +// dictionary backing stores). Most code should treat this as an opaque +// wrapper: get it via GetEntryForIndex, pass it on to consumers. 
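For orientation while reading this new header: a minimal sketch of how the InternalIndex class declared immediately below is meant to be used. The FindEntry helper and the table size are illustrative assumptions, not part of this patch.

#include "src/objects/internal-index.h"

namespace v8 {
namespace internal {

// Hypothetical lookup helper: returns the entry for a key, or NotFound().
InternalIndex FindEntry(int key) {
  if (key < 0) return InternalIndex::NotFound();
  return InternalIndex(static_cast<size_t>(key));
}

void ExampleUsage() {
  InternalIndex entry = FindEntry(7);
  if (entry.is_found()) {
    // Convert to a plain int only at the edge that really needs one.
    int raw = entry.as_int();
    (void)raw;
  }
  // Iterate over all entries of a table with 16 entries.
  for (InternalIndex i : InternalIndex::Range(16)) {
    (void)i.raw_value();
  }
}

}  // namespace internal
}  // namespace v8

The point of the wrapper is that dictionary "entries" and array "indices" stop being interchangeable ints, so mixing them up becomes a type error rather than a silent bug.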
+class InternalIndex { + public: + explicit InternalIndex(size_t raw) : entry_(raw) {} + static InternalIndex NotFound() { return InternalIndex(kNotFound); } + + InternalIndex adjust_down(size_t subtract) { + DCHECK_GE(entry_, subtract); + return InternalIndex(entry_ - subtract); + } + InternalIndex adjust_up(size_t add) { + DCHECK_LT(entry_, std::numeric_limits<size_t>::max() - add); + return InternalIndex(entry_ + add); + } + + bool is_found() const { return entry_ != kNotFound; } + bool is_not_found() const { return entry_ == kNotFound; } + + size_t raw_value() const { return entry_; } + uint32_t as_uint32() const { + DCHECK_LE(entry_, std::numeric_limits<uint32_t>::max()); + return static_cast<uint32_t>(entry_); + } + int as_int() const { + DCHECK(entry_ >= 0 && entry_ <= std::numeric_limits<int>::max()); + return static_cast<int>(entry_); + } + + bool operator==(const InternalIndex& other) { return entry_ == other.entry_; } + + // Iteration support. + InternalIndex operator*() { return *this; } + bool operator!=(const InternalIndex& other) { return entry_ != other.entry_; } + InternalIndex& operator++() { + entry_++; + return *this; + } + + class Range { + public: + explicit Range(size_t max) : min_(0), max_(max) {} + Range(size_t min, size_t max) : min_(min), max_(max) {} + + InternalIndex begin() { return InternalIndex(min_); } + InternalIndex end() { return InternalIndex(max_); } + + private: + size_t min_; + size_t max_; + }; + + private: + static const size_t kNotFound = std::numeric_limits<size_t>::max(); + + size_t entry_; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_INTERNAL_INDEX_H_ diff --git a/chromium/v8/src/objects/intl-objects.cc b/chromium/v8/src/objects/intl-objects.cc index dbf212aaf82..a6a2fdd2299 100644 --- a/chromium/v8/src/objects/intl-objects.cc +++ b/chromium/v8/src/objects/intl-objects.cc @@ -20,6 +20,7 @@ #include "src/objects/js-collator-inl.h" #include "src/objects/js-date-time-format-inl.h" #include "src/objects/js-locale-inl.h" +#include "src/objects/js-locale.h" #include "src/objects/js-number-format-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/property-descriptor.h" @@ -32,6 +33,7 @@ #include "unicode/datefmt.h" #include "unicode/decimfmt.h" #include "unicode/formattedvalue.h" +#include "unicode/localebuilder.h" #include "unicode/locid.h" #include "unicode/normalizer2.h" #include "unicode/numberformatter.h" @@ -177,12 +179,13 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat, template <typename T> MaybeHandle<T> New(Isolate* isolate, Handle<JSFunction> constructor, - Handle<Object> locales, Handle<Object> options) { + Handle<Object> locales, Handle<Object> options, + const char* method) { Handle<Map> map; ASSIGN_RETURN_ON_EXCEPTION( isolate, map, JSFunction::GetDerivedMap(isolate, constructor, constructor), T); - return T::New(isolate, map, locales, options); + return T::New(isolate, map, locales, options, method); } } // namespace @@ -783,6 +786,11 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate, } std::string locale(locale_str->ToCString().get()); + if (!IsStructurallyValidLanguageTag(locale)) { + THROW_NEW_ERROR_RETURN_VALUE( + isolate, NewRangeError(MessageTemplate::kLocaleBadParameters), + Nothing<std::string>()); + } return Intl::CanonicalizeLanguageTag(isolate, locale); } @@ -995,11 +1003,9 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate, } } -MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate, - Handle<String> string1, - 
Handle<String> string2, - Handle<Object> locales, - Handle<Object> options) { +MaybeHandle<Object> Intl::StringLocaleCompare( + Isolate* isolate, Handle<String> string1, Handle<String> string2, + Handle<Object> locales, Handle<Object> options, const char* method) { // We only cache the instance when both locales and options are undefined, // as that is the only case when the specified side-effects of examining // those arguments are unobservable. @@ -1025,7 +1031,7 @@ MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate, Handle<JSCollator> collator; ASSIGN_RETURN_ON_EXCEPTION( isolate, collator, - New<JSCollator>(isolate, constructor, locales, options), Object); + New<JSCollator>(isolate, constructor, locales, options, method), Object); if (can_cache) { isolate->set_icu_object_in_cache( Isolate::ICUObjectCacheType::kDefaultCollator, @@ -1084,15 +1090,11 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate, MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate, Handle<Object> num, Handle<Object> locales, - Handle<Object> options) { + Handle<Object> options, + const char* method) { Handle<Object> numeric_obj; - if (FLAG_harmony_intl_bigint) { - ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj, - Object::ToNumeric(isolate, num), String); - } else { - ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj, - Object::ToNumber(isolate, num), String); - } + ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj, + Object::ToNumeric(isolate, num), String); // We only cache the instance when both locales and options are undefined, // as that is the only case when the specified side-effects of examining @@ -1119,7 +1121,8 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate, // 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »). ASSIGN_RETURN_ON_EXCEPTION( isolate, number_format, - New<JSNumberFormat>(isolate, constructor, locales, options), String); + New<JSNumberFormat>(isolate, constructor, locales, options, method), + String); if (can_cache) { isolate->set_icu_object_in_cache( @@ -1203,40 +1206,18 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions( int mxfd = 0; Handle<Object> mnfd_obj; Handle<Object> mxfd_obj; - if (FLAG_harmony_intl_numberformat_unified) { - // 6. Let mnfd be ? Get(options, "minimumFractionDigits"). - Handle<String> mnfd_str = factory->minimumFractionDigits_string(); - ASSIGN_RETURN_ON_EXCEPTION_VALUE( - isolate, mnfd_obj, JSReceiver::GetProperty(isolate, options, mnfd_str), - Nothing<NumberFormatDigitOptions>()); - - // 8. Let mnfd be ? Get(options, "maximumFractionDigits"). - Handle<String> mxfd_str = factory->maximumFractionDigits_string(); - ASSIGN_RETURN_ON_EXCEPTION_VALUE( - isolate, mxfd_obj, JSReceiver::GetProperty(isolate, options, mxfd_str), - Nothing<NumberFormatDigitOptions>()); - } else { - // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20, - // mnfdDefault). - if (!Intl::GetNumberOption(isolate, options, - factory->minimumFractionDigits_string(), 0, 20, - mnfd_default) - .To(&mnfd)) { - return Nothing<NumberFormatDigitOptions>(); - } - // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ). - int mxfd_actual_default = std::max(mnfd, mxfd_default); + // 6. Let mnfd be ? Get(options, "minimumFractionDigits"). + Handle<String> mnfd_str = factory->minimumFractionDigits_string(); + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, mnfd_obj, JSReceiver::GetProperty(isolate, options, mnfd_str), + Nothing<NumberFormatDigitOptions>()); - // 8. Let mxfd be ? 
GetNumberOption(options, - // "maximumFractionDigits", mnfd, 20, mxfdActualDefault). - if (!Intl::GetNumberOption(isolate, options, - factory->maximumFractionDigits_string(), mnfd, - 20, mxfd_actual_default) - .To(&mxfd)) { - return Nothing<NumberFormatDigitOptions>(); - } - } + // 8. Let mxfd be ? Get(options, "maximumFractionDigits"). + Handle<String> mxfd_str = factory->maximumFractionDigits_string(); + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, mxfd_obj, JSReceiver::GetProperty(isolate, options, mxfd_str), + Nothing<NumberFormatDigitOptions>()); // 9. Let mnsd be ? Get(options, "minimumSignificantDigits"). Handle<Object> mnsd_obj; @@ -1285,47 +1266,44 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions( digit_options.minimum_significant_digits = 0; digit_options.maximum_significant_digits = 0; - if (FLAG_harmony_intl_numberformat_unified) { - // 15. Else If mnfd is not undefined or mxfd is not undefined, then - if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) { - // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault). - Handle<String> mnfd_str = factory->minimumFractionDigits_string(); - if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default, - mnfd_str) - .To(&mnfd)) { - return Nothing<NumberFormatDigitOptions>(); - } - - // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ). - int mxfd_actual_default = std::max(mnfd, mxfd_default); + // 15. Else If mnfd is not undefined or mxfd is not undefined, then + if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) { + // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault). + Handle<String> mnfd_str = factory->minimumFractionDigits_string(); + if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default, mnfd_str) + .To(&mnfd)) { + return Nothing<NumberFormatDigitOptions>(); + } - // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20, - // mxfdActualDefault). - Handle<String> mxfd_str = factory->maximumFractionDigits_string(); - if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20, - mxfd_actual_default, mxfd_str) - .To(&mxfd)) { - return Nothing<NumberFormatDigitOptions>(); - } - // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd. - digit_options.minimum_fraction_digits = mnfd; - - // 15. f. Set intlObj.[[MaximumFractionDigits]] to mxfd. - digit_options.maximum_fraction_digits = mxfd; - // Else If intlObj.[[Notation]] is "compact", then - } else if (notation_is_compact) { - // a. Set intlObj.[[RoundingType]] to "compact-rounding". - // Set minimum_significant_digits to -1 to represent roundingtype is - // "compact-rounding". - digit_options.minimum_significant_digits = -1; - // 17. Else, - } else { - // 17. b. Set intlObj.[[MinimumFractionDigits]] to mnfdDefault. - digit_options.minimum_fraction_digits = mnfd_default; + // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ). + int mxfd_actual_default = std::max(mnfd, mxfd_default); - // 17. c. Set intlObj.[[MaximumFractionDigits]] to mxfdDefault. - digit_options.maximum_fraction_digits = mxfd_default; + // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20, + // mxfdActualDefault). + Handle<String> mxfd_str = factory->maximumFractionDigits_string(); + if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20, mxfd_actual_default, + mxfd_str) + .To(&mxfd)) { + return Nothing<NumberFormatDigitOptions>(); } + // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd. + digit_options.minimum_fraction_digits = mnfd; + + // 15. f. 
Set intlObj.[[MaximumFractionDigits]] to mxfd. + digit_options.maximum_fraction_digits = mxfd; + // Else If intlObj.[[Notation]] is "compact", then + } else if (notation_is_compact) { + // a. Set intlObj.[[RoundingType]] to "compact-rounding". + // Set minimum_significant_digits to -1 to represent roundingtype is + // "compact-rounding". + digit_options.minimum_significant_digits = -1; + // 17. Else, + } else { + // 17. b. Set intlObj.[[MinimumFractionDigits]] to mnfdDefault. + digit_options.minimum_fraction_digits = mnfd_default; + + // 17. c. Set intlObj.[[MaximumFractionDigits]] to mxfdDefault. + digit_options.maximum_fraction_digits = mxfd_default; } } return Just(digit_options); @@ -1605,14 +1583,16 @@ bool IsValidCollation(const icu::Locale& locale, const std::string& value) { } // namespace +bool Intl::IsWellFormedCalendar(const std::string& value) { + return JSLocale::Is38AlphaNumList(value); +} + bool Intl::IsValidCalendar(const icu::Locale& locale, const std::string& value) { return IsValidExtension<icu::Calendar>(locale, "calendar", value); } -namespace { - -bool IsValidNumberingSystem(const std::string& value) { +bool Intl::IsValidNumberingSystem(const std::string& value) { std::set<std::string> invalid_values = {"native", "traditio", "finance"}; if (invalid_values.find(value) != invalid_values.end()) return false; UErrorCode status = U_ZERO_ERROR; @@ -1621,11 +1601,19 @@ bool IsValidNumberingSystem(const std::string& value) { return U_SUCCESS(status) && numbering_system.get() != nullptr; } +namespace { + +bool IsWellFormedNumberingSystem(const std::string& value) { + return JSLocale::Is38AlphaNumList(value); +} + std::map<std::string, std::string> LookupAndValidateUnicodeExtensions( icu::Locale* icu_locale, const std::set<std::string>& relevant_keys) { std::map<std::string, std::string> extensions; UErrorCode status = U_ZERO_ERROR; + icu::LocaleBuilder builder; + builder.setLocale(*icu_locale).clearExtensions(); std::unique_ptr<icu::StringEnumeration> keywords( icu_locale->createKeywords(status)); if (U_FAILURE(status)) return extensions; @@ -1682,20 +1670,19 @@ std::map<std::string, std::string> LookupAndValidateUnicodeExtensions( std::set<std::string> valid_values = {"upper", "lower", "false"}; is_valid_value = valid_values.find(bcp47_value) != valid_values.end(); } else if (strcmp("nu", bcp47_key) == 0) { - is_valid_value = IsValidNumberingSystem(bcp47_value); + is_valid_value = Intl::IsValidNumberingSystem(bcp47_value); } if (is_valid_value) { extensions.insert( std::pair<std::string, std::string>(bcp47_key, bcp47_value)); - continue; + builder.setUnicodeLocaleKeyword(bcp47_key, bcp47_value); } } - status = U_ZERO_ERROR; - icu_locale->setUnicodeKeywordValue( - bcp47_key == nullptr ? 
keyword : bcp47_key, nullptr, status); - CHECK(U_SUCCESS(status)); } + status = U_ZERO_ERROR; + *icu_locale = builder.build(status); + return extensions; } @@ -2003,7 +1990,7 @@ Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate, empty_values, method, result); MAYBE_RETURN(maybe, Nothing<bool>()); if (maybe.FromJust() && *result != nullptr) { - if (!IsValidNumberingSystem(result->get())) { + if (!IsWellFormedNumberingSystem(result->get())) { THROW_NEW_ERROR_RETURN_VALUE( isolate, NewRangeError( @@ -2120,5 +2107,9 @@ MaybeHandle<String> Intl::FormattedToString( return Intl::ToString(isolate, result); } +bool Intl::IsStructurallyValidLanguageTag(const std::string& tag) { + return JSLocale::StartsWithUnicodeLanguageId(tag); +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/objects/intl-objects.h b/chromium/v8/src/objects/intl-objects.h index 4d4d3245fd3..0c4a77b745f 100644 --- a/chromium/v8/src/objects/intl-objects.h +++ b/chromium/v8/src/objects/intl-objects.h @@ -10,6 +10,7 @@ #define V8_OBJECTS_INTL_OBJECTS_H_ #include <map> +#include <memory> #include <set> #include <string> @@ -164,7 +165,7 @@ class Intl { V8_WARN_UNUSED_RESULT static MaybeHandle<Object> StringLocaleCompare( Isolate* isolate, Handle<String> s1, Handle<String> s2, - Handle<Object> locales, Handle<Object> options); + Handle<Object> locales, Handle<Object> options, const char* method); V8_WARN_UNUSED_RESULT static Handle<Object> CompareStrings( Isolate* isolate, const icu::Collator& collator, Handle<String> s1, @@ -173,7 +174,7 @@ class Intl { // ecma402/#sup-properties-of-the-number-prototype-object V8_WARN_UNUSED_RESULT static MaybeHandle<String> NumberToLocaleString( Isolate* isolate, Handle<Object> num, Handle<Object> locales, - Handle<Object> options); + Handle<Object> options, const char* method); // ecma402/#sec-setnfdigitoptions struct NumberFormatDigitOptions { @@ -239,14 +240,14 @@ class Intl { Handle<JSFunction> constructor, bool has_initialized_slot); // enum for "caseFirst" option: shared by Intl.Locale and Intl.Collator. - enum class CaseFirst { kUpper, kLower, kFalse, kUndefined }; + enum class CaseFirst { kUndefined, kUpper, kLower, kFalse }; // Shared function to read the "caseFirst" option. V8_WARN_UNUSED_RESULT static Maybe<CaseFirst> GetCaseFirst( Isolate* isolate, Handle<JSReceiver> options, const char* method); // enum for "hourCycle" option: shared by Intl.Locale and Intl.DateTimeFormat. - enum class HourCycle { kH11, kH12, kH23, kH24, kUndefined }; + enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 }; static HourCycle ToHourCycle(const std::string& str); @@ -270,6 +271,12 @@ class Intl { static bool IsValidCalendar(const icu::Locale& locale, const std::string& value); + // Check the numberingSystem is valid. + static bool IsValidNumberingSystem(const std::string& value); + + // Check the calendar is well formed. 
+ static bool IsWellFormedCalendar(const std::string& value); + struct ResolvedLocale { std::string locale; icu::Locale icu_locale; @@ -336,6 +343,8 @@ class Intl { static const std::set<std::string>& GetAvailableLocalesForLocale(); static const std::set<std::string>& GetAvailableLocalesForDateFormat(); + + static bool IsStructurallyValidLanguageTag(const std::string& tag); }; } // namespace internal diff --git a/chromium/v8/src/objects/js-array-buffer-inl.h b/chromium/v8/src/objects/js-array-buffer-inl.h index 9151be6da49..4ed347baa80 100644 --- a/chromium/v8/src/objects/js-array-buffer-inl.h +++ b/chromium/v8/src/objects/js-array-buffer-inl.h @@ -48,14 +48,6 @@ size_t JSArrayBuffer::allocation_length() const { if (backing_store() == nullptr) { return 0; } - // If this buffer is managed by the WasmMemoryTracker - if (is_wasm_memory()) { - const auto* data = - GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData( - backing_store()); - DCHECK_NOT_NULL(data); - return data->allocation_length; - } return byte_length(); } @@ -63,25 +55,9 @@ void* JSArrayBuffer::allocation_base() const { if (backing_store() == nullptr) { return nullptr; } - // If this buffer is managed by the WasmMemoryTracker - if (is_wasm_memory()) { - const auto* data = - GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData( - backing_store()); - DCHECK_NOT_NULL(data); - return data->allocation_base; - } return backing_store(); } -bool JSArrayBuffer::is_wasm_memory() const { - return IsWasmMemoryBit::decode(bit_field()); -} - -void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) { - set_bit_field(IsWasmMemoryBit::update(bit_field(), is_wasm_memory)); -} - void JSArrayBuffer::clear_padding() { if (FIELD_SIZE(kOptionalPaddingOffset) != 0) { DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset)); @@ -105,6 +81,8 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_detachable, JSArrayBuffer::IsDetachableBit) BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, was_detached, JSArrayBuffer::WasDetachedBit) +BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory, + JSArrayBuffer::IsAsmJsMemoryBit) BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared, JSArrayBuffer::IsSharedBit) @@ -136,31 +114,61 @@ void JSTypedArray::set_length(size_t value) { WriteField<size_t>(kLengthOffset, value); } -void* JSTypedArray::external_pointer() const { - return reinterpret_cast<void*>(ReadField<Address>(kExternalPointerOffset)); +Address JSTypedArray::external_pointer() const { + return ReadField<Address>(kExternalPointerOffset); +} + +void JSTypedArray::set_external_pointer(Address value) { + WriteField<Address>(kExternalPointerOffset, value); } -void JSTypedArray::set_external_pointer(void* value) { - WriteField<Address>(kExternalPointerOffset, reinterpret_cast<Address>(value)); +Address JSTypedArray::ExternalPointerCompensationForOnHeapArray( + Isolate* isolate) { +#ifdef V8_COMPRESS_POINTERS + return GetIsolateRoot(isolate); +#else + return 0; +#endif +} + +void JSTypedArray::RemoveExternalPointerCompensationForSerialization() { + DCHECK(is_on_heap()); + Isolate* isolate = GetIsolateForPtrCompr(*this); + set_external_pointer(external_pointer() - + ExternalPointerCompensationForOnHeapArray(isolate)); } ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset) void* JSTypedArray::DataPtr() { - return reinterpret_cast<void*>( - base_pointer().ptr() + reinterpret_cast<intptr_t>(external_pointer())); + // Zero-extend Tagged_t to Address according to current compression scheme + // so that the addition with 
|external_pointer| (which already contains + // compensated offset value) will decompress the tagged value. + // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for details. + return reinterpret_cast<void*>(external_pointer() + + static_cast<Tagged_t>(base_pointer().ptr())); +} + +void JSTypedArray::SetOffHeapDataPtr(void* base, Address offset) { + set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER); + Address address = reinterpret_cast<Address>(base) + offset; + set_external_pointer(address); + DCHECK_EQ(address, reinterpret_cast<Address>(DataPtr())); +} + +void JSTypedArray::SetOnHeapDataPtr(HeapObject base, Address offset) { + set_base_pointer(base); + Isolate* isolate = GetIsolateForPtrCompr(*this); + set_external_pointer(offset + + ExternalPointerCompensationForOnHeapArray(isolate)); + DCHECK_EQ(base.ptr() + offset, reinterpret_cast<Address>(DataPtr())); } bool JSTypedArray::is_on_heap() const { DisallowHeapAllocation no_gc; // Checking that buffer()->backing_store() is not nullptr is not sufficient; // it will be nullptr when byte_length is 0 as well. - return base_pointer().ptr() == elements().ptr(); -} - -// static -void* JSTypedArray::ExternalPointerForOnHeapArray() { - return reinterpret_cast<void*>(ByteArray::kHeaderSize - kHeapObjectTag); + return base_pointer() == elements(); } // static diff --git a/chromium/v8/src/objects/js-array-buffer.cc b/chromium/v8/src/objects/js-array-buffer.cc index a506920f952..d3f5a0a9520 100644 --- a/chromium/v8/src/objects/js-array-buffer.cc +++ b/chromium/v8/src/objects/js-array-buffer.cc @@ -5,6 +5,7 @@ #include "src/objects/js-array-buffer.h" #include "src/objects/js-array-buffer-inl.h" +#include "src/execution/protectors-inl.h" #include "src/logging/counters.h" #include "src/objects/property-descriptor.h" @@ -31,167 +32,105 @@ bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s, *index = result; return true; } - -inline int ConvertToMb(size_t size) { - return static_cast<int>(size / static_cast<size_t>(MB)); -} - } // anonymous namespace -void JSArrayBuffer::Detach() { - CHECK(is_detachable()); - CHECK(!was_detached()); - CHECK(is_external()); - set_backing_store(nullptr); - set_byte_length(0); - set_was_detached(true); - set_is_detachable(false); - // Invalidate the detaching protector. - Isolate* const isolate = GetIsolate(); - if (isolate->IsArrayBufferDetachingIntact()) { - isolate->InvalidateArrayBufferDetachingProtector(); +void JSArrayBuffer::Setup(SharedFlag shared, + std::shared_ptr<BackingStore> backing_store) { + clear_padding(); + set_bit_field(0); + set_is_shared(shared == SharedFlag::kShared); + set_is_detachable(shared != SharedFlag::kShared); + for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) { + SetEmbedderField(i, Smi::kZero); + } + if (!backing_store) { + set_backing_store(nullptr); + set_byte_length(0); + } else { + Attach(std::move(backing_store)); } } -void JSArrayBuffer::FreeBackingStoreFromMainThread() { - if (allocation_base() == nullptr) { - return; - } - FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(), - backing_store(), is_wasm_memory()}); - // Zero out the backing store and allocation base to avoid dangling - // pointers. 
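As an aside on the DataPtr() change above: the arithmetic is easier to follow written out. This is only an illustration of the scheme described in the comments, with invented names; the real compensation value comes from ExternalPointerCompensationForOnHeapArray().

// For an on-heap typed array with pointer compression enabled:
//   external_pointer = isolate_root + data_offset   (the compensated value)
//   base_pointer     = tagged pointer to the on-heap elements object
// so a single addition both decompresses the base and applies the offset:
//   DataPtr() = external_pointer + static_cast<Tagged_t>(base_pointer)
//             = isolate_root + data_offset + compressed(base)
//             = decompressed(base) + data_offset
// Without compression the compensation is zero, and for off-heap arrays
// base_pointer is Smi::kZero, so DataPtr() reduces to external_pointer.
Address ComputeDataPtr(Address external_pointer, Address base_pointer) {
  return external_pointer + static_cast<Tagged_t>(base_pointer);
}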
- set_backing_store(nullptr); +void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) { + DCHECK_NOT_NULL(backing_store); + DCHECK_EQ(is_shared(), backing_store->is_shared()); + set_backing_store(backing_store->buffer_start()); + set_byte_length(backing_store->byte_length()); + if (backing_store->is_wasm_memory()) set_is_detachable(false); + if (!backing_store->free_on_destruct()) set_is_external(true); + GetIsolate()->heap()->RegisterBackingStore(*this, std::move(backing_store)); } -// static -void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) { - if (allocation.is_wasm_memory) { - wasm::WasmMemoryTracker* memory_tracker = - isolate->wasm_engine()->memory_tracker(); - memory_tracker->FreeWasmMemory(isolate, allocation.backing_store); - } else { - isolate->array_buffer_allocator()->Free(allocation.allocation_base, - allocation.length); +void JSArrayBuffer::Detach(bool force_for_wasm_memory) { + if (was_detached()) return; + + if (force_for_wasm_memory) { + // Skip the is_detachable() check. + } else if (!is_detachable()) { + // Not detachable, do nothing. + return; } -} -void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate, - bool is_external, void* data, size_t byte_length, - SharedFlag shared_flag, bool is_wasm_memory) { - DCHECK_EQ(array_buffer->GetEmbedderFieldCount(), - v8::ArrayBuffer::kEmbedderFieldCount); - DCHECK_LE(byte_length, JSArrayBuffer::kMaxByteLength); - for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) { - array_buffer->SetEmbedderField(i, Smi::kZero); + Isolate* const isolate = GetIsolate(); + if (backing_store()) { + auto backing_store = isolate->heap()->UnregisterBackingStore(*this); + CHECK_IMPLIES(force_for_wasm_memory, backing_store->is_wasm_memory()); } - array_buffer->set_byte_length(byte_length); - array_buffer->set_bit_field(0); - array_buffer->clear_padding(); - array_buffer->set_is_external(is_external); - array_buffer->set_is_detachable(shared_flag == SharedFlag::kNotShared); - array_buffer->set_is_shared(shared_flag == SharedFlag::kShared); - array_buffer->set_is_wasm_memory(is_wasm_memory); - // Initialize backing store at last to avoid handling of |JSArrayBuffers| that - // are currently being constructed in the |ArrayBufferTracker|. The - // registration method below handles the case of registering a buffer that has - // already been promoted. 
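To contrast with the legacy Setup/FreeBackingStore path being removed here, a rough sketch of the call sequence the new API expects, pieced together from the functions added in this patch; the surrounding handle plumbing, the byte_length variable, and the error handling are assumptions.

// Somewhere in v8::internal, given an Isolate* isolate and an already
// allocated, still-empty Handle<JSArrayBuffer> array_buffer:
std::shared_ptr<BackingStore> store = BackingStore::Allocate(
    isolate, byte_length, SharedFlag::kNotShared,
    InitializedFlag::kUninitialized);
if (!store) {
  // Allocation failed; the caller decides how to report out-of-memory.
} else {
  // Setup() registers the store with the heap and fills in length and flags;
  // passing a null store instead leaves the buffer empty (no backing store,
  // zero length).
  array_buffer->Setup(SharedFlag::kNotShared, std::move(store));
}

// Later, detaching unregisters the backing store and zeroes the fields.
// Wasm memory growth passes force_for_wasm_memory = true to bypass the
// is_detachable() check.
array_buffer->Detach();

The design choice is that the shared_ptr, not the JSArrayBuffer, owns the memory; the heap only keeps a registration so the store stays alive while any buffer still points at it.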
- array_buffer->set_backing_store(data); - if (data && !is_external) { - isolate->heap()->RegisterNewArrayBuffer(*array_buffer); + if (Protectors::IsArrayBufferDetachingIntact(isolate)) { + Protectors::InvalidateArrayBufferDetaching(isolate); } -} -void JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer> array_buffer, - Isolate* isolate) { - Setup(array_buffer, isolate, false, nullptr, 0, SharedFlag::kNotShared); + DCHECK(!is_shared()); + DCHECK(!is_asmjs_memory()); + set_backing_store(nullptr); + set_byte_length(0); + set_was_detached(true); } -bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer, - Isolate* isolate, - size_t allocated_length, - bool initialize, - SharedFlag shared_flag) { - void* data; - CHECK_NOT_NULL(isolate->array_buffer_allocator()); - if (allocated_length != 0) { - if (allocated_length >= MB) - isolate->counters()->array_buffer_big_allocations()->AddSample( - ConvertToMb(allocated_length)); - if (shared_flag == SharedFlag::kShared) - isolate->counters()->shared_array_allocations()->AddSample( - ConvertToMb(allocated_length)); - if (initialize) { - data = isolate->array_buffer_allocator()->Allocate(allocated_length); - } else { - data = isolate->array_buffer_allocator()->AllocateUninitialized( - allocated_length); - } - if (data == nullptr) { - isolate->counters()->array_buffer_new_size_failures()->AddSample( - ConvertToMb(allocated_length)); - SetupAsEmpty(array_buffer, isolate); - return false; - } - } else { - data = nullptr; - } - - const bool is_external = false; - JSArrayBuffer::Setup(array_buffer, isolate, is_external, data, - allocated_length, shared_flag); - return true; +std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() { + return GetIsolate()->heap()->LookupBackingStore(*this); } -Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer( - Handle<JSTypedArray> typed_array) { - DCHECK(typed_array->is_on_heap()); +Handle<JSArrayBuffer> JSTypedArray::GetBuffer() { + Isolate* isolate = GetIsolate(); + Handle<JSTypedArray> self(*this, isolate); + DCHECK(IsTypedArrayElementsKind(self->GetElementsKind())); + + Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(self->buffer()), + isolate); + if (!is_on_heap()) { + // Already is off heap, so return the existing buffer. + return array_buffer; + } - Isolate* isolate = typed_array->GetIsolate(); + // The existing array buffer should be empty. + DCHECK_NULL(array_buffer->backing_store()); - DCHECK(IsTypedArrayElementsKind(typed_array->GetElementsKind())); + // Allocate a new backing store and attach it to the existing array buffer. + size_t byte_length = self->byte_length(); + auto backing_store = + BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared, + InitializedFlag::kUninitialized); - Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()), - isolate); - // This code does not know how to materialize from wasm buffers. - DCHECK(!buffer->is_wasm_memory()); + if (!backing_store) { + isolate->heap()->FatalProcessOutOfMemory("JSTypedArray::GetBuffer"); + } - void* backing_store = - isolate->array_buffer_allocator()->AllocateUninitialized( - typed_array->byte_length()); - if (backing_store == nullptr) { - isolate->heap()->FatalProcessOutOfMemory( - "JSTypedArray::MaterializeArrayBuffer"); + // Copy the elements into the backing store of the array buffer. 
+ if (byte_length > 0) { + memcpy(backing_store->buffer_start(), self->DataPtr(), byte_length); } - buffer->set_is_external(false); - DCHECK_EQ(buffer->byte_length(), typed_array->byte_length()); - // Initialize backing store at last to avoid handling of |JSArrayBuffers| that - // are currently being constructed in the |ArrayBufferTracker|. The - // registration method below handles the case of registering a buffer that has - // already been promoted. - buffer->set_backing_store(backing_store); - // RegisterNewArrayBuffer expects a valid length for adjusting counters. - isolate->heap()->RegisterNewArrayBuffer(*buffer); - memcpy(buffer->backing_store(), typed_array->DataPtr(), - typed_array->byte_length()); - typed_array->set_elements(ReadOnlyRoots(isolate).empty_byte_array()); - typed_array->set_external_pointer(backing_store); - typed_array->set_base_pointer(Smi::kZero); - DCHECK(!typed_array->is_on_heap()); + // Attach the backing store to the array buffer. + array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store)); - return buffer; -} + // Clear the elements of the typed array. + self->set_elements(ReadOnlyRoots(isolate).empty_byte_array()); + self->SetOffHeapDataPtr(array_buffer->backing_store(), 0); + DCHECK(!self->is_on_heap()); -Handle<JSArrayBuffer> JSTypedArray::GetBuffer() { - if (!is_on_heap()) { - Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()), - GetIsolate()); - return array_buffer; - } - Handle<JSTypedArray> self(*this, GetIsolate()); - return MaterializeArrayBuffer(self); + return array_buffer; } // ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc diff --git a/chromium/v8/src/objects/js-array-buffer.h b/chromium/v8/src/objects/js-array-buffer.h index 7bf2e1ae94b..71adb42ae84 100644 --- a/chromium/v8/src/objects/js-array-buffer.h +++ b/chromium/v8/src/objects/js-array-buffer.h @@ -5,6 +5,7 @@ #ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_ #define V8_OBJECTS_JS_ARRAY_BUFFER_H_ +#include "src/objects/backing-store.h" #include "src/objects/js-objects.h" // Has to be the last include (doesn't have include guards): @@ -13,9 +14,6 @@ namespace v8 { namespace internal { -// Whether a JSArrayBuffer is a SharedArrayBuffer or not. -enum class SharedFlag : uint32_t { kNotShared, kShared }; - class JSArrayBuffer : public JSObject { public: // The maximum length for JSArrayBuffer's supported by V8. @@ -51,8 +49,8 @@ class JSArrayBuffer : public JSObject { V(IsExternalBit, bool, 1, _) \ V(IsDetachableBit, bool, 1, _) \ V(WasDetachedBit, bool, 1, _) \ - V(IsSharedBit, bool, 1, _) \ - V(IsWasmMemoryBit, bool, 1, _) + V(IsAsmJsMemoryBit, bool, 1, _) \ + V(IsSharedBit, bool, 1, _) DEFINE_BIT_FIELDS(JS_ARRAY_BUFFER_BIT_FIELD_FIELDS) #undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS @@ -61,57 +59,45 @@ class JSArrayBuffer : public JSObject { // memory block once all ArrayBuffers referencing it are collected by the GC. DECL_BOOLEAN_ACCESSORS(is_external) - // [is_detachable]: false indicates that this buffer cannot be detached. + // [is_detachable]: false => this buffer cannot be detached. DECL_BOOLEAN_ACCESSORS(is_detachable) - // [was_detached]: true if the buffer was previously detached. + // [was_detached]: true => the buffer was previously detached. DECL_BOOLEAN_ACCESSORS(was_detached) + // [is_asmjs_memory]: true => this buffer was once used as asm.js memory. + DECL_BOOLEAN_ACCESSORS(is_asmjs_memory) + // [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer. 
DECL_BOOLEAN_ACCESSORS(is_shared) - // [is_wasm_memory]: whether the buffer is tracked by the WasmMemoryTracker. - DECL_BOOLEAN_ACCESSORS(is_wasm_memory) - DECL_CAST(JSArrayBuffer) - void Detach(); - - struct Allocation { - Allocation(void* allocation_base, size_t length, void* backing_store, - bool is_wasm_memory) - : allocation_base(allocation_base), - length(length), - backing_store(backing_store), - is_wasm_memory(is_wasm_memory) {} - - void* allocation_base; - size_t length; - void* backing_store; - bool is_wasm_memory; - }; - - V8_EXPORT_PRIVATE void FreeBackingStoreFromMainThread(); - V8_EXPORT_PRIVATE static void FreeBackingStore(Isolate* isolate, - Allocation allocation); - - V8_EXPORT_PRIVATE static void Setup( - Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external, - void* data, size_t allocated_length, - SharedFlag shared_flag = SharedFlag::kNotShared, - bool is_wasm_memory = false); - - // Initialize the object as empty one to avoid confusing heap verifier if - // the failure happened in the middle of JSArrayBuffer construction. - V8_EXPORT_PRIVATE static void SetupAsEmpty(Handle<JSArrayBuffer> array_buffer, - Isolate* isolate); - - // Returns false if array buffer contents could not be allocated. - // In this case, |array_buffer| will not be set up. - V8_EXPORT_PRIVATE static bool SetupAllocatingData( - Handle<JSArrayBuffer> array_buffer, Isolate* isolate, - size_t allocated_length, bool initialize = true, - SharedFlag shared_flag = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT; + // Initializes the fields of the ArrayBuffer. The provided backing_store can + // be nullptr. If it is not nullptr, then the function registers it with + // src/heap/array-buffer-tracker.h. + V8_EXPORT_PRIVATE void Setup(SharedFlag shared, + std::shared_ptr<BackingStore> backing_store); + + // Attaches the backing store to an already constructed empty ArrayBuffer. + // This is intended to be used only in ArrayBufferConstructor builtin. + V8_EXPORT_PRIVATE void Attach(std::shared_ptr<BackingStore> backing_store); + // Detach the backing store from this array buffer if it is detachable. + // This sets the internal pointer and length to 0 and unregisters the backing + // store from the array buffer tracker. If the array buffer is not detachable, + // this is a nop. + // + // Array buffers that wrap wasm memory objects are special in that they + // are normally not detachable, but can become detached as a side effect + // of growing the underlying memory object. The {force_for_wasm_memory} flag + // is used by the implementation of Wasm memory growth in order to bypass the + // non-detachable check. + V8_EXPORT_PRIVATE void Detach(bool force_for_wasm_memory = false); + + // Get a reference to backing store of this array buffer, if there is a + // backing store. Returns nullptr if there is no backing store (e.g. detached + // or a zero-length array buffer). + std::shared_ptr<BackingStore> GetBackingStore(); // Dispatched behavior. DECL_PRINTER(JSArrayBuffer) @@ -187,12 +173,6 @@ class JSTypedArray : public JSArrayBufferView { // [length]: length of typed array in elements. 
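A small sketch of the difference between the raw backing_store() field and the GetBackingStore() accessor declared above; the helper function is hypothetical and only meant to show the intended usage.

// Hypothetical inspection helper inside v8::internal.
void InspectBuffer(JSArrayBuffer buffer) {
  // Raw field: nullptr for detached or zero-length buffers, no ownership.
  void* raw = buffer.backing_store();
  (void)raw;

  // Shared reference: keeps the BackingStore alive while this frame holds it.
  std::shared_ptr<BackingStore> store = buffer.GetBackingStore();
  if (store) {
    // For an attached buffer, raw aliases store->buffer_start().
    size_t length = store->byte_length();
    // ... read up to `length` bytes starting at store->buffer_start() ...
    (void)length;
  }
}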
DECL_PRIMITIVE_ACCESSORS(length, size_t) - // [external_pointer]: TODO(v8:4153) - DECL_PRIMITIVE_ACCESSORS(external_pointer, void*) - - // [base_pointer]: TODO(v8:4153) - DECL_ACCESSORS(base_pointer, Object) - // ES6 9.4.5.3 V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty( Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key, @@ -208,10 +188,26 @@ class JSTypedArray : public JSArrayBufferView { // Use with care: returns raw pointer into heap. inline void* DataPtr(); + inline void SetOffHeapDataPtr(void* base, Address offset); + inline void SetOnHeapDataPtr(HeapObject base, Address offset); + // Whether the buffer's backing store is on-heap or off-heap. inline bool is_on_heap() const; - static inline void* ExternalPointerForOnHeapArray(); + // Note: this is a pointer compression specific optimization. + // Normally, on-heap typed arrays contain HeapObject value in |base_pointer| + // field and an offset in |external_pointer|. + // When pointer compression is enabled we want to combine decompression with + // the offset addition. In order to do that we add an isolate root to the + // |external_pointer| value and therefore the data pointer computation can + // is a simple addition of a (potentially sign-extended) |base_pointer| loaded + // as Tagged_t value and an |external_pointer| value. + // For full-pointer mode the compensation value is zero. + static inline Address ExternalPointerCompensationForOnHeapArray( + Isolate* isolate); + + // Subtracts external pointer compensation from the external pointer value. + inline void RemoveExternalPointerCompensationForSerialization(); static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate, Handle<Object> receiver, @@ -250,8 +246,13 @@ class JSTypedArray : public JSArrayBufferView { #endif private: - static Handle<JSArrayBuffer> MaterializeArrayBuffer( - Handle<JSTypedArray> typed_array); + friend class Deserializer; + + // [base_pointer]: TODO(v8:4153) + DECL_ACCESSORS(base_pointer, Object) + + // [external_pointer]: TODO(v8:4153) + DECL_PRIMITIVE_ACCESSORS(external_pointer, Address) OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView); }; diff --git a/chromium/v8/src/objects/js-array.h b/chromium/v8/src/objects/js-array.h index eb581c104e0..c990151b275 100644 --- a/chromium/v8/src/objects/js-array.h +++ b/chromium/v8/src/objects/js-array.h @@ -108,7 +108,7 @@ class JSArray : public JSObject { static const int kPreallocatedArrayElements = 4; DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSARRAY_FIELDS) + TORQUE_GENERATED_JS_ARRAY_FIELDS) static const int kLengthDescriptorIndex = 0; @@ -178,7 +178,7 @@ class JSArrayIterator : public JSObject { inline void set_kind(IterationKind kind); DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSARRAY_ITERATOR_FIELDS) + TORQUE_GENERATED_JS_ARRAY_ITERATOR_FIELDS) private: DECL_INT_ACCESSORS(raw_kind) diff --git a/chromium/v8/src/objects/js-break-iterator.cc b/chromium/v8/src/objects/js-break-iterator.cc index 31ed3f86117..1a9d0964118 100644 --- a/chromium/v8/src/objects/js-break-iterator.cc +++ b/chromium/v8/src/objects/js-break-iterator.cc @@ -17,7 +17,7 @@ namespace internal { MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New( Isolate* isolate, Handle<Map> map, Handle<Object> locales, - Handle<Object> options_obj) { + Handle<Object> options_obj, const char* service) { Factory* factory = isolate->factory(); // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales). 
@@ -31,15 +31,14 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New( if (options_obj->IsUndefined(isolate)) { options = factory->NewJSObjectWithNullProto(); } else { - ASSIGN_RETURN_ON_EXCEPTION( - isolate, options, - Object::ToObject(isolate, options_obj, "Intl.JSV8BreakIterator"), - JSV8BreakIterator); + ASSIGN_RETURN_ON_EXCEPTION(isolate, options, + Object::ToObject(isolate, options_obj, service), + JSV8BreakIterator); } // Extract locale string Maybe<Intl::MatcherOption> maybe_locale_matcher = - Intl::GetLocaleMatcher(isolate, options, "Intl.JSV8BreakIterator"); + Intl::GetLocaleMatcher(isolate, options, service); MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSV8BreakIterator>()); Intl::MatcherOption matcher = maybe_locale_matcher.FromJust(); @@ -49,7 +48,7 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New( // Extract type from options Maybe<Type> maybe_type = Intl::GetStringOption<Type>( - isolate, options, "type", "Intl.v8BreakIterator", + isolate, options, "type", service, {"word", "character", "sentence", "line"}, {Type::WORD, Type::CHARACTER, Type::SENTENCE, Type::LINE}, Type::WORD); MAYBE_RETURN(maybe_type, MaybeHandle<JSV8BreakIterator>()); diff --git a/chromium/v8/src/objects/js-break-iterator.h b/chromium/v8/src/objects/js-break-iterator.h index 4b40192c813..ea66fe6732b 100644 --- a/chromium/v8/src/objects/js-break-iterator.h +++ b/chromium/v8/src/objects/js-break-iterator.h @@ -31,7 +31,7 @@ class JSV8BreakIterator : public JSObject { public: V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> New( Isolate* isolate, Handle<Map> map, Handle<Object> input_locales, - Handle<Object> input_options); + Handle<Object> input_options, const char* service); static Handle<JSObject> ResolvedOptions( Isolate* isolate, Handle<JSV8BreakIterator> break_iterator); @@ -72,7 +72,7 @@ class JSV8BreakIterator : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSV8BREAK_ITERATOR_FIELDS) + TORQUE_GENERATED_JS_V8_BREAK_ITERATOR_FIELDS) private: DECL_INT_ACCESSORS(raw_type) diff --git a/chromium/v8/src/objects/js-collator.cc b/chromium/v8/src/objects/js-collator.cc index 0413e2acd1e..39178b3acf3 100644 --- a/chromium/v8/src/objects/js-collator.cc +++ b/chromium/v8/src/objects/js-collator.cc @@ -243,7 +243,8 @@ void SetCaseFirstOption(icu::Collator* icu_collator, // static MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map, Handle<Object> locales, - Handle<Object> options_obj) { + Handle<Object> options_obj, + const char* service) { // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales). Maybe<std::vector<std::string>> maybe_requested_locales = Intl::CanonicalizeLocaleList(isolate, locales); @@ -258,9 +259,9 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map, } else { // 3. Else // 3. a. Let options be ? ToObject(options). - ASSIGN_RETURN_ON_EXCEPTION( - isolate, options_obj, - Object::ToObject(isolate, options_obj, "Intl.Collator"), JSCollator); + ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj, + Object::ToObject(isolate, options_obj, service), + JSCollator); } // At this point, options_obj can either be a JSObject or a JSProxy only. @@ -269,7 +270,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map, // 4. Let usage be ? GetOption(options, "usage", "string", « "sort", // "search" », "sort"). 
Maybe<Usage> maybe_usage = Intl::GetStringOption<Usage>( - isolate, options, "usage", "Intl.Collator", {"sort", "search"}, + isolate, options, "usage", service, {"sort", "search"}, {Usage::SORT, Usage::SEARCH}, Usage::SORT); MAYBE_RETURN(maybe_usage, MaybeHandle<JSCollator>()); Usage usage = maybe_usage.FromJust(); @@ -278,7 +279,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map, // « "lookup", "best fit" », "best fit"). // 10. Set opt.[[localeMatcher]] to matcher. Maybe<Intl::MatcherOption> maybe_locale_matcher = - Intl::GetLocaleMatcher(isolate, options, "Intl.Collator"); + Intl::GetLocaleMatcher(isolate, options, service); MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSCollator>()); Intl::MatcherOption matcher = maybe_locale_matcher.FromJust(); @@ -293,14 +294,14 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map, // // 13. Set opt.[[kn]] to numeric. bool numeric; - Maybe<bool> found_numeric = Intl::GetBoolOption(isolate, options, "numeric", - "Intl.Collator", &numeric); + Maybe<bool> found_numeric = + Intl::GetBoolOption(isolate, options, "numeric", service, &numeric); MAYBE_RETURN(found_numeric, MaybeHandle<JSCollator>()); // 14. Let caseFirst be ? GetOption(options, "caseFirst", "string", // « "upper", "lower", "false" », undefined). Maybe<Intl::CaseFirst> maybe_case_first = - Intl::GetCaseFirst(isolate, options, "Intl.Collator"); + Intl::GetCaseFirst(isolate, options, service); MAYBE_RETURN(maybe_case_first, MaybeHandle<JSCollator>()); Intl::CaseFirst case_first = maybe_case_first.FromJust(); @@ -411,7 +412,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map, // 24. Let sensitivity be ? GetOption(options, "sensitivity", // "string", « "base", "accent", "case", "variant" », undefined). Maybe<Sensitivity> maybe_sensitivity = Intl::GetStringOption<Sensitivity>( - isolate, options, "sensitivity", "Intl.Collator", + isolate, options, "sensitivity", service, {"base", "accent", "case", "variant"}, {Sensitivity::kBase, Sensitivity::kAccent, Sensitivity::kCase, Sensitivity::kVariant}, @@ -451,9 +452,8 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map, // 27.Let ignorePunctuation be ? GetOption(options, // "ignorePunctuation", "boolean", undefined, false). bool ignore_punctuation; - Maybe<bool> found_ignore_punctuation = - Intl::GetBoolOption(isolate, options, "ignorePunctuation", - "Intl.Collator", &ignore_punctuation); + Maybe<bool> found_ignore_punctuation = Intl::GetBoolOption( + isolate, options, "ignorePunctuation", service, &ignore_punctuation); MAYBE_RETURN(found_ignore_punctuation, MaybeHandle<JSCollator>()); // 28. Set collator.[[IgnorePunctuation]] to ignorePunctuation. diff --git a/chromium/v8/src/objects/js-collator.h b/chromium/v8/src/objects/js-collator.h index e9114afeb1e..0147b80ebb0 100644 --- a/chromium/v8/src/objects/js-collator.h +++ b/chromium/v8/src/objects/js-collator.h @@ -34,7 +34,7 @@ class JSCollator : public JSObject { // ecma402/#sec-initializecollator V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> New( Isolate* isolate, Handle<Map> map, Handle<Object> locales, - Handle<Object> options); + Handle<Object> options, const char* service); // ecma402/#sec-intl.collator.prototype.resolvedoptions static Handle<JSObject> ResolvedOptions(Isolate* isolate, @@ -48,7 +48,7 @@ class JSCollator : public JSObject { // Layout description. 
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSCOLLATOR_FIELDS) + TORQUE_GENERATED_JS_COLLATOR_FIELDS) DECL_ACCESSORS(icu_collator, Managed<icu::Collator>) DECL_ACCESSORS(bound_compare, Object) diff --git a/chromium/v8/src/objects/js-collection-iterator.h b/chromium/v8/src/objects/js-collection-iterator.h index b193aa84cdd..0a408376776 100644 --- a/chromium/v8/src/objects/js-collection-iterator.h +++ b/chromium/v8/src/objects/js-collection-iterator.h @@ -22,6 +22,10 @@ class JSCollectionIterator public: void JSCollectionIteratorPrint(std::ostream& os, const char* name); + // JSCollectionIterator is abstract, but also defines the size for all of its + // concrete subclasses. + static constexpr int kSize = kHeaderSize; + TQ_OBJECT_CONSTRUCTORS(JSCollectionIterator) }; diff --git a/chromium/v8/src/objects/js-date-time-format.cc b/chromium/v8/src/objects/js-date-time-format.cc index 29fcfb0d7cb..835f3dc43ab 100644 --- a/chromium/v8/src/objects/js-date-time-format.cc +++ b/chromium/v8/src/objects/js-date-time-format.cc @@ -79,16 +79,6 @@ static std::vector<PatternItem> BuildPatternItems() { kNarrowLongShort), PatternItem("year", {{"yy", "2-digit"}, {"y", "numeric"}}, k2DigitNumeric)}; - if (FLAG_harmony_intl_dateformat_quarter) { - items.push_back(PatternItem("quarter", - {{"QQQQQ", "narrow"}, - {"QQQQ", "long"}, - {"QQQ", "short"}, - {"qqqqq", "narrow"}, - {"qqqq", "long"}, - {"qqq", "short"}}, - kNarrowLongShort)); - } // Sometimes we get L instead of M for month - standalone name. items.push_back(PatternItem("month", {{"MMMMM", "narrow"}, @@ -641,7 +631,8 @@ Isolate::ICUObjectCacheType ConvertToCacheType( MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime( Isolate* isolate, Handle<Object> date, Handle<Object> locales, - Handle<Object> options, RequiredOption required, DefaultsOption defaults) { + Handle<Object> options, RequiredOption required, DefaultsOption defaults, + const char* method) { Isolate::ICUObjectCacheType cache_type = ConvertToCacheType(defaults); Factory* factory = isolate->factory(); @@ -691,7 +682,8 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime( Handle<JSDateTimeFormat> date_time_format; ASSIGN_RETURN_ON_EXCEPTION( isolate, date_time_format, - JSDateTimeFormat::New(isolate, map, locales, internal_options), String); + JSDateTimeFormat::New(isolate, map, locales, internal_options, method), + String); if (can_cache) { isolate->set_icu_object_in_cache( @@ -775,13 +767,10 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions( // 4. If required is "date" or "any", then if (required == RequiredOption::kAny || required == RequiredOption::kDate) { - // a. For each of the property names "weekday", "year", "quarter", "month", + // a. For each of the property names "weekday", "year", "month", // "day", do std::vector<Handle<String>> list( {factory->weekday_string(), factory->year_string()}); - if (FLAG_harmony_intl_dateformat_quarter) { - list.push_back(factory->quarter_string()); - } list.push_back(factory->month_string()); list.push_back(factory->day_string()); Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list); @@ -941,7 +930,7 @@ icu::Calendar* CreateCalendar(Isolate* isolate, const icu::Locale& icu_locale, std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat( const icu::Locale& icu_locale, const icu::UnicodeString& skeleton, - icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references) + icu::DateTimePatternGenerator* generator) { // See https://github.com/tc39/ecma402/issues/225 . 
The best pattern // generation needs to be done in the base locale according to the // current spec however odd it may be. See also crbug.com/826549 . @@ -954,8 +943,8 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat( // has to be discussed. Revisit once the spec is clarified/revised. icu::UnicodeString pattern; UErrorCode status = U_ZERO_ERROR; - pattern = generator.getBestPattern(skeleton, UDATPG_MATCH_HOUR_FIELD_LENGTH, - status); + pattern = generator->getBestPattern(skeleton, UDATPG_MATCH_HOUR_FIELD_LENGTH, + status); CHECK(U_SUCCESS(status)); // Make formatter from skeleton. Calendar and numbering system are added @@ -971,9 +960,9 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat( class DateFormatCache { public: - icu::SimpleDateFormat* Create( - const icu::Locale& icu_locale, const icu::UnicodeString& skeleton, - icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references) + icu::SimpleDateFormat* Create(const icu::Locale& icu_locale, + const icu::UnicodeString& skeleton, + icu::DateTimePatternGenerator* generator) { std::string key; skeleton.toUTF8String<std::string>(key); key += ":"; @@ -1002,7 +991,7 @@ class DateFormatCache { std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache( const icu::Locale& icu_locale, const icu::UnicodeString& skeleton, - icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references) + icu::DateTimePatternGenerator* generator) { static base::LazyInstance<DateFormatCache>::type cache = LAZY_INSTANCE_INITIALIZER; return std::unique_ptr<icu::SimpleDateFormat>( @@ -1138,8 +1127,7 @@ icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input, std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern( JSDateTimeFormat::DateTimeStyle date_style, JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale, - Intl::HourCycle hc, - icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references) + Intl::HourCycle hc, icu::DateTimePatternGenerator* generator) { std::unique_ptr<icu::SimpleDateFormat> result; if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) { if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) { @@ -1164,10 +1152,40 @@ std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern( UNREACHABLE(); } } + + UErrorCode status = U_ZERO_ERROR; + // Somehow we fail to create the instance. + if (result.get() == nullptr) { + icu::Locale modified_locale(icu_locale); + // Fallback to the locale without "nu". + if (!icu_locale.getUnicodeKeywordValue<std::string>("nu", status).empty()) { + status = U_ZERO_ERROR; + modified_locale.setUnicodeKeywordValue("nu", nullptr, status); + return DateTimeStylePattern(date_style, time_style, modified_locale, hc, + generator); + } + status = U_ZERO_ERROR; + // Fallback to the locale without "hc". + if (!icu_locale.getUnicodeKeywordValue<std::string>("hc", status).empty()) { + status = U_ZERO_ERROR; + modified_locale.setUnicodeKeywordValue("hc", nullptr, status); + return DateTimeStylePattern(date_style, time_style, modified_locale, hc, + generator); + } + status = U_ZERO_ERROR; + // Fallback to the locale without "ca". 
+ if (!icu_locale.getUnicodeKeywordValue<std::string>("ca", status).empty()) { + status = U_ZERO_ERROR; + modified_locale.setUnicodeKeywordValue("ca", nullptr, status); + return DateTimeStylePattern(date_style, time_style, modified_locale, hc, + generator); + } + return nullptr; + } icu::UnicodeString pattern; pattern = result->toPattern(pattern); - UErrorCode status = U_ZERO_ERROR; + status = U_ZERO_ERROR; icu::UnicodeString skeleton = icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status); CHECK(U_SUCCESS(status)); @@ -1185,7 +1203,8 @@ class DateTimePatternGeneratorCache { public: // Return a clone copy that the caller have to free. icu::DateTimePatternGenerator* CreateGenerator(const icu::Locale& locale) { - std::string key(locale.getBaseName()); + std::string key(FLAG_harmony_intl_other_calendars ? locale.getName() + : locale.getBaseName()); base::MutexGuard guard(&mutex_); auto it = map_.find(key); if (it != map_.end()) { @@ -1193,7 +1212,8 @@ class DateTimePatternGeneratorCache { } UErrorCode status = U_ZERO_ERROR; map_[key].reset(icu::DateTimePatternGenerator::createInstance( - icu::Locale(key.c_str()), status)); + FLAG_harmony_intl_other_calendars ? locale : icu::Locale(key.c_str()), + status)); // Fallback to use "root". if (U_FAILURE(status)) { status = U_ZERO_ERROR; @@ -1216,7 +1236,7 @@ enum FormatMatcherOption { kBestFit, kBasic }; // ecma402/#sec-initializedatetimeformat MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( Isolate* isolate, Handle<Map> map, Handle<Object> locales, - Handle<Object> input_options) { + Handle<Object> input_options, const char* service) { Factory* factory = isolate->factory(); // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales). Maybe<std::vector<std::string>> maybe_requested_locales = @@ -1235,6 +1255,10 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( // 4. Let matcher be ? GetOption(options, "localeMatcher", "string", // « "lookup", "best fit" », "best fit"). // 5. Set opt.[[localeMatcher]] to matcher. + Maybe<Intl::MatcherOption> maybe_locale_matcher = + Intl::GetLocaleMatcher(isolate, options, service); + MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDateTimeFormat>()); + Intl::MatcherOption locale_matcher = maybe_locale_matcher.FromJust(); std::unique_ptr<char[]> calendar_str = nullptr; std::unique_ptr<char[]> numbering_system_str = nullptr; @@ -1242,13 +1266,12 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( const std::vector<const char*> empty_values = {}; // 6. Let calendar be ? GetOption(options, "calendar", // "string", undefined, undefined). - Maybe<bool> maybe_calendar = - Intl::GetStringOption(isolate, options, "calendar", empty_values, - "Intl.NumberFormat", &calendar_str); + Maybe<bool> maybe_calendar = Intl::GetStringOption( + isolate, options, "calendar", empty_values, service, &calendar_str); MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>()); if (maybe_calendar.FromJust() && calendar_str != nullptr) { icu::Locale default_locale; - if (!Intl::IsValidCalendar(default_locale, calendar_str.get())) { + if (!Intl::IsWellFormedCalendar(calendar_str.get())) { THROW_NEW_ERROR( isolate, NewRangeError( @@ -1261,26 +1284,21 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( // 8. Let numberingSystem be ? GetOption(options, "numberingSystem", // "string", undefined, undefined). 
Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem( - isolate, options, "Intl.NumberFormat", &numbering_system_str); + isolate, options, service, &numbering_system_str); MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSDateTimeFormat>()); } - Maybe<Intl::MatcherOption> maybe_locale_matcher = - Intl::GetLocaleMatcher(isolate, options, "Intl.DateTimeFormat"); - MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDateTimeFormat>()); - Intl::MatcherOption locale_matcher = maybe_locale_matcher.FromJust(); - // 6. Let hour12 be ? GetOption(options, "hour12", "boolean", undefined, // undefined). bool hour12; - Maybe<bool> maybe_get_hour12 = Intl::GetBoolOption( - isolate, options, "hour12", "Intl.DateTimeFormat", &hour12); + Maybe<bool> maybe_get_hour12 = + Intl::GetBoolOption(isolate, options, "hour12", service, &hour12); MAYBE_RETURN(maybe_get_hour12, Handle<JSDateTimeFormat>()); // 7. Let hourCycle be ? GetOption(options, "hourCycle", "string", « "h11", // "h12", "h23", "h24" », undefined). Maybe<Intl::HourCycle> maybe_hour_cycle = - Intl::GetHourCycle(isolate, options, "Intl.DateTimeFormat"); + Intl::GetHourCycle(isolate, options, service); MAYBE_RETURN(maybe_hour_cycle, MaybeHandle<JSDateTimeFormat>()); Intl::HourCycle hour_cycle = maybe_hour_cycle.FromJust(); @@ -1309,12 +1327,14 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( DCHECK(!icu_locale.isBogus()); UErrorCode status = U_ZERO_ERROR; - if (calendar_str != nullptr) { + if (calendar_str != nullptr && + Intl::IsValidCalendar(icu_locale, calendar_str.get())) { icu_locale.setUnicodeKeywordValue("ca", calendar_str.get(), status); CHECK(U_SUCCESS(status)); } - if (numbering_system_str != nullptr) { + if (numbering_system_str != nullptr && + Intl::IsValidNumberingSystem(numbering_system_str.get())) { icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status); CHECK(U_SUCCESS(status)); } @@ -1322,9 +1342,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( // 17. Let timeZone be ? Get(options, "timeZone"). const std::vector<const char*> empty_values; std::unique_ptr<char[]> timezone = nullptr; - Maybe<bool> maybe_timezone = - Intl::GetStringOption(isolate, options, "timeZone", empty_values, - "Intl.DateTimeFormat", &timezone); + Maybe<bool> maybe_timezone = Intl::GetStringOption( + isolate, options, "timeZone", empty_values, service, &timezone); MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>()); std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone.get()); @@ -1409,43 +1428,40 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( DateTimeStyle time_style = DateTimeStyle::kUndefined; std::unique_ptr<icu::SimpleDateFormat> icu_date_format; - if (FLAG_harmony_intl_datetime_style) { - // 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", « - // "full", "long", "medium", "short" », undefined). - Maybe<DateTimeStyle> maybe_date_style = - Intl::GetStringOption<DateTimeStyle>( - isolate, options, "dateStyle", "Intl.DateTimeFormat", - {"full", "long", "medium", "short"}, - {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium, - DateTimeStyle::kShort}, - DateTimeStyle::kUndefined); - MAYBE_RETURN(maybe_date_style, MaybeHandle<JSDateTimeFormat>()); - // 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to - // dateStyle. - date_style = maybe_date_style.FromJust(); - - // 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", « - // "full", "long", "medium", "short" »). 
- Maybe<DateTimeStyle> maybe_time_style = - Intl::GetStringOption<DateTimeStyle>( - isolate, options, "timeStyle", "Intl.DateTimeFormat", - {"full", "long", "medium", "short"}, - {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium, - DateTimeStyle::kShort}, - DateTimeStyle::kUndefined); - MAYBE_RETURN(maybe_time_style, MaybeHandle<JSDateTimeFormat>()); - - // 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to - // timeStyle. - time_style = maybe_time_style.FromJust(); - - // 32. If dateStyle or timeStyle are not undefined, then - if (date_style != DateTimeStyle::kUndefined || - time_style != DateTimeStyle::kUndefined) { - icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale, - hc, *generator); - } + // 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", « + // "full", "long", "medium", "short" », undefined). + Maybe<DateTimeStyle> maybe_date_style = Intl::GetStringOption<DateTimeStyle>( + isolate, options, "dateStyle", service, + {"full", "long", "medium", "short"}, + {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium, + DateTimeStyle::kShort}, + DateTimeStyle::kUndefined); + MAYBE_RETURN(maybe_date_style, MaybeHandle<JSDateTimeFormat>()); + // 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to + // dateStyle. + date_style = maybe_date_style.FromJust(); + + // 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", « + // "full", "long", "medium", "short" »). + Maybe<DateTimeStyle> maybe_time_style = Intl::GetStringOption<DateTimeStyle>( + isolate, options, "timeStyle", service, + {"full", "long", "medium", "short"}, + {DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium, + DateTimeStyle::kShort}, + DateTimeStyle::kUndefined); + MAYBE_RETURN(maybe_time_style, MaybeHandle<JSDateTimeFormat>()); + + // 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to + // timeStyle. + time_style = maybe_time_style.FromJust(); + + // 32. If dateStyle or timeStyle are not undefined, then + if (date_style != DateTimeStyle::kUndefined || + time_style != DateTimeStyle::kUndefined) { + icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale, + hc, generator.get()); } + // 33. Else, if (icu_date_format.get() == nullptr) { bool has_hour_option = false; @@ -1456,9 +1472,9 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( // i. Let prop be the name given in the Property column of the row. // ii. Let value be ? GetOption(options, prop, "string", « the strings // given in the Values column of the row », undefined). - Maybe<bool> maybe_get_option = Intl::GetStringOption( - isolate, options, item.property.c_str(), item.allowed_values, - "Intl.DateTimeFormat", &input); + Maybe<bool> maybe_get_option = + Intl::GetStringOption(isolate, options, item.property.c_str(), + item.allowed_values, service, &input); MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>()); if (maybe_get_option.FromJust()) { if (item.property == "hour") { @@ -1487,8 +1503,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( // « "basic", "best fit" », "best fit"). 
Maybe<FormatMatcherOption> maybe_format_matcher = Intl::GetStringOption<FormatMatcherOption>( - isolate, options, "formatMatcher", "Intl.DateTimeFormat", - {"best fit", "basic"}, + isolate, options, "formatMatcher", service, {"best fit", "basic"}, {FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic}, FormatMatcherOption::kBestFit); MAYBE_RETURN(maybe_format_matcher, MaybeHandle<JSDateTimeFormat>()); @@ -1496,13 +1511,13 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( // FormatMatcherOption format_matcher = maybe_format_matcher.FromJust(); icu::UnicodeString skeleton_ustr(skeleton.c_str()); - icu_date_format = - CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, *generator); + icu_date_format = CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, + generator.get()); if (icu_date_format.get() == nullptr) { // Remove extensions and try again. icu_locale = icu::Locale(icu_locale.getBaseName()); - icu_date_format = - CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, *generator); + icu_date_format = CreateICUDateFormatFromCache(icu_locale, skeleton_ustr, + generator.get()); if (icu_date_format.get() == nullptr) { FATAL("Failed to create ICU date format, are ICU data files missing?"); } @@ -1561,12 +1576,16 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New( isolate->factory()->NewFastOrSlowJSObjectFromMap(map)); DisallowHeapAllocation no_gc; date_time_format->set_flags(0); - date_time_format->set_hour_cycle(hc); if (date_style != DateTimeStyle::kUndefined) { date_time_format->set_date_style(date_style); } if (time_style != DateTimeStyle::kUndefined) { date_time_format->set_time_style(time_style); + date_time_format->set_hour_cycle(hc); + } + if ((date_style == DateTimeStyle::kUndefined) && + (time_style == DateTimeStyle::kUndefined)) { + date_time_format->set_hour_cycle(hc); } date_time_format->set_icu_locale(*managed_locale); date_time_format->set_icu_simple_date_format(*managed_format); @@ -1585,11 +1604,9 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) { return isolate->factory()->literal_string(); case UDAT_YEAR_FIELD: case UDAT_EXTENDED_YEAR_FIELD: - case UDAT_YEAR_NAME_FIELD: return isolate->factory()->year_string(); - case UDAT_QUARTER_FIELD: - case UDAT_STANDALONE_QUARTER_FIELD: - return isolate->factory()->quarter_string(); + case UDAT_YEAR_NAME_FIELD: + return isolate->factory()->yearName_string(); case UDAT_MONTH_FIELD: case UDAT_STANDALONE_MONTH_FIELD: return isolate->factory()->month_string(); @@ -1624,6 +1641,11 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) { return isolate->factory()->era_string(); case UDAT_FRACTIONAL_SECOND_FIELD: return isolate->factory()->fractionalSecond_string(); + case UDAT_RELATED_YEAR_FIELD: + return isolate->factory()->relatedYear_string(); + + case UDAT_QUARTER_FIELD: + case UDAT_STANDALONE_QUARTER_FIELD: default: // Other UDAT_*_FIELD's cannot show up because there is no way to specify // them via options of Intl.DateTimeFormat. 
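Not part of the patch: a minimal TypeScript/JavaScript sketch, for orientation only, of the Intl.DateTimeFormat behavior the js-date-time-format.cc hunks above switch on unconditionally (dateStyle/timeStyle no longer behind a flag, calendar/numberingSystem validated before being applied to the ICU locale, hour cycle recorded alongside a time style). Locales and the outputs in comments are illustrative assumptions, not asserted by the patch.

// Exercises the DateTimeStylePattern() path built in the hunks above.
const dtf = new Intl.DateTimeFormat("en-US", {
  dateStyle: "medium",   // step 28: GetOption(options, "dateStyle", ...)
  timeStyle: "short",    // step 30: GetOption(options, "timeStyle", ...)
  hourCycle: "h23",      // with a time style present, the hour cycle is recorded
});
console.log(dtf.format(new Date(Date.UTC(2019, 9, 1, 12, 34))));
console.log(dtf.resolvedOptions()); // includes dateStyle/timeStyle and, here, hourCycle

// calendar must be well-formed (RangeError otherwise); values the locale does
// not know are simply not applied to the ICU locale per the hunks above.
const withCalendar = new Intl.DateTimeFormat("en-US", {
  calendar: "buddhist",
  numberingSystem: "latn",
  year: "numeric", month: "long", day: "numeric",
});
console.log(withCalendar.resolvedOptions().calendar);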
diff --git a/chromium/v8/src/objects/js-date-time-format.h b/chromium/v8/src/objects/js-date-time-format.h index f4a8ccc8f5c..acf99b36185 100644 --- a/chromium/v8/src/objects/js-date-time-format.h +++ b/chromium/v8/src/objects/js-date-time-format.h @@ -34,7 +34,7 @@ class JSDateTimeFormat : public JSObject { public: V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> New( Isolate* isolate, Handle<Map> map, Handle<Object> locales, - Handle<Object> options); + Handle<Object> options, const char* service); V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolvedOptions( Isolate* isolate, Handle<JSDateTimeFormat> date_time_format); @@ -82,7 +82,8 @@ class JSDateTimeFormat : public JSObject { V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime( Isolate* isolate, Handle<Object> date, Handle<Object> locales, - Handle<Object> options, RequiredOption required, DefaultsOption defaults); + Handle<Object> options, RequiredOption required, DefaultsOption defaults, + const char* method); V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales(); @@ -94,7 +95,7 @@ class JSDateTimeFormat : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSDATE_TIME_FORMAT_FIELDS) + TORQUE_GENERATED_JS_DATE_TIME_FORMAT_FIELDS) inline void set_hour_cycle(Intl::HourCycle hour_cycle); inline Intl::HourCycle hour_cycle() const; diff --git a/chromium/v8/src/objects/js-list-format.cc b/chromium/v8/src/objects/js-list-format.cc index 4f303b18745..90b93e308ad 100644 --- a/chromium/v8/src/objects/js-list-format.cc +++ b/chromium/v8/src/objects/js-list-format.cc @@ -252,40 +252,22 @@ namespace { // Extract String from JSArray into array of UnicodeString Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray( Isolate* isolate, Handle<JSArray> array) { - Factory* factory = isolate->factory(); - // In general, ElementsAccessor::Get actually isn't guaranteed to give us the - // elements in order. But if it is a holey array, it will cause the exception - // with the IsString check. + // Thanks to iterable-to-list preprocessing, we never see dictionary-mode + // arrays here, so the loop below can construct an entry from the index. + DCHECK(array->HasFastElements(isolate)); auto* accessor = array->GetElementsAccessor(); uint32_t length = accessor->NumberOfElements(*array); - // ecma402 #sec-createpartsfromlist - // 2. If list contains any element value such that Type(value) is not String, - // throw a TypeError exception. - // - // Per spec it looks like we're supposed to throw a TypeError exception if the - // item isn't already a string, rather than coercing to a string. std::vector<icu::UnicodeString> result; for (uint32_t i = 0; i < length; i++) { - DCHECK(accessor->HasElement(*array, i)); - Handle<Object> item = accessor->Get(array, i); - DCHECK(!item.is_null()); - if (!item->IsString()) { - THROW_NEW_ERROR_RETURN_VALUE( - isolate, - NewTypeError(MessageTemplate::kArrayItemNotType, - factory->list_string(), - // TODO(ftang): For dictionary-mode arrays, i isn't - // actually the index in the array but the index in the - // dictionary. 
- factory->NewNumber(i), factory->String_string()), - Nothing<std::vector<icu::UnicodeString>>()); - } + InternalIndex entry(i); + DCHECK(accessor->HasEntry(*array, entry)); + Handle<Object> item = accessor->Get(array, entry); + DCHECK(item->IsString()); Handle<String> item_str = Handle<String>::cast(item); if (!item_str->IsFlat()) item_str = String::Flatten(isolate, item_str); result.push_back(Intl::ToICUUnicodeString(isolate, item_str)); } - DCHECK(!array->HasDictionaryElements()); return Just(result); } @@ -294,9 +276,6 @@ MaybeHandle<T> FormatListCommon( Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list, MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) { DCHECK(!list->IsUndefined()); - // ecma402 #sec-createpartsfromlist - // 2. If list contains any element value such that Type(value) is not String, - // throw a TypeError exception. Maybe<std::vector<icu::UnicodeString>> maybe_array = ToUnicodeStringArray(isolate, list); MAYBE_RETURN(maybe_array, Handle<T>()); diff --git a/chromium/v8/src/objects/js-list-format.h b/chromium/v8/src/objects/js-list-format.h index df937722e65..1ff76790f95 100644 --- a/chromium/v8/src/objects/js-list-format.h +++ b/chromium/v8/src/objects/js-list-format.h @@ -104,7 +104,7 @@ class JSListFormat : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSLIST_FORMAT_FIELDS) + TORQUE_GENERATED_JS_LIST_FORMAT_FIELDS) OBJECT_CONSTRUCTORS(JSListFormat, JSObject); }; diff --git a/chromium/v8/src/objects/js-locale.cc b/chromium/v8/src/objects/js-locale.cc index 4a66ea9eca7..9e8053b1dc8 100644 --- a/chromium/v8/src/objects/js-locale.cc +++ b/chromium/v8/src/objects/js-locale.cc @@ -168,10 +168,20 @@ bool IsUnicodeVariantSubtag(const std::string& value) { bool IsExtensionSingleton(const std::string& value) { return IsAlphanum(value, 1, 1); } +} // namespace + +bool JSLocale::Is38AlphaNumList(const std::string& value) { + std::size_t found = value.find("-"); + if (found == std::string::npos) { + return IsAlphanum(value, 3, 8); + } + return IsAlphanum(value.substr(0, found), 3, 8) && + JSLocale::Is38AlphaNumList(value.substr(found + 1)); +} // TODO(ftang) Replace the following check w/ icu::LocaleBuilder // once ICU64 land in March 2019. -bool StartsWithUnicodeLanguageId(const std::string& value) { +bool JSLocale::StartsWithUnicodeLanguageId(const std::string& value) { // unicode_language_id = // unicode_language_subtag (sep unicode_script_subtag)? // (sep unicode_region_subtag)? (sep unicode_variant_subtag)* ; @@ -207,6 +217,7 @@ bool StartsWithUnicodeLanguageId(const std::string& value) { return true; } +namespace { Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag, Handle<JSReceiver> options, icu::LocaleBuilder* builder) { @@ -223,7 +234,7 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag, CHECK_NOT_NULL(*bcp47_tag); // 2. If IsStructurallyValidLanguageTag(tag) is false, throw a RangeError // exception. 
- if (!StartsWithUnicodeLanguageId(*bcp47_tag)) { + if (!JSLocale::StartsWithUnicodeLanguageId(*bcp47_tag)) { return Just(false); } UErrorCode status = U_ZERO_ERROR; diff --git a/chromium/v8/src/objects/js-locale.h b/chromium/v8/src/objects/js-locale.h index e1806e6b7f8..f2fca3ce14a 100644 --- a/chromium/v8/src/objects/js-locale.h +++ b/chromium/v8/src/objects/js-locale.h @@ -49,6 +49,13 @@ class JSLocale : public JSObject { static Handle<String> ToString(Isolate* isolate, Handle<JSLocale> locale); static std::string ToString(Handle<JSLocale> locale); + // Help function to validate locale by other Intl objects. + static bool StartsWithUnicodeLanguageId(const std::string& value); + + // Help function to check well-formed + // "(3*8alphanum) *("-" (3*8alphanum)) sequence" sequence + static bool Is38AlphaNumList(const std::string& value); + DECL_CAST(JSLocale) DECL_ACCESSORS(icu_locale, Managed<icu::Locale>) @@ -58,7 +65,7 @@ class JSLocale : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSLOCALE_FIELDS) + TORQUE_GENERATED_JS_LOCALE_FIELDS) OBJECT_CONSTRUCTORS(JSLocale, JSObject); }; diff --git a/chromium/v8/src/objects/js-number-format-inl.h b/chromium/v8/src/objects/js-number-format-inl.h index afdfef89f2c..f68252ab0b8 100644 --- a/chromium/v8/src/objects/js-number-format-inl.h +++ b/chromium/v8/src/objects/js-number-format-inl.h @@ -26,46 +26,8 @@ ACCESSORS(JSNumberFormat, icu_number_formatter, kIcuNumberFormatterOffset) ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset) -// Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits -// uncondictionally while the unified number proposal eventually will only -// record either (Min|Max)imumFractionDigits or (Min|Max)imumSignaficantDigits -// Since LocalizedNumberFormatter can only remember one set, and during -// 2019-1-17 ECMA402 meeting that the committee decide not to take a PR to -// address that prior to the unified number proposal, we have to add these two -// 5 bits int into flags to remember the (Min|Max)imumFractionDigits while -// (Min|Max)imumSignaficantDigits is present. 
-// TODO(ftang) remove the following once we ship int-number-format-unified -// * SMI_ACCESSORS of flags -// * Four inline functions: (set_)?(min|max)imum_fraction_digits - SMI_ACCESSORS(JSNumberFormat, flags, kFlagsOffset) -inline int JSNumberFormat::minimum_fraction_digits() const { - return MinimumFractionDigitsBits::decode(flags()); -} - -inline void JSNumberFormat::set_minimum_fraction_digits(int digits) { - DCHECK_GE(MinimumFractionDigitsBits::kMax, digits); - DCHECK_LE(0, digits); - DCHECK_GE(20, digits); - int hints = flags(); - hints = MinimumFractionDigitsBits::update(hints, digits); - set_flags(hints); -} - -inline int JSNumberFormat::maximum_fraction_digits() const { - return MaximumFractionDigitsBits::decode(flags()); -} - -inline void JSNumberFormat::set_maximum_fraction_digits(int digits) { - DCHECK_GE(MaximumFractionDigitsBits::kMax, digits); - DCHECK_LE(0, digits); - DCHECK_GE(20, digits); - int hints = flags(); - hints = MaximumFractionDigitsBits::update(hints, digits); - set_flags(hints); -} - inline void JSNumberFormat::set_style(Style style) { DCHECK_GE(StyleBits::kMax, style); int hints = flags(); diff --git a/chromium/v8/src/objects/js-number-format.cc b/chromium/v8/src/objects/js-number-format.cc index ff564975d6f..c065a3f725d 100644 --- a/chromium/v8/src/objects/js-number-format.cc +++ b/chromium/v8/src/objects/js-number-format.cc @@ -33,7 +33,6 @@ namespace { // [[CurrencyDisplay]] is one of the values "code", "symbol", "name", // or "narrowSymbol" identifying the display of the currency number format. -// Note: "narrowSymbol" is added in proposal-unified-intl-numberformat enum class CurrencyDisplay { CODE, SYMBOL, @@ -621,12 +620,11 @@ JSNumberFormat::SetDigitOptionsToFormatter( result = result.integerWidth(icu::number::IntegerWidth::zeroFillTo( digit_options.minimum_integer_digits)); } - if (FLAG_harmony_intl_numberformat_unified) { - // Value -1 of minimum_significant_digits represent the roundingtype is - // "compact-rounding". - if (digit_options.minimum_significant_digits < 0) { - return result; - } + + // Value -1 of minimum_significant_digits represent the roundingtype is + // "compact-rounding". 
+ if (digit_options.minimum_significant_digits < 0) { + return result; } icu::number::Precision precision = (digit_options.minimum_significant_digits > 0) @@ -704,15 +702,12 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions( isolate, options, factory->currencyDisplay_string(), CurrencyDisplayString(isolate, skeleton), Just(kDontThrow)) .FromJust()); - if (FLAG_harmony_intl_numberformat_unified) { - CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->currencySign_string(), - CurrencySignString(isolate, skeleton), Just(kDontThrow)) - .FromJust()); - } + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->currencySign_string(), + CurrencySignString(isolate, skeleton), Just(kDontThrow)) + .FromJust()); } - if (FLAG_harmony_intl_numberformat_unified) { if (style == JSNumberFormat::Style::UNIT) { std::string unit = UnitFromSkeleton(skeleton); if (!unit.empty()) { @@ -727,7 +722,6 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions( UnitDisplayString(isolate, skeleton), Just(kDontThrow)) .FromJust()); } - } CHECK( JSReceiver::CreateDataProperty( @@ -735,45 +729,25 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions( factory->NewNumberFromInt(MinimumIntegerDigitsFromSkeleton(skeleton)), Just(kDontThrow)) .FromJust()); + int32_t minimum = 0, maximum = 0; - bool output_fraction = - FractionDigitsFromSkeleton(skeleton, &minimum, &maximum); - - if (!FLAG_harmony_intl_numberformat_unified && !output_fraction) { - // Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits - // uncondictionally while the unified number proposal eventually will only - // record either (Min|Max)imumFractionDigits or - // (Min|Max)imumSignaficantDigits Since LocalizedNumberFormatter can only - // remember one set, and during 2019-1-17 ECMA402 meeting that the committee - // decide not to take a PR to address that prior to the unified number - // proposal, we have to add these two 5 bits int into flags to remember the - // (Min|Max)imumFractionDigits while (Min|Max)imumSignaficantDigits is - // present. 
- // TODO(ftang) remove the following two lines once we ship - // int-number-format-unified - output_fraction = true; - minimum = number_format->minimum_fraction_digits(); - maximum = number_format->maximum_fraction_digits(); - } - if (output_fraction) { + if (SignificantDigitsFromSkeleton(skeleton, &minimum, &maximum)) { CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->minimumFractionDigits_string(), + isolate, options, factory->minimumSignificantDigits_string(), factory->NewNumberFromInt(minimum), Just(kDontThrow)) .FromJust()); CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->maximumFractionDigits_string(), + isolate, options, factory->maximumSignificantDigits_string(), factory->NewNumberFromInt(maximum), Just(kDontThrow)) .FromJust()); - } - minimum = 0; - maximum = 0; - if (SignificantDigitsFromSkeleton(skeleton, &minimum, &maximum)) { + } else { + FractionDigitsFromSkeleton(skeleton, &minimum, &maximum); CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->minimumSignificantDigits_string(), + isolate, options, factory->minimumFractionDigits_string(), factory->NewNumberFromInt(minimum), Just(kDontThrow)) .FromJust()); CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->maximumSignificantDigits_string(), + isolate, options, factory->maximumFractionDigits_string(), factory->NewNumberFromInt(maximum), Just(kDontThrow)) .FromJust()); } @@ -783,24 +757,22 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions( factory->ToBoolean(UseGroupingFromSkeleton(skeleton)), Just(kDontThrow)) .FromJust()); - if (FLAG_harmony_intl_numberformat_unified) { - Notation notation = NotationFromSkeleton(skeleton); - CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->notation_string(), - NotationAsString(isolate, notation), Just(kDontThrow)) - .FromJust()); - // Only output compactDisplay when notation is compact. - if (notation == Notation::COMPACT) { - CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->compactDisplay_string(), - CompactDisplayString(isolate, skeleton), Just(kDontThrow)) - .FromJust()); - } + Notation notation = NotationFromSkeleton(skeleton); + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->notation_string(), + NotationAsString(isolate, notation), Just(kDontThrow)) + .FromJust()); + // Only output compactDisplay when notation is compact. + if (notation == Notation::COMPACT) { CHECK(JSReceiver::CreateDataProperty( - isolate, options, factory->signDisplay_string(), - SignDisplayString(isolate, skeleton), Just(kDontThrow)) + isolate, options, factory->compactDisplay_string(), + CompactDisplayString(isolate, skeleton), Just(kDontThrow)) .FromJust()); } + CHECK(JSReceiver::CreateDataProperty( + isolate, options, factory->signDisplay_string(), + SignDisplayString(isolate, skeleton), Just(kDontThrow)) + .FromJust()); return options; } @@ -837,7 +809,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat( MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, Handle<Map> map, Handle<Object> locales, - Handle<Object> options_obj) { + Handle<Object> options_obj, + const char* service) { Factory* factory = isolate->factory(); // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales). @@ -854,10 +827,9 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, } else { // 3. Else // 3. a. Let options be ? ToObject(options). 
- ASSIGN_RETURN_ON_EXCEPTION( - isolate, options_obj, - Object::ToObject(isolate, options_obj, "Intl.NumberFormat"), - JSNumberFormat); + ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj, + Object::ToObject(isolate, options_obj, service), + JSNumberFormat); } // At this point, options_obj can either be a JSObject or a JSProxy only. @@ -868,7 +840,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, // "lookup", "best fit" », "best fit"). // 6. Set opt.[[localeMatcher]] to matcher. Maybe<Intl::MatcherOption> maybe_locale_matcher = - Intl::GetLocaleMatcher(isolate, options, "Intl.NumberFormat"); + Intl::GetLocaleMatcher(isolate, options, service); MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSNumberFormat>()); Intl::MatcherOption matcher = maybe_locale_matcher.FromJust(); @@ -877,7 +849,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`, // `"string"`, *undefined*, *undefined*). Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem( - isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str); + isolate, options, service, &numbering_system_str); // 8. If _numberingSystem_ is not *undefined*, then // a. If _numberingSystem_ does not match the // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError* @@ -895,7 +867,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, requested_locales, matcher, relevant_extension_keys); UErrorCode status = U_ZERO_ERROR; - if (numbering_system_str != nullptr) { + if (numbering_system_str != nullptr && + Intl::IsValidNumberingSystem(numbering_system_str.get())) { r.icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status); CHECK(U_SUCCESS(status)); @@ -913,21 +886,15 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, .roundingMode(UNUM_ROUND_HALFUP); // 12. Let style be ? GetOption(options, "style", "string", « "decimal", - // "percent", "currency" », "decimal"). - const char* service = "Intl.NumberFormat"; + // "percent", "currency", "unit" », "decimal"). - std::vector<const char*> style_str_values({"decimal", "percent", "currency"}); - std::vector<JSNumberFormat::Style> style_enum_values( - {JSNumberFormat::Style::DECIMAL, JSNumberFormat::Style::PERCENT, - JSNumberFormat::Style::CURRENCY}); - if (FLAG_harmony_intl_numberformat_unified) { - style_str_values.push_back("unit"); - style_enum_values.push_back(JSNumberFormat::Style::UNIT); - } Maybe<JSNumberFormat::Style> maybe_style = Intl::GetStringOption<JSNumberFormat::Style>( - isolate, options, "style", service, style_str_values, - style_enum_values, JSNumberFormat::Style::DECIMAL); + isolate, options, "style", service, + {"decimal", "percent", "currency", "unit"}, + {JSNumberFormat::Style::DECIMAL, JSNumberFormat::Style::PERCENT, + JSNumberFormat::Style::CURRENCY, JSNumberFormat::Style::UNIT}, + JSNumberFormat::Style::DECIMAL); MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>()); JSNumberFormat::Style style = maybe_style.FromJust(); @@ -977,99 +944,87 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, } // 18. Let currencyDisplay be ? GetOption(options, "currencyDisplay", - // "string", « "code", "symbol", "name" », "symbol"). 
- std::vector<const char*> currency_display_str_values( - {"code", "symbol", "name"}); - std::vector<CurrencyDisplay> currency_display_enum_values( - {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL, CurrencyDisplay::NAME}); - if (FLAG_harmony_intl_numberformat_unified) { - currency_display_str_values.push_back("narrowSymbol"); - currency_display_enum_values.push_back(CurrencyDisplay::NARROW_SYMBOL); - } + // "string", « "code", "symbol", "name", "narrowSymbol" », "symbol"). Maybe<CurrencyDisplay> maybe_currency_display = Intl::GetStringOption<CurrencyDisplay>( isolate, options, "currencyDisplay", service, - currency_display_str_values, currency_display_enum_values, + {"code", "symbol", "name", "narrowSymbol"}, + {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL, + CurrencyDisplay::NAME, CurrencyDisplay::NARROW_SYMBOL}, CurrencyDisplay::SYMBOL); MAYBE_RETURN(maybe_currency_display, MaybeHandle<JSNumberFormat>()); CurrencyDisplay currency_display = maybe_currency_display.FromJust(); CurrencySign currency_sign = CurrencySign::STANDARD; - if (FLAG_harmony_intl_numberformat_unified) { - // Let currencySign be ? GetOption(options, "currencySign", "string", « - // "standard", "accounting" », "standard"). - Maybe<CurrencySign> maybe_currency_sign = - Intl::GetStringOption<CurrencySign>( - isolate, options, "currencySign", service, - {"standard", "accounting"}, - {CurrencySign::STANDARD, CurrencySign::ACCOUNTING}, - CurrencySign::STANDARD); - MAYBE_RETURN(maybe_currency_sign, MaybeHandle<JSNumberFormat>()); - currency_sign = maybe_currency_sign.FromJust(); - - // Let unit be ? GetOption(options, "unit", "string", undefined, undefined). - std::unique_ptr<char[]> unit_cstr; - Maybe<bool> found_unit = Intl::GetStringOption( - isolate, options, "unit", empty_values, service, &unit_cstr); - MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>()); - - std::string unit; - if (found_unit.FromJust()) { - DCHECK_NOT_NULL(unit_cstr.get()); - unit = unit_cstr.get(); + // Let currencySign be ? GetOption(options, "currencySign", "string", « + // "standard", "accounting" », "standard"). + Maybe<CurrencySign> maybe_currency_sign = Intl::GetStringOption<CurrencySign>( + isolate, options, "currencySign", service, {"standard", "accounting"}, + {CurrencySign::STANDARD, CurrencySign::ACCOUNTING}, + CurrencySign::STANDARD); + MAYBE_RETURN(maybe_currency_sign, MaybeHandle<JSNumberFormat>()); + currency_sign = maybe_currency_sign.FromJust(); + + // Let unit be ? GetOption(options, "unit", "string", undefined, undefined). + std::unique_ptr<char[]> unit_cstr; + Maybe<bool> found_unit = Intl::GetStringOption( + isolate, options, "unit", empty_values, service, &unit_cstr); + MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>()); + + std::string unit; + if (found_unit.FromJust()) { + DCHECK_NOT_NULL(unit_cstr.get()); + unit = unit_cstr.get(); + } + + // Let unitDisplay be ? GetOption(options, "unitDisplay", "string", « + // "short", "narrow", "long" », "short"). + Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>( + isolate, options, "unitDisplay", service, {"short", "narrow", "long"}, + {UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG}, + UnitDisplay::SHORT); + MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>()); + UnitDisplay unit_display = maybe_unit_display.FromJust(); + + // If style is "unit", then + if (style == JSNumberFormat::Style::UNIT) { + // If unit is undefined, throw a TypeError exception. 
+ if (unit == "") { + THROW_NEW_ERROR(isolate, + NewTypeError(MessageTemplate::kInvalidUnit, + factory->NewStringFromAsciiChecked(service), + factory->empty_string()), + JSNumberFormat); } - // Let unitDisplay be ? GetOption(options, "unitDisplay", "string", « - // "short", "narrow", "long" », "short"). - Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>( - isolate, options, "unitDisplay", service, {"short", "narrow", "long"}, - {UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG}, - UnitDisplay::SHORT); - MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>()); - UnitDisplay unit_display = maybe_unit_display.FromJust(); - - // If style is "unit", then - if (style == JSNumberFormat::Style::UNIT) { - // If unit is undefined, throw a TypeError exception. - if (unit == "") { - THROW_NEW_ERROR( - isolate, - NewTypeError(MessageTemplate::kInvalidUnit, - factory->NewStringFromStaticChars("Intl.NumberFormat"), - factory->empty_string()), - JSNumberFormat); - } - - // If the result of IsWellFormedUnitIdentifier(unit) is false, throw a - // RangeError exception. - Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed = - IsWellFormedUnitIdentifier(isolate, unit); - if (maybe_wellformed.IsNothing()) { - THROW_NEW_ERROR( - isolate, - NewRangeError( - MessageTemplate::kInvalidUnit, - factory->NewStringFromStaticChars("Intl.NumberFormat"), - factory->NewStringFromAsciiChecked(unit.c_str())), - JSNumberFormat); - } - std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair = - maybe_wellformed.FromJust(); + // If the result of IsWellFormedUnitIdentifier(unit) is false, throw a + // RangeError exception. + Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed = + IsWellFormedUnitIdentifier(isolate, unit); + if (maybe_wellformed.IsNothing()) { + THROW_NEW_ERROR( + isolate, + NewRangeError(MessageTemplate::kInvalidUnit, + factory->NewStringFromAsciiChecked(service), + factory->NewStringFromAsciiChecked(unit.c_str())), + JSNumberFormat); + } + std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair = + maybe_wellformed.FromJust(); - // Set intlObj.[[Unit]] to unit. - if (unit_pair.first != icu::NoUnit::base()) { - icu_number_formatter = icu_number_formatter.unit(unit_pair.first); - } - if (unit_pair.second != icu::NoUnit::base()) { - icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second); - } + // Set intlObj.[[Unit]] to unit. + if (unit_pair.first != icu::NoUnit::base()) { + icu_number_formatter = icu_number_formatter.unit(unit_pair.first); + } + if (unit_pair.second != icu::NoUnit::base()) { + icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second); + } - // The default unitWidth is SHORT in ICU and that mapped from - // Symbol so we can skip the setting for optimization. - if (unit_display != UnitDisplay::SHORT) { - icu_number_formatter = - icu_number_formatter.unitWidth(ToUNumberUnitWidth(unit_display)); - } + // The default unitWidth is SHORT in ICU and that mapped from + // Symbol so we can skip the setting for optimization. + if (unit_display != UnitDisplay::SHORT) { + icu_number_formatter = + icu_number_formatter.unitWidth(ToUNumberUnitWidth(unit_display)); } } @@ -1125,18 +1080,16 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, } Notation notation = Notation::STANDARD; - if (FLAG_harmony_intl_numberformat_unified) { - // 25. Let notation be ? GetOption(options, "notation", "string", « - // "standard", "scientific", "engineering", "compact" », "standard"). 
- Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>( - isolate, options, "notation", service, - {"standard", "scientific", "engineering", "compact"}, - {Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING, - Notation::COMPACT}, - Notation::STANDARD); - MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>()); - notation = maybe_notation.FromJust(); - } + // 25. Let notation be ? GetOption(options, "notation", "string", « + // "standard", "scientific", "engineering", "compact" », "standard"). + Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>( + isolate, options, "notation", service, + {"standard", "scientific", "engineering", "compact"}, + {Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING, + Notation::COMPACT}, + Notation::STANDARD); + MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>()); + notation = maybe_notation.FromJust(); // 27. Perform ? SetNumberFormatDigitOptions(numberFormat, options, // mnfdDefault, mxfdDefault). @@ -1149,24 +1102,21 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter( icu_number_formatter, digit_options); - if (FLAG_harmony_intl_numberformat_unified) { - // 28. Let compactDisplay be ? GetOption(options, "compactDisplay", - // "string", « "short", "long" », "short"). - Maybe<CompactDisplay> maybe_compact_display = - Intl::GetStringOption<CompactDisplay>( - isolate, options, "compactDisplay", service, {"short", "long"}, - {CompactDisplay::SHORT, CompactDisplay::LONG}, - CompactDisplay::SHORT); - MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>()); - CompactDisplay compact_display = maybe_compact_display.FromJust(); - - // 26. Set numberFormat.[[Notation]] to notation. - // The default notation in ICU is Simple, which mapped from STANDARD - // so we can skip setting it. - if (notation != Notation::STANDARD) { - icu_number_formatter = icu_number_formatter.notation( - ToICUNotation(notation, compact_display)); - } + // 28. Let compactDisplay be ? GetOption(options, "compactDisplay", + // "string", « "short", "long" », "short"). + Maybe<CompactDisplay> maybe_compact_display = + Intl::GetStringOption<CompactDisplay>( + isolate, options, "compactDisplay", service, {"short", "long"}, + {CompactDisplay::SHORT, CompactDisplay::LONG}, CompactDisplay::SHORT); + MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>()); + CompactDisplay compact_display = maybe_compact_display.FromJust(); + + // 26. Set numberFormat.[[Notation]] to notation. + // The default notation in ICU is Simple, which mapped from STANDARD + // so we can skip setting it. + if (notation != Notation::STANDARD) { + icu_number_formatter = + icu_number_formatter.notation(ToICUNotation(notation, compact_display)); } // 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean", // undefined, true). @@ -1180,27 +1130,25 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, UNumberGroupingStrategy::UNUM_GROUPING_OFF); } - if (FLAG_harmony_intl_numberformat_unified) { - // 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", « - // "auto", "never", "always", "exceptZero" », "auto"). 
- Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>( - isolate, options, "signDisplay", service, - {"auto", "never", "always", "exceptZero"}, - {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS, - SignDisplay::EXCEPT_ZERO}, - SignDisplay::AUTO); - MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>()); - SignDisplay sign_display = maybe_sign_display.FromJust(); - - // 33. Set numberFormat.[[SignDisplay]] to signDisplay. - // The default sign in ICU is UNUM_SIGN_AUTO which is mapped from - // SignDisplay::AUTO and CurrencySign::STANDARD so we can skip setting - // under that values for optimization. - if (sign_display != SignDisplay::AUTO || - currency_sign != CurrencySign::STANDARD) { - icu_number_formatter = icu_number_formatter.sign( - ToUNumberSignDisplay(sign_display, currency_sign)); - } + // 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", « + // "auto", "never", "always", "exceptZero" », "auto"). + Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>( + isolate, options, "signDisplay", service, + {"auto", "never", "always", "exceptZero"}, + {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS, + SignDisplay::EXCEPT_ZERO}, + SignDisplay::AUTO); + MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>()); + SignDisplay sign_display = maybe_sign_display.FromJust(); + + // 33. Set numberFormat.[[SignDisplay]] to signDisplay. + // The default sign in ICU is UNUM_SIGN_AUTO which is mapped from + // SignDisplay::AUTO and CurrencySign::STANDARD so we can skip setting + // under that values for optimization. + if (sign_display != SignDisplay::AUTO || + currency_sign != CurrencySign::STANDARD) { + icu_number_formatter = icu_number_formatter.sign( + ToUNumberSignDisplay(sign_display, currency_sign)); } // 25. Let dataLocaleData be localeData.[[<dataLocale>]]. @@ -1231,24 +1179,6 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate, number_format->set_style(style); number_format->set_locale(*locale_str); - if (digit_options.minimum_significant_digits > 0) { - // The current ECMA 402 spec mandates recording (Min|Max)imumFractionDigits - // unconditionally, while the unified number proposal eventually will only - // record either (Min|Max)imumFractionDigits or - // (Min|Max)imumSignificantDigits. Since LocalizedNumberFormatter can only - // remember one set, and during 2019-1-17 ECMA402 meeting the committee - // decided not to take a PR to address that prior to the unified number - // proposal, we have to add these two 5-bit ints into flags to remember the - // (Min|Max)imumFractionDigits while (Min|Max)imumSignificantDigits is - // present. 
- // TODO(ftang) remove the following two lines once we ship - // int-number-format-unified - number_format->set_minimum_fraction_digits( - digit_options.minimum_fraction_digits); - number_format->set_maximum_fraction_digits( - digit_options.maximum_fraction_digits); - } - number_format->set_icu_number_formatter(*managed_number_formatter); number_format->set_bound_format(*factory->undefined_value()); diff --git a/chromium/v8/src/objects/js-number-format.h b/chromium/v8/src/objects/js-number-format.h index 2979ab10f43..a5196f8d517 100644 --- a/chromium/v8/src/objects/js-number-format.h +++ b/chromium/v8/src/objects/js-number-format.h @@ -36,7 +36,7 @@ class JSNumberFormat : public JSObject { // ecma402/#sec-initializenumberformat V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> New( Isolate* isolate, Handle<Map> map, Handle<Object> locales, - Handle<Object> options); + Handle<Object> options, const char* service); // ecma402/#sec-unwrapnumberformat V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> UnwrapNumberFormat( @@ -72,26 +72,6 @@ class JSNumberFormat : public JSObject { DECL_PRINTER(JSNumberFormat) DECL_VERIFIER(JSNumberFormat) - // Current ECMA 402 spec mandates to record (Min|Max)imumFractionDigits - // unconditionally while the unified number proposal eventually will only - // record either (Min|Max)imumFractionDigits or (Min|Max)imumSignaficantDigits - // Since LocalizedNumberFormatter can only remember one set, and during - // 2019-1-17 ECMA402 meeting that the committee decide not to take a PR to - // address that prior to the unified number proposal, we have to add these two - // 5 bits int into flags to remember the (Min|Max)imumFractionDigits while - // (Min|Max)imumSignaficantDigits is present. - // TODO(ftang) remove the following once we ship int-number-format-unified - // * Four inline functions: (set_)?(min|max)imum_fraction_digits - // * kFlagsOffset - // * #define FLAGS_BIT_FIELDS - // * DECL_INT_ACCESSORS(flags) - - inline int minimum_fraction_digits() const; - inline void set_minimum_fraction_digits(int digits); - - inline int maximum_fraction_digits() const; - inline void set_maximum_fraction_digits(int digits); - // [[Style]] is one of the values "decimal", "percent", "currency", // or "unit" identifying the style of the number format. // Note: "unit" is added in proposal-unified-intl-numberformat @@ -102,19 +82,15 @@ class JSNumberFormat : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSNUMBER_FORMAT_FIELDS) + TORQUE_GENERATED_JS_NUMBER_FORMAT_FIELDS) // Bit positions in |flags|. 
#define FLAGS_BIT_FIELDS(V, _) \ - V(MinimumFractionDigitsBits, int, 5, _) \ - V(MaximumFractionDigitsBits, int, 5, _) \ V(StyleBits, Style, 2, _) DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS) #undef FLAGS_BIT_FIELDS - STATIC_ASSERT(20 <= MinimumFractionDigitsBits::kMax); - STATIC_ASSERT(20 <= MaximumFractionDigitsBits::kMax); STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax); STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax); STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax); diff --git a/chromium/v8/src/objects/js-objects-inl.h b/chromium/v8/src/objects/js-objects-inl.h index f8fe069d3dd..a6b9e9ad83d 100644 --- a/chromium/v8/src/objects/js-objects-inl.h +++ b/chromium/v8/src/objects/js-objects-inl.h @@ -31,16 +31,19 @@ namespace internal { OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject) TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject) +TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject) +TQ_OBJECT_CONSTRUCTORS_IMPL(JSSpecialObject) TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator) +TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction) TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction) TQ_OBJECT_CONSTRUCTORS_IMPL(JSDate) -OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSObject) -OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSObject) +OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction) +OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSSpecialObject) TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy) JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {} OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject) TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper) -OBJECT_CONSTRUCTORS_IMPL(JSStringIterator, JSObject) +TQ_OBJECT_CONSTRUCTORS_IMPL(JSStringIterator) NEVER_READ_ONLY_SPACE_IMPL(JSReceiver) @@ -49,7 +52,6 @@ CAST_ACCESSOR(JSGlobalObject) CAST_ACCESSOR(JSIteratorResult) CAST_ACCESSOR(JSMessageObject) CAST_ACCESSOR(JSReceiver) -CAST_ACCESSOR(JSStringIterator) MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate, Handle<JSReceiver> receiver, @@ -375,7 +377,7 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object value) { } } -void JSObject::WriteToField(int descriptor, PropertyDetails details, +void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details, Object value) { DCHECK_EQ(kField, details.location()); DCHECK_EQ(kData, details.kind()); @@ -540,7 +542,9 @@ Code JSFunction::code() const { void JSFunction::set_code(Code value) { DCHECK(!ObjectInYoungGeneration(value)); RELAXED_WRITE_FIELD(*this, kCodeOffset, value); +#ifndef V8_DISABLE_WRITE_BARRIERS MarkingBarrier(*this, RawField(kCodeOffset), value); +#endif } void JSFunction::set_code_no_write_barrier(Code value) { @@ -1007,8 +1011,7 @@ inline int JSGlobalProxy::SizeWithEmbedderFields(int embedder_field_count) { ACCESSORS(JSIteratorResult, value, Object, kValueOffset) ACCESSORS(JSIteratorResult, done, Object, kDoneOffset) -ACCESSORS(JSStringIterator, string, String, kStringOffset) -SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset) +TQ_SMI_ACCESSORS(JSStringIterator, index) // If the fast-case backing storage takes up much more memory than a dictionary // backing storage would, the object should have slow elements. 
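Not part of the patch: a short TypeScript/JavaScript sketch of the Intl.NumberFormat options that the js-number-format hunks above stop gating behind FLAG_harmony_intl_numberformat_unified (unit style, narrowSymbol/accounting currency handling, compact notation, signDisplay) and of the resolvedOptions() change that reports either significant-digit or fraction-digit bounds, not both. Expected outputs in comments are illustrative assumptions.

// style: "unit" now throws TypeError when "unit" is missing, RangeError when
// the unit is not a well-formed unit identifier (per the hunks above).
const km = new Intl.NumberFormat("en-US", {
  style: "unit",
  unit: "kilometer-per-hour",
  unitDisplay: "short",
});
console.log(km.format(120)); // e.g. "120 km/h"

const compact = new Intl.NumberFormat("en-US", {
  notation: "compact",
  compactDisplay: "short",
  signDisplay: "exceptZero",
});
console.log(compact.format(12500)); // e.g. "+13K"

const accounting = new Intl.NumberFormat("en-US", {
  style: "currency",
  currency: "USD",
  currencySign: "accounting",
  currencyDisplay: "narrowSymbol",
});
console.log(accounting.format(-42)); // e.g. "($42.00)"

// Per the ResolvedOptions hunk, significant-digit bounds suppress the
// fraction-digit bounds in the reported options:
console.log(new Intl.NumberFormat("en", { maximumSignificantDigits: 3 }).resolvedOptions());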
diff --git a/chromium/v8/src/objects/js-objects.cc b/chromium/v8/src/objects/js-objects.cc index 3666f5afbe2..ea0917f18fe 100644 --- a/chromium/v8/src/objects/js-objects.cc +++ b/chromium/v8/src/objects/js-objects.cc @@ -216,15 +216,19 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign( } Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate); - int length = map->NumberOfOwnDescriptors(); bool stable = true; - for (int i = 0; i < length; i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { + HandleScope inner_scope(isolate); + Handle<Name> next_key(descriptors->GetKey(i), isolate); Handle<Object> prop_value; // Directly decode from the descriptor array if |from| did not change shape. if (stable) { + DCHECK_EQ(from->map(), *map); + DCHECK_EQ(*descriptors, map->instance_descriptors()); + PropertyDetails details = descriptors->GetDetails(i); if (!details.IsEnumerable()) continue; if (details.kind() == kData) { @@ -232,7 +236,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign( prop_value = handle(descriptors->GetStrongValue(i), isolate); } else { Representation representation = details.representation(); - FieldIndex index = FieldIndex::ForDescriptor(*map, i); + FieldIndex index = FieldIndex::ForPropertyIndex( + *map, details.field_index(), representation); prop_value = JSObject::FastPropertyAt(from, representation, index); } } else { @@ -240,6 +245,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign( isolate, prop_value, JSReceiver::GetProperty(isolate, from, next_key), Nothing<bool>()); stable = from->map() == *map; + *descriptors.location() = map->instance_descriptors().ptr(); } } else { // If the map did change, do a slower lookup. We are still guaranteed that @@ -260,7 +266,10 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign( Object::SetProperty(&it, prop_value, StoreOrigin::kNamed, Just(ShouldThrow::kThrowOnError)); if (result.IsNothing()) return result; - if (stable) stable = from->map() == *map; + if (stable) { + stable = from->map() == *map; + *descriptors.location() = map->instance_descriptors().ptr(); + } } else { if (excluded_properties != nullptr && HasExcludedProperty(excluded_properties, next_key)) { @@ -1094,8 +1103,7 @@ Maybe<bool> SetPropertyWithInterceptorInternal( Maybe<bool> DefinePropertyWithInterceptorInternal( LookupIterator* it, Handle<InterceptorInfo> interceptor, - Maybe<ShouldThrow> should_throw, - PropertyDescriptor& desc) { // NOLINT(runtime/references) + Maybe<ShouldThrow> should_throw, PropertyDescriptor* desc) { Isolate* isolate = it->isolate(); // Make sure that the top context does not change when doing callbacks or // interceptor calls. 
@@ -1116,23 +1124,23 @@ Maybe<bool> DefinePropertyWithInterceptorInternal( std::unique_ptr<v8::PropertyDescriptor> descriptor( new v8::PropertyDescriptor()); - if (PropertyDescriptor::IsAccessorDescriptor(&desc)) { + if (PropertyDescriptor::IsAccessorDescriptor(desc)) { descriptor.reset(new v8::PropertyDescriptor( - v8::Utils::ToLocal(desc.get()), v8::Utils::ToLocal(desc.set()))); - } else if (PropertyDescriptor::IsDataDescriptor(&desc)) { - if (desc.has_writable()) { + v8::Utils::ToLocal(desc->get()), v8::Utils::ToLocal(desc->set()))); + } else if (PropertyDescriptor::IsDataDescriptor(desc)) { + if (desc->has_writable()) { descriptor.reset(new v8::PropertyDescriptor( - v8::Utils::ToLocal(desc.value()), desc.writable())); + v8::Utils::ToLocal(desc->value()), desc->writable())); } else { descriptor.reset( - new v8::PropertyDescriptor(v8::Utils::ToLocal(desc.value()))); + new v8::PropertyDescriptor(v8::Utils::ToLocal(desc->value()))); } } - if (desc.has_enumerable()) { - descriptor->set_enumerable(desc.enumerable()); + if (desc->has_enumerable()) { + descriptor->set_enumerable(desc->enumerable()); } - if (desc.has_configurable()) { - descriptor->set_configurable(desc.configurable()); + if (desc->has_configurable()) { + descriptor->set_configurable(desc->configurable()); } if (it->IsElement()) { @@ -1166,7 +1174,7 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty( if (it->state() == LookupIterator::INTERCEPTOR) { if (it->HolderIsReceiverOrHiddenPrototype()) { Maybe<bool> result = DefinePropertyWithInterceptorInternal( - it, it->GetInterceptor(), should_throw, *desc); + it, it->GetInterceptor(), should_throw, desc); if (result.IsNothing() || result.FromJust()) { return result; } @@ -1834,8 +1842,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries( if (!map->OnlyHasSimpleProperties()) return Just(false); Handle<JSObject> object(JSObject::cast(*receiver), isolate); - Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate); + int number_of_own_descriptors = map->NumberOfOwnDescriptors(); int number_of_own_elements = object->GetElementsAccessor()->GetCapacity(*object, object->elements()); @@ -1857,15 +1865,25 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries( Nothing<bool>()); } - bool stable = object->map() == *map; + // We may have already lost stability, if CollectValuesOrEntries had + // side-effects. + bool stable = *map == object->map(); + if (stable) { + *descriptors.location() = map->instance_descriptors().ptr(); + } + + for (InternalIndex index : InternalIndex::Range(number_of_own_descriptors)) { + HandleScope inner_scope(isolate); - for (int index = 0; index < number_of_own_descriptors; index++) { Handle<Name> next_key(descriptors->GetKey(index), isolate); if (!next_key->IsString()) continue; Handle<Object> prop_value; // Directly decode from the descriptor array if |from| did not change shape. 
if (stable) { + DCHECK_EQ(object->map(), *map); + DCHECK_EQ(*descriptors, map->instance_descriptors()); + PropertyDetails details = descriptors->GetDetails(index); if (!details.IsEnumerable()) continue; if (details.kind() == kData) { @@ -1873,7 +1891,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries( prop_value = handle(descriptors->GetStrongValue(index), isolate); } else { Representation representation = details.representation(); - FieldIndex field_index = FieldIndex::ForDescriptor(*map, index); + FieldIndex field_index = FieldIndex::ForPropertyIndex( + *map, details.field_index(), representation); prop_value = JSObject::FastPropertyAt(object, representation, field_index); } @@ -1883,6 +1902,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries( JSReceiver::GetProperty(isolate, object, next_key), Nothing<bool>()); stable = object->map() == *map; + *descriptors.location() = map->instance_descriptors().ptr(); } } else { // If the map did change, do a slower lookup. We are still guaranteed that @@ -2121,15 +2141,15 @@ int JSObject::GetHeaderSize(InstanceType type, return JSWeakSet::kSize; case JS_PROMISE_TYPE: return JSPromise::kSize; - case JS_REGEXP_TYPE: + case JS_REG_EXP_TYPE: return JSRegExp::kSize; - case JS_REGEXP_STRING_ITERATOR_TYPE: + case JS_REG_EXP_STRING_ITERATOR_TYPE: return JSRegExpStringIterator::kSize; case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return JSObject::kHeaderSize; case JS_MESSAGE_OBJECT_TYPE: return JSMessageObject::kSize; - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: return JSObject::kHeaderSize; case JS_ERROR_TYPE: return JSObject::kHeaderSize; @@ -2138,38 +2158,38 @@ int JSObject::GetHeaderSize(InstanceType type, case JS_MODULE_NAMESPACE_TYPE: return JSModuleNamespace::kHeaderSize; #ifdef V8_INTL_SUPPORT - case JS_INTL_V8_BREAK_ITERATOR_TYPE: + case JS_V8_BREAK_ITERATOR_TYPE: return JSV8BreakIterator::kSize; - case JS_INTL_COLLATOR_TYPE: + case JS_COLLATOR_TYPE: return JSCollator::kSize; - case JS_INTL_DATE_TIME_FORMAT_TYPE: + case JS_DATE_TIME_FORMAT_TYPE: return JSDateTimeFormat::kSize; - case JS_INTL_LIST_FORMAT_TYPE: + case JS_LIST_FORMAT_TYPE: return JSListFormat::kSize; - case JS_INTL_LOCALE_TYPE: + case JS_LOCALE_TYPE: return JSLocale::kSize; - case JS_INTL_NUMBER_FORMAT_TYPE: + case JS_NUMBER_FORMAT_TYPE: return JSNumberFormat::kSize; - case JS_INTL_PLURAL_RULES_TYPE: + case JS_PLURAL_RULES_TYPE: return JSPluralRules::kSize; - case JS_INTL_RELATIVE_TIME_FORMAT_TYPE: + case JS_RELATIVE_TIME_FORMAT_TYPE: return JSRelativeTimeFormat::kSize; - case JS_INTL_SEGMENT_ITERATOR_TYPE: + case JS_SEGMENT_ITERATOR_TYPE: return JSSegmentIterator::kSize; - case JS_INTL_SEGMENTER_TYPE: + case JS_SEGMENTER_TYPE: return JSSegmenter::kSize; #endif // V8_INTL_SUPPORT - case WASM_GLOBAL_TYPE: + case WASM_GLOBAL_OBJECT_TYPE: return WasmGlobalObject::kSize; - case WASM_INSTANCE_TYPE: + case WASM_INSTANCE_OBJECT_TYPE: return WasmInstanceObject::kSize; - case WASM_MEMORY_TYPE: + case WASM_MEMORY_OBJECT_TYPE: return WasmMemoryObject::kSize; - case WASM_MODULE_TYPE: + case WASM_MODULE_OBJECT_TYPE: return WasmModuleObject::kSize; - case WASM_TABLE_TYPE: + case WASM_TABLE_OBJECT_TYPE: return WasmTableObject::kSize; - case WASM_EXCEPTION_TYPE: + case WASM_EXCEPTION_OBJECT_TYPE: return WasmExceptionObject::kSize; default: UNREACHABLE(); @@ -2377,7 +2397,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) { accumulator->Add("<JSWeakSet>"); break; } - case JS_REGEXP_TYPE: { + case JS_REG_EXP_TYPE: { accumulator->Add("<JSRegExp"); JSRegExp 
regexp = JSRegExp::cast(*this); if (regexp.source().IsString()) { @@ -2506,7 +2526,7 @@ void JSObject::PrintInstanceMigration(FILE* file, Map original_map, PrintF(file, "[migrating]"); DescriptorArray o = original_map.instance_descriptors(); DescriptorArray n = new_map.instance_descriptors(); - for (int i = 0; i < original_map.NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : original_map.IterateOwnDescriptors()) { Representation o_r = o.GetDetails(i).representation(); Representation n_r = n.GetDetails(i).representation(); if (!o_r.Equals(n_r)) { @@ -2703,7 +2723,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object, // number of properties. DCHECK(old_nof <= new_nof); - for (int i = 0; i < old_nof; i++) { + for (InternalIndex i : InternalIndex::Range(old_nof)) { PropertyDetails details = new_descriptors->GetDetails(i); if (details.location() != kField) continue; DCHECK_EQ(kData, details.kind()); @@ -2753,7 +2773,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object, } } - for (int i = old_nof; i < new_nof; i++) { + for (InternalIndex i : InternalIndex::Range(old_nof, new_nof)) { PropertyDetails details = new_descriptors->GetDetails(i); if (details.location() != kField) continue; DCHECK_EQ(kData, details.kind()); @@ -2776,9 +2796,10 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object, Heap* heap = isolate->heap(); - int old_instance_size = old_map->instance_size(); - - heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation); + // Invalidate slots manually later in case of tagged to untagged translation. + // In all other cases the recorded slot remains dereferenceable. + heap->NotifyObjectLayoutChange(*object, no_allocation, + InvalidateRecordedSlots::kNo); // Copy (real) inobject properties. If necessary, stop at number_of_fields to // avoid overwriting |one_pointer_filler_map|. @@ -2795,7 +2816,8 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object, index, HeapNumber::cast(value).value_as_bits()); if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) { // Transition from tagged to untagged slot. - heap->ClearRecordedSlot(*object, object->RawField(index.offset())); + MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object); + chunk->InvalidateRecordedSlots(*object); } else { #ifdef DEBUG heap->VerifyClearedSlot(*object, object->RawField(index.offset())); @@ -2809,6 +2831,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object, object->SetProperties(*array); // Create filler object past the new instance size. 
+ int old_instance_size = old_map->instance_size(); int new_instance_size = new_map->instance_size(); int instance_size_delta = old_instance_size - new_instance_size; DCHECK_GE(instance_size_delta, 0); @@ -2851,7 +2874,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object, NameDictionary::New(isolate, property_count); Handle<DescriptorArray> descs(map->instance_descriptors(isolate), isolate); - for (int i = 0; i < real_size; i++) { + for (InternalIndex i : InternalIndex::Range(real_size)) { PropertyDetails details = descs->GetDetails(i); Handle<Name> key(descs->GetKey(isolate, i), isolate); Handle<Object> value; @@ -2891,10 +2914,15 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object, DisallowHeapAllocation no_allocation; Heap* heap = isolate->heap(); - int old_instance_size = map->instance_size(); - heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation); + + // Invalidate slots manually later in case the new map has in-object + // properties. If not, it is not possible to store an untagged value + // in a recorded slot. + heap->NotifyObjectLayoutChange(*object, no_allocation, + InvalidateRecordedSlots::kNo); // Resize the object in the heap if necessary. + int old_instance_size = map->instance_size(); int new_instance_size = new_map->instance_size(); int instance_size_delta = old_instance_size - new_instance_size; DCHECK_GE(instance_size_delta, 0); @@ -2914,10 +2942,8 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object, // garbage. int inobject_properties = new_map->GetInObjectProperties(); if (inobject_properties) { - Heap* heap = isolate->heap(); - heap->ClearRecordedSlotRange( - object->address() + map->GetInObjectPropertyOffset(0), - object->address() + new_instance_size); + MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object); + chunk->InvalidateRecordedSlots(*object); for (int i = 0; i < inobject_properties; i++) { FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i); @@ -3047,7 +3073,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) { Handle<PropertyArray> array = isolate->factory()->NewPropertyArray(external); - for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { PropertyDetails details = descriptors->GetDetails(i); Representation representation = details.representation(); if (!representation.IsDouble()) continue; @@ -3344,8 +3370,8 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object, } // Allocate the instance descriptor. 
- Handle<DescriptorArray> descriptors = DescriptorArray::Allocate( - isolate, instance_descriptor_length, 0, AllocationType::kOld); + Handle<DescriptorArray> descriptors = + DescriptorArray::Allocate(isolate, instance_descriptor_length, 0); int number_of_allocated_fields = number_of_fields + unused_property_fields - inobject_props; @@ -3410,7 +3436,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object, } current_offset += details.field_width_in_words(); } - descriptors->Set(i, &d); + descriptors->Set(InternalIndex(i), &d); } DCHECK(current_offset == number_of_fields); @@ -3441,6 +3467,8 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object, } void JSObject::RequireSlowElements(NumberDictionary dictionary) { + DCHECK_NE(dictionary, + ReadOnlyRoots(GetIsolate()).empty_slow_element_dictionary()); if (dictionary.requires_slow_elements()) return; dictionary.set_requires_slow_elements(); if (map().is_prototype_map()) { @@ -3603,8 +3631,7 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) { DCHECK(!map.is_dictionary_map()); DescriptorArray descriptors = map.instance_descriptors(); - int number_of_own_descriptors = map.NumberOfOwnDescriptors(); - for (int i = 0; i < number_of_own_descriptors; i++) { + for (InternalIndex i : map.IterateOwnDescriptors()) { if (descriptors.GetKey(i).IsPrivate()) continue; PropertyDetails details = descriptors.GetDetails(i); if (details.IsConfigurable()) return false; @@ -3709,7 +3736,9 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object, object->HasSlowArgumentsElements()); // Make sure that we never go back to fast case. - object->RequireSlowElements(*dictionary); + if (*dictionary != ReadOnlyRoots(isolate).empty_slow_element_dictionary()) { + object->RequireSlowElements(*dictionary); + } } // Do a map transition, other objects with this map may still @@ -4136,10 +4165,9 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object, Object JSObject::SlowReverseLookup(Object value) { if (HasFastProperties()) { - int number_of_own_descriptors = map().NumberOfOwnDescriptors(); DescriptorArray descs = map().instance_descriptors(); bool value_is_number = value.IsNumber(); - for (int i = 0; i < number_of_own_descriptors; i++) { + for (InternalIndex i : map().IterateOwnDescriptors()) { PropertyDetails details = descs.GetDetails(i); if (details.location() == kField) { DCHECK_EQ(kData, details.kind()); @@ -5187,16 +5215,16 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) { case JS_FUNCTION_TYPE: case JS_GENERATOR_OBJECT_TYPE: #ifdef V8_INTL_SUPPORT - case JS_INTL_COLLATOR_TYPE: - case JS_INTL_DATE_TIME_FORMAT_TYPE: - case JS_INTL_LIST_FORMAT_TYPE: - case JS_INTL_LOCALE_TYPE: - case JS_INTL_NUMBER_FORMAT_TYPE: - case JS_INTL_PLURAL_RULES_TYPE: - case JS_INTL_RELATIVE_TIME_FORMAT_TYPE: - case JS_INTL_SEGMENT_ITERATOR_TYPE: - case JS_INTL_SEGMENTER_TYPE: - case JS_INTL_V8_BREAK_ITERATOR_TYPE: + case JS_COLLATOR_TYPE: + case JS_DATE_TIME_FORMAT_TYPE: + case JS_LIST_FORMAT_TYPE: + case JS_LOCALE_TYPE: + case JS_NUMBER_FORMAT_TYPE: + case JS_PLURAL_RULES_TYPE: + case JS_RELATIVE_TIME_FORMAT_TYPE: + case JS_SEGMENT_ITERATOR_TYPE: + case JS_SEGMENTER_TYPE: + case JS_V8_BREAK_ITERATOR_TYPE: #endif case JS_ASYNC_FUNCTION_OBJECT_TYPE: case JS_ASYNC_GENERATOR_OBJECT_TYPE: @@ -5205,9 +5233,9 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) { case JS_OBJECT_TYPE: case JS_ERROR_TYPE: case JS_FINALIZATION_GROUP_TYPE: - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: case 
JS_PROMISE_TYPE: - case JS_REGEXP_TYPE: + case JS_REG_EXP_TYPE: case JS_SET_TYPE: case JS_SPECIAL_API_OBJECT_TYPE: case JS_TYPED_ARRAY_TYPE: @@ -5215,11 +5243,11 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) { case JS_WEAK_MAP_TYPE: case JS_WEAK_REF_TYPE: case JS_WEAK_SET_TYPE: - case WASM_GLOBAL_TYPE: - case WASM_INSTANCE_TYPE: - case WASM_MEMORY_TYPE: - case WASM_MODULE_TYPE: - case WASM_TABLE_TYPE: + case WASM_GLOBAL_OBJECT_TYPE: + case WASM_INSTANCE_OBJECT_TYPE: + case WASM_MEMORY_OBJECT_TYPE: + case WASM_MODULE_OBJECT_TYPE: + case WASM_TABLE_OBJECT_TYPE: return true; case BIGINT_TYPE: diff --git a/chromium/v8/src/objects/js-objects.h b/chromium/v8/src/objects/js-objects.h index a9510642f1b..f38cbe16e69 100644 --- a/chromium/v8/src/objects/js-objects.h +++ b/chromium/v8/src/objects/js-objects.h @@ -6,6 +6,8 @@ #define V8_OBJECTS_JS_OBJECTS_H_ #include "src/objects/embedder-data-slot.h" +// TODO(jkummerow): Consider forward-declaring instead. +#include "src/objects/internal-index.h" #include "src/objects/objects.h" #include "src/objects/property-array.h" #include "torque-generated/class-definitions-tq.h" @@ -264,7 +266,7 @@ class JSReceiver : public HeapObject { static const int kHashMask = PropertyArray::HashField::kMask; DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - TORQUE_GENERATED_JSRECEIVER_FIELDS) + TORQUE_GENERATED_JS_RECEIVER_FIELDS) bool HasProxyInPrototype(Isolate* isolate); V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetPrivateEntries( @@ -631,7 +633,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> { FieldIndex index, Object value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits); - inline void WriteToField(int descriptor, PropertyDetails details, + inline void WriteToField(InternalIndex descriptor, PropertyDetails details, Object value); // Access to in object properties. @@ -806,6 +808,29 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> { TQ_OBJECT_CONSTRUCTORS(JSObject) }; +// An abstract superclass for JSObjects that may have elements while having an +// empty fixed array as elements backing store. It doesn't carry any +// functionality but allows function classes to be identified in the type +// system. +class JSCustomElementsObject + : public TorqueGeneratedJSCustomElementsObject<JSCustomElementsObject, + JSObject> { + public: + STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize); + TQ_OBJECT_CONSTRUCTORS(JSCustomElementsObject) +}; + +// An abstract superclass for JSObjects that require non-standard element +// access. It doesn't carry any functionality but allows function classes to be +// identified in the type system. +class JSSpecialObject + : public TorqueGeneratedJSSpecialObject<JSSpecialObject, + JSCustomElementsObject> { + public: + STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize); + TQ_OBJECT_CONSTRUCTORS(JSSpecialObject) +}; + // JSAccessorPropertyDescriptor is just a JSObject with a specific initial // map. This initial map adds in-object properties for "get", "set", // "enumerable" and "configurable" properties, as assigned by the @@ -893,9 +918,21 @@ class JSIteratorResult : public JSObject { OBJECT_CONSTRUCTORS(JSIteratorResult, JSObject); }; +// An abstract superclass for classes representing JavaScript function values. +// It doesn't carry any functionality but allows function classes to be +// identified in the type system. 
+class JSFunctionOrBoundFunction + : public TorqueGeneratedJSFunctionOrBoundFunction<JSFunctionOrBoundFunction, + JSObject> { + public: + STATIC_ASSERT(kHeaderSize == JSObject::kHeaderSize); + TQ_OBJECT_CONSTRUCTORS(JSFunctionOrBoundFunction) +}; + // JSBoundFunction describes a bound function exotic object. class JSBoundFunction - : public TorqueGeneratedJSBoundFunction<JSBoundFunction, JSObject> { + : public TorqueGeneratedJSBoundFunction<JSBoundFunction, + JSFunctionOrBoundFunction> { public: static MaybeHandle<String> GetName(Isolate* isolate, Handle<JSBoundFunction> function); @@ -916,7 +953,7 @@ class JSBoundFunction }; // JSFunction describes JavaScript functions. -class JSFunction : public JSObject { +class JSFunction : public JSFunctionOrBoundFunction { public: // [prototype_or_initial_map]: DECL_ACCESSORS(prototype_or_initial_map, HeapObject) @@ -1119,13 +1156,13 @@ class JSFunction : public JSObject { // ES6 section 19.2.3.5 Function.prototype.toString ( ). static Handle<String> ToString(Handle<JSFunction> function); - DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSFUNCTION_FIELDS) + DEFINE_FIELD_OFFSET_CONSTANTS(JSFunctionOrBoundFunction::kHeaderSize, + TORQUE_GENERATED_JS_FUNCTION_FIELDS) static constexpr int kSizeWithoutPrototype = kPrototypeOrInitialMapOffset; static constexpr int kSizeWithPrototype = kSize; - OBJECT_CONSTRUCTORS(JSFunction, JSObject); + OBJECT_CONSTRUCTORS(JSFunction, JSFunctionOrBoundFunction); }; // JSGlobalProxy's prototype must be a JSGlobalObject or null, @@ -1137,7 +1174,7 @@ class JSFunction : public JSObject { // Accessing a JSGlobalProxy requires security check. class JSGlobalProxy - : public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSObject> { + : public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSSpecialObject> { public: inline bool IsDetachedFrom(JSGlobalObject global) const; @@ -1151,7 +1188,7 @@ class JSGlobalProxy }; // JavaScript global object. -class JSGlobalObject : public JSObject { +class JSGlobalObject : public JSSpecialObject { public: // [native context]: the natives corresponding to this global object. DECL_ACCESSORS(native_context, NativeContext) @@ -1179,15 +1216,16 @@ class JSGlobalObject : public JSObject { DECL_VERIFIER(JSGlobalObject) // Layout description. - DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSGLOBAL_OBJECT_FIELDS) + DEFINE_FIELD_OFFSET_CONSTANTS(JSSpecialObject::kHeaderSize, + TORQUE_GENERATED_JS_GLOBAL_OBJECT_FIELDS) - OBJECT_CONSTRUCTORS(JSGlobalObject, JSObject); + OBJECT_CONSTRUCTORS(JSGlobalObject, JSSpecialObject); }; // Representation for JS Wrapper objects, String, Number, Boolean, etc. class JSPrimitiveWrapper - : public TorqueGeneratedJSPrimitiveWrapper<JSPrimitiveWrapper, JSObject> { + : public TorqueGeneratedJSPrimitiveWrapper<JSPrimitiveWrapper, + JSCustomElementsObject> { public: // Dispatched behavior. DECL_PRINTER(JSPrimitiveWrapper) @@ -1319,7 +1357,7 @@ class JSMessageObject : public JSObject { DECL_VERIFIER(JSMessageObject) DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSMESSAGE_OBJECT_FIELDS) + TORQUE_GENERATED_JS_MESSAGE_OBJECT_FIELDS) // TODO(v8:8989): [torque] Support marker constants. 
static const int kPointerFieldsEndOffset = kStartPositionOffset; @@ -1370,25 +1408,18 @@ class JSAsyncFromSyncIterator TQ_OBJECT_CONSTRUCTORS(JSAsyncFromSyncIterator) }; -class JSStringIterator : public JSObject { +class JSStringIterator + : public TorqueGeneratedJSStringIterator<JSStringIterator, JSObject> { public: // Dispatched behavior. DECL_PRINTER(JSStringIterator) DECL_VERIFIER(JSStringIterator) - DECL_CAST(JSStringIterator) - - // [string]: the [[IteratedString]] inobject property. - DECL_ACCESSORS(string, String) - // [index]: The [[StringIteratorNextIndex]] inobject property. inline int index() const; inline void set_index(int value); - DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSSTRING_ITERATOR_FIELDS) - - OBJECT_CONSTRUCTORS(JSStringIterator, JSObject); + TQ_OBJECT_CONSTRUCTORS(JSStringIterator) }; } // namespace internal diff --git a/chromium/v8/src/objects/js-plural-rules.cc b/chromium/v8/src/objects/js-plural-rules.cc index 84fe9b6d52a..bf928416f41 100644 --- a/chromium/v8/src/objects/js-plural-rules.cc +++ b/chromium/v8/src/objects/js-plural-rules.cc @@ -241,17 +241,18 @@ Handle<JSObject> JSPluralRules::ResolvedOptions( JSNumberFormat::MinimumIntegerDigitsFromSkeleton(skeleton), "minimumIntegerDigits"); int32_t min = 0, max = 0; - JSNumberFormat::FractionDigitsFromSkeleton(skeleton, &min, &max); - - CreateDataPropertyForOptions(isolate, options, min, "minimumFractionDigits"); - - CreateDataPropertyForOptions(isolate, options, max, "maximumFractionDigits"); if (JSNumberFormat::SignificantDigitsFromSkeleton(skeleton, &min, &max)) { CreateDataPropertyForOptions(isolate, options, min, "minimumSignificantDigits"); CreateDataPropertyForOptions(isolate, options, max, "maximumSignificantDigits"); + } else { + JSNumberFormat::FractionDigitsFromSkeleton(skeleton, &min, &max); + CreateDataPropertyForOptions(isolate, options, min, + "minimumFractionDigits"); + CreateDataPropertyForOptions(isolate, options, max, + "maximumFractionDigits"); } // 6. Let pluralCategories be a List of Strings representing the diff --git a/chromium/v8/src/objects/js-plural-rules.h b/chromium/v8/src/objects/js-plural-rules.h index 840efb07ed4..0303266894c 100644 --- a/chromium/v8/src/objects/js-plural-rules.h +++ b/chromium/v8/src/objects/js-plural-rules.h @@ -68,7 +68,7 @@ class JSPluralRules : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSPLURAL_RULES_FIELDS) + TORQUE_GENERATED_JS_PLURAL_RULES_FIELDS) DECL_ACCESSORS(locale, String) DECL_INT_ACCESSORS(flags) diff --git a/chromium/v8/src/objects/js-proxy.h b/chromium/v8/src/objects/js-proxy.h index 8e29c08bc14..c6bb844fe57 100644 --- a/chromium/v8/src/objects/js-proxy.h +++ b/chromium/v8/src/objects/js-proxy.h @@ -128,7 +128,7 @@ class JSProxyRevocableResult : public JSObject { public: // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS( - JSObject::kHeaderSize, TORQUE_GENERATED_JSPROXY_REVOCABLE_RESULT_FIELDS) + JSObject::kHeaderSize, TORQUE_GENERATED_JS_PROXY_REVOCABLE_RESULT_FIELDS) // Indices of in-object properties. 
static const int kProxyIndex = 0; diff --git a/chromium/v8/src/objects/js-regexp-inl.h b/chromium/v8/src/objects/js-regexp-inl.h index b69d1cca975..885bc4804d7 100644 --- a/chromium/v8/src/objects/js-regexp-inl.h +++ b/chromium/v8/src/objects/js-regexp-inl.h @@ -7,6 +7,7 @@ #include "src/objects/js-regexp.h" +#include "src/objects/js-array-inl.h" #include "src/objects/objects-inl.h" // Needed for write barriers #include "src/objects/smi.h" #include "src/objects/string.h" @@ -18,9 +19,18 @@ namespace v8 { namespace internal { TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp) +OBJECT_CONSTRUCTORS_IMPL(JSRegExpResult, JSArray) +OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultIndices, JSArray) + +CAST_ACCESSOR(JSRegExpResult) +CAST_ACCESSOR(JSRegExpResultIndices) ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset) +ACCESSORS(JSRegExpResult, cached_indices_or_match_info, Object, + kCachedIndicesOrMatchInfoOffset) +ACCESSORS(JSRegExpResult, names, Object, kNamesOffset) + JSRegExp::Type JSRegExp::TypeTag() const { Object data = this->data(); if (data.IsUndefined()) return JSRegExp::NOT_COMPILED; diff --git a/chromium/v8/src/objects/js-regexp.cc b/chromium/v8/src/objects/js-regexp.cc new file mode 100644 index 00000000000..c7f96fe278c --- /dev/null +++ b/chromium/v8/src/objects/js-regexp.cc @@ -0,0 +1,118 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/objects/js-regexp.h" + +#include "src/objects/js-array-inl.h" +#include "src/objects/js-regexp-inl.h" + +namespace v8 { +namespace internal { +Handle<JSArray> JSRegExpResult::GetAndCacheIndices( + Isolate* isolate, Handle<JSRegExpResult> regexp_result) { + // Check for cached indices. + Handle<Object> indices_or_match_info( + regexp_result->cached_indices_or_match_info(), isolate); + if (indices_or_match_info->IsRegExpMatchInfo()) { + // Build and cache indices for next lookup. + // TODO(joshualitt): Instead of caching the indices, we could call + // ReconfigureToDataProperty on 'indices' setting its value to this + // newly created array. However, care would have to be taken to ensure + // a new map is not created each time. + Handle<RegExpMatchInfo> match_info( + RegExpMatchInfo::cast(regexp_result->cached_indices_or_match_info()), + isolate); + Handle<Object> maybe_names(regexp_result->names(), isolate); + indices_or_match_info = + JSRegExpResultIndices::BuildIndices(isolate, match_info, maybe_names); + + // Cache the result and clear the names array. + regexp_result->set_cached_indices_or_match_info(*indices_or_match_info); + regexp_result->set_names(ReadOnlyRoots(isolate).undefined_value()); + } + return Handle<JSArray>::cast(indices_or_match_info); +} + +Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices( + Isolate* isolate, Handle<RegExpMatchInfo> match_info, + Handle<Object> maybe_names) { + Handle<JSRegExpResultIndices> indices(Handle<JSRegExpResultIndices>::cast( + isolate->factory()->NewJSObjectFromMap( + isolate->regexp_result_indices_map()))); + + // Initialize indices length to avoid having a partially initialized object + // should GC be triggered by creating a NewFixedArray. + indices->set_length(Smi::kZero); + + // Build indices array from RegExpMatchInfo. 
+ int num_indices = match_info->NumberOfCaptureRegisters(); + int num_results = num_indices >> 1; + Handle<FixedArray> indices_array = + isolate->factory()->NewFixedArray(num_results); + JSArray::SetContent(indices, indices_array); + + for (int i = 0; i < num_results; i++) { + int base_offset = i * 2; + int start_offset = match_info->Capture(base_offset); + int end_offset = match_info->Capture(base_offset + 1); + + // Any unmatched captures are set to undefined, otherwise we set them to a + // subarray of the indices. + if (start_offset == -1) { + indices_array->set(i, ReadOnlyRoots(isolate).undefined_value()); + } else { + Handle<FixedArray> indices_sub_array( + isolate->factory()->NewFixedArray(2)); + indices_sub_array->set(0, Smi::FromInt(start_offset)); + indices_sub_array->set(1, Smi::FromInt(end_offset)); + Handle<JSArray> indices_sub_jsarray = + isolate->factory()->NewJSArrayWithElements(indices_sub_array, + PACKED_SMI_ELEMENTS, 2); + indices_array->set(i, *indices_sub_jsarray); + } + } + + // If there are no capture groups, set the groups property to undefined. + FieldIndex groups_index = FieldIndex::ForDescriptor( + indices->map(), InternalIndex(kGroupsDescriptorIndex)); + if (maybe_names->IsUndefined(isolate)) { + indices->RawFastPropertyAtPut(groups_index, + ReadOnlyRoots(isolate).undefined_value()); + return indices; + } + + // Create a groups property which returns a dictionary of named captures to + // their corresponding capture indices. + Handle<FixedArray> names(Handle<FixedArray>::cast(maybe_names)); + int num_names = names->length() >> 1; + Handle<NameDictionary> group_names = NameDictionary::New(isolate, num_names); + for (int i = 0; i < num_names; i++) { + int base_offset = i * 2; + int name_offset = base_offset; + int index_offset = base_offset + 1; + Handle<String> name(String::cast(names->get(name_offset)), isolate); + Handle<Smi> smi_index(Smi::cast(names->get(index_offset)), isolate); + Handle<Object> capture_indices(indices_array->get(smi_index->value()), + isolate); + if (!capture_indices->IsUndefined(isolate)) { + capture_indices = Handle<JSArray>::cast(capture_indices); + } + group_names = NameDictionary::Add( + isolate, group_names, name, capture_indices, PropertyDetails::Empty()); + } + + // Convert group_names to a JSObject and store at the groups property of the + // result indices. + Handle<FixedArrayBase> elements = isolate->factory()->empty_fixed_array(); + Handle<HeapObject> null = + Handle<HeapObject>::cast(isolate->factory()->null_value()); + Handle<JSObject> js_group_names = + isolate->factory()->NewSlowJSObjectWithPropertiesAndElements( + null, group_names, elements); + indices->RawFastPropertyAtPut(groups_index, *js_group_names); + return indices; +} + +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/objects/js-regexp.h b/chromium/v8/src/objects/js-regexp.h index b3ef06bd5cb..03efd4913c0 100644 --- a/chromium/v8/src/objects/js-regexp.h +++ b/chromium/v8/src/objects/js-regexp.h @@ -96,7 +96,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> { Handle<String> flags_string); bool MarkedForTierUp(); - void ResetTierUp(); + void ResetLastTierUpTick(); + void TierUpTick(); void MarkTierUpForNextExec(); inline Type TypeTag() const; @@ -176,9 +177,13 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> { // Maps names of named capture groups (at indices 2i) to their corresponding // (1-based) capture group indices (at indices 2i + 1). 
static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6; - static const int kIrregexpTierUpTicksIndex = kDataIndex + 7; + // Tier-up ticks are set to the value of the tier-up ticks flag. The value is + // decremented on each execution of the bytecode, so that the tier-up + // happens once the ticks reach zero. + // This value is ignored if the regexp-tier-up flag isn't turned on. + static const int kIrregexpTicksUntilTierUpIndex = kDataIndex + 7; - static const int kIrregexpDataSize = kIrregexpTierUpTicksIndex + 1; + static const int kIrregexpDataSize = kIrregexpTicksUntilTierUpIndex + 1; // In-object fields. static const int kLastIndexFieldIndex = 0; @@ -195,6 +200,10 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> { // The uninitialized value for a regexp code object. static const int kUninitializedValue = -1; + // The heuristic value for the length of the subject string for which we + // tier-up to the compiler immediately, instead of using the interpreter. + static constexpr int kTierUpForSubjectLengthValue = 1000; + TQ_OBJECT_CONSTRUCTORS(JSRegExp) }; @@ -208,18 +217,63 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags) // After creation the result must be treated as a JSArray in all regards. class JSRegExpResult : public JSArray { public: + DECL_CAST(JSRegExpResult) + + // TODO(joshualitt): We would like to add printers and verifiers to + // JSRegExpResult, and maybe JSRegExpResultIndices, but both have the same + // instance type as JSArray. + + // cached_indices_or_match_info and names, are used to construct the + // JSRegExpResultIndices returned from the indices property lazily. + DECL_ACCESSORS(cached_indices_or_match_info, Object) + DECL_ACCESSORS(names, Object) + // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kSize, - TORQUE_GENERATED_JSREG_EXP_RESULT_FIELDS) + TORQUE_GENERATED_JS_REG_EXP_RESULT_FIELDS) + + static Handle<JSArray> GetAndCacheIndices( + Isolate* isolate, Handle<JSRegExpResult> regexp_result); // Indices of in-object properties. static const int kIndexIndex = 0; static const int kInputIndex = 1; static const int kGroupsIndex = 2; - static const int kInObjectPropertyCount = 3; - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult); + // Private internal only fields. + static const int kCachedIndicesOrMatchInfoIndex = 3; + static const int kNamesIndex = 4; + static const int kInObjectPropertyCount = 5; + + OBJECT_CONSTRUCTORS(JSRegExpResult, JSArray); +}; + +// JSRegExpResultIndices is just a JSArray with a specific initial map. +// This initial map adds in-object properties for "group" +// properties, as assigned by RegExp.prototype.exec, which allows +// faster creation of RegExp exec results. +// This class just holds constants used when creating the result. +// After creation the result must be treated as a JSArray in all regards. +class JSRegExpResultIndices : public JSArray { + public: + DECL_CAST(JSRegExpResultIndices) + + // Layout description. + DEFINE_FIELD_OFFSET_CONSTANTS( + JSArray::kSize, TORQUE_GENERATED_JS_REG_EXP_RESULT_INDICES_FIELDS) + + static Handle<JSRegExpResultIndices> BuildIndices( + Isolate* isolate, Handle<RegExpMatchInfo> match_info, + Handle<Object> maybe_names); + + // Indices of in-object properties. + static const int kGroupsIndex = 0; + static const int kInObjectPropertyCount = 1; + + // Descriptor index of groups. 
+ static const int kGroupsDescriptorIndex = 1; + + OBJECT_CONSTRUCTORS(JSRegExpResultIndices, JSArray); }; } // namespace internal diff --git a/chromium/v8/src/objects/js-relative-time-format.cc b/chromium/v8/src/objects/js-relative-time-format.cc index 28f8c757ee1..edf3e26c225 100644 --- a/chromium/v8/src/objects/js-relative-time-format.cc +++ b/chromium/v8/src/objects/js-relative-time-format.cc @@ -112,7 +112,8 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New( // 14. Let dataLocale be r.[[DataLocale]]. icu::Locale icu_locale = r.icu_locale; UErrorCode status = U_ZERO_ERROR; - if (numbering_system_str != nullptr) { + if (numbering_system_str != nullptr && + Intl::IsValidNumberingSystem(numbering_system_str.get())) { icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status); CHECK(U_SUCCESS(status)); } diff --git a/chromium/v8/src/objects/js-relative-time-format.h b/chromium/v8/src/objects/js-relative-time-format.h index 6e405e345e9..c6423679885 100644 --- a/chromium/v8/src/objects/js-relative-time-format.h +++ b/chromium/v8/src/objects/js-relative-time-format.h @@ -107,7 +107,7 @@ class JSRelativeTimeFormat : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSRELATIVE_TIME_FORMAT_FIELDS) + TORQUE_GENERATED_JS_RELATIVE_TIME_FORMAT_FIELDS) private: static Style getStyle(const char* str); diff --git a/chromium/v8/src/objects/js-segment-iterator.h b/chromium/v8/src/objects/js-segment-iterator.h index cadb99e79d2..1c71af88649 100644 --- a/chromium/v8/src/objects/js-segment-iterator.h +++ b/chromium/v8/src/objects/js-segment-iterator.h @@ -91,7 +91,7 @@ class JSSegmentIterator : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSSEGMENT_ITERATOR_FIELDS) + TORQUE_GENERATED_JS_SEGMENT_ITERATOR_FIELDS) OBJECT_CONSTRUCTORS(JSSegmentIterator, JSObject); }; diff --git a/chromium/v8/src/objects/js-segmenter.h b/chromium/v8/src/objects/js-segmenter.h index 641cf106fbd..209c4682b37 100644 --- a/chromium/v8/src/objects/js-segmenter.h +++ b/chromium/v8/src/objects/js-segmenter.h @@ -78,7 +78,7 @@ class JSSegmenter : public JSObject { // Layout description. 
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSSEGMENTER_FIELDS) + TORQUE_GENERATED_JS_SEGMENTER_FIELDS) private: static Granularity GetGranularity(const char* str); diff --git a/chromium/v8/src/objects/js-weak-refs-inl.h b/chromium/v8/src/objects/js-weak-refs-inl.h index 004ffd6d791..06351536112 100644 --- a/chromium/v8/src/objects/js-weak-refs-inl.h +++ b/chromium/v8/src/objects/js-weak-refs-inl.h @@ -17,38 +17,21 @@ namespace v8 { namespace internal { -OBJECT_CONSTRUCTORS_IMPL(WeakCell, HeapObject) -OBJECT_CONSTRUCTORS_IMPL(JSWeakRef, JSObject) +TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell) +TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef) OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroup, JSObject) -OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroupCleanupIterator, JSObject) +TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationGroupCleanupIterator) ACCESSORS(JSFinalizationGroup, native_context, NativeContext, kNativeContextOffset) ACCESSORS(JSFinalizationGroup, cleanup, Object, kCleanupOffset) -ACCESSORS(JSFinalizationGroup, active_cells, Object, kActiveCellsOffset) -ACCESSORS(JSFinalizationGroup, cleared_cells, Object, kClearedCellsOffset) +ACCESSORS(JSFinalizationGroup, active_cells, HeapObject, kActiveCellsOffset) +ACCESSORS(JSFinalizationGroup, cleared_cells, HeapObject, kClearedCellsOffset) ACCESSORS(JSFinalizationGroup, key_map, Object, kKeyMapOffset) SMI_ACCESSORS(JSFinalizationGroup, flags, kFlagsOffset) ACCESSORS(JSFinalizationGroup, next, Object, kNextOffset) CAST_ACCESSOR(JSFinalizationGroup) -ACCESSORS(WeakCell, finalization_group, Object, kFinalizationGroupOffset) -ACCESSORS(WeakCell, target, HeapObject, kTargetOffset) -ACCESSORS(WeakCell, holdings, Object, kHoldingsOffset) -ACCESSORS(WeakCell, next, Object, kNextOffset) -ACCESSORS(WeakCell, prev, Object, kPrevOffset) -ACCESSORS(WeakCell, key, Object, kKeyOffset) -ACCESSORS(WeakCell, key_list_next, Object, kKeyListNextOffset) -ACCESSORS(WeakCell, key_list_prev, Object, kKeyListPrevOffset) -CAST_ACCESSOR(WeakCell) - -CAST_ACCESSOR(JSWeakRef) -ACCESSORS(JSWeakRef, target, HeapObject, kTargetOffset) - -ACCESSORS(JSFinalizationGroupCleanupIterator, finalization_group, - JSFinalizationGroup, kFinalizationGroupOffset) -CAST_ACCESSOR(JSFinalizationGroupCleanupIterator) - void JSFinalizationGroup::Register( Handle<JSFinalizationGroup> finalization_group, Handle<JSReceiver> target, Handle<Object> holdings, Handle<Object> key, Isolate* isolate) { @@ -101,7 +84,7 @@ bool JSFinalizationGroup::Unregister( Handle<ObjectHashTable> key_map = handle(ObjectHashTable::cast(finalization_group->key_map()), isolate); Object value = key_map->Lookup(unregister_token); - Object undefined = ReadOnlyRoots(isolate).undefined_value(); + HeapObject undefined = ReadOnlyRoots(isolate).undefined_value(); while (value.IsWeakCell()) { WeakCell weak_cell = WeakCell::cast(value); weak_cell.RemoveFromFinalizationGroupCells(isolate); diff --git a/chromium/v8/src/objects/js-weak-refs.h b/chromium/v8/src/objects/js-weak-refs.h index 723e0e31358..8d61b125a1f 100644 --- a/chromium/v8/src/objects/js-weak-refs.h +++ b/chromium/v8/src/objects/js-weak-refs.h @@ -28,8 +28,8 @@ class JSFinalizationGroup : public JSObject { DECL_ACCESSORS(native_context, NativeContext) DECL_ACCESSORS(cleanup, Object) - DECL_ACCESSORS(active_cells, Object) - DECL_ACCESSORS(cleared_cells, Object) + DECL_ACCESSORS(active_cells, HeapObject) + DECL_ACCESSORS(cleared_cells, HeapObject) DECL_ACCESSORS(key_map, Object) // For storing a list of JSFinalizationGroup objects in NativeContext. 
@@ -66,7 +66,7 @@ class JSFinalizationGroup : public JSObject { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSFINALIZATION_GROUP_FIELDS) + TORQUE_GENERATED_JS_FINALIZATION_GROUP_FIELDS) // Bitfields in flags. using ScheduledForCleanupField = BitField<bool, 0, 1>; @@ -75,32 +75,10 @@ class JSFinalizationGroup : public JSObject { }; // Internal object for storing weak references in JSFinalizationGroup. -class WeakCell : public HeapObject { +class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> { public: DECL_PRINTER(WeakCell) EXPORT_DECL_VERIFIER(WeakCell) - DECL_CAST(WeakCell) - - DECL_ACCESSORS(finalization_group, Object) - DECL_ACCESSORS(target, HeapObject) - DECL_ACCESSORS(holdings, Object) - - // For storing doubly linked lists of WeakCells in JSFinalizationGroup's - // "active_cells" and "cleared_cells" lists. - DECL_ACCESSORS(prev, Object) - DECL_ACCESSORS(next, Object) - - // For storing doubly linked lists of WeakCells per key in - // JSFinalizationGroup's key-based hashmap. WeakCell also needs to know its - // key, so that we can remove the key from the key_map when we remove the last - // WeakCell associated with it. - DECL_ACCESSORS(key, Object) - DECL_ACCESSORS(key_list_prev, Object) - DECL_ACCESSORS(key_list_next, Object) - - // Layout description. - DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - TORQUE_GENERATED_WEAK_CELL_FIELDS) class BodyDescriptor; @@ -115,40 +93,27 @@ class WeakCell : public HeapObject { inline void RemoveFromFinalizationGroupCells(Isolate* isolate); - OBJECT_CONSTRUCTORS(WeakCell, HeapObject); + TQ_OBJECT_CONSTRUCTORS(WeakCell) }; -class JSWeakRef : public JSObject { +class JSWeakRef : public TorqueGeneratedJSWeakRef<JSWeakRef, JSObject> { public: DECL_PRINTER(JSWeakRef) EXPORT_DECL_VERIFIER(JSWeakRef) - DECL_CAST(JSWeakRef) - - DECL_ACCESSORS(target, HeapObject) - - // Layout description. - DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, - TORQUE_GENERATED_JSWEAK_REF_FIELDS) class BodyDescriptor; - OBJECT_CONSTRUCTORS(JSWeakRef, JSObject); + TQ_OBJECT_CONSTRUCTORS(JSWeakRef) }; -class JSFinalizationGroupCleanupIterator : public JSObject { +class JSFinalizationGroupCleanupIterator + : public TorqueGeneratedJSFinalizationGroupCleanupIterator< + JSFinalizationGroupCleanupIterator, JSObject> { public: DECL_PRINTER(JSFinalizationGroupCleanupIterator) DECL_VERIFIER(JSFinalizationGroupCleanupIterator) - DECL_CAST(JSFinalizationGroupCleanupIterator) - - DECL_ACCESSORS(finalization_group, JSFinalizationGroup) - - // Layout description. 
- DEFINE_FIELD_OFFSET_CONSTANTS( - JSObject::kHeaderSize, - TORQUE_GENERATED_JSFINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS) - OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator, JSObject); + TQ_OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator) }; } // namespace internal diff --git a/chromium/v8/src/objects/keys.cc b/chromium/v8/src/objects/keys.cc index 7496399cad4..0231df18d35 100644 --- a/chromium/v8/src/objects/keys.cc +++ b/chromium/v8/src/objects/keys.cc @@ -279,9 +279,13 @@ void FastKeyAccumulator::Prepare() { is_receiver_simple_enum_ = false; has_empty_prototype_ = true; JSReceiver last_prototype; + may_have_elements_ = MayHaveElements(*receiver_); for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd(); iter.Advance()) { JSReceiver current = iter.GetCurrent<JSReceiver>(); + if (!may_have_elements_) { + may_have_elements_ = MayHaveElements(current); + } bool has_no_properties = CheckAndInitalizeEmptyEnumCache(current); if (has_no_properties) continue; last_prototype = current; @@ -338,13 +342,12 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate, Handle<DescriptorArray> descriptors = Handle<DescriptorArray>(map->instance_descriptors(), isolate); isolate->counters()->enum_cache_misses()->Increment(); - int nod = map->NumberOfOwnDescriptors(); // Create the keys array. int index = 0; bool fields_only = true; keys = isolate->factory()->NewFixedArray(enum_length); - for (int i = 0; i < nod; i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { DisallowHeapAllocation no_gc; PropertyDetails details = descriptors->GetDetails(i); if (details.IsDontEnum()) continue; @@ -361,7 +364,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate, if (fields_only) { indices = isolate->factory()->NewFixedArray(enum_length); index = 0; - for (int i = 0; i < nod; i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { DisallowHeapAllocation no_gc; PropertyDetails details = descriptors->GetDetails(i); if (details.IsDontEnum()) continue; @@ -499,12 +502,21 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow( accumulator.set_is_for_in(is_for_in_); accumulator.set_skip_indices(skip_indices_); accumulator.set_last_non_empty_prototype(last_non_empty_prototype_); + accumulator.set_may_have_elements(may_have_elements_); MAYBE_RETURN(accumulator.CollectKeys(receiver_, receiver_), MaybeHandle<FixedArray>()); return accumulator.GetKeys(keys_conversion); } +bool FastKeyAccumulator::MayHaveElements(JSReceiver receiver) { + if (!receiver.IsJSObject()) return true; + JSObject object = JSObject::cast(receiver); + if (object.HasEnumerableElements()) return true; + if (object.HasIndexedInterceptor()) return true; + return false; +} + namespace { enum IndexedOrNamed { kIndexed, kNamed }; @@ -518,13 +530,14 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties( uint32_t length = accessor->GetCapacity(*result, result->elements()); for (uint32_t i = 0; i < length; i++) { - if (!accessor->HasEntry(*result, i)) continue; + InternalIndex entry(i); + if (!accessor->HasEntry(*result, entry)) continue; // args are invalid after args.Call(), create a new one in every iteration. 
PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(), *receiver, *object, Just(kDontThrow)); - Handle<Object> element = accessor->Get(result, i); + Handle<Object> element = accessor->Get(result, entry); Handle<Object> attributes; if (type == kIndexed) { uint32_t number; @@ -624,7 +637,7 @@ base::Optional<int> CollectOwnPropertyNamesInternal( int first_skipped = -1; PropertyFilter filter = keys->filter(); KeyCollectionMode mode = keys->mode(); - for (int i = start_index; i < limit; i++) { + for (InternalIndex i : InternalIndex::Range(start_index, limit)) { bool is_shadowing_key = false; PropertyDetails details = descs->GetDetails(i); @@ -645,7 +658,7 @@ base::Optional<int> CollectOwnPropertyNamesInternal( Name key = descs->GetKey(i); if (skip_symbols == key.IsSymbol()) { - if (first_skipped == -1) first_skipped = i; + if (first_skipped == -1) first_skipped = i.as_int(); continue; } if (key.FilterKey(keys->filter())) continue; @@ -689,13 +702,15 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver, Map map = object->map(); int nof_descriptors = map.NumberOfOwnDescriptors(); if (enum_keys->length() != nof_descriptors) { - Handle<DescriptorArray> descs = - Handle<DescriptorArray>(map.instance_descriptors(), isolate_); - for (int i = 0; i < nof_descriptors; i++) { - PropertyDetails details = descs->GetDetails(i); - if (!details.IsDontEnum()) continue; - Object key = descs->GetKey(i); - this->AddShadowingKey(key); + if (map.prototype(isolate_) != ReadOnlyRoots(isolate_).null_value()) { + Handle<DescriptorArray> descs = + Handle<DescriptorArray>(map.instance_descriptors(), isolate_); + for (InternalIndex i : InternalIndex::Range(nof_descriptors)) { + PropertyDetails details = descs->GetDetails(i); + if (!details.IsDontEnum()) continue; + Object key = descs->GetKey(i); + this->AddShadowingKey(key); + } } } } else if (object->IsJSGlobalObject()) { @@ -823,7 +838,9 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver, return Just(true); } - MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>()); + if (may_have_elements_) { + MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>()); + } MAYBE_RETURN(CollectOwnPropertyNames(receiver, object), Nothing<bool>()); return Just(true); } diff --git a/chromium/v8/src/objects/keys.h b/chromium/v8/src/objects/keys.h index 5d8632e2a77..4c2307a20b4 100644 --- a/chromium/v8/src/objects/keys.h +++ b/chromium/v8/src/objects/keys.h @@ -93,6 +93,7 @@ class KeyAccumulator final { void set_last_non_empty_prototype(Handle<JSReceiver> object) { last_non_empty_prototype_ = object; } + void set_may_have_elements(bool value) { may_have_elements_ = value; } // Shadowing keys are used to filter keys. This happens when non-enumerable // keys appear again on the prototype chain. void AddShadowingKey(Object key); @@ -125,6 +126,7 @@ class KeyAccumulator final { // For all the keys on the first receiver adding a shadowing key we can skip // the shadow check. 
bool skip_shadow_check_ = true; + bool may_have_elements_ = true; DISALLOW_COPY_AND_ASSIGN(KeyAccumulator); }; @@ -149,6 +151,7 @@ class FastKeyAccumulator { bool is_receiver_simple_enum() { return is_receiver_simple_enum_; } bool has_empty_prototype() { return has_empty_prototype_; } + bool may_have_elements() { return may_have_elements_; } MaybeHandle<FixedArray> GetKeys( GetKeysConversion convert = GetKeysConversion::kKeepNumbers); @@ -160,6 +163,8 @@ class FastKeyAccumulator { MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache(); + bool MayHaveElements(JSReceiver receiver); + Isolate* isolate_; Handle<JSReceiver> receiver_; Handle<JSReceiver> last_non_empty_prototype_; @@ -169,6 +174,7 @@ class FastKeyAccumulator { bool skip_indices_ = false; bool is_receiver_simple_enum_ = false; bool has_empty_prototype_ = false; + bool may_have_elements_ = true; DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator); }; diff --git a/chromium/v8/src/objects/layout-descriptor-inl.h b/chromium/v8/src/objects/layout-descriptor-inl.h index ad0a058a92c..30fe1321293 100644 --- a/chromium/v8/src/objects/layout-descriptor-inl.h +++ b/chromium/v8/src/objects/layout-descriptor-inl.h @@ -169,7 +169,7 @@ int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors, } else { layout_descriptor_length = 0; - for (int i = 0; i < num_descriptors; i++) { + for (InternalIndex i : InternalIndex::Range(num_descriptors)) { PropertyDetails details = descriptors.GetDetails(i); if (!InobjectUnboxedField(inobject_properties, details)) continue; int field_index = details.field_index(); @@ -188,7 +188,7 @@ LayoutDescriptor LayoutDescriptor::Initialize( DisallowHeapAllocation no_allocation; int inobject_properties = map.GetInObjectProperties(); - for (int i = 0; i < num_descriptors; i++) { + for (InternalIndex i : InternalIndex::Range(num_descriptors)) { PropertyDetails details = descriptors.GetDetails(i); if (!InobjectUnboxedField(inobject_properties, details)) { DCHECK(details.location() != kField || diff --git a/chromium/v8/src/objects/layout-descriptor.cc b/chromium/v8/src/objects/layout-descriptor.cc index 76421aaf4f5..2b588a58bf1 100644 --- a/chromium/v8/src/objects/layout-descriptor.cc +++ b/chromium/v8/src/objects/layout-descriptor.cc @@ -258,9 +258,8 @@ LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map, bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) { if (FLAG_unbox_double_fields) { DescriptorArray descriptors = map.instance_descriptors(); - int nof_descriptors = map.NumberOfOwnDescriptors(); int last_field_index = 0; - for (int i = 0; i < nof_descriptors; i++) { + for (InternalIndex i : map.IterateOwnDescriptors()) { PropertyDetails details = descriptors.GetDetails(i); if (details.location() != kField) continue; FieldIndex field_index = FieldIndex::ForDescriptor(map, i); diff --git a/chromium/v8/src/objects/literal-objects.cc b/chromium/v8/src/objects/literal-objects.cc index 95beb6cbdb6..98c41cbfb5f 100644 --- a/chromium/v8/src/objects/literal-objects.cc +++ b/chromium/v8/src/objects/literal-objects.cc @@ -31,11 +31,11 @@ void AddToDescriptorArrayTemplate( Isolate* isolate, Handle<DescriptorArray> descriptor_array_template, Handle<Name> name, ClassBoilerplate::ValueKind value_kind, Handle<Object> value) { - int entry = descriptor_array_template->Search( + InternalIndex entry = descriptor_array_template->Search( *name, descriptor_array_template->number_of_descriptors()); // TODO(ishell): deduplicate properties at AST level, this will allow us to // avoid creation of 
closures that will be overwritten anyway. - if (entry == DescriptorArray::kNotFound) { + if (entry.is_not_found()) { // Entry not found, add new one. Descriptor d; if (value_kind == ClassBoilerplate::kData) { @@ -412,8 +412,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate( ObjectDescriptor static_desc(kMinimumClassPropertiesCount); ObjectDescriptor instance_desc(kMinimumPrototypePropertiesCount); - for (int i = 0; i < expr->properties()->length(); i++) { - ClassLiteral::Property* property = expr->properties()->at(i); + for (int i = 0; i < expr->public_members()->length(); i++) { + ClassLiteral::Property* property = expr->public_members()->at(i); ObjectDescriptor& desc = property->is_static() ? static_desc : instance_desc; if (property->is_computed_name()) { @@ -477,14 +477,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate( // int dynamic_argument_index = ClassBoilerplate::kFirstDynamicArgumentIndex; - for (int i = 0; i < expr->properties()->length(); i++) { - ClassLiteral::Property* property = expr->properties()->at(i); - - // Private members are not processed using the class boilerplate. - if (property->is_private()) { - continue; - } - + for (int i = 0; i < expr->public_members()->length(); i++) { + ClassLiteral::Property* property = expr->public_members()->at(i); ClassBoilerplate::ValueKind value_kind; switch (property->kind()) { case ClassLiteral::Property::METHOD: diff --git a/chromium/v8/src/objects/lookup-inl.h b/chromium/v8/src/objects/lookup-inl.h index 648398be5ef..49a42e71313 100644 --- a/chromium/v8/src/objects/lookup-inl.h +++ b/chromium/v8/src/objects/lookup-inl.h @@ -10,6 +10,7 @@ #include "src/handles/handles-inl.h" #include "src/heap/factory-inl.h" #include "src/objects/api-callbacks.h" +#include "src/objects/internal-index.h" #include "src/objects/map-inl.h" #include "src/objects/name-inl.h" #include "src/objects/objects-inl.h" @@ -136,11 +137,11 @@ void LookupIterator::UpdateProtector() { } } -int LookupIterator::descriptor_number() const { +InternalIndex LookupIterator::descriptor_number() const { DCHECK(!IsElement()); DCHECK(has_property_); DCHECK(holder_->HasFastProperties(isolate_)); - return number_; + return InternalIndex(number_); } int LookupIterator::dictionary_entry() const { diff --git a/chromium/v8/src/objects/lookup.cc b/chromium/v8/src/objects/lookup.cc index 4646b71a9ec..7f626cc2233 100644 --- a/chromium/v8/src/objects/lookup.cc +++ b/chromium/v8/src/objects/lookup.cc @@ -249,10 +249,10 @@ void LookupIterator::InternalUpdateProtector() { } if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_) && - !isolate_->IsPromiseSpeciesLookupChainIntact() && + !Protectors::IsPromiseSpeciesLookupChainIntact(isolate_) && !Protectors::IsRegExpSpeciesLookupChainProtectorIntact( native_context) && - !isolate_->IsTypedArraySpeciesLookupChainIntact()) { + !Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) { return; } // Setting the constructor property could change an instance's @@species @@ -263,8 +263,8 @@ void LookupIterator::InternalUpdateProtector() { Protectors::InvalidateArraySpeciesLookupChain(isolate_); return; } else if (receiver->IsJSPromise(isolate_)) { - if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return; - isolate_->InvalidatePromiseSpeciesProtector(); + if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate_)) return; + Protectors::InvalidatePromiseSpeciesLookupChain(isolate_); return; } else if (receiver->IsJSRegExp(isolate_)) { if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact( @@ 
-275,8 +275,8 @@ void LookupIterator::InternalUpdateProtector() { native_context); return; } else if (receiver->IsJSTypedArray(isolate_)) { - if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return; - isolate_->InvalidateTypedArraySpeciesProtector(); + if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) return; + Protectors::InvalidateTypedArraySpeciesLookupChain(isolate_); return; } if (receiver->map(isolate_).is_prototype_map()) { @@ -294,8 +294,8 @@ void LookupIterator::InternalUpdateProtector() { Protectors::InvalidateArraySpeciesLookupChain(isolate_); } else if (isolate_->IsInAnyContext(*receiver, Context::PROMISE_PROTOTYPE_INDEX)) { - if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return; - isolate_->InvalidatePromiseSpeciesProtector(); + if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate_)) return; + Protectors::InvalidatePromiseSpeciesLookupChain(isolate_); } else if (isolate_->IsInAnyContext(*receiver, Context::REGEXP_PROTOTYPE_INDEX)) { if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact( @@ -307,8 +307,8 @@ void LookupIterator::InternalUpdateProtector() { } else if (isolate_->IsInAnyContext( receiver->map(isolate_).prototype(isolate_), Context::TYPED_ARRAY_PROTOTYPE_INDEX)) { - if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return; - isolate_->InvalidateTypedArraySpeciesProtector(); + if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) return; + Protectors::InvalidateTypedArraySpeciesLookupChain(isolate_); } } } else if (*name_ == roots.next_string()) { @@ -317,26 +317,26 @@ void LookupIterator::InternalUpdateProtector() { *receiver, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) { // Setting the next property of %ArrayIteratorPrototype% also needs to // invalidate the array iterator protector. - if (!isolate_->IsArrayIteratorLookupChainIntact()) return; - isolate_->InvalidateArrayIteratorProtector(); + if (!Protectors::IsArrayIteratorLookupChainIntact(isolate_)) return; + Protectors::InvalidateArrayIteratorLookupChain(isolate_); } else if (receiver->IsJSMapIterator() || isolate_->IsInAnyContext( *receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) { - if (!isolate_->IsMapIteratorLookupChainIntact()) return; - isolate_->InvalidateMapIteratorProtector(); + if (!Protectors::IsMapIteratorLookupChainIntact(isolate_)) return; + Protectors::InvalidateMapIteratorLookupChain(isolate_); } else if (receiver->IsJSSetIterator() || isolate_->IsInAnyContext( *receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) { - if (!isolate_->IsSetIteratorLookupChainIntact()) return; - isolate_->InvalidateSetIteratorProtector(); + if (!Protectors::IsSetIteratorLookupChainIntact(isolate_)) return; + Protectors::InvalidateSetIteratorLookupChain(isolate_); } else if (receiver->IsJSStringIterator() || isolate_->IsInAnyContext( *receiver, Context::INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX)) { // Setting the next property of %StringIteratorPrototype% invalidates the // string iterator protector. - if (!isolate_->IsStringIteratorLookupChainIntact()) return; - isolate_->InvalidateStringIteratorProtector(); + if (!Protectors::IsStringIteratorLookupChainIntact(isolate_)) return; + Protectors::InvalidateStringIteratorLookupChain(isolate_); } } else if (*name_ == roots.species_symbol()) { // Fetching the context in here since the operation is rather expensive. 
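[Editorial aside, not part of the commit] The hunks above and below repeatedly swap isolate-level methods (IsXLookupChainIntact / InvalidateXProtector) for static Protectors:: helpers, always in the same check-then-invalidate shape. The toy sketch below only illustrates that shape; the bool-backed storage and the standalone Isolate struct are assumptions made for a self-contained example, whereas in V8 the helpers are backed by isolate-level protector state rather than plain booleans.

// Sketch of the check-then-invalidate protector pattern used in
// LookupIterator::InternalUpdateProtector() after this refactoring.
#include <cstdio>

struct Isolate {
  // Stand-in for the isolate-owned protector state (illustrative only).
  bool promise_species_protector_valid = true;
};

struct Protectors {
  static bool IsPromiseSpeciesLookupChainIntact(Isolate* isolate) {
    return isolate->promise_species_protector_valid;
  }
  static void InvalidatePromiseSpeciesLookupChain(Isolate* isolate) {
    // Invalidation is one-way: once the lookup chain has been mutated,
    // optimized code can no longer rely on it.
    isolate->promise_species_protector_valid = false;
  }
};

void OnSpeciesPropertyWrite(Isolate* isolate) {
  // Same early-return shape as the hunks above: bail out if the protector
  // has already fired, otherwise invalidate it exactly once.
  if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate)) return;
  Protectors::InvalidatePromiseSpeciesLookupChain(isolate);
}

int main() {
  Isolate isolate;
  OnSpeciesPropertyWrite(&isolate);
  std::printf("intact after write: %d\n",
              Protectors::IsPromiseSpeciesLookupChainIntact(&isolate));
  return 0;
}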
@@ -345,10 +345,10 @@ void LookupIterator::InternalUpdateProtector() { } if (!Protectors::IsArraySpeciesLookupChainIntact(isolate_) && - !isolate_->IsPromiseSpeciesLookupChainIntact() && + !Protectors::IsPromiseSpeciesLookupChainIntact(isolate_) && !Protectors::IsRegExpSpeciesLookupChainProtectorIntact( native_context) && - !isolate_->IsTypedArraySpeciesLookupChainIntact()) { + !Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) { return; } // Setting the Symbol.species property of any Array, Promise or TypedArray @@ -360,8 +360,8 @@ void LookupIterator::InternalUpdateProtector() { Protectors::InvalidateArraySpeciesLookupChain(isolate_); } else if (isolate_->IsInAnyContext(*receiver, Context::PROMISE_FUNCTION_INDEX)) { - if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return; - isolate_->InvalidatePromiseSpeciesProtector(); + if (!Protectors::IsPromiseSpeciesLookupChainIntact(isolate_)) return; + Protectors::InvalidatePromiseSpeciesLookupChain(isolate_); } else if (isolate_->IsInAnyContext(*receiver, Context::REGEXP_FUNCTION_INDEX)) { if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact( @@ -371,37 +371,37 @@ void LookupIterator::InternalUpdateProtector() { Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate_, native_context); } else if (IsTypedArrayFunctionInAnyContext(isolate_, *receiver)) { - if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return; - isolate_->InvalidateTypedArraySpeciesProtector(); + if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate_)) return; + Protectors::InvalidateTypedArraySpeciesLookupChain(isolate_); } } else if (*name_ == roots.is_concat_spreadable_symbol()) { - if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return; - isolate_->InvalidateIsConcatSpreadableProtector(); + if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate_)) return; + Protectors::InvalidateIsConcatSpreadableLookupChain(isolate_); } else if (*name_ == roots.iterator_symbol()) { if (receiver->IsJSArray(isolate_)) { - if (!isolate_->IsArrayIteratorLookupChainIntact()) return; - isolate_->InvalidateArrayIteratorProtector(); + if (!Protectors::IsArrayIteratorLookupChainIntact(isolate_)) return; + Protectors::InvalidateArrayIteratorLookupChain(isolate_); } else if (receiver->IsJSSet(isolate_) || receiver->IsJSSetIterator() || isolate_->IsInAnyContext( *receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX) || isolate_->IsInAnyContext(*receiver, Context::INITIAL_SET_PROTOTYPE_INDEX)) { - if (isolate_->IsSetIteratorLookupChainIntact()) { - isolate_->InvalidateSetIteratorProtector(); + if (Protectors::IsSetIteratorLookupChainIntact(isolate_)) { + Protectors::InvalidateSetIteratorLookupChain(isolate_); } } else if (receiver->IsJSMapIterator() || isolate_->IsInAnyContext( *receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) { - if (isolate_->IsMapIteratorLookupChainIntact()) { - isolate_->InvalidateMapIteratorProtector(); + if (Protectors::IsMapIteratorLookupChainIntact(isolate_)) { + Protectors::InvalidateMapIteratorLookupChain(isolate_); } } else if (isolate_->IsInAnyContext( *receiver, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) { - if (isolate_->IsMapIteratorLookupChainIntact()) { - isolate_->InvalidateMapIteratorProtector(); + if (Protectors::IsMapIteratorLookupChainIntact(isolate_)) { + Protectors::InvalidateMapIteratorLookupChain(isolate_); } - if (isolate_->IsSetIteratorLookupChainIntact()) { - isolate_->InvalidateSetIteratorProtector(); + if (Protectors::IsSetIteratorLookupChainIntact(isolate_)) { + 
Protectors::InvalidateSetIteratorLookupChain(isolate_); } } else if (isolate_->IsInAnyContext( *receiver, Context::INITIAL_STRING_PROTOTYPE_INDEX)) { @@ -409,18 +409,18 @@ void LookupIterator::InternalUpdateProtector() { // the string iterator protector. Symbol.iterator can also be set on a // String wrapper, but not on a primitive string. We only support // protector for primitive strings. - if (!isolate_->IsStringIteratorLookupChainIntact()) return; - isolate_->InvalidateStringIteratorProtector(); + if (!Protectors::IsStringIteratorLookupChainIntact(isolate_)) return; + Protectors::InvalidateStringIteratorLookupChain(isolate_); } } else if (*name_ == roots.resolve_string()) { - if (!isolate_->IsPromiseResolveLookupChainIntact()) return; + if (!Protectors::IsPromiseResolveLookupChainIntact(isolate_)) return; // Setting the "resolve" property on any %Promise% intrinsic object // invalidates the Promise.resolve protector. if (isolate_->IsInAnyContext(*receiver, Context::PROMISE_FUNCTION_INDEX)) { - isolate_->InvalidatePromiseResolveProtector(); + Protectors::InvalidatePromiseResolveLookupChain(isolate_); } } else if (*name_ == roots.then_string()) { - if (!isolate_->IsPromiseThenLookupChainIntact()) return; + if (!Protectors::IsPromiseThenLookupChainIntact(isolate_)) return; // Setting the "then" property on any JSPromise instance or on the // initial %PromisePrototype% invalidates the Promise#then protector. // Also setting the "then" property on the initial %ObjectPrototype% @@ -432,7 +432,7 @@ void LookupIterator::InternalUpdateProtector() { isolate_->IsInAnyContext(*receiver, Context::INITIAL_OBJECT_PROTOTYPE_INDEX) || isolate_->IsInAnyContext(*receiver, Context::PROMISE_PROTOTYPE_INDEX)) { - isolate_->InvalidatePromiseThenProtector(); + Protectors::InvalidatePromiseThenLookupChain(isolate_); } } } @@ -534,7 +534,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value, DCHECK(attributes != NONE || !holder_obj->HasFastElements(isolate_)); Handle<FixedArrayBase> elements(holder_obj->elements(isolate_), isolate()); holder_obj->GetElementsAccessor(isolate_)->Reconfigure( - holder_obj, elements, number_, value, attributes); + holder_obj, elements, InternalIndex(number_), value, attributes); ReloadPropertyInformation<true>(); } else if (holder_obj->HasFastProperties(isolate_)) { Handle<Map> old_map(holder_obj->map(isolate_), isolate_); @@ -699,8 +699,7 @@ void LookupIterator::ApplyTransitionToDataProperty( } if (simple_transition) { - int number = transition->LastAdded(); - number_ = static_cast<uint32_t>(number); + number_ = transition->LastAdded().as_uint32(); property_details_ = transition->GetLastDescriptorDetails(isolate_); state_ = DATA; } else if (receiver->map(isolate_).is_dictionary_map()) { @@ -731,7 +730,7 @@ void LookupIterator::Delete() { if (IsElement()) { Handle<JSObject> object = Handle<JSObject>::cast(holder); ElementsAccessor* accessor = object->GetElementsAccessor(isolate_); - accessor->Delete(object, number_); + accessor->Delete(object, InternalIndex(number_)); } else { DCHECK(!name()->IsPrivateName(isolate_)); bool is_prototype_map = holder->map(isolate_).is_prototype_map(); @@ -777,8 +776,11 @@ void LookupIterator::TransitionToAccessorProperty( } else if (state_ == INTERCEPTOR) { LookupInRegularHolder<false>(*old_map, *holder_); } - int descriptor = - IsFound() ? static_cast<int>(number_) : DescriptorArray::kNotFound; + // TODO(jkummerow): {IsFound()} should be enough once {number_} has type + // {InternalIndex}. 
+ InternalIndex descriptor = (IsFound() && number_ != kMaxUInt32) + ? InternalIndex(number_) + : InternalIndex::NotFound(); Handle<Map> new_map = Map::TransitionToAccessorProperty( isolate_, old_map, name_, descriptor, getter, setter, attributes); @@ -787,8 +789,7 @@ void LookupIterator::TransitionToAccessorProperty( JSObject::MigrateToMap(isolate_, receiver, new_map); if (simple_transition) { - int number = new_map->LastAdded(); - number_ = static_cast<uint32_t>(number); + number_ = new_map->LastAdded().as_uint32(); property_details_ = new_map->GetLastDescriptorDetails(isolate_); state_ = ACCESSOR; return; @@ -894,23 +895,24 @@ Handle<Object> LookupIterator::FetchValue() const { if (IsElement()) { Handle<JSObject> holder = GetHolder<JSObject>(); ElementsAccessor* accessor = holder->GetElementsAccessor(isolate_); - return accessor->Get(holder, number_); + return accessor->Get(holder, InternalIndex(number_)); } else if (holder_->IsJSGlobalObject(isolate_)) { Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>(); result = holder->global_dictionary(isolate_).ValueAt(isolate_, number_); } else if (!holder_->HasFastProperties(isolate_)) { - result = holder_->property_dictionary(isolate_).ValueAt(isolate_, number_); + result = holder_->property_dictionary(isolate_).ValueAt(isolate_, + dictionary_entry()); } else if (property_details_.location() == kField) { DCHECK_EQ(kData, property_details_.kind()); Handle<JSObject> holder = GetHolder<JSObject>(); FieldIndex field_index = - FieldIndex::ForDescriptor(holder->map(isolate_), number_); + FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number()); return JSObject::FastPropertyAt(holder, property_details_.representation(), field_index); } else { result = holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue( - isolate_, number_); + isolate_, descriptor_number()); } return handle(result, isolate_); } @@ -922,7 +924,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const { DCHECK_EQ(PropertyConstness::kConst, property_details_.constness()); Handle<JSObject> holder = GetHolder<JSObject>(); FieldIndex field_index = - FieldIndex::ForDescriptor(holder->map(isolate_), number_); + FieldIndex::ForDescriptor(holder->map(isolate_), descriptor_number()); if (property_details_.representation().IsDouble()) { if (!value.IsNumber(isolate_)) return false; uint64_t bits; @@ -958,7 +960,8 @@ int LookupIterator::GetFieldDescriptorIndex() const { DCHECK(holder_->HasFastProperties()); DCHECK_EQ(kField, property_details_.location()); DCHECK_EQ(kData, property_details_.kind()); - return descriptor_number(); + // TODO(jkummerow): Propagate InternalIndex further. 
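[Editorial aside, not part of the commit] Much of this change migrates raw int descriptor indices and the kNotFound / kMaxUInt32 sentinels to an InternalIndex type with is_found(), as_int(), as_uint32(), NotFound(), and a Range used in range-for loops. The minimal sketch below shows what such a wrapper could look like; only the method names mirror the diff, while the sentinel choice and the Range/Iterator layout are assumptions for illustration.

// Minimal InternalIndex-style wrapper (illustrative sketch).
#include <cassert>
#include <cstdint>

class InternalIndex {
 public:
  explicit InternalIndex(uint32_t raw) : raw_(raw) {}
  static InternalIndex NotFound() { return InternalIndex(kNotFound); }

  bool is_found() const { return raw_ != kNotFound; }
  bool is_not_found() const { return raw_ == kNotFound; }

  int as_int() const { assert(is_found()); return static_cast<int>(raw_); }
  uint32_t as_uint32() const { assert(is_found()); return raw_; }

  bool operator==(const InternalIndex& other) const { return raw_ == other.raw_; }
  bool operator!=(const InternalIndex& other) const { return raw_ != other.raw_; }

  // Range enables `for (InternalIndex i : InternalIndex::Range(n))`,
  // replacing the `for (int i = 0; i < n; i++)` loops seen in the hunks.
  class Range {
   public:
    explicit Range(int size) : Range(0, size) {}
    Range(int begin, int end) : begin_(begin), end_(end) {}

    class Iterator {
     public:
      explicit Iterator(int value) : value_(value) {}
      InternalIndex operator*() const {
        return InternalIndex(static_cast<uint32_t>(value_));
      }
      Iterator& operator++() { ++value_; return *this; }
      bool operator!=(const Iterator& other) const { return value_ != other.value_; }
     private:
      int value_;
    };

    Iterator begin() const { return Iterator(begin_); }
    Iterator end() const { return Iterator(end_); }

   private:
    int begin_;
    int end_;
  };

 private:
  // Assumed sentinel; chosen to echo the kMaxUInt32 comparisons in the diff.
  static constexpr uint32_t kNotFound = UINT32_MAX;
  uint32_t raw_;
};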
+ return descriptor_number().as_int(); } int LookupIterator::GetAccessorIndex() const { @@ -966,7 +969,7 @@ int LookupIterator::GetAccessorIndex() const { DCHECK(holder_->HasFastProperties(isolate_)); DCHECK_EQ(kDescriptor, property_details_.location()); DCHECK_EQ(kAccessor, property_details_.kind()); - return descriptor_number(); + return descriptor_number().as_int(); } Handle<Map> LookupIterator::GetFieldOwnerMap() const { @@ -1028,7 +1031,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value, if (IsElement()) { Handle<JSObject> object = Handle<JSObject>::cast(holder); ElementsAccessor* accessor = object->GetElementsAccessor(isolate_); - accessor->Set(object, number_, *value); + accessor->Set(object, InternalIndex(number_), *value); } else if (holder->HasFastProperties(isolate_)) { if (property_details_.location() == kField) { // Check that in case of VariableMode::kConst field the existing value is @@ -1164,13 +1167,15 @@ LookupIterator::State LookupIterator::LookupInRegularHolder( JSObject js_object = JSObject::cast(holder); ElementsAccessor* accessor = js_object.GetElementsAccessor(isolate_); FixedArrayBase backing_store = js_object.elements(isolate_); - number_ = + // TODO(jkummerow): {number_} should have type InternalIndex. + InternalIndex entry = accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_); + number_ = entry.is_found() ? entry.as_uint32() : kMaxUInt32; if (number_ == kMaxUInt32) { return holder.IsJSTypedArray(isolate_) ? INTEGER_INDEXED_EXOTIC : NOT_FOUND; } - property_details_ = accessor->GetDetails(js_object, number_); + property_details_ = accessor->GetDetails(js_object, InternalIndex(number_)); if (map.has_frozen_elements()) { property_details_ = property_details_.CopyAddAttributes(FROZEN); } else if (map.has_sealed_elements()) { @@ -1178,10 +1183,10 @@ LookupIterator::State LookupIterator::LookupInRegularHolder( } } else if (!map.is_dictionary_map()) { DescriptorArray descriptors = map.instance_descriptors(isolate_); - int number = descriptors.SearchWithCache(isolate_, *name_, map); - if (number == DescriptorArray::kNotFound) return NotFound(holder); - number_ = static_cast<uint32_t>(number); - property_details_ = descriptors.GetDetails(number_); + InternalIndex number = descriptors.SearchWithCache(isolate_, *name_, map); + if (number.is_not_found()) return NotFound(holder); + number_ = number.as_uint32(); + property_details_ = descriptors.GetDetails(InternalIndex(number_)); } else { DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_)); NameDictionary dict = holder.property_dictionary(isolate_); diff --git a/chromium/v8/src/objects/lookup.h b/chromium/v8/src/objects/lookup.h index 565ea4bb75b..2a1f0e2f1bb 100644 --- a/chromium/v8/src/objects/lookup.h +++ b/chromium/v8/src/objects/lookup.h @@ -241,7 +241,7 @@ class V8_EXPORT_PRIVATE LookupIterator final { bool check_interceptor() const { return (configuration_ & kInterceptor) != 0; } - inline int descriptor_number() const; + inline InternalIndex descriptor_number() const; inline int dictionary_entry() const; static inline Configuration ComputeConfiguration(Isolate* isolate, diff --git a/chromium/v8/src/objects/map-inl.h b/chromium/v8/src/objects/map-inl.h index 48bb86e2dab..557c0044018 100644 --- a/chromium/v8/src/objects/map-inl.h +++ b/chromium/v8/src/objects/map-inl.h @@ -112,7 +112,7 @@ bool Map::IsMostGeneralFieldType(Representation representation, bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) { return instance_type == JS_ARRAY_TYPE || 
instance_type == JS_PRIMITIVE_WRAPPER_TYPE || - instance_type == JS_ARGUMENTS_TYPE; + instance_type == JS_ARGUMENTS_OBJECT_TYPE; } bool Map::CanHaveFastTransitionableElementsKind() const { @@ -177,10 +177,10 @@ PropertyDetails Map::GetLastDescriptorDetails(Isolate* isolate) const { return instance_descriptors(isolate).GetDetails(LastAdded()); } -int Map::LastAdded() const { +InternalIndex Map::LastAdded() const { int number_of_own_descriptors = NumberOfOwnDescriptors(); DCHECK_GT(number_of_own_descriptors, 0); - return number_of_own_descriptors - 1; + return InternalIndex(number_of_own_descriptors - 1); } int Map::NumberOfOwnDescriptors() const { @@ -194,6 +194,10 @@ void Map::SetNumberOfOwnDescriptors(int number) { set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number)); } +InternalIndex::Range Map::IterateOwnDescriptors() const { + return InternalIndex::Range(NumberOfOwnDescriptors()); +} + int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); } void Map::SetEnumLength(int length) { @@ -207,7 +211,8 @@ void Map::SetEnumLength(int length) { FixedArrayBase Map::GetInitialElements() const { FixedArrayBase result; - if (has_fast_elements() || has_fast_string_wrapper_elements()) { + if (has_fast_elements() || has_fast_string_wrapper_elements() || + has_any_nonextensible_elements()) { result = GetReadOnlyRoots().empty_fixed_array(); } else if (has_fast_sloppy_arguments_elements()) { result = GetReadOnlyRoots().empty_sloppy_arguments_elements(); @@ -540,12 +545,12 @@ void Map::mark_unstable() { bool Map::is_stable() const { return !IsUnstableBit::decode(bit_field3()); } bool Map::CanBeDeprecated() const { - int descriptor = LastAdded(); - for (int i = 0; i <= descriptor; i++) { + for (InternalIndex i : IterateOwnDescriptors()) { PropertyDetails details = instance_descriptors().GetDetails(i); if (details.representation().IsNone()) return true; if (details.representation().IsSmi()) return true; - if (details.representation().IsDouble()) return true; + if (details.representation().IsDouble() && FLAG_unbox_double_fields) + return true; if (details.representation().IsHeapObject()) return true; if (details.kind() == kData && details.location() == kDescriptor) { return true; @@ -584,7 +589,7 @@ bool Map::IsNullOrUndefinedMap() const { } bool Map::IsPrimitiveMap() const { - return instance_type() <= LAST_PRIMITIVE_TYPE; + return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE; } LayoutDescriptor Map::layout_descriptor_gc_safe() const { @@ -675,8 +680,10 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) { // barrier. descriptors.Append(desc); SetNumberOfOwnDescriptors(number_of_own_descriptors + 1); +#ifndef V8_DISABLE_WRITE_BARRIERS MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors, number_of_own_descriptors + 1); +#endif } // Properly mark the map if the {desc} is an "interesting symbol". 
if (desc->GetKey()->IsInterestingSymbol()) { diff --git a/chromium/v8/src/objects/map-updater.cc b/chromium/v8/src/objects/map-updater.cc index 49b9ccea91f..8c9b94014f8 100644 --- a/chromium/v8/src/objects/map-updater.cc +++ b/chromium/v8/src/objects/map-updater.cc @@ -38,12 +38,12 @@ MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map) !old_map->FindRootMap(isolate).GetConstructor().IsFunctionTemplateInfo()); } -Name MapUpdater::GetKey(int descriptor) const { +Name MapUpdater::GetKey(InternalIndex descriptor) const { return old_descriptors_->GetKey(descriptor); } -PropertyDetails MapUpdater::GetDetails(int descriptor) const { - DCHECK_LE(0, descriptor); +PropertyDetails MapUpdater::GetDetails(InternalIndex descriptor) const { + DCHECK(descriptor.is_found()); if (descriptor == modified_descriptor_) { PropertyAttributes attributes = new_attributes_; // If the original map was sealed or frozen, let us used the old @@ -59,8 +59,8 @@ PropertyDetails MapUpdater::GetDetails(int descriptor) const { return old_descriptors_->GetDetails(descriptor); } -Object MapUpdater::GetValue(int descriptor) const { - DCHECK_LE(0, descriptor); +Object MapUpdater::GetValue(InternalIndex descriptor) const { + DCHECK(descriptor.is_found()); if (descriptor == modified_descriptor_) { DCHECK_EQ(kDescriptor, new_location_); return *new_value_; @@ -69,8 +69,8 @@ Object MapUpdater::GetValue(int descriptor) const { return old_descriptors_->GetStrongValue(descriptor); } -FieldType MapUpdater::GetFieldType(int descriptor) const { - DCHECK_LE(0, descriptor); +FieldType MapUpdater::GetFieldType(InternalIndex descriptor) const { + DCHECK(descriptor.is_found()); if (descriptor == modified_descriptor_) { DCHECK_EQ(kField, new_location_); return *new_field_type_; @@ -80,9 +80,9 @@ FieldType MapUpdater::GetFieldType(int descriptor) const { } Handle<FieldType> MapUpdater::GetOrComputeFieldType( - int descriptor, PropertyLocation location, + InternalIndex descriptor, PropertyLocation location, Representation representation) const { - DCHECK_LE(0, descriptor); + DCHECK(descriptor.is_found()); // |location| is just a pre-fetched GetDetails(descriptor).location(). DCHECK_EQ(location, GetDetails(descriptor).location()); if (location == kField) { @@ -93,7 +93,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType( } Handle<FieldType> MapUpdater::GetOrComputeFieldType( - Handle<DescriptorArray> descriptors, int descriptor, + Handle<DescriptorArray> descriptors, InternalIndex descriptor, PropertyLocation location, Representation representation) { // |location| is just a pre-fetched GetDetails(descriptor).location(). 
DCHECK_EQ(descriptors->GetDetails(descriptor).location(), location); @@ -105,13 +105,13 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType( } } -Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor, +Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor, PropertyAttributes attributes, PropertyConstness constness, Representation representation, Handle<FieldType> field_type) { DCHECK_EQ(kInitialized, state_); - DCHECK_LE(0, descriptor); + DCHECK(descriptor.is_found()); DCHECK(!old_map_->is_dictionary_map()); modified_descriptor_ = descriptor; new_kind_ = kData; @@ -190,7 +190,7 @@ Handle<Map> MapUpdater::Update() { return result_map_; } -void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index, +void MapUpdater::GeneralizeField(Handle<Map> map, InternalIndex modify_index, PropertyConstness new_constness, Representation new_representation, Handle<FieldType> new_field_type) { @@ -338,7 +338,8 @@ MapUpdater::State MapUpdater::FindRootMap() { } int root_nof = root_map_->NumberOfOwnDescriptors(); - if (modified_descriptor_ >= 0 && modified_descriptor_ < root_nof) { + if (modified_descriptor_.is_found() && + modified_descriptor_.as_int() < root_nof) { PropertyDetails old_details = old_descriptors_->GetDetails(modified_descriptor_); if (old_details.kind() != new_kind_ || @@ -374,7 +375,7 @@ MapUpdater::State MapUpdater::FindTargetMap() { target_map_ = root_map_; int root_nof = root_map_->NumberOfOwnDescriptors(); - for (int i = root_nof; i < old_nof_; ++i) { + for (InternalIndex i : InternalIndex::Range(root_nof, old_nof_)) { PropertyDetails old_details = GetDetails(i); Map transition = TransitionsAccessor(isolate_, target_map_) .SearchTransition(GetKey(i), old_details.kind(), @@ -423,7 +424,7 @@ MapUpdater::State MapUpdater::FindTargetMap() { int target_nof = target_map_->NumberOfOwnDescriptors(); if (target_nof == old_nof_) { #ifdef DEBUG - if (modified_descriptor_ >= 0) { + if (modified_descriptor_.is_found()) { DescriptorArray target_descriptors = target_map_->instance_descriptors(); PropertyDetails details = target_descriptors.GetDetails(modified_descriptor_); @@ -465,7 +466,7 @@ MapUpdater::State MapUpdater::FindTargetMap() { } // Find the last compatible target map in the transition tree. - for (int i = target_nof; i < old_nof_; ++i) { + for (InternalIndex i : InternalIndex::Range(target_nof, old_nof_)) { PropertyDetails old_details = GetDetails(i); Map transition = TransitionsAccessor(isolate_, target_map_) .SearchTransition(GetKey(i), old_details.kind(), @@ -521,7 +522,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() { // general than we requested. Take |root_nof| entries as is. // 0 -> |root_nof| int current_offset = 0; - for (int i = 0; i < root_nof; ++i) { + for (InternalIndex i : InternalIndex::Range(root_nof)) { PropertyDetails old_details = old_descriptors_->GetDetails(i); if (old_details.location() == kField) { current_offset += old_details.field_width_in_words(); @@ -534,7 +535,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() { // Merge "updated" old_descriptor entries with target_descriptor entries. 
// |root_nof| -> |target_nof| - for (int i = root_nof; i < target_nof; ++i) { + for (InternalIndex i : InternalIndex::Range(root_nof, target_nof)) { Handle<Name> key(GetKey(i), isolate_); PropertyDetails old_details = GetDetails(i); PropertyDetails target_details = target_descriptors->GetDetails(i); @@ -606,7 +607,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() { // Take "updated" old_descriptor entries. // |target_nof| -> |old_nof| - for (int i = target_nof; i < old_nof_; ++i) { + for (InternalIndex i : InternalIndex::Range(target_nof, old_nof_)) { PropertyDetails old_details = GetDetails(i); Handle<Name> key(GetKey(i), isolate_); @@ -665,7 +666,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) { int root_nof = root_map_->NumberOfOwnDescriptors(); Map current = *root_map_; - for (int i = root_nof; i < old_nof_; i++) { + for (InternalIndex i : InternalIndex::Range(root_nof, old_nof_)) { Name name = descriptors->GetKey(i); PropertyDetails details = descriptors->GetDetails(i); Map next = @@ -707,13 +708,13 @@ MapUpdater::State MapUpdater::ConstructNewMap() { state_ = kAtIntegrityLevelSource; return state_; } - - PropertyDetails split_details = GetDetails(split_nof); + InternalIndex split_index(split_nof); + PropertyDetails split_details = GetDetails(split_index); TransitionsAccessor transitions(isolate_, split_map); // Invalidate a transition target at |key|. Map maybe_transition = transitions.SearchTransition( - GetKey(split_nof), split_details.kind(), split_details.attributes()); + GetKey(split_index), split_details.kind(), split_details.attributes()); if (!maybe_transition.is_null()) { maybe_transition.DeprecateTransitionTree(isolate_); } @@ -727,7 +728,7 @@ MapUpdater::State MapUpdater::ConstructNewMap() { old_map_->NotifyLeafMapLayoutChange(isolate_); - if (FLAG_trace_generalization && modified_descriptor_ >= 0) { + if (FLAG_trace_generalization && modified_descriptor_.is_found()) { PropertyDetails old_details = old_descriptors_->GetDetails(modified_descriptor_); PropertyDetails new_details = diff --git a/chromium/v8/src/objects/map-updater.h b/chromium/v8/src/objects/map-updater.h index 6ee373cbdf3..11bdd0859ff 100644 --- a/chromium/v8/src/objects/map-updater.h +++ b/chromium/v8/src/objects/map-updater.h @@ -54,7 +54,7 @@ class MapUpdater { // Prepares for reconfiguring of a property at |descriptor| to data field // with given |attributes| and |representation|/|field_type| and // performs the steps 1-5. - Handle<Map> ReconfigureToDataField(int descriptor, + Handle<Map> ReconfigureToDataField(InternalIndex descriptor, PropertyAttributes attributes, PropertyConstness constness, Representation representation, @@ -127,26 +127,26 @@ class MapUpdater { State Normalize(const char* reason); // Returns name of a |descriptor| property. - inline Name GetKey(int descriptor) const; + inline Name GetKey(InternalIndex descriptor) const; // Returns property details of a |descriptor| in "updated" |old_descrtiptors_| // array. - inline PropertyDetails GetDetails(int descriptor) const; + inline PropertyDetails GetDetails(InternalIndex descriptor) const; // Returns value of a |descriptor| with kDescriptor location in "updated" // |old_descrtiptors_| array. - inline Object GetValue(int descriptor) const; + inline Object GetValue(InternalIndex descriptor) const; // Returns field type for a |descriptor| with kField location in "updated" // |old_descrtiptors_| array. 
- inline FieldType GetFieldType(int descriptor) const; + inline FieldType GetFieldType(InternalIndex descriptor) const; // If a |descriptor| property in "updated" |old_descriptors_| has kField // location then returns it's field type otherwise computes optimal field // type for the descriptor's value and |representation|. The |location| // value must be a pre-fetched location for |descriptor|. inline Handle<FieldType> GetOrComputeFieldType( - int descriptor, PropertyLocation location, + InternalIndex descriptor, PropertyLocation location, Representation representation) const; // If a |descriptor| property in given |descriptors| array has kField @@ -154,10 +154,10 @@ class MapUpdater { // type for the descriptor's value and |representation|. // The |location| value must be a pre-fetched location for |descriptor|. inline Handle<FieldType> GetOrComputeFieldType( - Handle<DescriptorArray> descriptors, int descriptor, + Handle<DescriptorArray> descriptors, InternalIndex descriptor, PropertyLocation location, Representation representation); - void GeneralizeField(Handle<Map> map, int modify_index, + void GeneralizeField(Handle<Map> map, InternalIndex modify_index, PropertyConstness new_constness, Representation new_representation, Handle<FieldType> new_field_type); @@ -182,9 +182,9 @@ class MapUpdater { ElementsKind new_elements_kind_; bool is_transitionable_fast_elements_kind_; - // If |modified_descriptor_| is not equal to -1 then the fields below form + // If |modified_descriptor_.is_found()|, then the fields below form // an "update" of the |old_map_|'s descriptors. - int modified_descriptor_ = -1; + InternalIndex modified_descriptor_ = InternalIndex::NotFound(); PropertyKind new_kind_ = kData; PropertyAttributes new_attributes_ = NONE; PropertyConstness new_constness_ = PropertyConstness::kMutable; diff --git a/chromium/v8/src/objects/map.cc b/chromium/v8/src/objects/map.cc index a672d6580a0..79844e6323e 100644 --- a/chromium/v8/src/objects/map.cc +++ b/chromium/v8/src/objects/map.cc @@ -56,20 +56,8 @@ MaybeHandle<JSFunction> Map::GetConstructorFunction( return MaybeHandle<JSFunction>(); } -bool Map::IsMapOfGlobalProxy(Handle<NativeContext> native_context) const { - DisallowHeapAllocation no_gc; - if (IsJSGlobalProxyMap()) { - Object maybe_constructor = GetConstructor(); - // Detached global proxies have |null| as their constructor. 
- return maybe_constructor.IsJSFunction() && - JSFunction::cast(maybe_constructor).native_context() == - *native_context; - } - return false; -} - -void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index, - PropertyKind kind, +void Map::PrintReconfiguration(Isolate* isolate, FILE* file, + InternalIndex modify_index, PropertyKind kind, PropertyAttributes attributes) { OFStream os(file); os << "[reconfiguring]"; @@ -256,7 +244,7 @@ VisitorId Map::GetVisitorId(Map map) { case CODE_DATA_CONTAINER_TYPE: return kVisitCodeDataContainer; - case WASM_INSTANCE_TYPE: + case WASM_INSTANCE_OBJECT_TYPE: return kVisitWasmInstanceObject; case PREPARSE_DATA_TYPE: @@ -270,7 +258,7 @@ VisitorId Map::GetVisitorId(Map map) { case JS_OBJECT_TYPE: case JS_ERROR_TYPE: - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE: case JS_CONTEXT_EXTENSION_OBJECT_TYPE: case JS_GENERATOR_OBJECT_TYPE: @@ -291,27 +279,27 @@ VisitorId Map::GetVisitorId(Map map) { case JS_MAP_VALUE_ITERATOR_TYPE: case JS_STRING_ITERATOR_TYPE: case JS_PROMISE_TYPE: - case JS_REGEXP_TYPE: - case JS_REGEXP_STRING_ITERATOR_TYPE: + case JS_REG_EXP_TYPE: + case JS_REG_EXP_STRING_ITERATOR_TYPE: case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE: case JS_FINALIZATION_GROUP_TYPE: #ifdef V8_INTL_SUPPORT - case JS_INTL_V8_BREAK_ITERATOR_TYPE: - case JS_INTL_COLLATOR_TYPE: - case JS_INTL_DATE_TIME_FORMAT_TYPE: - case JS_INTL_LIST_FORMAT_TYPE: - case JS_INTL_LOCALE_TYPE: - case JS_INTL_NUMBER_FORMAT_TYPE: - case JS_INTL_PLURAL_RULES_TYPE: - case JS_INTL_RELATIVE_TIME_FORMAT_TYPE: - case JS_INTL_SEGMENT_ITERATOR_TYPE: - case JS_INTL_SEGMENTER_TYPE: + case JS_V8_BREAK_ITERATOR_TYPE: + case JS_COLLATOR_TYPE: + case JS_DATE_TIME_FORMAT_TYPE: + case JS_LIST_FORMAT_TYPE: + case JS_LOCALE_TYPE: + case JS_NUMBER_FORMAT_TYPE: + case JS_PLURAL_RULES_TYPE: + case JS_RELATIVE_TIME_FORMAT_TYPE: + case JS_SEGMENT_ITERATOR_TYPE: + case JS_SEGMENTER_TYPE: #endif // V8_INTL_SUPPORT - case WASM_EXCEPTION_TYPE: - case WASM_GLOBAL_TYPE: - case WASM_MEMORY_TYPE: - case WASM_MODULE_TYPE: - case WASM_TABLE_TYPE: + case WASM_EXCEPTION_OBJECT_TYPE: + case WASM_GLOBAL_OBJECT_TYPE: + case WASM_MEMORY_OBJECT_TYPE: + case WASM_MODULE_OBJECT_TYPE: + case WASM_TABLE_OBJECT_TYPE: case JS_BOUND_FUNCTION_TYPE: { const bool has_raw_data_fields = (FLAG_unbox_double_fields && !map.HasFastPointerLayout()) || @@ -371,12 +359,13 @@ VisitorId Map::GetVisitorId(Map map) { } void Map::PrintGeneralization( - Isolate* isolate, FILE* file, const char* reason, int modify_index, - int split, int descriptors, bool descriptor_to_field, - Representation old_representation, Representation new_representation, - PropertyConstness old_constness, PropertyConstness new_constness, - MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value, - MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) { + Isolate* isolate, FILE* file, const char* reason, + InternalIndex modify_index, int split, int descriptors, + bool descriptor_to_field, Representation old_representation, + Representation new_representation, PropertyConstness old_constness, + PropertyConstness new_constness, MaybeHandle<FieldType> old_field_type, + MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type, + MaybeHandle<Object> new_value) { OFStream os(file); os << "[generalizing]"; Name name = instance_descriptors().GetKey(modify_index); @@ -440,9 +429,9 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map, PropertyConstness 
constness, Representation representation, TransitionFlag flag) { - DCHECK( - DescriptorArray::kNotFound == - map->instance_descriptors().Search(*name, map->NumberOfOwnDescriptors())); + DCHECK(map->instance_descriptors() + .Search(*name, map->NumberOfOwnDescriptors()) + .is_not_found()); // Ensure the descriptor array does not get too big. if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) { @@ -536,8 +525,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields, // If smi descriptors were replaced by double descriptors, rewrite. DescriptorArray old_desc = instance_descriptors(); DescriptorArray new_desc = target.instance_descriptors(); - int limit = NumberOfOwnDescriptors(); - for (int i = 0; i < limit; i++) { + for (InternalIndex i : IterateOwnDescriptors()) { if (new_desc.GetDetails(i).representation().IsDouble() != old_desc.GetDetails(i).representation().IsDouble()) { return true; @@ -562,7 +550,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields, int Map::NumberOfFields() const { DescriptorArray descriptors = instance_descriptors(); int result = 0; - for (int i = 0; i < NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : IterateOwnDescriptors()) { if (descriptors.GetDetails(i).location() == kField) result++; } return result; @@ -572,7 +560,7 @@ Map::FieldCounts Map::GetFieldCounts() const { DescriptorArray descriptors = instance_descriptors(); int mutable_count = 0; int const_count = 0; - for (int i = 0; i < NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : IterateOwnDescriptors()) { PropertyDetails details = descriptors.GetDetails(i); if (details.location() == kField) { switch (details.constness()) { @@ -625,8 +613,10 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors, // descriptors will not be trimmed in the mark-compactor, we need to mark // all its elements. Map current = *this; +#ifndef V8_DISABLE_WRITE_BARRIERS MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace, to_replace.number_of_descriptors()); +#endif while (current.instance_descriptors(isolate) == to_replace) { Object next = current.GetBackPointer(isolate); if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map. @@ -643,9 +633,8 @@ Map Map::FindRootMap(Isolate* isolate) const { while (true) { Object back = result.GetBackPointer(isolate); if (back.IsUndefined(isolate)) { - // Initial map always owns descriptors and doesn't have unused entries - // in the descriptor array. - DCHECK(result.owns_descriptors()); + // Initial map must not contain descriptors in the descriptors array + // that do not belong to the map. 
DCHECK_EQ(result.NumberOfOwnDescriptors(), result.instance_descriptors().number_of_descriptors()); return result; @@ -654,7 +643,7 @@ Map Map::FindRootMap(Isolate* isolate) const { } } -Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const { +Map Map::FindFieldOwner(Isolate* isolate, InternalIndex descriptor) const { DisallowHeapAllocation no_allocation; DCHECK_EQ(kField, instance_descriptors(isolate).GetDetails(descriptor).location()); @@ -663,14 +652,14 @@ Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const { Object back = result.GetBackPointer(isolate); if (back.IsUndefined(isolate)) break; const Map parent = Map::cast(back); - if (parent.NumberOfOwnDescriptors() <= descriptor) break; + if (parent.NumberOfOwnDescriptors() <= descriptor.as_int()) break; result = parent; } return result; } -void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name, - PropertyConstness new_constness, +void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor, + Handle<Name> name, PropertyConstness new_constness, Representation new_representation, const MaybeObjectHandle& new_wrapped_type) { DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak()); @@ -740,7 +729,8 @@ Handle<FieldType> Map::GeneralizeFieldType(Representation rep1, } // static -void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index, +void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, + InternalIndex modify_index, PropertyConstness new_constness, Representation new_representation, Handle<FieldType> new_field_type) { @@ -791,7 +781,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index, map->PrintGeneralization( isolate, stdout, "field type generalization", modify_index, map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false, - details.representation(), details.representation(), old_constness, + details.representation(), + descriptors->GetDetails(modify_index).representation(), old_constness, new_constness, old_field_type, MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>()); } @@ -800,7 +791,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index, // TODO(ishell): remove. // static Handle<Map> Map::ReconfigureProperty(Isolate* isolate, Handle<Map> map, - int modify_index, PropertyKind new_kind, + InternalIndex modify_index, + PropertyKind new_kind, PropertyAttributes new_attributes, Representation new_representation, Handle<FieldType> new_field_type) { @@ -840,9 +832,8 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) { // types instead of old_map's types. // Go to slow map updating if the old_map has fast properties with cleared // field types. 
- int old_nof = old_map.NumberOfOwnDescriptors(); DescriptorArray old_descriptors = old_map.instance_descriptors(); - for (int i = 0; i < old_nof; i++) { + for (InternalIndex i : old_map.IterateOwnDescriptors()) { PropertyDetails old_details = old_descriptors.GetDetails(i); if (old_details.location() == kField && old_details.kind() == kData) { FieldType old_type = old_descriptors.GetFieldType(i); @@ -1007,7 +998,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) { DescriptorArray old_descriptors = old_map.instance_descriptors(); Map new_map = *this; - for (int i = root_nof; i < old_nof; ++i) { + for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) { PropertyDetails old_details = old_descriptors.GetDetails(i); Map transition = TransitionsAccessor(isolate, new_map, &no_allocation) @@ -1107,8 +1098,10 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) { // Replace descriptors by new_descriptors in all maps that share it. The old // descriptors will not be trimmed in the mark-compactor, we need to mark // all its elements. +#ifndef V8_DISABLE_WRITE_BARRIERS MarkingBarrierForDescriptorArray(isolate->heap(), *map, *descriptors, descriptors->number_of_descriptors()); +#endif Map current = *map; while (current.instance_descriptors() == *descriptors) { @@ -1363,8 +1356,7 @@ Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map, int Map::NumberOfEnumerableProperties() const { int result = 0; DescriptorArray descs = instance_descriptors(); - int limit = NumberOfOwnDescriptors(); - for (int i = 0; i < limit; i++) { + for (InternalIndex i : IterateOwnDescriptors()) { if ((descs.GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 && !descs.GetKey(i).FilterKey(ENUMERABLE_STRINGS)) { result++; @@ -1378,7 +1370,7 @@ int Map::NextFreePropertyIndex() const { DescriptorArray descs = instance_descriptors(); // Search properties backwards to find the last field. for (int i = number_of_own_descriptors - 1; i >= 0; --i) { - PropertyDetails details = descs.GetDetails(i); + PropertyDetails details = descs.GetDetails(InternalIndex(i)); if (details.location() == kField) { return details.field_index() + details.field_width_in_words(); } @@ -1578,9 +1570,8 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) { *map == *isolate->async_function_with_home_object_map() || *map == *isolate->async_function_with_name_and_home_object_map()); #endif - // Initial maps must always own their descriptors and it's descriptor array - // does not contain descriptors that do not belong to the map. - DCHECK(map->owns_descriptors()); + // Initial maps must not contain descriptors in the descriptors array + // that do not belong to the map. DCHECK_EQ(map->NumberOfOwnDescriptors(), map->instance_descriptors().number_of_descriptors()); } @@ -1598,6 +1589,11 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map, int instance_size, int inobject_properties, int unused_property_fields) { EnsureInitialMap(isolate, map); + // Initial map must not contain descriptors in the descriptors array + // that do not belong to the map. + DCHECK_EQ(map->NumberOfOwnDescriptors(), + map->instance_descriptors().number_of_descriptors()); + Handle<Map> result = RawCopy(isolate, map, instance_size, inobject_properties); @@ -1606,9 +1602,10 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map, int number_of_own_descriptors = map->NumberOfOwnDescriptors(); if (number_of_own_descriptors > 0) { - // The copy will use the same descriptors array. 
- result->UpdateDescriptors(isolate, map->instance_descriptors(), - map->GetLayoutDescriptor(), + // The copy will use the same descriptors array without ownership. + DescriptorArray descriptors = map->instance_descriptors(); + result->set_owns_descriptors(false); + result->UpdateDescriptors(isolate, descriptors, map->GetLayoutDescriptor(), number_of_own_descriptors); DCHECK_EQ(result->NumberOfFields(), @@ -1688,9 +1685,8 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent, if (!parent->GetBackPointer().IsUndefined(isolate)) { parent->set_owns_descriptors(false); } else { - // |parent| is initial map and it must keep the ownership, there must be no - // descriptors in the descriptors array that do not belong to the map. - DCHECK(parent->owns_descriptors()); + // |parent| is initial map and it must not contain descriptors in the + // descriptors array that do not belong to the map. DCHECK_EQ(parent->NumberOfOwnDescriptors(), parent->instance_descriptors().number_of_descriptors()); } @@ -1788,7 +1784,7 @@ Handle<Map> Map::AddMissingTransitions( // if there are no dead transitions from that map and this is exactly the // case for all the intermediate maps we create here. Handle<Map> map = split_map; - for (int i = split_nof; i < nof_descriptors - 1; ++i) { + for (InternalIndex i : InternalIndex::Range(split_nof, nof_descriptors - 1)) { Handle<Map> new_map = CopyDropDescriptors(isolate, map); InstallDescriptors(isolate, map, new_map, i, descriptors, full_layout_descriptor); @@ -1797,20 +1793,21 @@ Handle<Map> Map::AddMissingTransitions( } map->NotifyLeafMapLayoutChange(isolate); last_map->set_may_have_interesting_symbols(false); - InstallDescriptors(isolate, map, last_map, nof_descriptors - 1, descriptors, - full_layout_descriptor); + InstallDescriptors(isolate, map, last_map, InternalIndex(nof_descriptors - 1), + descriptors, full_layout_descriptor); return last_map; } // Since this method is used to rewrite an existing transition tree, it can // always insert transitions without checking. void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent, - Handle<Map> child, int new_descriptor, + Handle<Map> child, InternalIndex new_descriptor, Handle<DescriptorArray> descriptors, Handle<LayoutDescriptor> full_layout_descriptor) { DCHECK(descriptors->IsSortedNoDuplicates()); - child->SetInstanceDescriptors(isolate, *descriptors, new_descriptor + 1); + child->SetInstanceDescriptors(isolate, *descriptors, + new_descriptor.as_int() + 1); child->CopyUnusedPropertyFields(*parent); PropertyDetails details = descriptors->GetDetails(new_descriptor); if (details.location() == kField) { @@ -1933,6 +1930,7 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) { // In case the map owned its own descriptors, share the descriptors and // transfer ownership to the new map. // The properties did not change, so reuse descriptors. 
+ map->set_owns_descriptors(false); new_map->InitializeDescriptors(isolate, map->instance_descriptors(), map->GetLayoutDescriptor()); } else { @@ -2063,7 +2061,7 @@ Handle<Map> Map::CopyForPreventExtensions( namespace { -bool CanHoldValue(DescriptorArray descriptors, int descriptor, +bool CanHoldValue(DescriptorArray descriptors, InternalIndex descriptor, PropertyConstness constness, Object value) { PropertyDetails details = descriptors.GetDetails(descriptor); if (details.location() == kField) { @@ -2086,7 +2084,7 @@ bool CanHoldValue(DescriptorArray descriptors, int descriptor, } Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map, - int descriptor, + InternalIndex descriptor, PropertyConstness constness, Handle<Object> value) { if (CanHoldValue(map->instance_descriptors(), descriptor, constness, @@ -2108,7 +2106,7 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map, // static Handle<Map> Map::PrepareForDataProperty(Isolate* isolate, Handle<Map> map, - int descriptor, + InternalIndex descriptor, PropertyConstness constness, Handle<Object> value) { // Update to the newest map before storing the property. @@ -2140,7 +2138,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map, .SearchTransition(*name, kData, attributes); if (!maybe_transition.is_null()) { Handle<Map> transition(maybe_transition, isolate); - int descriptor = transition->LastAdded(); + InternalIndex descriptor = transition->LastAdded(); DCHECK_EQ( attributes, @@ -2206,7 +2204,8 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map, } Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map, - int descriptor, PropertyKind kind, + InternalIndex descriptor, + PropertyKind kind, PropertyAttributes attributes, PropertyConstness constness) { // Dictionaries have to be reconfigured in-place. @@ -2232,7 +2231,8 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map, } Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map, - Handle<Name> name, int descriptor, + Handle<Name> name, + InternalIndex descriptor, Handle<Object> getter, Handle<Object> setter, PropertyAttributes attributes) { @@ -2261,7 +2261,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map, if (!maybe_transition.is_null()) { Handle<Map> transition(maybe_transition, isolate); DescriptorArray descriptors = transition->instance_descriptors(); - int descriptor = transition->LastAdded(); + InternalIndex descriptor = transition->LastAdded(); DCHECK(descriptors.GetKey(descriptor).Equals(*name)); DCHECK_EQ(kAccessor, descriptors.GetDetails(descriptor).kind()); @@ -2284,7 +2284,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map, Handle<AccessorPair> pair; DescriptorArray old_descriptors = map->instance_descriptors(); - if (descriptor != DescriptorArray::kNotFound) { + if (descriptor.is_found()) { if (descriptor != map->LastAdded()) { return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast"); } @@ -2374,9 +2374,9 @@ Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate); // We replace the key if it is already present. 
- int index = + InternalIndex index = old_descriptors->SearchWithCache(isolate, *descriptor->GetKey(), *map); - if (index != DescriptorArray::kNotFound) { + if (index.is_found()) { return CopyReplaceDescriptor(isolate, map, old_descriptors, descriptor, index, flag); } @@ -2386,7 +2386,7 @@ Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map, Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors, Descriptor* descriptor, - int insertion_index, + InternalIndex insertion_index, TransitionFlag flag) { Handle<Name> key = descriptor->GetKey(); DCHECK_EQ(*key, descriptors->GetKey(insertion_index)); @@ -2403,7 +2403,7 @@ Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map, isolate, map, new_descriptors, new_descriptors->number_of_descriptors()); SimpleTransitionFlag simple_flag = - (insertion_index == descriptors->number_of_descriptors() - 1) + (insertion_index.as_int() == descriptors->number_of_descriptors() - 1) ? SIMPLE_PROPERTY_TRANSITION : PROPERTY_TRANSITION; return CopyReplaceDescriptors(isolate, map, new_descriptors, @@ -2465,8 +2465,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const { // with fields that may be generalized in-place. This must already be handled // during addition of a new field. DescriptorArray descriptors = instance_descriptors(); - int nof = NumberOfOwnDescriptors(); - for (int i = 0; i < nof; i++) { + for (InternalIndex i : IterateOwnDescriptors()) { PropertyDetails details = descriptors.GetDetails(i); if (details.location() == kField) { DCHECK(IsMostGeneralFieldType(details.representation(), @@ -2547,8 +2546,10 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors, int number_of_own_descriptors) { set_synchronized_instance_descriptors(descriptors); SetNumberOfOwnDescriptors(number_of_own_descriptors); +#ifndef V8_DISABLE_WRITE_BARRIERS MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors, number_of_own_descriptors); +#endif } // static diff --git a/chromium/v8/src/objects/map.h b/chromium/v8/src/objects/map.h index ef16019685f..0daadbee088 100644 --- a/chromium/v8/src/objects/map.h +++ b/chromium/v8/src/objects/map.h @@ -8,6 +8,7 @@ #include "src/common/globals.h" #include "src/objects/code.h" #include "src/objects/heap-object.h" +#include "src/objects/internal-index.h" #include "src/objects/objects.h" #include "torque-generated/field-offsets-tq.h" @@ -470,7 +471,8 @@ class Map : public HeapObject { Map GetPrototypeChainRootMap(Isolate* isolate) const; V8_EXPORT_PRIVATE Map FindRootMap(Isolate* isolate) const; - V8_EXPORT_PRIVATE Map FindFieldOwner(Isolate* isolate, int descriptor) const; + V8_EXPORT_PRIVATE Map FindFieldOwner(Isolate* isolate, + InternalIndex descriptor) const; inline int GetInObjectPropertyOffset(int index) const; @@ -513,7 +515,8 @@ class Map : public HeapObject { Representation rep1, Handle<FieldType> type1, Representation rep2, Handle<FieldType> type2, Isolate* isolate); static void GeneralizeField(Isolate* isolate, Handle<Map> map, - int modify_index, PropertyConstness new_constness, + InternalIndex modify_index, + PropertyConstness new_constness, Representation new_representation, Handle<FieldType> new_field_type); // Returns true if the |field_type| is the most general one for @@ -533,7 +536,7 @@ class Map : public HeapObject { Representation* representation, Handle<FieldType>* field_type); V8_EXPORT_PRIVATE static Handle<Map> ReconfigureProperty( - Isolate* isolate, Handle<Map> 
map, int modify_index, + Isolate* isolate, Handle<Map> map, InternalIndex modify_index, PropertyKind new_kind, PropertyAttributes new_attributes, Representation new_representation, Handle<FieldType> new_field_type); @@ -541,7 +544,7 @@ class Map : public HeapObject { Isolate* isolate, Handle<Map> map, ElementsKind new_elements_kind); V8_EXPORT_PRIVATE static Handle<Map> PrepareForDataProperty( - Isolate* isolate, Handle<Map> old_map, int descriptor_number, + Isolate* isolate, Handle<Map> old_map, InternalIndex descriptor_number, PropertyConstness constness, Handle<Object> value); V8_EXPORT_PRIVATE static Handle<Map> Normalize(Isolate* isolate, @@ -636,10 +639,11 @@ class Map : public HeapObject { inline PropertyDetails GetLastDescriptorDetails(Isolate* isolate) const; - inline int LastAdded() const; + inline InternalIndex LastAdded() const; inline int NumberOfOwnDescriptors() const; inline void SetNumberOfOwnDescriptors(int number); + inline InternalIndex::Range IterateOwnDescriptors() const; inline Cell RetrieveDescriptorsPointer(); @@ -742,12 +746,13 @@ class Map : public HeapObject { Handle<Object> value, PropertyAttributes attributes, PropertyConstness constness, StoreOrigin store_origin); V8_EXPORT_PRIVATE static Handle<Map> TransitionToAccessorProperty( - Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor, - Handle<Object> getter, Handle<Object> setter, + Isolate* isolate, Handle<Map> map, Handle<Name> name, + InternalIndex descriptor, Handle<Object> getter, Handle<Object> setter, PropertyAttributes attributes); V8_EXPORT_PRIVATE static Handle<Map> ReconfigureExistingProperty( - Isolate* isolate, Handle<Map> map, int descriptor, PropertyKind kind, - PropertyAttributes attributes, PropertyConstness constness); + Isolate* isolate, Handle<Map> map, InternalIndex descriptor, + PropertyKind kind, PropertyAttributes attributes, + PropertyConstness constness); inline void AppendDescriptor(Isolate* isolate, Descriptor* desc); @@ -881,9 +886,6 @@ class Map : public HeapObject { InstanceType instance_type); inline bool CanHaveFastTransitionableElementsKind() const; - // Whether this is the map of the given native context's global proxy. - bool IsMapOfGlobalProxy(Handle<NativeContext> native_context) const; - private: // This byte encodes either the instance size without the in-object slack or // the slack size in properties backing store. @@ -925,7 +927,7 @@ class Map : public HeapObject { Handle<LayoutDescriptor> full_layout_descriptor); static void InstallDescriptors( Isolate* isolate, Handle<Map> parent_map, Handle<Map> child_map, - int new_descriptor, Handle<DescriptorArray> descriptors, + InternalIndex new_descriptor, Handle<DescriptorArray> descriptors, Handle<LayoutDescriptor> full_layout_descriptor); static Handle<Map> CopyAddDescriptor(Isolate* isolate, Handle<Map> map, Descriptor* descriptor, @@ -938,7 +940,8 @@ class Map : public HeapObject { static Handle<Map> CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors, - Descriptor* descriptor, int index, + Descriptor* descriptor, + InternalIndex index, TransitionFlag flag); static Handle<Map> CopyNormalized(Isolate* isolate, Handle<Map> map, PropertyNormalizationMode mode); @@ -951,22 +954,24 @@ class Map : public HeapObject { // Update field type of the given descriptor to new representation and new // type. The type must be prepared for storing in descriptor array: // it must be either a simple type or a map wrapped in a weak cell. 
- void UpdateFieldType(Isolate* isolate, int descriptor_number, + void UpdateFieldType(Isolate* isolate, InternalIndex descriptor_number, Handle<Name> name, PropertyConstness new_constness, Representation new_representation, const MaybeObjectHandle& new_wrapped_type); // TODO(ishell): Move to MapUpdater. - void PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index, - PropertyKind kind, PropertyAttributes attributes); + void PrintReconfiguration(Isolate* isolate, FILE* file, + InternalIndex modify_index, PropertyKind kind, + PropertyAttributes attributes); // TODO(ishell): Move to MapUpdater. void PrintGeneralization( - Isolate* isolate, FILE* file, const char* reason, int modify_index, - int split, int descriptors, bool constant_to_field, - Representation old_representation, Representation new_representation, - PropertyConstness old_constness, PropertyConstness new_constness, - MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value, - MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value); + Isolate* isolate, FILE* file, const char* reason, + InternalIndex modify_index, int split, int descriptors, + bool constant_to_field, Representation old_representation, + Representation new_representation, PropertyConstness old_constness, + PropertyConstness new_constness, MaybeHandle<FieldType> old_field_type, + MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type, + MaybeHandle<Object> new_value); // Use the high-level instance_descriptors/SetInstanceDescriptors instead. DECL_ACCESSORS(synchronized_instance_descriptors, DescriptorArray) diff --git a/chromium/v8/src/objects/module-inl.h b/chromium/v8/src/objects/module-inl.h index ac545163766..aaf790cc8ac 100644 --- a/chromium/v8/src/objects/module-inl.h +++ b/chromium/v8/src/objects/module-inl.h @@ -38,9 +38,17 @@ SMI_ACCESSORS(Module, hash, kHashOffset) TQ_SMI_ACCESSORS(SourceTextModule, dfs_index) TQ_SMI_ACCESSORS(SourceTextModule, dfs_ancestor_index) +TQ_SMI_ACCESSORS(SourceTextModule, flags) +BOOL_ACCESSORS(SourceTextModule, flags, async, kAsyncBit) +BOOL_ACCESSORS(SourceTextModule, flags, async_evaluating, kAsyncEvaluatingBit) +TQ_SMI_ACCESSORS(SourceTextModule, pending_async_dependencies) +ACCESSORS(SourceTextModule, async_parent_modules, ArrayList, + kAsyncParentModulesOffset) +ACCESSORS(SourceTextModule, top_level_capability, HeapObject, + kTopLevelCapabilityOffset) SourceTextModuleInfo SourceTextModule::info() const { - return (status() >= kEvaluating) + return status() == kErrored ? 
SourceTextModuleInfo::cast(code()) : GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo(); } @@ -112,6 +120,37 @@ class UnorderedModuleSet ZoneAllocator<Handle<Module>>(zone)) {} }; +void SourceTextModule::AddAsyncParentModule(Isolate* isolate, + Handle<SourceTextModule> module) { + Handle<ArrayList> new_array_list = + ArrayList::Add(isolate, handle(async_parent_modules(), isolate), module); + set_async_parent_modules(*new_array_list); +} + +Handle<SourceTextModule> SourceTextModule::GetAsyncParentModule( + Isolate* isolate, int index) { + Handle<SourceTextModule> module( + SourceTextModule::cast(async_parent_modules().Get(index)), isolate); + return module; +} + +int SourceTextModule::AsyncParentModuleCount() { + return async_parent_modules().Length(); +} + +bool SourceTextModule::HasPendingAsyncDependencies() { + DCHECK_GE(pending_async_dependencies(), 0); + return pending_async_dependencies() > 0; +} + +void SourceTextModule::IncrementPendingAsyncDependencies() { + set_pending_async_dependencies(pending_async_dependencies() + 1); +} + +void SourceTextModule::DecrementPendingAsyncDependencies() { + set_pending_async_dependencies(pending_async_dependencies() - 1); +} + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/objects/module.cc b/chromium/v8/src/objects/module.cc index 60b9145d101..9c37de0c855 100644 --- a/chromium/v8/src/objects/module.cc +++ b/chromium/v8/src/objects/module.cc @@ -10,6 +10,7 @@ #include "src/api/api-inl.h" #include "src/ast/modules.h" #include "src/builtins/accessors.h" +#include "src/heap/heap-inl.h" #include "src/objects/cell-inl.h" #include "src/objects/hash-table-inl.h" #include "src/objects/js-generator-inl.h" @@ -50,12 +51,14 @@ void Module::SetStatus(Status new_status) { set_status(new_status); } -void Module::RecordError(Isolate* isolate) { - DisallowHeapAllocation no_alloc; - DCHECK(exception().IsTheHole(isolate)); - Object the_exception = isolate->pending_exception(); - DCHECK(!the_exception.IsTheHole(isolate)); +void Module::RecordErrorUsingPendingException(Isolate* isolate) { + Handle<Object> the_exception(isolate->pending_exception(), isolate); + RecordError(isolate, the_exception); +} +void Module::RecordError(Isolate* isolate, Handle<Object> error) { + DCHECK(exception().IsTheHole(isolate)); + DCHECK(!error->IsTheHole(isolate)); if (this->IsSourceTextModule()) { Handle<SourceTextModule> self(SourceTextModule::cast(*this), GetIsolate()); self->set_code(self->info()); @@ -64,7 +67,7 @@ void Module::RecordError(Isolate* isolate) { PrintStatusTransition(Module::kErrored); #endif // DEBUG set_status(Module::kErrored); - set_exception(the_exception); + set_exception(*error); } void Module::ResetGraph(Isolate* isolate, Handle<Module> module) { @@ -244,46 +247,35 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) { #endif // OBJECT_PRINT } #endif // DEBUG - if (module->status() == kErrored) { - isolate->Throw(module->GetException()); - return MaybeHandle<Object>(); - } - DCHECK_NE(module->status(), kEvaluating); - DCHECK_GE(module->status(), kInstantiated); - Zone zone(isolate->allocator(), ZONE_NAME); - - ZoneForwardList<Handle<SourceTextModule>> stack(&zone); - unsigned dfs_index = 0; - Handle<Object> result; - if (!Evaluate(isolate, module, &stack, &dfs_index).ToHandle(&result)) { - for (auto& descendant : stack) { - DCHECK_EQ(descendant->status(), kEvaluating); - descendant->RecordError(isolate); - } - DCHECK_EQ(module->GetException(), isolate->pending_exception()); - return 
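[Editorial note, illustration only] The inline methods above maintain two pieces of top-level-await bookkeeping per source text module: a growable list of async parent modules and a counter of pending async dependencies; the idea, as the accessors suggest, is that a parent is resumed once its pending count drops back to zero. A minimal stand-alone sketch of that bookkeeping, where the std::vector storage is an assumption (the real code keeps an on-heap ArrayList and a Smi counter):

#include <cassert>
#include <vector>

// Illustrative only; names mirror the accessors above.
struct ModuleSketch {
  std::vector<ModuleSketch*> async_parent_modules;
  int pending_async_dependencies = 0;

  void AddAsyncParentModule(ModuleSketch* parent) {
    async_parent_modules.push_back(parent);
  }
  int AsyncParentModuleCount() const {
    return static_cast<int>(async_parent_modules.size());
  }
  bool HasPendingAsyncDependencies() const {
    assert(pending_async_dependencies >= 0);
    return pending_async_dependencies > 0;
  }
  void IncrementPendingAsyncDependencies() { ++pending_async_dependencies; }
  void DecrementPendingAsyncDependencies() { --pending_async_dependencies; }
};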
MaybeHandle<Object>(); + STACK_CHECK(isolate, MaybeHandle<Object>()); + if (FLAG_harmony_top_level_await && module->IsSourceTextModule()) { + return SourceTextModule::EvaluateMaybeAsync( + isolate, Handle<SourceTextModule>::cast(module)); + } else { + return Module::InnerEvaluate(isolate, module); } - DCHECK_EQ(module->status(), kEvaluated); - DCHECK(stack.empty()); - return result; } -MaybeHandle<Object> Module::Evaluate( - Isolate* isolate, Handle<Module> module, - ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) { +MaybeHandle<Object> Module::InnerEvaluate(Isolate* isolate, + Handle<Module> module) { if (module->status() == kErrored) { isolate->Throw(module->GetException()); return MaybeHandle<Object>(); - } - if (module->status() >= kEvaluating) { + } else if (module->status() == kEvaluated) { return isolate->factory()->undefined_value(); } - DCHECK_EQ(module->status(), kInstantiated); - STACK_CHECK(isolate, MaybeHandle<Object>()); + + // InnerEvaluate can be called both to evaluate top level modules without + // the harmony_top_level_await flag and recursively to evaluate + // SyntheticModules in the dependency graphs of SourceTextModules. + // + // However, SyntheticModules transition directly to 'Evaluated,' so we should + // never see an 'Evaluating' module at this point. + CHECK_EQ(module->status(), kInstantiated); if (module->IsSourceTextModule()) { - return SourceTextModule::Evaluate( - isolate, Handle<SourceTextModule>::cast(module), stack, dfs_index); + return SourceTextModule::Evaluate(isolate, + Handle<SourceTextModule>::cast(module)); } else { return SyntheticModule::Evaluate(isolate, Handle<SyntheticModule>::cast(module)); diff --git a/chromium/v8/src/objects/module.h b/chromium/v8/src/objects/module.h index 08badf0357d..d0ea22e6e58 100644 --- a/chromium/v8/src/objects/module.h +++ b/chromium/v8/src/objects/module.h @@ -112,18 +112,19 @@ class Module : public HeapObject { ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index, Zone* zone); - static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate( - Isolate* isolate, Handle<Module> module, - ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index); + static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerEvaluate( + Isolate* isolate, Handle<Module> module); // Set module's status back to kUninstantiated and reset other internal state. // This is used when instantiation fails. static void Reset(Isolate* isolate, Handle<Module> module); static void ResetGraph(Isolate* isolate, Handle<Module> module); - // To set status to kErrored, RecordError should be used. + // To set status to kErrored, RecordError or RecordErrorUsingPendingException + // should be used. void SetStatus(Status status); - void RecordError(Isolate* isolate); + void RecordErrorUsingPendingException(Isolate* isolate); + void RecordError(Isolate* isolate, Handle<Object> error); #ifdef DEBUG // For --trace-module-status. @@ -137,7 +138,8 @@ class Module : public HeapObject { // JSModuleNamespace object (representing module "bar") is created and bound to // the declared variable (foo). A module can have at most one namespace object. 
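[Editorial note, illustration only] The rewritten InnerEvaluate above only performs the status checks before delegating: kErrored rethrows the recorded exception, kEvaluated returns undefined, and anything else must be kInstantiated because synthetic modules jump straight to kEvaluated and are never observed as kEvaluating here. A tiny stand-alone restatement of just those precondition checks (enum values and the exception type are stand-ins; the actual evaluation is not modeled):

#include <stdexcept>

enum class Status { kUninstantiated, kInstantiated, kEvaluating, kEvaluated, kErrored };

// Returns a description of the path InnerEvaluate would take for a module in
// the given state; illustrative only.
const char* DescribeEvaluatePath(Status status) {
  if (status == Status::kErrored) return "rethrow recorded exception";
  if (status == Status::kEvaluated) return "return undefined";
  // SyntheticModules transition directly to kEvaluated, so kEvaluating is
  // never seen here; anything else must be a freshly instantiated module.
  if (status != Status::kInstantiated) throw std::logic_error("unexpected status");
  return "evaluate module body";
}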
class JSModuleNamespace - : public TorqueGeneratedJSModuleNamespace<JSModuleNamespace, JSObject> { + : public TorqueGeneratedJSModuleNamespace<JSModuleNamespace, + JSSpecialObject> { public: DECL_PRINTER(JSModuleNamespace) diff --git a/chromium/v8/src/objects/name-inl.h b/chromium/v8/src/objects/name-inl.h index b76ae245a2d..88ae2feea58 100644 --- a/chromium/v8/src/objects/name-inl.h +++ b/chromium/v8/src/objects/name-inl.h @@ -9,6 +9,7 @@ #include "src/heap/heap-write-barrier-inl.h" #include "src/objects/map-inl.h" +#include "src/objects/primitive-heap-object-inl.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -100,6 +101,10 @@ bool Name::AsArrayIndex(uint32_t* index) { return IsString() && String::cast(*this).AsArrayIndex(index); } +bool Name::AsIntegerIndex(size_t* index) { + return IsString() && String::cast(*this).AsIntegerIndex(index); +} + // static bool Name::ContainsCachedArrayIndex(uint32_t hash) { return (hash & Name::kDoesNotContainCachedArrayIndexMask) == 0; diff --git a/chromium/v8/src/objects/name.h b/chromium/v8/src/objects/name.h index a02bb3d7945..386b9ec0556 100644 --- a/chromium/v8/src/objects/name.h +++ b/chromium/v8/src/objects/name.h @@ -5,9 +5,8 @@ #ifndef V8_OBJECTS_NAME_H_ #define V8_OBJECTS_NAME_H_ -#include "src/objects/heap-object.h" #include "src/objects/objects.h" -#include "torque-generated/class-definitions-tq.h" +#include "src/objects/primitive-heap-object.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -17,7 +16,7 @@ namespace internal { // The Name abstract class captures anything that can be used as a property // name, i.e., strings and symbols. All names store a hash value. -class Name : public TorqueGeneratedName<Name, HeapObject> { +class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> { public: // Tells whether the hash code has been computed. inline bool HasHashCode(); @@ -32,6 +31,7 @@ class Name : public TorqueGeneratedName<Name, HeapObject> { // Conversion. inline bool AsArrayIndex(uint32_t* index); + inline bool AsIntegerIndex(size_t* index); // An "interesting symbol" is a well-known symbol, like @@toStringTag, // that's often looked up on random objects but is usually not present. @@ -73,7 +73,8 @@ class Name : public TorqueGeneratedName<Name, HeapObject> { // array index. static const int kHashNotComputedMask = 1; static const int kIsNotArrayIndexMask = 1 << 1; - static const int kNofHashBitFields = 2; + static const int kIsNotIntegerIndexMask = 1 << 2; + static const int kNofHashBitFields = 3; // Shift constant retrieving hash code from hash field. static const int kHashShift = kNofHashBitFields; @@ -88,6 +89,14 @@ class Name : public TorqueGeneratedName<Name, HeapObject> { // Maximum number of characters to consider when trying to convert a string // value into an array index. static const int kMaxArrayIndexSize = 10; + // Maximum number of characters that might be parsed into a size_t: + // 10 characters per 32 bits of size_t width. + // We choose this as large as possible (rather than MAX_SAFE_INTEGER range) + // because TypedArray accesses will treat all string keys that are + // canonical representations of numbers in the range [MAX_SAFE_INTEGER .. + // size_t::max] as out-of-bounds accesses, and we can handle those in the + // fast path if we tag them as such (see kIsNotIntegerIndexMask). 
+ static const int kMaxIntegerIndexSize = 10 * (sizeof(size_t) / 4); // For strings which are array indexes the hash value has the string length // mixed into the hash, mainly to avoid a hash value of zero which would be @@ -120,7 +129,7 @@ class Name : public TorqueGeneratedName<Name, HeapObject> { // Value of empty hash field indicating that the hash is not computed. static const int kEmptyHashField = - kIsNotArrayIndexMask | kHashNotComputedMask; + kIsNotIntegerIndexMask | kIsNotArrayIndexMask | kHashNotComputedMask; protected: static inline bool IsHashFieldComputed(uint32_t field); diff --git a/chromium/v8/src/objects/object-list-macros.h b/chromium/v8/src/objects/object-list-macros.h index d5bce62d433..09b1bdc5f05 100644 --- a/chromium/v8/src/objects/object-list-macros.h +++ b/chromium/v8/src/objects/object-list-macros.h @@ -41,6 +41,7 @@ class HeapNumber; class ObjectHashTable; class ObjectTemplateInfo; class ObjectVisitor; +class OSROptimizedCodeCache; class PreparseData; class PropertyArray; class PropertyCell; @@ -138,12 +139,14 @@ class ZoneForwardList; V(JSCollection) \ V(JSCollectionIterator) \ V(JSContextExtensionObject) \ + V(JSCustomElementsObject) \ V(JSDataView) \ V(JSDate) \ V(JSError) \ V(JSFinalizationGroup) \ V(JSFinalizationGroupCleanupIterator) \ V(JSFunction) \ + V(JSFunctionOrBoundFunction) \ V(JSGeneratorObject) \ V(JSGlobalObject) \ V(JSGlobalProxy) \ @@ -158,10 +161,12 @@ class ZoneForwardList; V(JSReceiver) \ V(JSRegExp) \ V(JSRegExpResult) \ + V(JSRegExpResultIndices) \ V(JSRegExpStringIterator) \ V(JSSet) \ V(JSSetIterator) \ V(JSSloppyArgumentsObject) \ + V(JSSpecialObject) \ V(JSStringIterator) \ V(JSTypedArray) \ V(JSWeakCollection) \ @@ -185,7 +190,9 @@ class ZoneForwardList; V(OrderedHashMap) \ V(OrderedHashSet) \ V(OrderedNameDictionary) \ + V(OSROptimizedCodeCache) \ V(PreparseData) \ + V(PrimitiveHeapObject) \ V(PromiseReactionJobTask) \ V(PropertyArray) \ V(PropertyCell) \ @@ -225,6 +232,7 @@ class ZoneForwardList; V(Undetectable) \ V(UniqueName) \ V(WasmExceptionObject) \ + V(WasmExceptionPackage) \ V(WasmGlobalObject) \ V(WasmInstanceObject) \ V(WasmMemoryObject) \ diff --git a/chromium/v8/src/objects/objects-body-descriptors-inl.h b/chromium/v8/src/objects/objects-body-descriptors-inl.h index 4c980b2697c..68164fdce67 100644 --- a/chromium/v8/src/objects/objects-body-descriptors-inl.h +++ b/chromium/v8/src/objects/objects-body-descriptors-inl.h @@ -913,7 +913,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) { return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3, p4); case JS_OBJECT_TYPE: case JS_ERROR_TYPE: - case JS_ARGUMENTS_TYPE: + case JS_ARGUMENTS_OBJECT_TYPE: case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE: case JS_PROMISE_TYPE: case JS_CONTEXT_EXTENSION_OBJECT_TYPE: @@ -933,8 +933,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) { case JS_MAP_KEY_VALUE_ITERATOR_TYPE: case JS_MAP_VALUE_ITERATOR_TYPE: case JS_STRING_ITERATOR_TYPE: - case JS_REGEXP_STRING_ITERATOR_TYPE: - case JS_REGEXP_TYPE: + case JS_REG_EXP_STRING_ITERATOR_TYPE: + case JS_REG_EXP_TYPE: case JS_GLOBAL_PROXY_TYPE: case JS_GLOBAL_OBJECT_TYPE: case JS_API_OBJECT_TYPE: @@ -944,24 +944,24 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) { case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE: case JS_FINALIZATION_GROUP_TYPE: #ifdef V8_INTL_SUPPORT - case JS_INTL_V8_BREAK_ITERATOR_TYPE: - case JS_INTL_COLLATOR_TYPE: - case JS_INTL_DATE_TIME_FORMAT_TYPE: - case 
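[Editorial note, illustration only] With the new kIsNotIntegerIndexMask bit, the hash field reserves three low flag bits before the hash proper, and kEmptyHashField now sets all three; kMaxIntegerIndexSize works out to 20 characters on a 64-bit target (10 per 32 bits of size_t). A small stand-alone sketch showing the constants copied from the header above plus invented helper names for testing the flag bits:

#include <cstdio>
#include <cstdint>

constexpr int kHashNotComputedMask = 1;
constexpr int kIsNotArrayIndexMask = 1 << 1;
constexpr int kIsNotIntegerIndexMask = 1 << 2;
constexpr int kNofHashBitFields = 3;
constexpr int kHashShift = kNofHashBitFields;
constexpr int kEmptyHashField =
    kIsNotIntegerIndexMask | kIsNotArrayIndexMask | kHashNotComputedMask;
constexpr int kMaxIntegerIndexSize = 10 * (sizeof(size_t) / 4);  // 20 on 64-bit

// Helper names are illustrative, not V8 API.
bool HashIsComputed(uint32_t field) { return (field & kHashNotComputedMask) == 0; }
bool MayBeArrayIndex(uint32_t field) { return (field & kIsNotArrayIndexMask) == 0; }
bool MayBeIntegerIndex(uint32_t field) { return (field & kIsNotIntegerIndexMask) == 0; }

int main() {
  std::printf("empty hash field = 0x%x, hash shift = %d, max integer index chars = %d\n",
              static_cast<unsigned>(kEmptyHashField), kHashShift, kMaxIntegerIndexSize);
  std::printf("empty field may be integer index: %d\n",
              static_cast<int>(MayBeIntegerIndex(kEmptyHashField)));
}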
JS_INTL_LIST_FORMAT_TYPE: - case JS_INTL_LOCALE_TYPE: - case JS_INTL_NUMBER_FORMAT_TYPE: - case JS_INTL_PLURAL_RULES_TYPE: - case JS_INTL_RELATIVE_TIME_FORMAT_TYPE: - case JS_INTL_SEGMENT_ITERATOR_TYPE: - case JS_INTL_SEGMENTER_TYPE: + case JS_V8_BREAK_ITERATOR_TYPE: + case JS_COLLATOR_TYPE: + case JS_DATE_TIME_FORMAT_TYPE: + case JS_LIST_FORMAT_TYPE: + case JS_LOCALE_TYPE: + case JS_NUMBER_FORMAT_TYPE: + case JS_PLURAL_RULES_TYPE: + case JS_RELATIVE_TIME_FORMAT_TYPE: + case JS_SEGMENT_ITERATOR_TYPE: + case JS_SEGMENTER_TYPE: #endif // V8_INTL_SUPPORT - case WASM_EXCEPTION_TYPE: - case WASM_GLOBAL_TYPE: - case WASM_MEMORY_TYPE: - case WASM_MODULE_TYPE: - case WASM_TABLE_TYPE: + case WASM_EXCEPTION_OBJECT_TYPE: + case WASM_GLOBAL_OBJECT_TYPE: + case WASM_MEMORY_OBJECT_TYPE: + case WASM_MODULE_OBJECT_TYPE: + case WASM_TABLE_OBJECT_TYPE: return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4); - case WASM_INSTANCE_TYPE: + case WASM_INSTANCE_OBJECT_TYPE: return Op::template apply<WasmInstanceObject::BodyDescriptor>(p1, p2, p3, p4); case JS_WEAK_MAP_TYPE: diff --git a/chromium/v8/src/objects/objects-definitions.h b/chromium/v8/src/objects/objects-definitions.h index b346b5b7d15..53354014e9c 100644 --- a/chromium/v8/src/objects/objects-definitions.h +++ b/chromium/v8/src/objects/objects-definitions.h @@ -32,15 +32,7 @@ namespace internal { // instance_types that are less than those of all other types: // HeapObject::Size, HeapObject::IterateBody, the typeof operator, and // Object::IsString. -// -// NOTE: Everything following JS_PRIMITIVE_WRAPPER_TYPE is considered a -// JSObject for GC purposes. The first four entries here have typeof -// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'. -// -// NOTE: List had to be split into two, because of conditional item(s) from -// INTL namespace. They can't just be appended to the end, because of the -// checks we do in tests (expecting JS_FUNCTION_TYPE to be last). 
-#define INSTANCE_TYPE_LIST_BEFORE_INTL(V) \ +#define INSTANCE_TYPE_LIST_BASE(V) \ V(INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \ @@ -58,191 +50,11 @@ namespace internal { V(SLICED_ONE_BYTE_STRING_TYPE) \ V(THIN_ONE_BYTE_STRING_TYPE) \ V(UNCACHED_EXTERNAL_STRING_TYPE) \ - V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \ - \ - V(SYMBOL_TYPE) \ - V(HEAP_NUMBER_TYPE) \ - V(BIGINT_TYPE) \ - V(ODDBALL_TYPE) \ - \ - V(MAP_TYPE) \ - V(CODE_TYPE) \ - V(FOREIGN_TYPE) \ - V(BYTE_ARRAY_TYPE) \ - V(BYTECODE_ARRAY_TYPE) \ - V(FREE_SPACE_TYPE) \ - \ - V(FIXED_DOUBLE_ARRAY_TYPE) \ - V(FEEDBACK_METADATA_TYPE) \ - V(FILLER_TYPE) \ - \ - V(ACCESS_CHECK_INFO_TYPE) \ - V(ACCESSOR_INFO_TYPE) \ - V(ACCESSOR_PAIR_TYPE) \ - V(ALIASED_ARGUMENTS_ENTRY_TYPE) \ - V(ALLOCATION_MEMENTO_TYPE) \ - V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \ - V(ASM_WASM_DATA_TYPE) \ - V(ASYNC_GENERATOR_REQUEST_TYPE) \ - V(CLASS_POSITIONS_TYPE) \ - V(DEBUG_INFO_TYPE) \ - V(ENUM_CACHE_TYPE) \ - V(FUNCTION_TEMPLATE_INFO_TYPE) \ - V(FUNCTION_TEMPLATE_RARE_DATA_TYPE) \ - V(INTERCEPTOR_INFO_TYPE) \ - V(INTERPRETER_DATA_TYPE) \ - V(OBJECT_TEMPLATE_INFO_TYPE) \ - V(PROMISE_CAPABILITY_TYPE) \ - V(PROMISE_REACTION_TYPE) \ - V(PROTOTYPE_INFO_TYPE) \ - V(SCRIPT_TYPE) \ - V(SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE) \ - V(SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE) \ - V(STACK_FRAME_INFO_TYPE) \ - V(STACK_TRACE_FRAME_TYPE) \ - V(TEMPLATE_OBJECT_DESCRIPTION_TYPE) \ - V(TUPLE2_TYPE) \ - V(TUPLE3_TYPE) \ - V(WASM_CAPI_FUNCTION_DATA_TYPE) \ - V(WASM_DEBUG_INFO_TYPE) \ - V(WASM_EXCEPTION_TAG_TYPE) \ - V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \ - V(WASM_INDIRECT_FUNCTION_TABLE_TYPE) \ - V(WASM_JS_FUNCTION_DATA_TYPE) \ - \ - V(CALLABLE_TASK_TYPE) \ - V(CALLBACK_TASK_TYPE) \ - V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \ - V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \ - V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \ - \ - TORQUE_DEFINED_INSTANCE_TYPES(V) \ - \ - V(SOURCE_TEXT_MODULE_TYPE) \ - V(SYNTHETIC_MODULE_TYPE) \ - \ - V(ALLOCATION_SITE_TYPE) \ - V(EMBEDDER_DATA_ARRAY_TYPE) \ - \ - V(FIXED_ARRAY_TYPE) \ - V(OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \ - V(CLOSURE_FEEDBACK_CELL_ARRAY_TYPE) \ - V(HASH_TABLE_TYPE) \ - V(ORDERED_HASH_MAP_TYPE) \ - V(ORDERED_HASH_SET_TYPE) \ - V(ORDERED_NAME_DICTIONARY_TYPE) \ - V(NAME_DICTIONARY_TYPE) \ - V(GLOBAL_DICTIONARY_TYPE) \ - V(NUMBER_DICTIONARY_TYPE) \ - V(SIMPLE_NUMBER_DICTIONARY_TYPE) \ - V(STRING_TABLE_TYPE) \ - V(EPHEMERON_HASH_TABLE_TYPE) \ - V(SCOPE_INFO_TYPE) \ - V(SCRIPT_CONTEXT_TABLE_TYPE) \ - \ - V(AWAIT_CONTEXT_TYPE) \ - V(BLOCK_CONTEXT_TYPE) \ - V(CATCH_CONTEXT_TYPE) \ - V(DEBUG_EVALUATE_CONTEXT_TYPE) \ - V(EVAL_CONTEXT_TYPE) \ - V(FUNCTION_CONTEXT_TYPE) \ - V(MODULE_CONTEXT_TYPE) \ - V(NATIVE_CONTEXT_TYPE) \ - V(SCRIPT_CONTEXT_TYPE) \ - V(WITH_CONTEXT_TYPE) \ - \ - V(WEAK_FIXED_ARRAY_TYPE) \ - V(TRANSITION_ARRAY_TYPE) \ - \ - V(CALL_HANDLER_INFO_TYPE) \ - V(CELL_TYPE) \ - V(CODE_DATA_CONTAINER_TYPE) \ - V(DESCRIPTOR_ARRAY_TYPE) \ - V(FEEDBACK_CELL_TYPE) \ - V(FEEDBACK_VECTOR_TYPE) \ - V(LOAD_HANDLER_TYPE) \ - V(PREPARSE_DATA_TYPE) \ - V(PROPERTY_ARRAY_TYPE) \ - V(PROPERTY_CELL_TYPE) \ - V(SHARED_FUNCTION_INFO_TYPE) \ - V(SMALL_ORDERED_HASH_MAP_TYPE) \ - V(SMALL_ORDERED_HASH_SET_TYPE) \ - V(SMALL_ORDERED_NAME_DICTIONARY_TYPE) \ - V(STORE_HANDLER_TYPE) \ - V(UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \ - V(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE) \ - V(WEAK_ARRAY_LIST_TYPE) \ - V(WEAK_CELL_TYPE) \ - \ - V(JS_PROXY_TYPE) \ - V(JS_GLOBAL_OBJECT_TYPE) \ - 
V(JS_GLOBAL_PROXY_TYPE) \ - V(JS_MODULE_NAMESPACE_TYPE) \ - V(JS_SPECIAL_API_OBJECT_TYPE) \ - V(JS_PRIMITIVE_WRAPPER_TYPE) \ - V(JS_API_OBJECT_TYPE) \ - V(JS_OBJECT_TYPE) \ - \ - V(JS_ARGUMENTS_TYPE) \ - V(JS_ARRAY_BUFFER_TYPE) \ - V(JS_ARRAY_ITERATOR_TYPE) \ - V(JS_ARRAY_TYPE) \ - V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \ - V(JS_ASYNC_FUNCTION_OBJECT_TYPE) \ - V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \ - V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \ - V(JS_DATE_TYPE) \ - V(JS_ERROR_TYPE) \ - V(JS_GENERATOR_OBJECT_TYPE) \ - V(JS_MAP_TYPE) \ - V(JS_MAP_KEY_ITERATOR_TYPE) \ - V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \ - V(JS_MAP_VALUE_ITERATOR_TYPE) \ - V(JS_MESSAGE_OBJECT_TYPE) \ - V(JS_PROMISE_TYPE) \ - V(JS_REGEXP_TYPE) \ - V(JS_REGEXP_STRING_ITERATOR_TYPE) \ - V(JS_SET_TYPE) \ - V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \ - V(JS_SET_VALUE_ITERATOR_TYPE) \ - V(JS_STRING_ITERATOR_TYPE) \ - V(JS_WEAK_REF_TYPE) \ - V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \ - V(JS_FINALIZATION_GROUP_TYPE) \ - V(JS_WEAK_MAP_TYPE) \ - V(JS_WEAK_SET_TYPE) \ - V(JS_TYPED_ARRAY_TYPE) \ - V(JS_DATA_VIEW_TYPE) + V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) -#define INSTANCE_TYPE_LIST_AFTER_INTL(V) \ - V(WASM_EXCEPTION_TYPE) \ - V(WASM_GLOBAL_TYPE) \ - V(WASM_INSTANCE_TYPE) \ - V(WASM_MEMORY_TYPE) \ - V(WASM_MODULE_TYPE) \ - V(WASM_TABLE_TYPE) \ - V(JS_BOUND_FUNCTION_TYPE) \ - V(JS_FUNCTION_TYPE) - -#ifdef V8_INTL_SUPPORT -#define INSTANCE_TYPE_LIST(V) \ - INSTANCE_TYPE_LIST_BEFORE_INTL(V) \ - V(JS_INTL_V8_BREAK_ITERATOR_TYPE) \ - V(JS_INTL_COLLATOR_TYPE) \ - V(JS_INTL_DATE_TIME_FORMAT_TYPE) \ - V(JS_INTL_LIST_FORMAT_TYPE) \ - V(JS_INTL_LOCALE_TYPE) \ - V(JS_INTL_NUMBER_FORMAT_TYPE) \ - V(JS_INTL_PLURAL_RULES_TYPE) \ - V(JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \ - V(JS_INTL_SEGMENT_ITERATOR_TYPE) \ - V(JS_INTL_SEGMENTER_TYPE) \ - INSTANCE_TYPE_LIST_AFTER_INTL(V) -#else -#define INSTANCE_TYPE_LIST(V) \ - INSTANCE_TYPE_LIST_BEFORE_INTL(V) \ - INSTANCE_TYPE_LIST_AFTER_INTL(V) -#endif // V8_INTL_SUPPORT +#define INSTANCE_TYPE_LIST(V) \ + INSTANCE_TYPE_LIST_BASE(V) \ + TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(V) // Since string types are not consecutive, this macro is used to // iterate over them. @@ -290,11 +102,20 @@ namespace internal { // code for the class including allocation and garbage collection routines, // casts and predicates. All you need to define is the class, methods and // object verification routines. Easy, no? -// -// Note that for subtle reasons related to the ordering or numerical values of -// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST -// manually. 
-#define STRUCT_LIST_GENERATOR(V, _) \ +#define STRUCT_LIST_GENERATOR_BASE(V, _) \ + V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \ + promise_fulfill_reaction_job_task) \ + V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \ + promise_reject_reaction_job_task) \ + V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \ + V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \ + V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \ + promise_resolve_thenable_job_task) \ + V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \ + function_template_info) \ + V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \ + V(_, TUPLE2_TYPE, Tuple2, tuple2) \ + V(_, TUPLE3_TYPE, Tuple3, tuple3) \ V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \ V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \ V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \ @@ -309,13 +130,10 @@ namespace internal { V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \ V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \ V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \ - V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \ - function_template_info) \ V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \ function_template_rare_data) \ V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \ V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \ - V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \ V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \ V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \ V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \ @@ -328,8 +146,6 @@ namespace internal { V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \ V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \ template_object_description) \ - V(_, TUPLE2_TYPE, Tuple2, tuple2) \ - V(_, TUPLE3_TYPE, Tuple3, tuple3) \ V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \ wasm_capi_function_data) \ V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \ @@ -338,32 +154,24 @@ namespace internal { wasm_exported_function_data) \ V(_, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \ wasm_indirect_function_table) \ - V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) \ - V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \ - V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \ - V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \ - promise_fulfill_reaction_job_task) \ - V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \ - promise_reject_reaction_job_task) \ - V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \ - promise_resolve_thenable_job_task) + V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) + +#define STRUCT_LIST_GENERATOR(V, _) \ + STRUCT_LIST_GENERATOR_BASE(V, _) \ + TORQUE_STRUCT_LIST_GENERATOR(V, _) // Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry #define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name) // Produces (NAME, Name, name) entries. 
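[Editorial note, illustration only] STRUCT_LIST_GENERATOR and its *_ADAPTER macros follow the classic X-macro pattern: one master list takes a caller-supplied macro V plus an extra parameter, and small adapter macros reshape each (NAME, Name, name) entry for different consumers such as the struct list and the struct-maps list. A toy example of the same pattern, deliberately unrelated to the real V8 lists:

#include <cstdio>

// Toy master list in the STRUCT_LIST_GENERATOR style.
#define TOY_LIST_GENERATOR(V, _)            \
  V(_, TUPLE_TWO_TYPE, TupleTwo, tuple_two) \
  V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache)

// Adapter that keeps only the enum constant, mirroring STRUCT_LIST_ADAPTER.
#define TOY_ENUM_ADAPTER(V, NAME, Name, name) V(NAME)
#define TOY_ENUM_LIST(V) TOY_LIST_GENERATOR(TOY_ENUM_ADAPTER, V)

#define DEFINE_ENUM_ENTRY(NAME) NAME,
enum ToyInstanceType { TOY_ENUM_LIST(DEFINE_ENUM_ENTRY) NUM_TOY_TYPES };
#undef DEFINE_ENUM_ENTRY

int main() { std::printf("%d toy types\n", static_cast<int>(NUM_TOY_TYPES)); }

The change above simply splits the master list into a hand-written base plus a Torque-generated tail, so the adapters themselves no longer need to mention the Torque list twice.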
-#define STRUCT_LIST(V) \ - STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V) \ - TORQUE_STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V) +#define STRUCT_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V) // Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_MAPS_LIST entry #define STRUCT_MAPS_LIST_ADAPTER(V, NAME, Name, name) \ V(Map, name##_map, Name##Map) // Produces (Map, struct_name_map, StructNameMap) entries -#define STRUCT_MAPS_LIST(V) \ - STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V) \ - TORQUE_STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V) +#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V) // // The following macros define list of allocation size objects and list of diff --git a/chromium/v8/src/objects/objects-inl.h b/chromium/v8/src/objects/objects-inl.h index cf8c3ffad25..08f4a2b6f09 100644 --- a/chromium/v8/src/objects/objects-inl.h +++ b/chromium/v8/src/objects/objects-inl.h @@ -350,6 +350,13 @@ DEF_GETTER(HeapObject, IsDependentCode, bool) { return true; } +DEF_GETTER(HeapObject, IsOSROptimizedCodeCache, bool) { + if (!IsWeakFixedArray(isolate)) return false; + // There's actually no way to see the difference between a weak fixed array + // and a osr optimized code cache. + return true; +} + DEF_GETTER(HeapObject, IsAbstractCode, bool) { return IsBytecodeArray(isolate) || IsCode(isolate); } @@ -411,6 +418,12 @@ DEF_GETTER(HeapObject, IsSmallOrderedHashTable, bool) { IsSmallOrderedNameDictionary(isolate); } +DEF_GETTER(HeapObject, IsWasmExceptionPackage, bool) { + // It is not possible to check for the existence of certain properties on the + // underlying {JSReceiver} here because that requires calling handlified code. + return IsJSReceiver(isolate); +} + bool Object::IsPrimitive() const { if (IsSmi()) return true; HeapObject this_heap_object = HeapObject::cast(*this); @@ -506,7 +519,7 @@ bool Object::IsMinusZero() const { OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray) OBJECT_CONSTRUCTORS_IMPL(ScopeInfo, FixedArray) -OBJECT_CONSTRUCTORS_IMPL(BigIntBase, HeapObject) +OBJECT_CONSTRUCTORS_IMPL(BigIntBase, PrimitiveHeapObject) OBJECT_CONSTRUCTORS_IMPL(BigInt, BigIntBase) OBJECT_CONSTRUCTORS_IMPL(FreshlyAllocatedBigInt, BigIntBase) @@ -756,11 +769,13 @@ void HeapObject::set_map(Map value) { #endif } set_map_word(MapWord::FromMap(value)); +#ifndef V8_DISABLE_WRITE_BARRIERS if (!value.is_null()) { // TODO(1600) We are passing kNullAddress as a slot because maps can never // be on an evacuation candidate. MarkingBarrier(*this, ObjectSlot(kNullAddress), value); } +#endif } DEF_GETTER(HeapObject, synchronized_map, Map) { @@ -774,11 +789,13 @@ void HeapObject::synchronized_set_map(Map value) { #endif } synchronized_set_map_word(MapWord::FromMap(value)); +#ifndef V8_DISABLE_WRITE_BARRIERS if (!value.is_null()) { // TODO(1600) We are passing kNullAddress as a slot because maps can never // be on an evacuation candidate. MarkingBarrier(*this, ObjectSlot(kNullAddress), value); } +#endif } // Unsafe accessor omitting write barrier. @@ -793,12 +810,14 @@ void HeapObject::set_map_no_write_barrier(Map value) { void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) { set_map_word(MapWord::FromMap(value)); +#ifndef V8_DISABLE_WRITE_BARRIERS if (mode != SKIP_WRITE_BARRIER) { DCHECK(!value.is_null()); // TODO(1600) We are passing kNullAddress as a slot because maps can never // be on an evacuation candidate. 
MarkingBarrier(*this, ObjectSlot(kNullAddress), value); } +#endif } ObjectSlot HeapObject::map_slot() const { diff --git a/chromium/v8/src/objects/objects.cc b/chromium/v8/src/objects/objects.cc index 134cb3998a5..227cff8da47 100644 --- a/chromium/v8/src/objects/objects.cc +++ b/chromium/v8/src/objects/objects.cc @@ -65,6 +65,7 @@ #include "src/objects/lookup-inl.h" #include "src/objects/map-updater.h" #include "src/objects/objects-body-descriptors-inl.h" +#include "src/objects/property-details.h" #include "src/utils/identity-map.h" #ifdef V8_INTL_SUPPORT #include "src/objects/js-break-iterator.h" @@ -1770,7 +1771,7 @@ bool Object::IterationHasObservableEffects() { // Check that the ArrayPrototype hasn't been modified in a way that would // affect iteration. - if (!isolate->IsArrayIteratorLookupChainIntact()) return true; + if (!Protectors::IsArrayIteratorLookupChainIntact(isolate)) return true; // For FastPacked kinds, iteration will have the same effect as simply // accessing each property in order. @@ -1781,7 +1782,7 @@ bool Object::IterationHasObservableEffects() { // the prototype. This could have different results if the prototype has been // changed. if (IsHoleyElementsKind(array_kind) && - isolate->IsNoElementsProtectorIntact()) { + Protectors::IsNoElementsIntact(isolate)) { return false; } return true; @@ -2188,7 +2189,8 @@ int HeapObject::SizeFromMap(Map map) const { } if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) { if (instance_type == NATIVE_CONTEXT_TYPE) return NativeContext::kSize; - return Context::SizeFor(Context::unchecked_cast(*this).length()); + return Context::SizeFor( + Context::unchecked_cast(*this).synchronized_length()); } if (instance_type == ONE_BYTE_STRING_TYPE || instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) { @@ -2378,7 +2380,7 @@ bool HeapObject::IsExternal(Isolate* isolate) const { void DescriptorArray::GeneralizeAllFields() { int length = number_of_descriptors(); - for (int i = 0; i < length; i++) { + for (InternalIndex i : InternalIndex::Range(length)) { PropertyDetails details = GetDetails(i); details = details.CopyWithRepresentation(Representation::Tagged()); if (details.location() == kField) { @@ -3717,7 +3719,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes( DescriptorArray::Allocate(isolate, size, slack); if (attributes != NONE) { - for (int i = 0; i < size; ++i) { + for (InternalIndex i : InternalIndex::Range(size)) { MaybeObject value_or_field_type = desc->GetValue(i); Name key = desc->GetKey(i); PropertyDetails details = desc->GetDetails(i); @@ -3737,7 +3739,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes( descriptors->Set(i, key, value_or_field_type, details); } } else { - for (int i = 0; i < size; ++i) { + for (InternalIndex i : InternalIndex::Range(size)) { descriptors->CopyFrom(i, *desc); } } @@ -3760,21 +3762,17 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone( Handle<DescriptorArray> descriptors = DescriptorArray::Allocate(isolate, size, slack); - for (int i = 0; i < size; ++i) { + for (InternalIndex i : InternalIndex::Range(size)) { Name key = src->GetKey(i); PropertyDetails details = src->GetDetails(i); + Representation new_representation = details.representation(); DCHECK(!key.IsPrivateName()); DCHECK(details.IsEnumerable()); DCHECK_EQ(details.kind(), kData); - - // Ensure the ObjectClone property details are NONE, and that all source - // details did not contain DONT_ENUM. 
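[Editorial note, illustration only] IterationHasObservableEffects now consults the dedicated Protectors helpers instead of Isolate methods: iteration is only treated as side-effect free when the array-iterator protector is intact, and holey element kinds additionally require the no-elements protector. A condensed stand-alone illustration of that decision logic, with the protector checks stubbed out as plain booleans:

// Illustrative only; the booleans stand in for
// Protectors::IsArrayIteratorLookupChainIntact and Protectors::IsNoElementsIntact,
// and the packed/holey distinction is collapsed into a single flag.
bool IterationHasObservableEffectsSketch(bool array_iterator_protector_intact,
                                         bool elements_kind_is_holey,
                                         bool no_elements_protector_intact) {
  // A patched Array.prototype[Symbol.iterator] makes iteration observable.
  if (!array_iterator_protector_intact) return true;
  // Packed fast kinds behave like a plain indexed walk.
  if (!elements_kind_is_holey) return false;
  // Holey kinds are still fine as long as holes cannot be materialized from a
  // modified prototype chain.
  if (no_elements_protector_intact) return false;
  return true;
}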
- PropertyDetails new_details(kData, NONE, details.location(), - details.constness(), details.representation(), - details.field_index()); - // Do not propagate the field type of normal object fields from the - // original descriptors since FieldType changes don't create new maps. + // If the new representation is an in-place changeable field, make it + // generic as possible (under in-place changes) to avoid type confusion if + // the source representation changes after this feedback has been collected. MaybeObject type = src->GetValue(i); if (details.location() == PropertyLocation::kField) { type = MaybeObject::FromObject(FieldType::Any()); @@ -3783,13 +3781,15 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone( // need to generalize the descriptors here. That will also enable // us to skip the defensive copying of the target map whenever a // CloneObjectIC misses. - if (FLAG_modify_field_representation_inplace && - (new_details.representation().IsSmi() || - new_details.representation().IsHeapObject())) { - new_details = - new_details.CopyWithRepresentation(Representation::Tagged()); - } + new_representation = new_representation.MostGenericInPlaceChange(); } + + // Ensure the ObjectClone property details are NONE, and that all source + // details did not contain DONT_ENUM. + PropertyDetails new_details(kData, NONE, details.location(), + details.constness(), new_representation, + details.field_index()); + descriptors->Set(i, key, type, new_details); } @@ -3799,7 +3799,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone( } bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) { - for (int i = 0; i < nof_descriptors; i++) { + for (InternalIndex i : InternalIndex::Range(nof_descriptors)) { if (GetKey(i) != desc.GetKey(i) || GetValue(i) != desc.GetValue(i)) { return false; } @@ -3816,8 +3816,7 @@ bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) { Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate, Handle<FixedArray> array, int index, - Handle<Object> value, - AllocationType allocation) { + Handle<Object> value) { if (index < array->length()) { array->set(index, *value); return array; @@ -3827,7 +3826,7 @@ Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate, capacity = JSObject::NewElementsCapacity(capacity); } while (capacity <= index); Handle<FixedArray> new_array = - isolate->factory()->NewUninitializedFixedArray(capacity, allocation); + isolate->factory()->NewUninitializedFixedArray(capacity); array->CopyTo(0, *new_array, 0, array->length()); new_array->FillWithHoles(array->length(), new_array->length()); new_array->set(index, *value); @@ -3952,6 +3951,20 @@ Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate, return array; } +Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate, + Handle<WeakArrayList> array, + const MaybeObjectHandle& value1, + const MaybeObjectHandle& value2) { + int length = array->length(); + array = EnsureSpace(isolate, array, length + 2); + // Reload length; GC might have removed elements from the array. 
+ length = array->length(); + array->Set(length, *value1); + array->Set(length + 1, *value2); + array->set_length(length + 2); + return array; +} + bool WeakArrayList::IsFull() { return length() == capacity(); } // static @@ -4147,12 +4160,10 @@ Handle<FrameArray> FrameArray::EnsureSpace(Isolate* isolate, Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate, int nof_descriptors, - int slack, - AllocationType allocation) { + int slack) { return nof_descriptors + slack == 0 ? isolate->factory()->empty_descriptor_array() - : isolate->factory()->NewDescriptorArray(nof_descriptors, slack, - allocation); + : isolate->factory()->NewDescriptorArray(nof_descriptors, slack); } void DescriptorArray::Initialize(EnumCache enum_cache, @@ -4174,8 +4185,8 @@ void DescriptorArray::ClearEnumCache() { set_enum_cache(GetReadOnlyRoots().empty_enum_cache()); } -void DescriptorArray::Replace(int index, Descriptor* descriptor) { - descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index)); +void DescriptorArray::Replace(InternalIndex index, Descriptor* descriptor) { + descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index.as_int())); Set(index, descriptor); } @@ -4193,7 +4204,7 @@ void DescriptorArray::InitializeOrChangeEnumCache( } } -void DescriptorArray::CopyFrom(int index, DescriptorArray src) { +void DescriptorArray::CopyFrom(InternalIndex index, DescriptorArray src) { PropertyDetails details = src.GetDetails(index); Set(index, src.GetKey(index), src.GetValue(index), details); } @@ -4304,7 +4315,7 @@ bool DescriptorArray::IsEqualTo(DescriptorArray other) { if (number_of_all_descriptors() != other.number_of_all_descriptors()) { return false; } - for (int i = 0; i < number_of_descriptors(); ++i) { + for (InternalIndex i : InternalIndex::Range(number_of_descriptors())) { if (GetKey(i) != other.GetKey(i)) return false; if (GetDetails(i).AsSmi() != other.GetDetails(i).AsSmi()) return false; if (GetValue(i) != other.GetValue(i)) return false; @@ -4507,6 +4518,7 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) { value |= length << String::ArrayIndexLengthBits::kShift; DCHECK_EQ(value & String::kIsNotArrayIndexMask, 0); + DCHECK_EQ(value & String::kIsNotIntegerIndexMask, 0); DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength, Name::ContainsCachedArrayIndex(value)); return value; @@ -4659,8 +4671,26 @@ bool Script::GetPositionInfo(int position, PositionInfo* info, // directly. 
if (type() == Script::TYPE_WASM) { DCHECK_LE(0, position); - return WasmModuleObject::cast(wasm_module_object()) - .GetPositionInfo(static_cast<uint32_t>(position), info); + wasm::NativeModule* native_module = wasm_native_module(); + const wasm::WasmModule* module = native_module->module(); + if (source_mapping_url().IsString()) { + if (module->functions.size() == 0) return false; + info->line = 0; + info->column = position; + info->line_start = module->functions[0].code.offset(); + info->line_end = module->functions.back().code.end_offset(); + return true; + } + int func_index = GetContainingWasmFunction(module, position); + if (func_index < 0) return false; + + const wasm::WasmFunction& function = module->functions[func_index]; + + info->line = func_index; + info->column = position - function.code.offset(); + info->line_start = function.code.offset(); + info->line_end = function.code.end_offset(); + return true; } if (line_ends().IsUndefined()) { @@ -4972,26 +5002,8 @@ void SharedFunctionInfo::ScriptIterator::Reset(Isolate* isolate, index_ = 0; } -SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate) - : isolate_(isolate), - script_iterator_(isolate), - noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()), - sfi_iterator_(isolate, script_iterator_.Next()) {} - -SharedFunctionInfo SharedFunctionInfo::GlobalIterator::Next() { - HeapObject next = noscript_sfi_iterator_.Next(); - if (!next.is_null()) return SharedFunctionInfo::cast(next); - for (;;) { - next = sfi_iterator_.Next(); - if (!next.is_null()) return SharedFunctionInfo::cast(next); - Script next_script = script_iterator_.Next(); - if (next_script.is_null()) return SharedFunctionInfo(); - sfi_iterator_.Reset(isolate_, next_script); - } -} - void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared, - Handle<Object> script_object, + Handle<HeapObject> script_object, int function_literal_id, bool reset_preparsed_scope_data) { if (shared->script() == *script_object) return; @@ -5020,30 +5032,8 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared, } #endif list->Set(function_literal_id, HeapObjectReference::Weak(*shared)); - - // Remove shared function info from root array. - WeakArrayList noscript_list = - isolate->heap()->noscript_shared_function_infos(); - CHECK(noscript_list.RemoveOne(MaybeObjectHandle::Weak(shared))); } else { DCHECK(shared->script().IsScript()); - Handle<WeakArrayList> list = - isolate->factory()->noscript_shared_function_infos(); - -#ifdef DEBUG - if (FLAG_enable_slow_asserts) { - WeakArrayList::Iterator iterator(*list); - for (HeapObject next = iterator.Next(); !next.is_null(); - next = iterator.Next()) { - DCHECK_NE(next, *shared); - } - } -#endif // DEBUG - - list = - WeakArrayList::AddToEnd(isolate, list, MaybeObjectHandle::Weak(shared)); - - isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list); // Remove shared function info from old script's list. Script old_script = Script::cast(shared->script()); @@ -5354,6 +5344,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral( Scope* outer_scope = lit->scope()->GetOuterScopeWithContext(); if (outer_scope) { shared_info->set_outer_scope_info(*outer_scope->scope_info()); + shared_info->set_private_name_lookup_skips_outer_class( + lit->scope()->private_name_lookup_skips_outer_class()); } } @@ -5669,9 +5661,10 @@ bool JSArray::HasReadOnlyLength(Handle<JSArray> array) { // Fast path: "length" is the first fast property of arrays. 
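[Editorial note, illustration only] For Wasm scripts, GetPositionInfo now derives positions straight from the native module: the reported "line" is the function index and the "column" is the byte offset within that function's code, with a degenerate single-line mapping when a source mapping URL is present. A simplified stand-alone model of that convention, where the function table and lookup are assumptions and only the line/column meaning mirrors the diff:

#include <vector>

struct FuncRange { int begin; int end; };  // byte offsets of one wasm function body

struct PositionInfoSketch { int line; int column; int line_start; int line_end; };

bool GetWasmPositionInfoSketch(const std::vector<FuncRange>& functions,
                               int position, PositionInfoSketch* info) {
  for (std::size_t func_index = 0; func_index < functions.size(); ++func_index) {
    const FuncRange& f = functions[func_index];
    if (position >= f.begin && position < f.end) {
      info->line = static_cast<int>(func_index);  // "line" = function index
      info->column = position - f.begin;          // "column" = offset in function
      info->line_start = f.begin;
      info->line_end = f.end;
      return true;
    }
  }
  return false;  // position not inside any function body
}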
Since it's not // configurable, it's guaranteed to be the first in the descriptor array. if (!map.is_dictionary_map()) { - DCHECK(map.instance_descriptors().GetKey(0) == + InternalIndex first(0); + DCHECK(map.instance_descriptors().GetKey(first) == array->GetReadOnlyRoots().length_string()); - return map.instance_descriptors().GetDetails(0).IsReadOnly(); + return map.instance_descriptors().GetDetails(first).IsReadOnly(); } Isolate* isolate = array->GetIsolate(); @@ -5927,17 +5920,25 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise, // 8. Let then be Get(resolution, "then"). MaybeHandle<Object> then; - if (isolate->IsPromiseThenLookupChainIntact( - Handle<JSReceiver>::cast(resolution))) { + Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(resolution)); + + // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the + // initial %PromisePrototype% yields the initial method. In addition this + // protector also guards the negative lookup of "then" on the intrinsic + // %ObjectPrototype%, meaning that such lookups are guaranteed to yield + // undefined without triggering any side-effects. + if (receiver->IsJSPromise() && + isolate->IsInAnyContext(receiver->map().prototype(), + Context::PROMISE_PROTOTYPE_INDEX) && + Protectors::IsPromiseThenLookupChainIntact(isolate)) { // We can skip the "then" lookup on {resolution} if its [[Prototype]] // is the (initial) Promise.prototype and the Promise#then protector // is intact, as that guards the lookup path for the "then" property // on JSPromise instances which have the (initial) %PromisePrototype%. then = isolate->promise_then(); } else { - then = - JSReceiver::GetProperty(isolate, Handle<JSReceiver>::cast(resolution), - isolate->factory()->then_string()); + then = JSReceiver::GetProperty(isolate, receiver, + isolate->factory()->then_string()); } // 9. If then is an abrupt completion, then @@ -6151,27 +6152,40 @@ bool JSRegExp::ShouldProduceBytecode() { } // An irregexp is considered to be marked for tier up if the tier-up ticks value -// is not zero. An atom is not subject to tier-up implementation, so the tier-up -// ticks value is not set. +// reaches zero. An atom is not subject to tier-up implementation, so the +// tier-up ticks value is not set. 
bool JSRegExp::MarkedForTierUp() { DCHECK(data().IsFixedArray()); - if (TypeTag() == JSRegExp::ATOM) { + if (TypeTag() == JSRegExp::ATOM || !FLAG_regexp_tier_up) { return false; } - return Smi::ToInt(DataAt(kIrregexpTierUpTicksIndex)) != 0; + return Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex)) == 0; +} + +void JSRegExp::ResetLastTierUpTick() { + DCHECK(FLAG_regexp_tier_up); + DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP); + int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex)) + 1; + FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex, + Smi::FromInt(tier_up_ticks)); } -void JSRegExp::ResetTierUp() { +void JSRegExp::TierUpTick() { DCHECK(FLAG_regexp_tier_up); DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP); - FixedArray::cast(data()).set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero); + int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex)); + if (tier_up_ticks == 0) { + return; + } + FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex, + Smi::FromInt(tier_up_ticks - 1)); } void JSRegExp::MarkTierUpForNextExec() { DCHECK(FLAG_regexp_tier_up); DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP); - FixedArray::cast(data()).set(JSRegExp::kIrregexpTierUpTicksIndex, - Smi::FromInt(1)); + FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex, + Smi::kZero); } namespace { @@ -6938,7 +6952,7 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry, if (entry < 0) { // Copy old optimized code map and append one new entry. new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow( - old_literals_map, kLiteralEntryLength, AllocationType::kOld); + old_literals_map, kLiteralEntryLength); entry = old_literals_map->length(); } } @@ -7312,8 +7326,13 @@ Handle<NumberDictionary> NumberDictionary::Set( Isolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key, Handle<Object> value, Handle<JSObject> dictionary_holder, PropertyDetails details) { - dictionary->UpdateMaxNumberKey(key, dictionary_holder); - return AtPut(isolate, dictionary, key, value, details); + // We could call Set with empty dictionaries. UpdateMaxNumberKey doesn't + // expect empty dictionaries so make sure to call AtPut that correctly handles + // them by creating new dictionary when required. 
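[Editorial note, illustration only] The regexp tier-up bookkeeping above is inverted by this change: the data array now stores "ticks until tier-up", TierUpTick counts that value down, MarkedForTierUp fires once it reaches zero (and only when --regexp-tier-up is enabled), MarkTierUpForNextExec zeroes it immediately, and ResetLastTierUpTick pushes the deadline out by one. A stand-alone sketch of that countdown protocol, where the class itself is invented and only the semantics mirror the JSRegExp changes:

class TierUpCounterSketch {
 public:
  explicit TierUpCounterSketch(int initial_ticks)
      : ticks_until_tier_up_(initial_ticks) {}

  // Called on each interpreted execution; counts down toward tier-up.
  void TierUpTick() {
    if (ticks_until_tier_up_ == 0) return;  // already due
    --ticks_until_tier_up_;
  }

  // Force recompilation with the optimizing backend on the next execution.
  void MarkTierUpForNextExec() { ticks_until_tier_up_ = 0; }

  bool MarkedForTierUp() const { return ticks_until_tier_up_ == 0; }

  // Undo the most recent tick, pushing the tier-up deadline out by one.
  void ResetLastTierUpTick() { ++ticks_until_tier_up_; }

 private:
  int ticks_until_tier_up_;
};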
+ Handle<NumberDictionary> new_dictionary = + AtPut(isolate, dictionary, key, value, details); + new_dictionary->UpdateMaxNumberKey(key, dictionary_holder); + return new_dictionary; } void NumberDictionary::CopyValuesTo(FixedArray elements) { @@ -7898,9 +7917,6 @@ void PropertyCell::SetValueWithInvalidation(Isolate* isolate, Handle<PropertyCell> cell, Handle<Object> new_value) { if (cell->value() != *new_value) { - if (FLAG_trace_protector_invalidation) { - isolate->TraceProtectorInvalidation(cell_name); - } cell->set_value(*new_value); cell->dependent_code().DeoptimizeDependentCodeGroup( isolate, DependentCode::kPropertyCellChangedGroup); diff --git a/chromium/v8/src/objects/objects.h b/chromium/v8/src/objects/objects.h index b4e78a19377..f66023456c3 100644 --- a/chromium/v8/src/objects/objects.h +++ b/chromium/v8/src/objects/objects.h @@ -46,19 +46,22 @@ // - JSArrayBufferView // - JSTypedArray // - JSDataView -// - JSBoundFunction // - JSCollection // - JSSet // - JSMap +// - JSCustomElementsObject (may have elements despite empty FixedArray) +// - JSSpecialObject (requires custom property lookup handling) +// - JSGlobalObject +// - JSGlobalProxy +// - JSModuleNamespace +// - JSPrimitiveWrapper // - JSDate -// - JSFunction +// - JSFunctionOrBoundFunction +// - JSBoundFunction +// - JSFunction // - JSGeneratorObject -// - JSGlobalObject -// - JSGlobalProxy // - JSMapIterator // - JSMessageObject -// - JSModuleNamespace -// - JSPrimitiveWrapper // - JSRegExp // - JSSetIterator // - JSStringIterator @@ -104,30 +107,32 @@ // - ScriptContextTable // - ClosureFeedbackCellArray // - FixedDoubleArray -// - Name -// - String -// - SeqString -// - SeqOneByteString -// - SeqTwoByteString -// - SlicedString -// - ConsString -// - ThinString -// - ExternalString -// - ExternalOneByteString -// - ExternalTwoByteString -// - InternalizedString -// - SeqInternalizedString -// - SeqOneByteInternalizedString -// - SeqTwoByteInternalizedString -// - ConsInternalizedString -// - ExternalInternalizedString -// - ExternalOneByteInternalizedString -// - ExternalTwoByteInternalizedString -// - Symbol +// - PrimitiveHeapObject +// - BigInt +// - HeapNumber +// - Name +// - String +// - SeqString +// - SeqOneByteString +// - SeqTwoByteString +// - SlicedString +// - ConsString +// - ThinString +// - ExternalString +// - ExternalOneByteString +// - ExternalTwoByteString +// - InternalizedString +// - SeqInternalizedString +// - SeqOneByteInternalizedString +// - SeqTwoByteInternalizedString +// - ConsInternalizedString +// - ExternalInternalizedString +// - ExternalOneByteInternalizedString +// - ExternalTwoByteInternalizedString +// - Symbol +// - Oddball // - Context // - NativeContext -// - HeapNumber -// - BigInt // - Cell // - DescriptorArray // - PropertyCell @@ -135,7 +140,6 @@ // - Code // - AbstractCode, a wrapper around Code or BytecodeArray // - Map -// - Oddball // - Foreign // - SmallOrderedHashTable // - SmallOrderedHashMap @@ -607,15 +611,13 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> { // For use with std::unordered_set. struct Hasher { size_t operator()(const Object o) const { - return std::hash<v8::internal::Address>{}(o.ptr()); + return std::hash<v8::internal::Address>{}(static_cast<Tagged_t>(o.ptr())); } }; // For use with std::map. 
struct Comparer { - bool operator()(const Object a, const Object b) const { - return a.ptr() < b.ptr(); - } + bool operator()(const Object a, const Object b) const { return a < b; } }; template <class T, typename std::enable_if<std::is_arithmetic<T>::value, @@ -784,7 +786,8 @@ enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER }; enum class GetKeysConversion { kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers), - kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString) + kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString), + kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers) }; enum class KeyCollectionMode { diff --git a/chromium/v8/src/objects/oddball.h b/chromium/v8/src/objects/oddball.h index 025f9379ba9..e88d96624e9 100644 --- a/chromium/v8/src/objects/oddball.h +++ b/chromium/v8/src/objects/oddball.h @@ -5,8 +5,7 @@ #ifndef V8_OBJECTS_ODDBALL_H_ #define V8_OBJECTS_ODDBALL_H_ -#include "src/objects/heap-object.h" -#include "torque-generated/class-definitions-tq.h" +#include "src/objects/primitive-heap-object.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -15,7 +14,7 @@ namespace v8 { namespace internal { // The Oddball describes objects null, undefined, true, and false. -class Oddball : public TorqueGeneratedOddball<Oddball, HeapObject> { +class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> { public: // [to_number_raw]: Cached raw to_number computed at startup. inline void set_to_number_raw_as_bits(uint64_t bits); diff --git a/chromium/v8/src/objects/ordered-hash-table-inl.h b/chromium/v8/src/objects/ordered-hash-table-inl.h index a2270b0a4a1..f6b8f972a98 100644 --- a/chromium/v8/src/objects/ordered-hash-table-inl.h +++ b/chromium/v8/src/objects/ordered-hash-table-inl.h @@ -164,10 +164,18 @@ inline bool OrderedHashMap::Is(Handle<HeapObject> table) { return table->IsOrderedHashMap(); } +inline bool OrderedNameDictionary::Is(Handle<HeapObject> table) { + return table->IsOrderedNameDictionary(); +} + inline bool SmallOrderedHashSet::Is(Handle<HeapObject> table) { return table->IsSmallOrderedHashSet(); } +inline bool SmallOrderedNameDictionary::Is(Handle<HeapObject> table) { + return table->IsSmallOrderedNameDictionary(); +} + inline bool SmallOrderedHashMap::Is(Handle<HeapObject> table) { return table->IsSmallOrderedHashMap(); } diff --git a/chromium/v8/src/objects/ordered-hash-table.cc b/chromium/v8/src/objects/ordered-hash-table.cc index dda848f0100..962224024ea 100644 --- a/chromium/v8/src/objects/ordered-hash-table.cc +++ b/chromium/v8/src/objects/ordered-hash-table.cc @@ -168,8 +168,8 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray( for (int i = 0; i < length; i++) { int index = HashTableStartIndex() + nof_buckets + (i * kEntrySize); Object key = table->get(index); + uint32_t index_value; if (convert == GetKeysConversion::kConvertToString) { - uint32_t index_value; if (key.ToArrayIndex(&index_value)) { // Avoid trashing the Number2String cache if indices get very large. 
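[Editorial note, illustration only] The Object::Hasher and Comparer definitions above let Object be used directly in std::unordered_set and std::map; the new cast to Tagged_t presumably makes the hash depend only on the compressed tagged value (32 bits when pointer compression is enabled) rather than the full decompressed address. A generic stand-alone illustration of the same idiom using a plain tagged-pointer stand-in rather than v8::internal::Object:

#include <cstdint>
#include <functional>
#include <unordered_set>

struct TaggedPtr {
  std::uintptr_t ptr;
  bool operator==(const TaggedPtr& other) const { return ptr == other.ptr; }
  bool operator<(const TaggedPtr& other) const { return ptr < other.ptr; }

  struct Hasher {
    std::size_t operator()(const TaggedPtr& o) const {
      // Hash only the low 32 bits, mirroring the cast to Tagged_t above, so
      // the hash is independent of the upper (uncompressed) address bits.
      return std::hash<std::uint32_t>{}(static_cast<std::uint32_t>(o.ptr));
    }
  };
};

int main() {
  std::unordered_set<TaggedPtr, TaggedPtr::Hasher> set;
  set.insert(TaggedPtr{0x1234});
  return set.count(TaggedPtr{0x1234}) == 1 ? 0 : 1;
}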
bool use_cache = i < kMaxStringTableEntries; @@ -177,6 +177,8 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray( } else { CHECK(key.IsName()); } + } else if (convert == GetKeysConversion::kNoNumbers) { + DCHECK(!key.ToArrayIndex(&index_value)); } result->set(i, key); } @@ -959,15 +961,15 @@ OrderedHashTableHandler<SmallOrderedNameDictionary, template <class SmallTable, class LargeTable> bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete( - Handle<HeapObject> table, Handle<Object> key) { + Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) { if (SmallTable::Is(table)) { - return SmallTable::Delete(Handle<SmallTable>::cast(table), key); + return SmallTable::Delete(isolate, *Handle<SmallTable>::cast(table), *key); } DCHECK(LargeTable::Is(table)); // Note: Once we migrate to the a big hash table, we never migrate // down to a smaller hash table. - return LargeTable::Delete(Handle<LargeTable>::cast(table), key); + return LargeTable::Delete(isolate, *Handle<LargeTable>::cast(table), *key); } template <class SmallTable, class LargeTable> @@ -988,6 +990,18 @@ template bool OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::HasKey( Isolate* isolate, Handle<HeapObject> table, Handle<Object> key); +template bool +OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::Delete( + Isolate* isolate, Handle<HeapObject> table, Handle<Object> key); +template bool +OrderedHashTableHandler<SmallOrderedHashMap, OrderedHashMap>::Delete( + Isolate* isolate, Handle<HeapObject> table, Handle<Object> key); +template bool +OrderedHashTableHandler<SmallOrderedNameDictionary, + OrderedNameDictionary>::Delete(Isolate* isolate, + Handle<HeapObject> table, + Handle<Object> key); + MaybeHandle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation( Isolate* isolate, Handle<SmallOrderedHashMap> table) { MaybeHandle<OrderedHashMap> new_table_candidate = diff --git a/chromium/v8/src/objects/ordered-hash-table.h b/chromium/v8/src/objects/ordered-hash-table.h index 21decaeba72..590846f1302 100644 --- a/chromium/v8/src/objects/ordered-hash-table.h +++ b/chromium/v8/src/objects/ordered-hash-table.h @@ -658,7 +658,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) OrderedHashTableHandler { using Entry = int; static MaybeHandle<HeapObject> Allocate(Isolate* isolate, int capacity); - static bool Delete(Handle<HeapObject> table, Handle<Object> key); + static bool Delete(Isolate* isolate, Handle<HeapObject> table, + Handle<Object> key); static bool HasKey(Isolate* isolate, Handle<HeapObject> table, Handle<Object> key); @@ -730,6 +731,7 @@ class OrderedNameDictionary static HeapObject GetEmpty(ReadOnlyRoots ro_roots); static inline RootIndex GetMapRootIndex(); + static inline bool Is(Handle<HeapObject> table); static const int kValueOffset = 1; static const int kPropertyDetailsOffset = 2; @@ -831,6 +833,7 @@ class SmallOrderedNameDictionary Object value, PropertyDetails details); static inline RootIndex GetMapRootIndex(); + static inline bool Is(Handle<HeapObject> table); OBJECT_CONSTRUCTORS(SmallOrderedNameDictionary, SmallOrderedHashTable<SmallOrderedNameDictionary>); diff --git a/chromium/v8/src/objects/osr-optimized-code-cache-inl.h b/chromium/v8/src/objects/osr-optimized-code-cache-inl.h new file mode 100644 index 00000000000..ab7a97b6aa2 --- /dev/null +++ b/chromium/v8/src/objects/osr-optimized-code-cache-inl.h @@ -0,0 +1,25 @@ +// Copyright 2019 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_ +#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_ + +#include "src/objects/osr-optimized-code-cache.h" + +#include "src/objects/fixed-array-inl.h" +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +OBJECT_CONSTRUCTORS_IMPL(OSROptimizedCodeCache, WeakFixedArray) +CAST_ACCESSOR(OSROptimizedCodeCache) + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_ diff --git a/chromium/v8/src/objects/osr-optimized-code-cache.cc b/chromium/v8/src/objects/osr-optimized-code-cache.cc new file mode 100644 index 00000000000..62190529f1b --- /dev/null +++ b/chromium/v8/src/objects/osr-optimized-code-cache.cc @@ -0,0 +1,223 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/execution/isolate-inl.h" +#include "src/objects/code.h" +#include "src/objects/maybe-object.h" +#include "src/objects/shared-function-info.h" + +#include "src/objects/osr-optimized-code-cache.h" + +namespace v8 { +namespace internal { + +const int OSROptimizedCodeCache::kInitialLength; +const int OSROptimizedCodeCache::kMaxLength; + +void OSROptimizedCodeCache::AddOptimizedCode( + Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared, + Handle<Code> code, BailoutId osr_offset) { + DCHECK(!osr_offset.IsNone()); + DCHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); + STATIC_ASSERT(kEntryLength == 3); + Isolate* isolate = native_context->GetIsolate(); + DCHECK(!isolate->serializer_enabled()); + + Handle<OSROptimizedCodeCache> osr_cache( + native_context->GetOSROptimizedCodeCache(), isolate); + + DCHECK_EQ(osr_cache->FindEntry(shared, osr_offset), -1); + int entry = -1; + for (int index = 0; index < osr_cache->length(); index += kEntryLength) { + if (osr_cache->Get(index + kSharedOffset)->IsCleared() || + osr_cache->Get(index + kCachedCodeOffset)->IsCleared()) { + entry = index; + break; + } + } + + if (entry == -1 && osr_cache->length() + kEntryLength <= kMaxLength) { + entry = GrowOSRCache(native_context, &osr_cache); + } else if (entry == -1) { + // We reached max capacity and cannot grow further. Reuse an existing entry. + // TODO(mythria): We could use better mechanisms (like lru) to replace + // existing entries. Though we don't expect this to be a common case, so + // for now choosing to replace the first entry. + entry = 0; + } + + osr_cache->InitializeEntry(entry, *shared, *code, osr_offset); +} + +void OSROptimizedCodeCache::Clear(NativeContext native_context) { + native_context.set_osr_code_cache( + *native_context.GetIsolate()->factory()->empty_weak_fixed_array()); +} + +void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) { + Handle<OSROptimizedCodeCache> osr_cache( + native_context->GetOSROptimizedCodeCache(), native_context->GetIsolate()); + Isolate* isolate = native_context->GetIsolate(); + + // Re-adjust the cache so all the valid entries are on one side. This will + // enable us to compress the cache if needed. 
+ int curr_valid_index = 0; + for (int curr_index = 0; curr_index < osr_cache->length(); + curr_index += kEntryLength) { + if (osr_cache->Get(curr_index + kSharedOffset)->IsCleared() || + osr_cache->Get(curr_index + kCachedCodeOffset)->IsCleared()) { + continue; + } + if (curr_valid_index != curr_index) { + osr_cache->MoveEntry(curr_index, curr_valid_index, isolate); + } + curr_valid_index += kEntryLength; + } + + if (!NeedsTrimming(curr_valid_index, osr_cache->length())) return; + + Handle<OSROptimizedCodeCache> new_osr_cache = + Handle<OSROptimizedCodeCache>::cast(isolate->factory()->NewWeakFixedArray( + CapacityForLength(curr_valid_index), AllocationType::kOld)); + DCHECK_LT(new_osr_cache->length(), osr_cache->length()); + { + DisallowHeapAllocation no_gc; + new_osr_cache->CopyElements(native_context->GetIsolate(), 0, *osr_cache, 0, + new_osr_cache->length(), + new_osr_cache->GetWriteBarrierMode(no_gc)); + } + native_context->set_osr_code_cache(*new_osr_cache); +} + +Code OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared, + BailoutId osr_offset, + Isolate* isolate) { + DisallowHeapAllocation no_gc; + int index = FindEntry(shared, osr_offset); + if (index == -1) return Code(); + Code code = GetCodeFromEntry(index); + if (code.is_null()) { + ClearEntry(index, isolate); + return code; + } + DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization()); + return code; +} + +void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) { + // This is called from DeoptimizeMarkedCodeForContext that uses raw pointers + // and hence the DisallowHeapAllocation scope here. + DisallowHeapAllocation no_gc; + for (int index = 0; index < length(); index += kEntryLength) { + MaybeObject code_entry = Get(index + kCachedCodeOffset); + HeapObject heap_object; + if (!code_entry->GetHeapObject(&heap_object)) continue; + + DCHECK(heap_object.IsCode()); + DCHECK(Code::cast(heap_object).is_optimized_code()); + if (!Code::cast(heap_object).marked_for_deoptimization()) continue; + + ClearEntry(index, isolate); + } +} + +int OSROptimizedCodeCache::GrowOSRCache( + Handle<NativeContext> native_context, + Handle<OSROptimizedCodeCache>* osr_cache) { + Isolate* isolate = native_context->GetIsolate(); + int old_length = (*osr_cache)->length(); + int grow_by = CapacityForLength(old_length) - old_length; + DCHECK_GT(grow_by, kEntryLength); + *osr_cache = Handle<OSROptimizedCodeCache>::cast( + isolate->factory()->CopyWeakFixedArrayAndGrow(*osr_cache, grow_by)); + for (int i = old_length; i < (*osr_cache)->length(); i++) { + (*osr_cache)->Set(i, HeapObjectReference::ClearedValue(isolate)); + } + native_context->set_osr_code_cache(**osr_cache); + + return old_length; +} + +Code OSROptimizedCodeCache::GetCodeFromEntry(int index) { + DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length()); + DCHECK_EQ(index % kEntryLength, 0); + HeapObject code_entry; + Get(index + OSRCodeCacheConstants::kCachedCodeOffset) + ->GetHeapObject(&code_entry); + return code_entry.is_null() ? Code() : Code::cast(code_entry); +} + +SharedFunctionInfo OSROptimizedCodeCache::GetSFIFromEntry(int index) { + DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length()); + DCHECK_EQ(index % kEntryLength, 0); + HeapObject sfi_entry; + Get(index + OSRCodeCacheConstants::kSharedOffset)->GetHeapObject(&sfi_entry); + return sfi_entry.is_null() ? 
SharedFunctionInfo() + : SharedFunctionInfo::cast(sfi_entry); +} + +BailoutId OSROptimizedCodeCache::GetBailoutIdFromEntry(int index) { + DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length()); + DCHECK_EQ(index % kEntryLength, 0); + Smi osr_offset_entry; + Get(index + kOsrIdOffset)->ToSmi(&osr_offset_entry); + return BailoutId(osr_offset_entry.value()); +} + +int OSROptimizedCodeCache::FindEntry(Handle<SharedFunctionInfo> shared, + BailoutId osr_offset) { + DisallowHeapAllocation no_gc; + DCHECK(!osr_offset.IsNone()); + for (int index = 0; index < length(); index += kEntryLength) { + if (GetSFIFromEntry(index) != *shared) continue; + if (GetBailoutIdFromEntry(index) != osr_offset) continue; + return index; + } + return -1; +} + +void OSROptimizedCodeCache::ClearEntry(int index, Isolate* isolate) { + Set(index + OSRCodeCacheConstants::kSharedOffset, + HeapObjectReference::ClearedValue(isolate)); + Set(index + OSRCodeCacheConstants::kCachedCodeOffset, + HeapObjectReference::ClearedValue(isolate)); + Set(index + OSRCodeCacheConstants::kOsrIdOffset, + HeapObjectReference::ClearedValue(isolate)); +} + +void OSROptimizedCodeCache::InitializeEntry(int entry, + SharedFunctionInfo shared, + Code code, BailoutId osr_offset) { + Set(entry + OSRCodeCacheConstants::kSharedOffset, + HeapObjectReference::Weak(shared)); + Set(entry + OSRCodeCacheConstants::kCachedCodeOffset, + HeapObjectReference::Weak(code)); + Set(entry + OSRCodeCacheConstants::kOsrIdOffset, + MaybeObject::FromSmi(Smi::FromInt(osr_offset.ToInt()))); +} + +void OSROptimizedCodeCache::MoveEntry(int src, int dst, Isolate* isolate) { + Set(dst + OSRCodeCacheConstants::kSharedOffset, + Get(src + OSRCodeCacheConstants::kSharedOffset)); + Set(dst + OSRCodeCacheConstants::kCachedCodeOffset, + Get(src + OSRCodeCacheConstants::kCachedCodeOffset)); + Set(dst + OSRCodeCacheConstants::kOsrIdOffset, Get(src + kOsrIdOffset)); + ClearEntry(src, isolate); +} + +int OSROptimizedCodeCache::CapacityForLength(int curr_length) { + // TODO(mythria): This is a randomly chosen heuristic and is not based on any + // data. We may have to tune this later. + if (curr_length == 0) return kInitialLength; + if (curr_length * 2 > kMaxLength) return kMaxLength; + return curr_length * 2; +} + +bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries, + int curr_length) { + return curr_length > kInitialLength && curr_length > num_valid_entries * 3; +} + +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/objects/osr-optimized-code-cache.h b/chromium/v8/src/objects/osr-optimized-code-cache.h new file mode 100644 index 00000000000..99c148a7e18 --- /dev/null +++ b/chromium/v8/src/objects/osr-optimized-code-cache.h @@ -0,0 +1,77 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
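The sizing heuristics just above (CapacityForLength and NeedsTrimming) are simple enough to model in isolation. Below is a minimal standalone sketch rather than V8 code: the constant values mirror OSRCodeCacheConstants from the header that follows (kEntryLength = 3, so 4 initial and 1024 maximum entries), and the two functions reproduce the doubling rule and the shrink-when-less-than-one-third-live rule.

#include <cstdio>

namespace {
constexpr int kEntryLength = 3;                   // shared, code, osr id per entry
constexpr int kInitialLength = kEntryLength * 4;  // 4 entries = 12 slots
constexpr int kMaxLength = kEntryLength * 1024;   // 1024 entries = 3072 slots

int CapacityForLength(int curr_length) {
  if (curr_length == 0) return kInitialLength;
  if (curr_length * 2 > kMaxLength) return kMaxLength;
  return curr_length * 2;  // double until the hard cap is reached
}

bool NeedsTrimming(int num_valid_slots, int curr_length) {
  // Shrink only once more than two thirds of the slots hold cleared entries.
  return curr_length > kInitialLength && curr_length > num_valid_slots * 3;
}
}  // namespace

int main() {
  // Growth sequence for a cache that keeps filling up: 12, 24, 48, ..., 3072.
  for (int len = 0; len < kMaxLength; len = CapacityForLength(len)) {
    std::printf("grow to %d slots\n", CapacityForLength(len));
  }
  // A 48-slot cache holding only 5 live entries (15 slots) gets compacted.
  std::printf("trim? %s\n", NeedsTrimming(5 * kEntryLength, 48) ? "yes" : "no");
  return 0;
}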
+ +#ifndef V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_ +#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_ + +#include "src/objects/fixed-array.h" +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray { + public: + DECL_CAST(OSROptimizedCodeCache) + + enum OSRCodeCacheConstants { + kSharedOffset, + kCachedCodeOffset, + kOsrIdOffset, + kEntryLength + }; + + static const int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4; + static const int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024; + + // Caches the optimized code |code| corresponding to the shared function + // |shared| and bailout id |osr_offset| in the OSROptimized code cache. + // If the OSR code cache wasn't created before it creates a code cache with + // kOSRCodeCacheInitialLength entries. + static void AddOptimizedCode(Handle<NativeContext> context, + Handle<SharedFunctionInfo> shared, + Handle<Code> code, BailoutId osr_offset); + // Reduces the size of the OSR code cache if the number of valid entries are + // less than the current capacity of the cache. + static void Compact(Handle<NativeContext> context); + // Sets the OSR optimized code cache to an empty array. + static void Clear(NativeContext context); + + // Returns the code corresponding to the shared function |shared| and + // BailoutId |offset| if an entry exists in the cache. Returns an empty + // object otherwise. + Code GetOptimizedCode(Handle<SharedFunctionInfo> shared, BailoutId osr_offset, + Isolate* isolate); + + // Remove all code objects marked for deoptimization from OSR code cache. + void EvictMarkedCode(Isolate* isolate); + + private: + // Functions that implement heuristics on when to grow / shrink the cache. + static int CapacityForLength(int curr_capacity); + static bool NeedsTrimming(int num_valid_entries, int curr_capacity); + static int GrowOSRCache(Handle<NativeContext> native_context, + Handle<OSROptimizedCodeCache>* osr_cache); + + // Helper functions to get individual items from an entry in the cache. + Code GetCodeFromEntry(int index); + SharedFunctionInfo GetSFIFromEntry(int index); + BailoutId GetBailoutIdFromEntry(int index); + + inline int FindEntry(Handle<SharedFunctionInfo> shared, BailoutId osr_offset); + inline void ClearEntry(int src, Isolate* isolate); + inline void InitializeEntry(int entry, SharedFunctionInfo shared, Code code, + BailoutId osr_offset); + inline void MoveEntry(int src, int dst, Isolate* isolate); + + OBJECT_CONSTRUCTORS(OSROptimizedCodeCache, WeakFixedArray); +}; + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_ diff --git a/chromium/v8/src/objects/primitive-heap-object-inl.h b/chromium/v8/src/objects/primitive-heap-object-inl.h new file mode 100644 index 00000000000..2c694bd1d6d --- /dev/null +++ b/chromium/v8/src/objects/primitive-heap-object-inl.h @@ -0,0 +1,26 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
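The cache declared above is a flat WeakFixedArray in which entry i occupies the three slots starting at i * kEntryLength: a weak SharedFunctionInfo, a weak Code object and a Smi-encoded OSR bailout id. The following is a small illustrative sketch, not the V8 implementation, of the Compact() strategy under that layout, with plain ints standing in for weak slots (0 meaning "cleared").

#include <vector>

constexpr int kEntryLen = 3;  // slot 0: shared, slot 1: code, slot 2: osr id

// Moves every live entry (non-zero shared and code slots) to the front while
// preserving order, clears the vacated slots, and returns the live slot count.
int CompactInPlace(std::vector<int>& slots) {
  int valid = 0;
  for (int i = 0; i + kEntryLen <= static_cast<int>(slots.size()); i += kEntryLen) {
    if (slots[i] == 0 || slots[i + 1] == 0) continue;  // cleared entry, skip it
    if (valid != i) {
      for (int k = 0; k < kEntryLen; ++k) {
        slots[valid + k] = slots[i + k];
        slots[i + k] = 0;  // like MoveEntry(), the source entry is cleared
      }
    }
    valid += kEntryLen;
  }
  return valid;  // the caller would reallocate if NeedsTrimming() fires
}

This mirrors the two-pointer loop in Compact() earlier in the .cc file; the real code additionally copies the surviving prefix into a freshly allocated, smaller WeakFixedArray when NeedsTrimming() returns true.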
+ +#ifndef V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_INL_H_ +#define V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_INL_H_ + +#include "src/objects/primitive-heap-object.h" + +#include "src/objects/heap-object-inl.h" +#include "torque-generated/class-definitions-tq-inl.h" + +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +TQ_OBJECT_CONSTRUCTORS_IMPL(PrimitiveHeapObject) + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_INL_H_ diff --git a/chromium/v8/src/objects/primitive-heap-object.h b/chromium/v8/src/objects/primitive-heap-object.h new file mode 100644 index 00000000000..9bd13cafc90 --- /dev/null +++ b/chromium/v8/src/objects/primitive-heap-object.h @@ -0,0 +1,33 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_ +#define V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_ + +#include "src/objects/heap-object.h" +#include "torque-generated/class-definitions-tq.h" + +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +// An abstract superclass for classes representing JavaScript primitive values +// other than Smi. It doesn't carry any functionality but allows primitive +// classes to be identified in the type system. +class PrimitiveHeapObject + : public TorqueGeneratedPrimitiveHeapObject<PrimitiveHeapObject, + HeapObject> { + public: + STATIC_ASSERT(kHeaderSize == HeapObject::kHeaderSize); + TQ_OBJECT_CONSTRUCTORS(PrimitiveHeapObject) +}; + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_ diff --git a/chromium/v8/src/objects/property-descriptor.cc b/chromium/v8/src/objects/property-descriptor.cc index b3b05deceb7..c5cfe8c9a9d 100644 --- a/chromium/v8/src/objects/property-descriptor.cc +++ b/chromium/v8/src/objects/property-descriptor.cc @@ -58,7 +58,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj, if (map.is_dictionary_map()) return false; Handle<DescriptorArray> descs = Handle<DescriptorArray>(map.instance_descriptors(), isolate); - for (int i = 0; i < map.NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : map.IterateOwnDescriptors()) { PropertyDetails details = descs->GetDetails(i); Name key = descs->GetKey(i); Handle<Object> value; diff --git a/chromium/v8/src/objects/property-details.h b/chromium/v8/src/objects/property-details.h index e350fe2c278..51318f475af 100644 --- a/chromium/v8/src/objects/property-details.h +++ b/chromium/v8/src/objects/property-details.h @@ -112,7 +112,19 @@ class Representation { // smi and tagged values. Doubles, however, would require a box allocation. if (IsNone()) return !other.IsDouble(); if (!FLAG_modify_field_representation_inplace) return false; - return (IsSmi() || IsHeapObject()) && other.IsTagged(); + return (IsSmi() || (!FLAG_unbox_double_fields && IsDouble()) || + IsHeapObject()) && + other.IsTagged(); + } + + // Return the most generic representation that this representation can be + // changed to in-place. If in-place representation changes are disabled, then + // this will return the current representation. 
+ Representation MostGenericInPlaceChange() const { + if (!FLAG_modify_field_representation_inplace) return *this; + // Everything but unboxed doubles can be in-place changed to Tagged. + if (FLAG_unbox_double_fields && IsDouble()) return Representation::Double(); + return Representation::Tagged(); } bool is_more_general_than(const Representation& other) const { diff --git a/chromium/v8/src/objects/scope-info.cc b/chromium/v8/src/objects/scope-info.cc index c390298b5d2..65a26e5d98f 100644 --- a/chromium/v8/src/objects/scope-info.cc +++ b/chromium/v8/src/objects/scope-info.cc @@ -138,6 +138,10 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, const bool has_brand = scope->is_class_scope() ? scope->AsClassScope()->brand() != nullptr : false; + const bool should_save_class_variable_index = + scope->is_class_scope() + ? scope->AsClassScope()->should_save_class_variable_index() + : false; const bool has_function_name = function_name_info != NONE; const bool has_position_info = NeedsPositionInfo(scope->scope_type()); const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT; @@ -146,7 +150,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, ? scope->AsDeclarationScope()->num_parameters() : 0; const bool has_outer_scope_info = !outer_scope.is_null(); + const int length = kVariablePartIndex + 2 * context_local_count + + (should_save_class_variable_index ? 1 : 0) + (has_receiver ? 1 : 0) + (has_function_name ? kFunctionNameEntries : 0) + (has_inferred_function_name ? 1 : 0) + @@ -187,6 +193,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, DeclarationScopeField::encode(scope->is_declaration_scope()) | ReceiverVariableField::encode(receiver_info) | HasClassBrandField::encode(has_brand) | + HasSavedClassVariableIndexField::encode( + should_save_class_variable_index) | HasNewTargetField::encode(has_new_target) | FunctionVariableField::encode(function_name_info) | HasInferredFunctionNameField::encode(has_inferred_function_name) | @@ -196,7 +204,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, HasOuterScopeInfoField::encode(has_outer_scope_info) | IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope()) | ForceContextAllocationField::encode( - scope->ForceContextForLanguageMode()); + scope->ForceContextForLanguageMode()) | + PrivateNameLookupSkipsOuterClassField::encode( + scope->private_name_lookup_skips_outer_class()); scope_info.SetFlags(flags); scope_info.SetParameterCount(parameter_count); @@ -220,7 +230,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, VariableModeField::encode(var->mode()) | InitFlagField::encode(var->initialization_flag()) | MaybeAssignedFlagField::encode(var->maybe_assigned()) | - ParameterNumberField::encode(ParameterNumberField::kMax); + ParameterNumberField::encode(ParameterNumberField::kMax) | + IsStaticFlagField::encode(var->is_static_flag()); scope_info.set(context_local_base + local_index, *var->name(), mode); scope_info.set(context_local_info_base + local_index, Smi::FromInt(info)); @@ -235,7 +246,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, VariableModeField::encode(var->mode()) | InitFlagField::encode(var->initialization_flag()) | MaybeAssignedFlagField::encode(var->maybe_assigned()) | - ParameterNumberField::encode(ParameterNumberField::kMax); + ParameterNumberField::encode(ParameterNumberField::kMax) | + 
IsStaticFlagField::encode(var->is_static_flag()); scope_info.set(module_var_entry + kModuleVariablePropertiesOffset, Smi::FromInt(properties)); module_var_entry += kModuleVariableEntryLength; @@ -273,7 +285,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, VariableModeField::encode(var->mode()) | InitFlagField::encode(var->initialization_flag()) | MaybeAssignedFlagField::encode(var->maybe_assigned()) | - ParameterNumberField::encode(ParameterNumberField::kMax); + ParameterNumberField::encode(ParameterNumberField::kMax) | + IsStaticFlagField::encode(var->is_static_flag()); scope_info.set(context_local_base + local_index, *var->name(), mode); scope_info.set(context_local_info_base + local_index, Smi::FromInt(info)); @@ -283,6 +296,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope, index += 2 * context_local_count; + DCHECK_EQ(index, scope_info.SavedClassVariableInfoIndex()); + // If the scope is a class scope and has used static private methods, save + // the context slot index of the class variable. + // Store the class variable index. + if (should_save_class_variable_index) { + Variable* class_variable = scope->AsClassScope()->class_variable(); + DCHECK_EQ(class_variable->location(), VariableLocation::CONTEXT); + scope_info.set(index++, Smi::FromInt(class_variable->index())); + } + // If the receiver is allocated, add its index. DCHECK_EQ(index, scope_info.ReceiverInfoIndex()); if (has_receiver) { @@ -362,11 +385,14 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope( LanguageModeField::encode(LanguageMode::kSloppy) | DeclarationScopeField::encode(false) | ReceiverVariableField::encode(NONE) | HasClassBrandField::encode(false) | + HasSavedClassVariableIndexField::encode(false) | HasNewTargetField::encode(false) | FunctionVariableField::encode(NONE) | IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) | FunctionKindField::encode(kNormalFunction) | HasOuterScopeInfoField::encode(has_outer_scope_info) | - IsDebugEvaluateScopeField::encode(false); + IsDebugEvaluateScopeField::encode(false) | + ForceContextAllocationField::encode(false) | + PrivateNameLookupSkipsOuterClassField::encode(false); scope_info->SetFlags(flags); scope_info->SetParameterCount(0); @@ -425,13 +451,17 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate, LanguageModeField::encode(LanguageMode::kSloppy) | DeclarationScopeField::encode(true) | ReceiverVariableField::encode(is_empty_function ? UNUSED : CONTEXT) | - HasClassBrandField::encode(false) | HasNewTargetField::encode(false) | + HasClassBrandField::encode(false) | + HasSavedClassVariableIndexField::encode(false) | + HasNewTargetField::encode(false) | FunctionVariableField::encode(is_empty_function ? 
UNUSED : NONE) | HasInferredFunctionNameField::encode(has_inferred_function_name) | IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) | FunctionKindField::encode(FunctionKind::kNormalFunction) | HasOuterScopeInfoField::encode(false) | - IsDebugEvaluateScopeField::encode(false); + IsDebugEvaluateScopeField::encode(false) | + ForceContextAllocationField::encode(false) | + PrivateNameLookupSkipsOuterClassField::encode(false); scope_info->SetFlags(flags); scope_info->SetParameterCount(parameter_count); scope_info->SetContextLocalCount(context_local_count); @@ -449,7 +479,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate, VariableModeField::encode(VariableMode::kConst) | InitFlagField::encode(kCreatedInitialized) | MaybeAssignedFlagField::encode(kNotAssigned) | - ParameterNumberField::encode(ParameterNumberField::kMax); + ParameterNumberField::encode(ParameterNumberField::kMax) | + IsStaticFlagField::encode(IsStaticFlag::kNotStatic); scope_info->set(index++, Smi::FromInt(value)); } @@ -549,6 +580,10 @@ bool ScopeInfo::HasClassBrand() const { return HasClassBrandField::decode(Flags()); } +bool ScopeInfo::HasSavedClassVariableIndex() const { + return HasSavedClassVariableIndexField::decode(Flags()); +} + bool ScopeInfo::HasNewTarget() const { return HasNewTargetField::decode(Flags()); } @@ -608,6 +643,11 @@ void ScopeInfo::SetIsDebugEvaluateScope() { } } +bool ScopeInfo::PrivateNameLookupSkipsOuterClass() const { + if (length() == 0) return false; + return PrivateNameLookupSkipsOuterClassField::decode(Flags()); +} + bool ScopeInfo::HasContext() const { return ContextLength() > 0; } Object ScopeInfo::FunctionName() const { @@ -674,6 +714,14 @@ VariableMode ScopeInfo::ContextLocalMode(int var) const { return VariableModeField::decode(value); } +IsStaticFlag ScopeInfo::ContextLocalIsStaticFlag(int var) const { + DCHECK_LE(0, var); + DCHECK_LT(var, ContextLocalCount()); + int info_index = ContextLocalInfosIndex() + var; + int value = Smi::ToInt(get(info_index)); + return IsStaticFlagField::decode(value); +} + InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) const { DCHECK_LE(0, var); DCHECK_LT(var, ContextLocalCount()); @@ -744,7 +792,8 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode, int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name, VariableMode* mode, InitializationFlag* init_flag, - MaybeAssignedFlag* maybe_assigned_flag) { + MaybeAssignedFlag* maybe_assigned_flag, + IsStaticFlag* is_static_flag) { DisallowHeapAllocation no_gc; DCHECK(name.IsInternalizedString()); DCHECK_NOT_NULL(mode); @@ -759,6 +808,7 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name, if (name != scope_info.get(i)) continue; int var = i - start; *mode = scope_info.ContextLocalMode(var); + *is_static_flag = scope_info.ContextLocalIsStaticFlag(var); *init_flag = scope_info.ContextLocalInitFlag(var); *maybe_assigned_flag = scope_info.ContextLocalMaybeAssignedFlag(var); int result = Context::MIN_CONTEXT_SLOTS + var; @@ -770,6 +820,14 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name, return -1; } +int ScopeInfo::SavedClassVariableContextLocalIndex() const { + if (length() > 0 && HasSavedClassVariableIndexField::decode(Flags())) { + int index = Smi::ToInt(get(SavedClassVariableInfoIndex())); + return index - Context::MIN_CONTEXT_SLOTS; + } + return -1; +} + int ScopeInfo::ReceiverContextSlotIndex() const { if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT) { return 
Smi::ToInt(get(ReceiverInfoIndex())); @@ -801,10 +859,14 @@ int ScopeInfo::ContextLocalInfosIndex() const { return ContextLocalNamesIndex() + ContextLocalCount(); } -int ScopeInfo::ReceiverInfoIndex() const { +int ScopeInfo::SavedClassVariableInfoIndex() const { return ContextLocalInfosIndex() + ContextLocalCount(); } +int ScopeInfo::ReceiverInfoIndex() const { + return SavedClassVariableInfoIndex() + (HasSavedClassVariableIndex() ? 1 : 0); +} + int ScopeInfo::FunctionNameInfoIndex() const { return ReceiverInfoIndex() + (HasAllocatedReceiver() ? 1 : 0); } @@ -879,9 +941,10 @@ std::ostream& operator<<(std::ostream& os, } Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New( - Isolate* isolate, Handle<HeapObject> export_name, - Handle<HeapObject> local_name, Handle<HeapObject> import_name, - int module_request, int cell_index, int beg_pos, int end_pos) { + Isolate* isolate, Handle<PrimitiveHeapObject> export_name, + Handle<PrimitiveHeapObject> local_name, + Handle<PrimitiveHeapObject> import_name, int module_request, int cell_index, + int beg_pos, int end_pos) { Handle<SourceTextModuleInfoEntry> result = Handle<SourceTextModuleInfoEntry>::cast(isolate->factory()->NewStruct( SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, AllocationType::kOld)); diff --git a/chromium/v8/src/objects/scope-info.h b/chromium/v8/src/objects/scope-info.h index 123b9b17973..aa63cf2998e 100644 --- a/chromium/v8/src/objects/scope-info.h +++ b/chromium/v8/src/objects/scope-info.h @@ -72,6 +72,10 @@ class ScopeInfo : public FixedArray { // Does this scope has class brand (for private methods)? bool HasClassBrand() const; + // Does this scope contains a saved class variable context local slot index + // for checking receivers of static private methods? + bool HasSavedClassVariableIndex() const; + // Does this scope declare a "new.target" binding? bool HasNewTarget() const; @@ -121,6 +125,9 @@ class ScopeInfo : public FixedArray { // Return the mode of the given context local. VariableMode ContextLocalMode(int var) const; + // Return whether the given context local variable is static. + IsStaticFlag ContextLocalIsStaticFlag(int var) const; + // Return the initialization flag of the given context local. InitializationFlag ContextLocalInitFlag(int var) const; @@ -141,7 +148,8 @@ class ScopeInfo : public FixedArray { // mode for that variable. static int ContextSlotIndex(ScopeInfo scope_info, String name, VariableMode* mode, InitializationFlag* init_flag, - MaybeAssignedFlag* maybe_assigned_flag); + MaybeAssignedFlag* maybe_assigned_flag, + IsStaticFlag* is_static_flag); // Lookup metadata of a MODULE-allocated variable. Return 0 if there is no // module variable with the given name (the index value of a MODULE variable @@ -161,6 +169,12 @@ class ScopeInfo : public FixedArray { // context-allocated. Otherwise returns a value < 0. int ReceiverContextSlotIndex() const; + // Lookup support for serialized scope info. Returns the index of the + // saved class variable in context local slots if scope is a class scope + // and it contains static private methods that may be accessed. + // Otherwise returns a value < 0. + int SavedClassVariableContextLocalIndex() const; + FunctionKind function_kind() const; // Returns true if this ScopeInfo is linked to a outer ScopeInfo. @@ -176,6 +190,10 @@ class ScopeInfo : public FixedArray { // Return the outer ScopeInfo if present. ScopeInfo OuterScopeInfo() const; + // Returns true if this ScopeInfo was created for a scope that skips the + // closest outer class when resolving private names. 
+ bool PrivateNameLookupSkipsOuterClass() const; + #ifdef DEBUG bool Equals(ScopeInfo other) const; #endif @@ -228,7 +246,8 @@ class ScopeInfo : public FixedArray { using ReceiverVariableField = DeclarationScopeField::Next<VariableAllocationInfo, 2>; using HasClassBrandField = ReceiverVariableField::Next<bool, 1>; - using HasNewTargetField = HasClassBrandField::Next<bool, 1>; + using HasSavedClassVariableIndexField = HasClassBrandField::Next<bool, 1>; + using HasNewTargetField = HasSavedClassVariableIndexField::Next<bool, 1>; using FunctionVariableField = HasNewTargetField::Next<VariableAllocationInfo, 2>; // TODO(cbruni): Combine with function variable field when only storing the @@ -240,6 +259,8 @@ class ScopeInfo : public FixedArray { using HasOuterScopeInfoField = FunctionKindField::Next<bool, 1>; using IsDebugEvaluateScopeField = HasOuterScopeInfoField::Next<bool, 1>; using ForceContextAllocationField = IsDebugEvaluateScopeField::Next<bool, 1>; + using PrivateNameLookupSkipsOuterClassField = + ForceContextAllocationField::Next<bool, 1>; STATIC_ASSERT(kLastFunctionKind <= FunctionKindField::kMax); @@ -256,27 +277,32 @@ class ScopeInfo : public FixedArray { // the context locals in ContextLocalNames. One slot is used per // context local, so in total this part occupies ContextLocalCount() // slots in the array. - // 3. ReceiverInfo: + // 3. SavedClassVariableInfo: + // If the scope is a class scope and it has static private methods that + // may be accessed directly or through eval, one slot is reserved to hold + // the context slot index for the class variable. + // 4. ReceiverInfo: // If the scope binds a "this" value, one slot is reserved to hold the // context or stack slot index for the variable. - // 4. FunctionNameInfo: + // 5. FunctionNameInfo: // If the scope belongs to a named function expression this part contains // information about the function variable. It always occupies two array // slots: a. The name of the function variable. // b. The context or stack slot index for the variable. - // 5. InferredFunctionName: + // 6. InferredFunctionName: // Contains the function's inferred name. - // 6. SourcePosition: + // 7. SourcePosition: // Contains two slots with a) the startPosition and b) the endPosition if // the scope belongs to a function or script. - // 7. OuterScopeInfoIndex: + // 8. OuterScopeInfoIndex: // The outer scope's ScopeInfo or the hole if there's none. - // 8. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables: + // 9. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables: // For a module scope, this part contains the SourceTextModuleInfo, the // number of MODULE-allocated variables, and the metadata of those // variables. For non-module scopes it is empty. 
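The variable-part layout described above is resolved through a chain of index helpers, so the optional SavedClassVariableInfo slot shifts every later index by at most one. A hypothetical worked example (plain C++ with a placeholder base index, not V8 code) for a class scope with two context locals, a saved class-variable slot and an allocated receiver:

#include <cstdio>

int main() {
  const int kVariablePartIndex = 0;   // placeholder; the real base lives in ScopeInfo
  const int context_local_count = 2;  // hypothetical scope with two context locals
  const bool has_saved_class_variable_index = true;
  const bool has_receiver = true;

  int context_local_names = kVariablePartIndex;
  int context_local_infos = context_local_names + context_local_count;
  int saved_class_variable_info = context_local_infos + context_local_count;
  int receiver_info =
      saved_class_variable_info + (has_saved_class_variable_index ? 1 : 0);
  int function_name_info = receiver_info + (has_receiver ? 1 : 0);

  std::printf("names@%d infos@%d saved-class-var@%d receiver@%d fn-name@%d\n",
              context_local_names, context_local_infos,
              saved_class_variable_info, receiver_info, function_name_info);
  // Output: names@0 infos@2 saved-class-var@4 receiver@5 fn-name@6
  return 0;
}

With has_saved_class_variable_index set to false, the receiver slot would land at index 4 instead, which is how the layout behaved before this change.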
int ContextLocalNamesIndex() const; int ContextLocalInfosIndex() const; + int SavedClassVariableInfoIndex() const; int ReceiverInfoIndex() const; int FunctionNameInfoIndex() const; int InferredFunctionNameIndex() const; @@ -310,6 +336,7 @@ class ScopeInfo : public FixedArray { using InitFlagField = VariableModeField::Next<InitializationFlag, 1>; using MaybeAssignedFlagField = InitFlagField::Next<MaybeAssignedFlag, 1>; using ParameterNumberField = MaybeAssignedFlagField::Next<uint32_t, 16>; + using IsStaticFlagField = ParameterNumberField::Next<IsStaticFlag, 1>; friend class ScopeIterator; friend std::ostream& operator<<(std::ostream& os, diff --git a/chromium/v8/src/objects/script-inl.h b/chromium/v8/src/objects/script-inl.h index 07450c73ec5..c306c2c092b 100644 --- a/chromium/v8/src/objects/script-inl.h +++ b/chromium/v8/src/objects/script-inl.h @@ -36,15 +36,17 @@ ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object, this->type() != TYPE_WASM) SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset, this->type() != TYPE_WASM) -ACCESSORS(Script, shared_function_infos, WeakFixedArray, - kSharedFunctionInfosOffset) SMI_ACCESSORS(Script, flags, kFlagsOffset) ACCESSORS(Script, source_url, Object, kSourceUrlOffset) ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset) ACCESSORS(Script, host_defined_options, FixedArray, kHostDefinedOptionsOffset) -ACCESSORS_CHECKED(Script, wasm_module_object, Object, +ACCESSORS_CHECKED(Script, wasm_breakpoint_infos, FixedArray, kEvalFromSharedOrWrappedArgumentsOffset, this->type() == TYPE_WASM) +ACCESSORS_CHECKED(Script, wasm_managed_native_module, Object, + kEvalFromPositionOffset, this->type() == TYPE_WASM) +ACCESSORS_CHECKED(Script, wasm_weak_instance_list, WeakArrayList, + kSharedFunctionInfosOffset, this->type() == TYPE_WASM) bool Script::is_wrapped() const { return eval_from_shared_or_wrapped_arguments().IsFixedArray(); @@ -75,6 +77,28 @@ FixedArray Script::wrapped_arguments() const { return FixedArray::cast(eval_from_shared_or_wrapped_arguments()); } +DEF_GETTER(Script, shared_function_infos, WeakFixedArray) { + return type() == TYPE_WASM + ? ReadOnlyRoots(GetHeap()).empty_weak_fixed_array() + : TaggedField<WeakFixedArray, kSharedFunctionInfosOffset>::load( + *this); +} + +void Script::set_shared_function_infos(WeakFixedArray value, + WriteBarrierMode mode) { + DCHECK_NE(TYPE_WASM, type()); + TaggedField<WeakFixedArray, kSharedFunctionInfosOffset>::store(*this, value); + CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfosOffset, value, mode); +} + +bool Script::has_wasm_breakpoint_infos() const { + return type() == TYPE_WASM && wasm_breakpoint_infos().length() > 0; +} + +wasm::NativeModule* Script::wasm_native_module() const { + return Managed<wasm::NativeModule>::cast(wasm_managed_native_module()).raw(); +} + Script::CompilationType Script::compilation_type() { return BooleanBit::get(flags(), kCompilationTypeBit) ? 
COMPILATION_TYPE_EVAL : COMPILATION_TYPE_HOST; diff --git a/chromium/v8/src/objects/script.h b/chromium/v8/src/objects/script.h index 2d9e4bca781..935241a1492 100644 --- a/chromium/v8/src/objects/script.h +++ b/chromium/v8/src/objects/script.h @@ -5,6 +5,8 @@ #ifndef V8_OBJECTS_SCRIPT_H_ #define V8_OBJECTS_SCRIPT_H_ +#include <memory> + #include "src/objects/fixed-array.h" #include "src/objects/objects.h" #include "src/objects/struct.h" @@ -101,9 +103,21 @@ class Script : public Struct { // [source_mapping_url]: sourceMappingURL magic comment DECL_ACCESSORS(source_mapping_url, Object) - // [wasm_module_object]: the wasm module object this script belongs to. + // [wasm_breakpoint_infos]: the list of {BreakPointInfo} objects describing + // all WebAssembly breakpoints for modules/instances managed via this script. + // This must only be called if the type of this script is TYPE_WASM. + DECL_ACCESSORS(wasm_breakpoint_infos, FixedArray) + inline bool has_wasm_breakpoint_infos() const; + + // [wasm_native_module]: the wasm {NativeModule} this script belongs to. + // This must only be called if the type of this script is TYPE_WASM. + DECL_ACCESSORS(wasm_managed_native_module, Object) + inline wasm::NativeModule* wasm_native_module() const; + + // [wasm_weak_instance_list]: the list of all {WasmInstanceObject} being + // affected by breakpoints that are managed via this script. // This must only be called if the type of this script is TYPE_WASM. - DECL_ACCESSORS(wasm_module_object, Object) + DECL_ACCESSORS(wasm_weak_instance_list, WeakArrayList) // [host_defined_options]: Options defined by the embedder. DECL_ACCESSORS(host_defined_options, FixedArray) diff --git a/chromium/v8/src/objects/shared-function-info-inl.h b/chromium/v8/src/objects/shared-function-info-inl.h index 6023c3b8286..4f12bc4324a 100644 --- a/chromium/v8/src/objects/shared-function-info-inl.h +++ b/chromium/v8/src/objects/shared-function-info-inl.h @@ -21,11 +21,7 @@ namespace v8 { namespace internal { -OBJECT_CONSTRUCTORS_IMPL(PreparseData, HeapObject) - -CAST_ACCESSOR(PreparseData) -INT_ACCESSORS(PreparseData, data_length, kDataLengthOffset) -INT_ACCESSORS(PreparseData, children_length, kInnerLengthOffset) +TQ_OBJECT_CONSTRUCTORS_IMPL(PreparseData) int PreparseData::inner_start_offset() const { return InnerOffset(data_length()); @@ -84,26 +80,9 @@ void PreparseData::set_child(int index, PreparseData value, CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); } -OBJECT_CONSTRUCTORS_IMPL(UncompiledData, HeapObject) -OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData, UncompiledData) -OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData, UncompiledData) -CAST_ACCESSOR(UncompiledData) -ACCESSORS(UncompiledData, inferred_name, String, kInferredNameOffset) -INT32_ACCESSORS(UncompiledData, start_position, kStartPositionOffset) -INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset) - -void UncompiledData::clear_padding() { - if (FIELD_SIZE(kOptionalPaddingOffset) == 0) return; - DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset)); - memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0, - FIELD_SIZE(kOptionalPaddingOffset)); -} - -CAST_ACCESSOR(UncompiledDataWithoutPreparseData) - -CAST_ACCESSOR(UncompiledDataWithPreparseData) -ACCESSORS(UncompiledDataWithPreparseData, preparse_data, PreparseData, - kPreparseDataOffset) +TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData) +TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData) +TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData) 
DEF_GETTER(HeapObject, IsUncompiledData, bool) { return IsUncompiledDataWithoutPreparseData(isolate) || @@ -124,7 +103,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object) ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object, kNameOrScopeInfoOffset) -ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object, +ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject, kScriptOrDebugInfoOffset) INT32_ACCESSORS(SharedFunctionInfo, function_literal_id, @@ -229,6 +208,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_safe_to_skip_arguments_adaptor, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit) +BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, + private_name_lookup_skips_outer_class, + SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit) bool SharedFunctionInfo::optimization_disabled() const { return disable_optimization_reason() != BailoutReason::kNoReason; @@ -613,12 +595,11 @@ void SharedFunctionInfo::ClearPreparseData() { Heap* heap = GetHeapFromWritableObject(data); // Swap the map. - heap->NotifyObjectLayoutChange(data, UncompiledDataWithPreparseData::kSize, - no_gc); + heap->NotifyObjectLayoutChange(data, no_gc); STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize < UncompiledDataWithPreparseData::kSize); STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize == - UncompiledData::kSize); + UncompiledData::kHeaderSize); data.synchronized_set_map( GetReadOnlyRoots().uncompiled_data_without_preparse_data_map()); @@ -644,7 +625,6 @@ void UncompiledData::Initialize( data, data.RawField(UncompiledData::kInferredNameOffset), inferred_name); data.set_start_position(start_position); data.set_end_position(end_position); - data.clear_padding(); } void UncompiledDataWithPreparseData::Initialize( @@ -672,16 +652,16 @@ bool SharedFunctionInfo::HasWasmCapiFunctionData() const { return function_data().IsWasmCapiFunctionData(); } -Object SharedFunctionInfo::script() const { - Object maybe_script = script_or_debug_info(); +HeapObject SharedFunctionInfo::script() const { + HeapObject maybe_script = script_or_debug_info(); if (maybe_script.IsDebugInfo()) { return DebugInfo::cast(maybe_script).script(); } return maybe_script; } -void SharedFunctionInfo::set_script(Object script) { - Object maybe_debug_info = script_or_debug_info(); +void SharedFunctionInfo::set_script(HeapObject script) { + HeapObject maybe_debug_info = script_or_debug_info(); if (maybe_debug_info.IsDebugInfo()) { DebugInfo::cast(maybe_debug_info).set_script(script); } else { diff --git a/chromium/v8/src/objects/shared-function-info.h b/chromium/v8/src/objects/shared-function-info.h index dc84653ede2..9c57d366978 100644 --- a/chromium/v8/src/objects/shared-function-info.h +++ b/chromium/v8/src/objects/shared-function-info.h @@ -5,6 +5,8 @@ #ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_H_ #define V8_OBJECTS_SHARED_FUNCTION_INFO_H_ +#include <memory> + #include "src/codegen/bailout-reason.h" #include "src/objects/compressed-slots.h" #include "src/objects/function-kind.h" @@ -55,11 +57,9 @@ class WasmJSFunctionData; // +-------------------------------+ // | Inner PreparseData N | // +-------------------------------+ -class PreparseData : public HeapObject { +class PreparseData + : public TorqueGeneratedPreparseData<PreparseData, HeapObject> { public: - DECL_INT_ACCESSORS(data_length) - DECL_INT_ACCESSORS(children_length) - inline int inner_start_offset() const; inline ObjectSlot inner_data_start() const; @@ -74,12 +74,9 @@ class PreparseData : public HeapObject { // 
Clear uninitialized padding space. inline void clear_padding(); - DECL_CAST(PreparseData) DECL_PRINTER(PreparseData) DECL_VERIFIER(PreparseData) - DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - TORQUE_GENERATED_PREPARSE_DATA_FIELDS) static const int kDataStartOffset = kSize; class BodyDescriptor; @@ -92,7 +89,7 @@ class PreparseData : public HeapObject { return InnerOffset(data_length) + children_length * kTaggedSize; } - OBJECT_CONSTRUCTORS(PreparseData, HeapObject); + TQ_OBJECT_CONSTRUCTORS(PreparseData) private: inline Object get_child_raw(int index) const; @@ -100,14 +97,9 @@ class PreparseData : public HeapObject { // Abstract class representing extra data for an uncompiled function, which is // not stored in the SharedFunctionInfo. -class UncompiledData : public HeapObject { +class UncompiledData + : public TorqueGeneratedUncompiledData<UncompiledData, HeapObject> { public: - DECL_ACCESSORS(inferred_name, String) - DECL_INT32_ACCESSORS(start_position) - DECL_INT32_ACCESSORS(end_position) - - DECL_CAST(UncompiledData) - inline static void Initialize( UncompiledData data, String inferred_name, int start_position, int end_position, @@ -115,56 +107,35 @@ class UncompiledData : public HeapObject { gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot, HeapObject target) {}); - // Layout description. -#define UNCOMPILED_DATA_FIELDS(V) \ - V(kStartOfStrongFieldsOffset, 0) \ - V(kInferredNameOffset, kTaggedSize) \ - V(kEndOfStrongFieldsOffset, 0) \ - /* Raw data fields. */ \ - V(kStartPositionOffset, kInt32Size) \ - V(kEndPositionOffset, kInt32Size) \ - V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \ - /* Header size. */ \ - V(kSize, 0) - - DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, UNCOMPILED_DATA_FIELDS) -#undef UNCOMPILED_DATA_FIELDS - - using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset, - kEndOfStrongFieldsOffset, kSize>; - - // Clear uninitialized padding space. - inline void clear_padding(); + using BodyDescriptor = + FixedBodyDescriptor<kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset, + kHeaderSize>; - OBJECT_CONSTRUCTORS(UncompiledData, HeapObject); + TQ_OBJECT_CONSTRUCTORS(UncompiledData) }; // Class representing data for an uncompiled function that does not have any // data from the pre-parser, either because it's a leaf function or because the // pre-parser bailed out. -class UncompiledDataWithoutPreparseData : public UncompiledData { +class UncompiledDataWithoutPreparseData + : public TorqueGeneratedUncompiledDataWithoutPreparseData< + UncompiledDataWithoutPreparseData, UncompiledData> { public: - DECL_CAST(UncompiledDataWithoutPreparseData) DECL_PRINTER(UncompiledDataWithoutPreparseData) - DECL_VERIFIER(UncompiledDataWithoutPreparseData) - - static const int kSize = UncompiledData::kSize; // No extra fields compared to UncompiledData. using BodyDescriptor = UncompiledData::BodyDescriptor; - OBJECT_CONSTRUCTORS(UncompiledDataWithoutPreparseData, UncompiledData); + TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithoutPreparseData) }; // Class representing data for an uncompiled function that has pre-parsed scope // data. 
-class UncompiledDataWithPreparseData : public UncompiledData { +class UncompiledDataWithPreparseData + : public TorqueGeneratedUncompiledDataWithPreparseData< + UncompiledDataWithPreparseData, UncompiledData> { public: - DECL_ACCESSORS(preparse_data, PreparseData) - - DECL_CAST(UncompiledDataWithPreparseData) DECL_PRINTER(UncompiledDataWithPreparseData) - DECL_VERIFIER(UncompiledDataWithPreparseData) inline static void Initialize( UncompiledDataWithPreparseData data, String inferred_name, @@ -173,28 +144,12 @@ class UncompiledDataWithPreparseData : public UncompiledData { gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot, HeapObject target) {}); - // Layout description. - -#define UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS(V) \ - V(kStartOfStrongFieldsOffset, 0) \ - V(kPreparseDataOffset, kTaggedSize) \ - V(kEndOfStrongFieldsOffset, 0) \ - /* Total size. */ \ - V(kSize, 0) - - DEFINE_FIELD_OFFSET_CONSTANTS(UncompiledData::kSize, - UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS) -#undef UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS - - // Make sure the size is aligned - STATIC_ASSERT(IsAligned(kSize, kTaggedSize)); - using BodyDescriptor = SubclassBodyDescriptor< UncompiledData::BodyDescriptor, FixedBodyDescriptor<kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset, kSize>>; - OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData, UncompiledData); + TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData) }; class InterpreterData : public Struct { @@ -242,7 +197,7 @@ class SharedFunctionInfo : public HeapObject { // Set up the link between shared function info and the script. The shared // function info is added to the list on the script. V8_EXPORT_PRIVATE static void SetScript( - Handle<SharedFunctionInfo> shared, Handle<Object> script_object, + Handle<SharedFunctionInfo> shared, Handle<HeapObject> script_object, int function_literal_id, bool reset_preparsed_scope_data = true); // Layout description of the optimized code map. @@ -408,10 +363,10 @@ class SharedFunctionInfo : public HeapObject { // [script_or_debug_info]: One of: // - Script from which the function originates. // - a DebugInfo which holds the actual script [HasDebugInfo()]. - DECL_ACCESSORS(script_or_debug_info, Object) + DECL_ACCESSORS(script_or_debug_info, HeapObject) - inline Object script() const; - inline void set_script(Object script); + inline HeapObject script() const; + inline void set_script(HeapObject script); // The function is subject to debugging if a debug info is attached. inline bool HasDebugInfo() const; @@ -490,6 +445,10 @@ class SharedFunctionInfo : public HeapObject { // Indicates that the function has been reported for binary code coverage. DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage) + // Indicates that the private name lookups inside the function skips the + // closest outer class scope. + DECL_BOOLEAN_ACCESSORS(private_name_lookup_skips_outer_class) + inline FunctionKind kind() const; // Defines the index in a native context of closure's map instantiated using @@ -640,21 +599,6 @@ class SharedFunctionInfo : public HeapObject { DISALLOW_COPY_AND_ASSIGN(ScriptIterator); }; - // Iterate over all shared function infos on the heap. 
- class GlobalIterator { - public: - V8_EXPORT_PRIVATE explicit GlobalIterator(Isolate* isolate); - V8_EXPORT_PRIVATE SharedFunctionInfo Next(); - - private: - Isolate* isolate_; - Script::Iterator script_iterator_; - WeakArrayList::Iterator noscript_sfi_iterator_; - SharedFunctionInfo::ScriptIterator sfi_iterator_; - DISALLOW_HEAP_ALLOCATION(no_gc_) - DISALLOW_COPY_AND_ASSIGN(GlobalIterator); - }; - DECL_CAST(SharedFunctionInfo) // Constants. @@ -691,7 +635,8 @@ class SharedFunctionInfo : public HeapObject { V(HasReportedBinaryCoverageBit, bool, 1, _) \ V(IsTopLevelBit, bool, 1, _) \ V(IsOneshotIIFEOrPropertiesAreFinalBit, bool, 1, _) \ - V(IsSafeToSkipArgumentsAdaptorBit, bool, 1, _) + V(IsSafeToSkipArgumentsAdaptorBit, bool, 1, _) \ + V(PrivateNameLookupSkipsOuterClassBit, bool, 1, _) DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS) #undef FLAGS_BIT_FIELDS diff --git a/chromium/v8/src/objects/slots-inl.h b/chromium/v8/src/objects/slots-inl.h index b240729114e..7e692b79483 100644 --- a/chromium/v8/src/objects/slots-inl.h +++ b/chromium/v8/src/objects/slots-inl.h @@ -119,7 +119,7 @@ inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) { #ifdef V8_COMPRESS_POINTERS Tagged_t raw_value = CompressTagged(value.ptr()); STATIC_ASSERT(kTaggedSize == kInt32Size); - MemsetInt32(start.location(), raw_value, counter); + MemsetInt32(reinterpret_cast<int32_t*>(start.location()), raw_value, counter); #else Address raw_value = value.ptr(); MemsetPointer(start.location(), raw_value, counter); diff --git a/chromium/v8/src/objects/source-text-module.cc b/chromium/v8/src/objects/source-text-module.cc index f17c59de1ad..2959e1b8542 100644 --- a/chromium/v8/src/objects/source-text-module.cc +++ b/chromium/v8/src/objects/source-text-module.cc @@ -78,8 +78,6 @@ class Module::ResolveSet SharedFunctionInfo SourceTextModule::GetSharedFunctionInfo() const { DisallowHeapAllocation no_alloc; - DCHECK_NE(status(), Module::kEvaluating); - DCHECK_NE(status(), Module::kEvaluated); switch (status()) { case kUninstantiated: case kPreInstantiating: @@ -89,10 +87,10 @@ SharedFunctionInfo SourceTextModule::GetSharedFunctionInfo() const { DCHECK(code().IsJSFunction()); return JSFunction::cast(code()).shared(); case kInstantiated: - DCHECK(code().IsJSGeneratorObject()); - return JSGeneratorObject::cast(code()).function().shared(); case kEvaluating: case kEvaluated: + DCHECK(code().IsJSGeneratorObject()); + return JSGeneratorObject::cast(code()).function().shared(); case kErrored: UNREACHABLE(); } @@ -580,58 +578,518 @@ Handle<JSModuleNamespace> SourceTextModule::GetModuleNamespace( return Module::GetModuleNamespace(isolate, requested_module); } +MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync( + Isolate* isolate, Handle<SourceTextModule> module) { + // In the event of errored evaluation, return a rejected promise. + if (module->status() == kErrored) { + // If we have a top level capability we assume it has already been + // rejected, and return it here. Otherwise create a new promise and + // reject it with the module's exception. 
+ if (module->top_level_capability().IsJSPromise()) { + Handle<JSPromise> top_level_capability( + JSPromise::cast(module->top_level_capability()), isolate); + DCHECK(top_level_capability->status() == Promise::kRejected && + top_level_capability->result() == module->exception()); + return top_level_capability; + } + Handle<JSPromise> capability = isolate->factory()->NewJSPromise(); + JSPromise::Reject(capability, handle(module->exception(), isolate)); + return capability; + } + + // Start of Evaluate () Concrete Method + // 2. Assert: module.[[Status]] is "linked" or "evaluated". + CHECK(module->status() == kInstantiated || module->status() == kEvaluated); + + // 3. If module.[[Status]] is "evaluated", set module to + // GetAsyncCycleRoot(module). + if (module->status() == kEvaluated) { + module = GetAsyncCycleRoot(isolate, module); + } + + // 4. If module.[[TopLevelCapability]] is not undefined, then + // a. Return module.[[TopLevelCapability]].[[Promise]]. + if (module->top_level_capability().IsJSPromise()) { + return handle(JSPromise::cast(module->top_level_capability()), isolate); + } + DCHECK(module->top_level_capability().IsUndefined()); + + // 6. Let capability be ! NewPromiseCapability(%Promise%). + Handle<JSPromise> capability = isolate->factory()->NewJSPromise(); + + // 7. Set module.[[TopLevelCapability]] to capability. + module->set_top_level_capability(*capability); + DCHECK(module->top_level_capability().IsJSPromise()); + + // 9. If result is an abrupt completion, then + Handle<Object> unused_result; + if (!Evaluate(isolate, module).ToHandle(&unused_result)) { + // d. Perform ! Call(capability.[[Reject]], undefined, + // «result.[[Value]]»). + isolate->clear_pending_exception(); + JSPromise::Reject(capability, handle(module->exception(), isolate)); + } else { + // 10. Otherwise, + // a. Assert: module.[[Status]] is "evaluated"... + CHECK_EQ(module->status(), kEvaluated); + + // b. If module.[[AsyncEvaluating]] is false, then + if (!module->async_evaluating()) { + // i. Perform ! Call(capability.[[Resolve]], undefined, + // «undefined»). + JSPromise::Resolve(capability, isolate->factory()->undefined_value()) + .ToHandleChecked(); + } + } + + // 11. Return capability.[[Promise]]. + return capability; +} + MaybeHandle<Object> SourceTextModule::Evaluate( + Isolate* isolate, Handle<SourceTextModule> module) { + // Evaluate () Concrete Method continued from EvaluateMaybeAsync. + CHECK(module->status() == kInstantiated || module->status() == kEvaluated); + + // 5. Let stack be a new empty List. + Zone zone(isolate->allocator(), ZONE_NAME); + ZoneForwardList<Handle<SourceTextModule>> stack(&zone); + unsigned dfs_index = 0; + + // 8. Let result be InnerModuleEvaluation(module, stack, 0). + // 9. If result is an abrupt completion, then + Handle<Object> result; + if (!InnerModuleEvaluation(isolate, module, &stack, &dfs_index) + .ToHandle(&result)) { + // a. For each Cyclic Module Record m in stack, do + for (auto& descendant : stack) { + // i. Assert: m.[[Status]] is "evaluating". + CHECK_EQ(descendant->status(), kEvaluating); + // ii. Set m.[[Status]] to "evaluated". + // iii. Set m.[[EvaluationError]] to result. + descendant->RecordErrorUsingPendingException(isolate); + } + DCHECK_EQ(module->exception(), isolate->pending_exception()); + } else { + // 10. Otherwise, + // c. Assert: stack is empty. + DCHECK(stack.empty()); + } + return result; +} + +void SourceTextModule::AsyncModuleExecutionFulfilled( + Isolate* isolate, Handle<SourceTextModule> module) { + // 1. 
Assert: module.[[Status]] is "evaluated". + CHECK(module->status() == kEvaluated || module->status() == kErrored); + + // 2. If module.[[AsyncEvaluating]] is false, + if (!module->async_evaluating()) { + // a. Assert: module.[[EvaluationError]] is not undefined. + CHECK_EQ(module->status(), kErrored); + + // b. Return undefined. + return; + } + + // 3. Assert: module.[[EvaluationError]] is undefined. + CHECK_EQ(module->status(), kEvaluated); + + // 4. Set module.[[AsyncEvaluating]] to false. + module->set_async_evaluating(false); + + // 5. For each Module m of module.[[AsyncParentModules]], do + for (int i = 0; i < module->AsyncParentModuleCount(); i++) { + Handle<SourceTextModule> m = module->GetAsyncParentModule(isolate, i); + + // a. If module.[[DFSIndex]] is not equal to module.[[DFSAncestorIndex]], + // then + if (module->dfs_index() != module->dfs_ancestor_index()) { + // i. Assert: m.[[DFSAncestorIndex]] is equal to + // module.[[DFSAncestorIndex]]. + DCHECK_LE(m->dfs_ancestor_index(), module->dfs_ancestor_index()); + } + // b. Decrement m.[[PendingAsyncDependencies]] by 1. + m->DecrementPendingAsyncDependencies(); + + // c. If m.[[PendingAsyncDependencies]] is 0 and m.[[EvaluationError]] is + // undefined, then + if (!m->HasPendingAsyncDependencies() && m->status() == kEvaluated) { + // i. Assert: m.[[AsyncEvaluating]] is true. + DCHECK(m->async_evaluating()); + + // ii. Let cycleRoot be ! GetAsyncCycleRoot(m). + auto cycle_root = GetAsyncCycleRoot(isolate, m); + + // iii. If cycleRoot.[[EvaluationError]] is not undefined, + // return undefined. + if (cycle_root->status() == kErrored) { + return; + } + + // iv. If m.[[Async]] is true, then + if (m->async()) { + // 1. Perform ! ExecuteAsyncModule(m). + ExecuteAsyncModule(isolate, m); + } else { + // v. Otherwise, + // 1. Let result be m.ExecuteModule(). + // 2. If result is a normal completion, + Handle<Object> unused_result; + if (ExecuteModule(isolate, m).ToHandle(&unused_result)) { + // a. Perform ! AsyncModuleExecutionFulfilled(m). + AsyncModuleExecutionFulfilled(isolate, m); + } else { + // 3. Otherwise, + // a. Perform ! AsyncModuleExecutionRejected(m, + // result.[[Value]]). + Handle<Object> exception(isolate->pending_exception(), isolate); + isolate->clear_pending_exception(); + AsyncModuleExecutionRejected(isolate, m, exception); + } + } + } + } + + // 6. If module.[[TopLevelCapability]] is not undefined, then + if (!module->top_level_capability().IsUndefined(isolate)) { + // a. Assert: module.[[DFSIndex]] is equal to module.[[DFSAncestorIndex]]. + DCHECK_EQ(module->dfs_index(), module->dfs_ancestor_index()); + + // b. Perform ! Call(module.[[TopLevelCapability]].[[Resolve]], + // undefined, «undefined»). + Handle<JSPromise> capability( + JSPromise::cast(module->top_level_capability()), isolate); + JSPromise::Resolve(capability, isolate->factory()->undefined_value()) + .ToHandleChecked(); + } + + // 7. Return undefined. +} + +void SourceTextModule::AsyncModuleExecutionRejected( Isolate* isolate, Handle<SourceTextModule> module, - ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) { + Handle<Object> exception) { + // 1. Assert: module.[[Status]] is "evaluated". + CHECK(module->status() == kEvaluated || module->status() == kErrored); + + // 2. If module.[[AsyncEvaluating]] is false, + if (!module->async_evaluating()) { + // a. Assert: module.[[EvaluationError]] is not undefined. + CHECK_EQ(module->status(), kErrored); + + // b. Return undefined. + return; + } + + // 4. 
Set module.[[EvaluationError]] to ThrowCompletion(error). + module->RecordError(isolate, exception); + + // 5. Set module.[[AsyncEvaluating]] to false. + module->set_async_evaluating(false); + + // 6. For each Module m of module.[[AsyncParentModules]], do + for (int i = 0; i < module->AsyncParentModuleCount(); i++) { + Handle<SourceTextModule> m = module->GetAsyncParentModule(isolate, i); + + // a. If module.[[DFSIndex]] is not equal to module.[[DFSAncestorIndex]], + // then + if (module->dfs_index() != module->dfs_ancestor_index()) { + // i. Assert: m.[[DFSAncestorIndex]] is equal to + // module.[[DFSAncestorIndex]]. + DCHECK_EQ(m->dfs_ancestor_index(), module->dfs_ancestor_index()); + } + // b. Perform ! AsyncModuleExecutionRejected(m, error). + AsyncModuleExecutionRejected(isolate, m, exception); + } + + // 7. If module.[[TopLevelCapability]] is not undefined, then + if (!module->top_level_capability().IsUndefined(isolate)) { + // a. Assert: module.[[DFSIndex]] is equal to module.[[DFSAncestorIndex]]. + DCHECK(module->dfs_index() == module->dfs_ancestor_index()); + + // b. Perform ! Call(module.[[TopLevelCapability]].[[Reject]], + // undefined, «error»). + Handle<JSPromise> capability( + JSPromise::cast(module->top_level_capability()), isolate); + JSPromise::Reject(capability, exception); + } + + // 8. Return undefined. +} + +void SourceTextModule::ExecuteAsyncModule(Isolate* isolate, + Handle<SourceTextModule> module) { + // 1. Assert: module.[[Status]] is "evaluating" or "evaluated". + CHECK(module->status() == kEvaluating || module->status() == kEvaluated); + + // 2. Assert: module.[[Async]] is true. + DCHECK(module->async()); + + // 3. Set module.[[AsyncEvaluating]] to true. + module->set_async_evaluating(true); + + // 4. Let capability be ! NewPromiseCapability(%Promise%). + Handle<JSPromise> capability = isolate->factory()->NewJSPromise(); + + // 5. Let stepsFulfilled be the steps of a CallAsyncModuleFulfilled + Handle<JSFunction> steps_fulfilled( + isolate->native_context()->call_async_module_fulfilled(), isolate); + + ScopedVector<Handle<Object>> empty_argv(0); + + // 6. Let onFulfilled be CreateBuiltinFunction(stepsFulfilled, + // «[[Module]]»). + // 7. Set onFulfilled.[[Module]] to module. + Handle<JSBoundFunction> on_fulfilled = + isolate->factory() + ->NewJSBoundFunction(steps_fulfilled, module, empty_argv) + .ToHandleChecked(); + + // 8. Let stepsRejected be the steps of a CallAsyncModuleRejected. + Handle<JSFunction> steps_rejected( + isolate->native_context()->call_async_module_rejected(), isolate); + + // 9. Let onRejected be CreateBuiltinFunction(stepsRejected, «[[Module]]»). + // 10. Set onRejected.[[Module]] to module. + Handle<JSBoundFunction> on_rejected = + isolate->factory() + ->NewJSBoundFunction(steps_rejected, module, empty_argv) + .ToHandleChecked(); + + // 11. Perform ! PerformPromiseThen(capability.[[Promise]], + // onFulfilled, onRejected). + Handle<Object> argv[] = {on_fulfilled, on_rejected}; + Execution::CallBuiltin(isolate, isolate->promise_then(), capability, + arraysize(argv), argv) + .ToHandleChecked(); + + // 12. Perform ! module.ExecuteModule(capability). + // Note: In V8 we have broken module.ExecuteModule into + // ExecuteModule for synchronous module execution and + // InnerExecuteAsyncModule for asynchronous execution. + InnerExecuteAsyncModule(isolate, module, capability).ToHandleChecked(); + + // 13. Return. 
+} + +MaybeHandle<Object> SourceTextModule::InnerExecuteAsyncModule( + Isolate* isolate, Handle<SourceTextModule> module, + Handle<JSPromise> capability) { + // If we have an async module, then it has an associated + // JSAsyncFunctionObject, which we then evaluate with the passed in promise + // capability. + Handle<JSAsyncFunctionObject> async_function_object( + JSAsyncFunctionObject::cast(module->code()), isolate); + async_function_object->set_promise(*capability); + Handle<JSFunction> resume( + isolate->native_context()->async_module_evaluate_internal(), isolate); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, + Execution::Call(isolate, resume, async_function_object, 0, nullptr), + Object); + return result; +} + +MaybeHandle<Object> SourceTextModule::ExecuteModule( + Isolate* isolate, Handle<SourceTextModule> module) { + // Synchronous modules have an associated JSGeneratorObject. Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()), isolate); - module->set_code( - generator->function().shared().scope_info().ModuleDescriptorInfo()); + Handle<JSFunction> resume( + isolate->native_context()->generator_next_internal(), isolate); + Handle<Object> result; + ASSIGN_RETURN_ON_EXCEPTION( + isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr), + Object); + DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate)); + return handle(JSIteratorResult::cast(*result).value(), isolate); +} + +MaybeHandle<Object> SourceTextModule::InnerModuleEvaluation( + Isolate* isolate, Handle<SourceTextModule> module, + ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) { + STACK_CHECK(isolate, MaybeHandle<Object>()); + + // InnerModuleEvaluation(module, stack, index) + // 2. If module.[[Status]] is "evaluated", then + // a. If module.[[EvaluationError]] is undefined, return index. + // (We return undefined instead) + if (module->status() == kEvaluated || module->status() == kEvaluating) { + return isolate->factory()->undefined_value(); + } + + // b. Otherwise return module.[[EvaluationError]]. + // (We throw on isolate and return a MaybeHandle<Object> + // instead) + if (module->status() == kErrored) { + isolate->Throw(module->exception()); + return MaybeHandle<Object>(); + } + + // 4. Assert: module.[[Status]] is "linked". + CHECK_EQ(module->status(), kInstantiated); + + // 5. Set module.[[Status]] to "evaluating". module->SetStatus(kEvaluating); + + // 6. Set module.[[DFSIndex]] to index. module->set_dfs_index(*dfs_index); + + // 7. Set module.[[DFSAncestorIndex]] to index. module->set_dfs_ancestor_index(*dfs_index); - stack->push_front(module); + + // 8. Set module.[[PendingAsyncDependencies]] to 0. + DCHECK(!module->HasPendingAsyncDependencies()); + + // 9. Set module.[[AsyncParentModules]] to a new empty List. + Handle<ArrayList> async_parent_modules = ArrayList::New(isolate, 0); + module->set_async_parent_modules(*async_parent_modules); + + // 10. Set index to index + 1. (*dfs_index)++; + // 11. Append module to stack. + stack->push_front(module); + // Recursion. Handle<FixedArray> requested_modules(module->requested_modules(), isolate); + + // 12. 
For each String required that is an element of + // module.[[RequestedModules]], do for (int i = 0, length = requested_modules->length(); i < length; ++i) { Handle<Module> requested_module(Module::cast(requested_modules->get(i)), isolate); - RETURN_ON_EXCEPTION( - isolate, Module::Evaluate(isolate, requested_module, stack, dfs_index), - Object); - - DCHECK_GE(requested_module->status(), kEvaluating); - DCHECK_NE(requested_module->status(), kErrored); - SLOW_DCHECK( - // {requested_module} is evaluating iff it's on the {stack}. - (requested_module->status() == kEvaluating) == - std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) { - return *m == *requested_module; - })); - - if (requested_module->status() == kEvaluating) { - // SyntheticModules go straight to kEvaluated so this must be a - // SourceTextModule - module->set_dfs_ancestor_index( - std::min(module->dfs_ancestor_index(), - Handle<SourceTextModule>::cast(requested_module) - ->dfs_ancestor_index())); + // d. If requiredModule is a Cyclic Module Record, then + if (requested_module->IsSourceTextModule()) { + Handle<SourceTextModule> required_module( + SourceTextModule::cast(*requested_module), isolate); + RETURN_ON_EXCEPTION( + isolate, + InnerModuleEvaluation(isolate, required_module, stack, dfs_index), + Object); + + // i. Assert: requiredModule.[[Status]] is either "evaluating" or + // "evaluated". + // (We also assert the module cannot be errored, because if it was + // we would have already returned from InnerModuleEvaluation) + CHECK_GE(required_module->status(), kEvaluating); + CHECK_NE(required_module->status(), kErrored); + + // ii. Assert: requiredModule.[[Status]] is "evaluating" if and + // only if requiredModule is in stack. + SLOW_DCHECK( + (requested_module->status() == kEvaluating) == + std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) { + return *m == *requested_module; + })); + + // iii. If requiredModule.[[Status]] is "evaluating", then + if (required_module->status() == kEvaluating) { + // 1. Set module.[[DFSAncestorIndex]] to + // min( + // module.[[DFSAncestorIndex]], + // requiredModule.[[DFSAncestorIndex]]). + module->set_dfs_ancestor_index( + std::min(module->dfs_ancestor_index(), + required_module->dfs_ancestor_index())); + } else { + // iv. Otherwise, + // 1. Set requiredModule to GetAsyncCycleRoot(requiredModule). + required_module = GetAsyncCycleRoot(isolate, required_module); + + // 2. Assert: requiredModule.[[Status]] is "evaluated". + CHECK_GE(required_module->status(), kEvaluated); + + // 3. If requiredModule.[[EvaluationError]] is not undefined, + // return module.[[EvaluationError]]. + // (If there was an exception on the original required module + // we would have already returned. This check handles the case + // where the AsyncCycleRoot has an error. Instead of returning + // the exception, we throw on isolate and return a + // MaybeHandle<Object>) + if (required_module->status() == kErrored) { + isolate->Throw(required_module->exception()); + return MaybeHandle<Object>(); + } + } + // v. If requiredModule.[[AsyncEvaluating]] is true, then + if (required_module->async_evaluating()) { + // 1. Set module.[[PendingAsyncDependencies]] to + // module.[[PendingAsyncDependencies]] + 1. + module->IncrementPendingAsyncDependencies(); + + // 2. Append module to requiredModule.[[AsyncParentModules]]. 
+ required_module->AddAsyncParentModule(isolate, module); + } + } else { + RETURN_ON_EXCEPTION(isolate, Module::Evaluate(isolate, requested_module), + Object); } } - // Evaluation of module body. - Handle<JSFunction> resume( - isolate->native_context()->generator_next_internal(), isolate); - Handle<Object> result; - ASSIGN_RETURN_ON_EXCEPTION( - isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr), - Object); - DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate)); + // The spec returns the module index for proper numbering of dependencies. + // However, we pass the module index by pointer instead. + // + // Before async modules v8 returned the value result from calling next + // on the module's implicit iterator. We preserve this behavior for + // synchronous modules, but return undefined for AsyncModules. + Handle<Object> result = isolate->factory()->undefined_value(); + + // 14. If module.[[PendingAsyncDependencies]] is > 0, set + // module.[[AsyncEvaluating]] to true. + if (module->HasPendingAsyncDependencies()) { + module->set_async_evaluating(true); + } else if (module->async()) { + // 15. Otherwise, if module.[[Async]] is true, + // perform ! ExecuteAsyncModule(module). + SourceTextModule::ExecuteAsyncModule(isolate, module); + } else { + // 16. Otherwise, perform ? module.ExecuteModule(). + ASSIGN_RETURN_ON_EXCEPTION(isolate, result, ExecuteModule(isolate, module), + Object); + } CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated)); - return handle(JSIteratorResult::cast(*result).value(), isolate); + return result; +} + +Handle<SourceTextModule> SourceTextModule::GetAsyncCycleRoot( + Isolate* isolate, Handle<SourceTextModule> module) { + // 1. Assert: module.[[Status]] is "evaluated". + CHECK_GE(module->status(), kEvaluated); + + // 2. If module.[[AsyncParentModules]] is an empty List, return module. + if (module->AsyncParentModuleCount() == 0) { + return module; + } + + // 3. Repeat, while module.[[DFSIndex]] is greater than + // module.[[DFSAncestorIndex]], + while (module->dfs_index() > module->dfs_ancestor_index()) { + // a. Assert: module.[[AsyncParentModules]] is a non-empty List. + DCHECK_GT(module->AsyncParentModuleCount(), 0); + + // b. Let nextCycleModule be the first element of + // module.[[AsyncParentModules]]. + Handle<SourceTextModule> next_cycle_module = + module->GetAsyncParentModule(isolate, 0); + + // c. Assert: nextCycleModule.[[DFSAncestorIndex]] is less than or equal + // to module.[[DFSAncestorIndex]]. + DCHECK_LE(next_cycle_module->dfs_ancestor_index(), + module->dfs_ancestor_index()); + + // d. Set module to nextCycleModule + module = next_cycle_module; + } + + // 4. Assert: module.[[DFSIndex]] is equal to module.[[DFSAncestorIndex]]. + DCHECK_EQ(module->dfs_index(), module->dfs_ancestor_index()); + + // 5. Return module. + return module; } void SourceTextModule::Reset(Isolate* isolate, diff --git a/chromium/v8/src/objects/source-text-module.h b/chromium/v8/src/objects/source-text-module.h index e6cf260e101..f1387635d00 100644 --- a/chromium/v8/src/objects/source-text-module.h +++ b/chromium/v8/src/objects/source-text-module.h @@ -6,6 +6,7 @@ #define V8_OBJECTS_SOURCE_TEXT_MODULE_H_ #include "src/objects/module.h" +#include "src/objects/promise.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -28,6 +29,10 @@ class SourceTextModule // kErrored. SharedFunctionInfo GetSharedFunctionInfo() const; + // Whether or not this module is an async module. 
Set during module creation + // and does not change afterwards. + DECL_BOOLEAN_ACCESSORS(async) + // Get the SourceTextModuleInfo associated with the code. inline SourceTextModuleInfo info() const; @@ -41,6 +46,14 @@ class SourceTextModule static int ImportIndex(int cell_index); static int ExportIndex(int cell_index); + // Used by builtins to fulfill or reject the promise associated + // with async SourceTextModules. + static void AsyncModuleExecutionFulfilled(Isolate* isolate, + Handle<SourceTextModule> module); + static void AsyncModuleExecutionRejected(Isolate* isolate, + Handle<SourceTextModule> module, + Handle<Object> exception); + // Get the namespace object for [module_request] of [module]. If it doesn't // exist yet, it is created. static Handle<JSModuleNamespace> GetModuleNamespace( @@ -54,12 +67,54 @@ class SourceTextModule friend class Factory; friend class Module; + // Appends a tuple of module and generator to the async parent modules + // ArrayList. + inline void AddAsyncParentModule(Isolate* isolate, + Handle<SourceTextModule> module); + + // Returns a SourceTextModule, the + // ith parent in depth first traversal order of a given async child. + inline Handle<SourceTextModule> GetAsyncParentModule(Isolate* isolate, + int index); + + // Returns the number of async parent modules for a given async child. + inline int AsyncParentModuleCount(); + + inline bool HasPendingAsyncDependencies(); + inline void IncrementPendingAsyncDependencies(); + inline void DecrementPendingAsyncDependencies(); + // TODO(neis): Don't store those in the module object? DECL_INT_ACCESSORS(dfs_index) DECL_INT_ACCESSORS(dfs_ancestor_index) - // Helpers for Instantiate and Evaluate. + // Storage for boolean flags. + DECL_INT_ACCESSORS(flags) + + // Bits for flags. + static const int kAsyncBit = 0; + static const int kAsyncEvaluatingBit = 1; + + // async_evaluating, top_level_capability, pending_async_dependencies, and + // async_parent_modules are used exclusively during evaluation of async + // modules and the modules which depend on them. + // + // Whether or not this module is async and evaluating or currently evaluating + // an async child. + DECL_BOOLEAN_ACCESSORS(async_evaluating) + + // The top level promise capability of this module. Will only be defined + // for cycle roots. + DECL_ACCESSORS(top_level_capability, HeapObject) + + // The number of currently evaluating async dependencies of this module. + DECL_INT_ACCESSORS(pending_async_dependencies) + + // The parent modules of a given async dependency, use async_parent_modules() + // to retrieve the ArrayList representation. + DECL_ACCESSORS(async_parent_modules, ArrayList) + // Helpers for Instantiate and Evaluate. static void CreateExport(Isolate* isolate, Handle<SourceTextModule> module, int cell_index, Handle<FixedArray> names); static void CreateIndirectExport(Isolate* isolate, @@ -95,7 +150,16 @@ class SourceTextModule Handle<SourceTextModule> module, Zone* zone, UnorderedModuleSet* visited); + // Implementation of spec concrete method Evaluate. + static V8_WARN_UNUSED_RESULT MaybeHandle<Object> EvaluateMaybeAsync( + Isolate* isolate, Handle<SourceTextModule> module); + + // Continued implementation of spec concrete method Evaluate. static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate( + Isolate* isolate, Handle<SourceTextModule> module); + + // Implementation of spec abstract operation InnerModuleEvaluation. 
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerModuleEvaluation( Isolate* isolate, Handle<SourceTextModule> module, ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index); @@ -103,6 +167,24 @@ class SourceTextModule Isolate* isolate, Handle<SourceTextModule> module, ZoneForwardList<Handle<SourceTextModule>>* stack, Status new_status); + // Implementation of spec GetAsyncCycleRoot. + static V8_WARN_UNUSED_RESULT Handle<SourceTextModule> GetAsyncCycleRoot( + Isolate* isolate, Handle<SourceTextModule> module); + + // Implementation of spec ExecuteModule is broken up into + // InnerExecuteAsyncModule for asynchronous modules and ExecuteModule + // for synchronous modules. + static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerExecuteAsyncModule( + Isolate* isolate, Handle<SourceTextModule> module, + Handle<JSPromise> capability); + + static V8_WARN_UNUSED_RESULT MaybeHandle<Object> ExecuteModule( + Isolate* isolate, Handle<SourceTextModule> module); + + // Implementation of spec ExecuteAsyncModule. + static void ExecuteAsyncModule(Isolate* isolate, + Handle<SourceTextModule> module); + static void Reset(Isolate* isolate, Handle<SourceTextModule> module); TQ_OBJECT_CONSTRUCTORS(SourceTextModule) @@ -169,9 +251,10 @@ class SourceTextModuleInfoEntry DECL_INT_ACCESSORS(end_pos) static Handle<SourceTextModuleInfoEntry> New( - Isolate* isolate, Handle<HeapObject> export_name, - Handle<HeapObject> local_name, Handle<HeapObject> import_name, - int module_request, int cell_index, int beg_pos, int end_pos); + Isolate* isolate, Handle<PrimitiveHeapObject> export_name, + Handle<PrimitiveHeapObject> local_name, + Handle<PrimitiveHeapObject> import_name, int module_request, + int cell_index, int beg_pos, int end_pos); TQ_OBJECT_CONSTRUCTORS(SourceTextModuleInfoEntry) }; diff --git a/chromium/v8/src/objects/stack-frame-info.cc b/chromium/v8/src/objects/stack-frame-info.cc index 323c4b8fcbb..040c6f7b326 100644 --- a/chromium/v8/src/objects/stack-frame-info.cc +++ b/chromium/v8/src/objects/stack-frame-info.cc @@ -299,10 +299,8 @@ void AppendMethodCall(Isolate* isolate, Handle<StackTraceFrame> frame, } } -void SerializeJSStackFrame( - Isolate* isolate, Handle<StackTraceFrame> frame, - IncrementalStringBuilder& builder // NOLINT(runtime/references) -) { +void SerializeJSStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame, + IncrementalStringBuilder* builder) { Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame); const bool is_toplevel = StackTraceFrame::IsToplevel(frame); @@ -316,96 +314,91 @@ void SerializeJSStackFrame( const bool is_method_call = !(is_toplevel || is_constructor); if (is_async) { - builder.AppendCString("async "); + builder->AppendCString("async "); } if (is_promise_all) { - builder.AppendCString("Promise.all (index "); - builder.AppendInt(StackTraceFrame::GetPromiseAllIndex(frame)); - builder.AppendCString(")"); + builder->AppendCString("Promise.all (index "); + builder->AppendInt(StackTraceFrame::GetPromiseAllIndex(frame)); + builder->AppendCString(")"); return; } if (is_method_call) { - AppendMethodCall(isolate, frame, &builder); + AppendMethodCall(isolate, frame, builder); } else if (is_constructor) { - builder.AppendCString("new "); + builder->AppendCString("new "); if (IsNonEmptyString(function_name)) { - builder.AppendString(Handle<String>::cast(function_name)); + builder->AppendString(Handle<String>::cast(function_name)); } else { - builder.AppendCString("<anonymous>"); + builder->AppendCString("<anonymous>"); } } else if 
(IsNonEmptyString(function_name)) { - builder.AppendString(Handle<String>::cast(function_name)); + builder->AppendString(Handle<String>::cast(function_name)); } else { - AppendFileLocation(isolate, frame, &builder); + AppendFileLocation(isolate, frame, builder); return; } - builder.AppendCString(" ("); - AppendFileLocation(isolate, frame, &builder); - builder.AppendCString(")"); + builder->AppendCString(" ("); + AppendFileLocation(isolate, frame, builder); + builder->AppendCString(")"); } -void SerializeAsmJsWasmStackFrame( - Isolate* isolate, Handle<StackTraceFrame> frame, - IncrementalStringBuilder& builder // NOLINT(runtime/references) -) { +void SerializeAsmJsWasmStackFrame(Isolate* isolate, + Handle<StackTraceFrame> frame, + IncrementalStringBuilder* builder) { // The string should look exactly as the respective javascript frame string. // Keep this method in line to // JSStackFrame::ToString(IncrementalStringBuilder&). Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame); if (IsNonEmptyString(function_name)) { - builder.AppendString(Handle<String>::cast(function_name)); - builder.AppendCString(" ("); + builder->AppendString(Handle<String>::cast(function_name)); + builder->AppendCString(" ("); } - AppendFileLocation(isolate, frame, &builder); + AppendFileLocation(isolate, frame, builder); - if (IsNonEmptyString(function_name)) builder.AppendCString(")"); + if (IsNonEmptyString(function_name)) builder->AppendCString(")"); return; } -void SerializeWasmStackFrame( - Isolate* isolate, Handle<StackTraceFrame> frame, - IncrementalStringBuilder& builder // NOLINT(runtime/references) -) { +void SerializeWasmStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame, + IncrementalStringBuilder* builder) { Handle<Object> module_name = StackTraceFrame::GetWasmModuleName(frame); Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame); const bool has_name = !module_name->IsNull() || !function_name->IsNull(); if (has_name) { if (module_name->IsNull()) { - builder.AppendString(Handle<String>::cast(function_name)); + builder->AppendString(Handle<String>::cast(function_name)); } else { - builder.AppendString(Handle<String>::cast(module_name)); + builder->AppendString(Handle<String>::cast(module_name)); if (!function_name->IsNull()) { - builder.AppendCString("."); - builder.AppendString(Handle<String>::cast(function_name)); + builder->AppendCString("."); + builder->AppendString(Handle<String>::cast(function_name)); } } - builder.AppendCString(" ("); + builder->AppendCString(" ("); } const int wasm_func_index = StackTraceFrame::GetLineNumber(frame); - builder.AppendCString("wasm-function["); - builder.AppendInt(wasm_func_index); - builder.AppendCString("]:"); + builder->AppendCString("wasm-function["); + builder->AppendInt(wasm_func_index); + builder->AppendCString("]:"); char buffer[16]; SNPrintF(ArrayVector(buffer), "0x%x", StackTraceFrame::GetColumnNumber(frame)); - builder.AppendCString(buffer); + builder->AppendCString(buffer); - if (has_name) builder.AppendCString(")"); + if (has_name) builder->AppendCString(")"); } } // namespace -void SerializeStackTraceFrame( - Isolate* isolate, Handle<StackTraceFrame> frame, - IncrementalStringBuilder& builder // NOLINT(runtime/references) -) { +void SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame, + IncrementalStringBuilder* builder) { // Ordering here is important, as AsmJs frames are also marked as Wasm. 
if (StackTraceFrame::IsAsmJsWasm(frame)) { SerializeAsmJsWasmStackFrame(isolate, frame, builder); @@ -419,7 +412,7 @@ void SerializeStackTraceFrame( MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame) { IncrementalStringBuilder builder(isolate); - SerializeStackTraceFrame(isolate, frame, builder); + SerializeStackTraceFrame(isolate, frame, &builder); return builder.Finish(); } diff --git a/chromium/v8/src/objects/stack-frame-info.h b/chromium/v8/src/objects/stack-frame-info.h index 7c4918a3c6b..54b64b61185 100644 --- a/chromium/v8/src/objects/stack-frame-info.h +++ b/chromium/v8/src/objects/stack-frame-info.h @@ -124,10 +124,8 @@ Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate, Handle<FixedArray> stack_trace); class IncrementalStringBuilder; -void SerializeStackTraceFrame( - Isolate* isolate, Handle<StackTraceFrame> frame, - IncrementalStringBuilder& builder // NOLINT(runtime/references) -); +void SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame, + IncrementalStringBuilder* builder); MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate, Handle<StackTraceFrame> frame); diff --git a/chromium/v8/src/objects/string-inl.h b/chromium/v8/src/objects/string-inl.h index 083928d2119..b4aea68cb16 100644 --- a/chromium/v8/src/objects/string-inl.h +++ b/chromium/v8/src/objects/string-inl.h @@ -778,6 +778,14 @@ bool String::AsArrayIndex(uint32_t* index) { return SlowAsArrayIndex(index); } +bool String::AsIntegerIndex(size_t* index) { + uint32_t field = hash_field(); + if (IsHashFieldComputed(field) && (field & kIsNotIntegerIndexMask)) { + return false; + } + return SlowAsIntegerIndex(index); +} + SubStringRange::SubStringRange(String string, const DisallowHeapAllocation& no_gc, int first, int length) diff --git a/chromium/v8/src/objects/string.cc b/chromium/v8/src/objects/string.cc index 41de3aef04c..a1eb7f43102 100644 --- a/chromium/v8/src/objects/string.cc +++ b/chromium/v8/src/objects/string.cc @@ -113,7 +113,10 @@ void String::MakeThin(Isolate* isolate, String internalized) { bool has_pointers = StringShape(*this).IsIndirect(); int old_size = this->Size(); - isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc); + // Slot invalidation is not necessary here: ThinString only stores tagged + // value, so it can't store an untagged value in a recorded slot. + isolate->heap()->NotifyObjectLayoutChange(*this, no_gc, + InvalidateRecordedSlots::kNo); bool one_byte = internalized.IsOneByteRepresentation(); Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map() : isolate->factory()->thin_string_map(); @@ -158,7 +161,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { bool has_pointers = StringShape(*this).IsIndirect(); if (has_pointers) { - isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation); + isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation, + InvalidateRecordedSlots::kYes); } // Morph the string to an external string by replacing the map and // reinitializing the fields. This won't work if the space the existing @@ -184,10 +188,6 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { isolate->heap()->CreateFillerObjectAt( this->address() + new_size, size - new_size, has_pointers ? 
ClearRecordedSlots::kYes : ClearRecordedSlots::kNo); - if (has_pointers) { - isolate->heap()->ClearRecordedSlotRange(this->address(), - this->address() + new_size); - } // We are storing the new map using release store after creating a filler for // the left-over space to avoid races with the sweeper thread. @@ -232,7 +232,8 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) { bool has_pointers = StringShape(*this).IsIndirect(); if (has_pointers) { - isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation); + isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation, + InvalidateRecordedSlots::kYes); } // Morph the string to an external string by replacing the map and // reinitializing the fields. This won't work if the space the existing @@ -257,10 +258,6 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) { isolate->heap()->CreateFillerObjectAt( this->address() + new_size, size - new_size, has_pointers ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo); - if (has_pointers) { - isolate->heap()->ClearRecordedSlotRange(this->address(), - this->address() + new_size); - } // We are storing the new map using release store after creating a filler for // the left-over space to avoid races with the sweeper thread. @@ -598,9 +595,8 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) { String source = src; int from = f; int to = t; - while (true) { + while (from < to) { DCHECK_LE(0, from); - DCHECK_LE(from, to); DCHECK_LE(to, source.length()); switch (StringShape(source).full_representation_tag()) { case kOneByteStringTag | kExternalStringTag: { @@ -678,6 +674,7 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) { break; } } + DCHECK_EQ(from, to); } template <typename SourceChar> @@ -1358,25 +1355,39 @@ uint32_t String::ComputeAndSetHash() { return result; } -bool String::ComputeArrayIndex(uint32_t* index) { +bool String::SlowAsArrayIndex(uint32_t* index) { + DisallowHeapAllocation no_gc; int length = this->length(); + if (length <= kMaxCachedArrayIndexLength) { + Hash(); // Force computation of hash code. + uint32_t field = hash_field(); + if ((field & kIsNotArrayIndexMask) != 0) return false; + *index = ArrayIndexValueBits::decode(field); + return true; + } if (length == 0 || length > kMaxArrayIndexSize) return false; StringCharacterStream stream(*this); return StringToArrayIndex(&stream, index); } -bool String::SlowAsArrayIndex(uint32_t* index) { +bool String::SlowAsIntegerIndex(size_t* index) { DisallowHeapAllocation no_gc; - if (length() <= kMaxCachedArrayIndexLength) { - Hash(); // force computation of hash code + int length = this->length(); + if (length <= kMaxCachedArrayIndexLength) { + Hash(); // Force computation of hash code. uint32_t field = hash_field(); - if ((field & kIsNotArrayIndexMask) != 0) return false; - // Isolate the array index form the full hash field. + if ((field & kIsNotArrayIndexMask) != 0) { + // If it was short but it's not an array index, then it can't be an + // integer index either. 
+ DCHECK_NE(0, field & kIsNotIntegerIndexMask); + return false; + } *index = ArrayIndexValueBits::decode(field); return true; - } else { - return ComputeArrayIndex(index); } + if (length == 0 || length > kMaxIntegerIndexSize) return false; + StringCharacterStream stream(*this); + return StringToArrayIndex(&stream, index); } void String::PrintOn(FILE* file) { diff --git a/chromium/v8/src/objects/string.h b/chromium/v8/src/objects/string.h index 27bd7e87652..fcdf75a9686 100644 --- a/chromium/v8/src/objects/string.h +++ b/chromium/v8/src/objects/string.h @@ -5,6 +5,8 @@ #ifndef V8_OBJECTS_STRING_H_ #define V8_OBJECTS_STRING_H_ +#include <memory> + #include "src/base/bits.h" #include "src/base/export-template.h" #include "src/objects/instance-type.h" @@ -306,8 +308,6 @@ class String : public TorqueGeneratedString<String, Name> { RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL, int* length_output = nullptr); - bool ComputeArrayIndex(uint32_t* index); - // Externalization. V8_EXPORT_PRIVATE bool MakeExternal( v8::String::ExternalStringResource* resource); @@ -316,8 +316,12 @@ class String : public TorqueGeneratedString<String, Name> { bool SupportsExternalization(); // Conversion. + // "array index": an index allowed by the ES spec for JSArrays. inline bool AsArrayIndex(uint32_t* index); uint32_t inline ToValidIndex(Object number); + // "integer index": the string is the decimal representation of an + // integer in the range of a size_t. Useful for TypedArray accesses. + inline bool AsIntegerIndex(size_t* index); // Trimming. enum TrimMode { kTrim, kTrimStart, kTrimEnd }; @@ -448,6 +452,7 @@ class String : public TorqueGeneratedString<String, Name> { // Slow case of AsArrayIndex. V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index); + V8_EXPORT_PRIVATE bool SlowAsIntegerIndex(size_t* index); // Compute and set the hash code. V8_EXPORT_PRIVATE uint32_t ComputeAndSetHash(); diff --git a/chromium/v8/src/objects/struct-inl.h b/chromium/v8/src/objects/struct-inl.h index af0fed126b4..34de8897861 100644 --- a/chromium/v8/src/objects/struct-inl.h +++ b/chromium/v8/src/objects/struct-inl.h @@ -22,12 +22,10 @@ namespace internal { TQ_OBJECT_CONSTRUCTORS_IMPL(Struct) TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple2) TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple3) -OBJECT_CONSTRUCTORS_IMPL(AccessorPair, Struct) +TQ_OBJECT_CONSTRUCTORS_IMPL(AccessorPair) TQ_OBJECT_CONSTRUCTORS_IMPL(ClassPositions) -CAST_ACCESSOR(AccessorPair) - void Struct::InitializeBody(int object_size) { Object value = GetReadOnlyRoots().undefined_value(); for (int offset = kHeaderSize; offset < object_size; offset += kTaggedSize) { @@ -35,9 +33,6 @@ void Struct::InitializeBody(int object_size) { } } -ACCESSORS(AccessorPair, getter, Object, kGetterOffset) -ACCESSORS(AccessorPair, setter, Object, kSetterOffset) - TQ_SMI_ACCESSORS(ClassPositions, start) TQ_SMI_ACCESSORS(ClassPositions, end) diff --git a/chromium/v8/src/objects/struct.h b/chromium/v8/src/objects/struct.h index c9372d9ada0..f786c4711af 100644 --- a/chromium/v8/src/objects/struct.h +++ b/chromium/v8/src/objects/struct.h @@ -16,12 +16,13 @@ namespace v8 { namespace internal { // An abstract superclass, a marker class really, for simple structure classes. -// It doesn't carry much functionality but allows struct classes to be +// It doesn't carry any functionality but allows struct classes to be // identified in the type system. 
class Struct : public TorqueGeneratedStruct<Struct, HeapObject> { public: inline void InitializeBody(int object_size); void BriefPrintDetails(std::ostream& os); + STATIC_ASSERT(kHeaderSize == HeapObject::kHeaderSize); TQ_OBJECT_CONSTRUCTORS(Struct) }; @@ -46,13 +47,8 @@ class Tuple3 : public TorqueGeneratedTuple3<Tuple3, Tuple2> { // * a FunctionTemplateInfo: a real (lazy) accessor // * undefined: considered an accessor by the spec, too, strangely enough // * null: an accessor which has not been set -class AccessorPair : public Struct { +class AccessorPair : public TorqueGeneratedAccessorPair<AccessorPair, Struct> { public: - DECL_ACCESSORS(getter, Object) - DECL_ACCESSORS(setter, Object) - - DECL_CAST(AccessorPair) - static Handle<AccessorPair> Copy(Isolate* isolate, Handle<AccessorPair> pair); inline Object get(AccessorComponent component); @@ -71,13 +67,8 @@ class AccessorPair : public Struct { // Dispatched behavior. DECL_PRINTER(AccessorPair) - DECL_VERIFIER(AccessorPair) - - // Layout description. - DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - TORQUE_GENERATED_ACCESSOR_PAIR_FIELDS) - OBJECT_CONSTRUCTORS(AccessorPair, Struct); + TQ_OBJECT_CONSTRUCTORS(AccessorPair) }; class ClassPositions diff --git a/chromium/v8/src/objects/synthetic-module.cc b/chromium/v8/src/objects/synthetic-module.cc index 0cca30a37b9..58e0c1b58cd 100644 --- a/chromium/v8/src/objects/synthetic-module.cc +++ b/chromium/v8/src/objects/synthetic-module.cc @@ -96,7 +96,7 @@ MaybeHandle<Object> SyntheticModule::Evaluate(Isolate* isolate, Utils::ToLocal(Handle<Module>::cast(module))) .ToLocal(&result)) { isolate->PromoteScheduledException(); - module->RecordError(isolate); + module->RecordErrorUsingPendingException(isolate); return MaybeHandle<Object>(); } diff --git a/chromium/v8/src/objects/transitions-inl.h b/chromium/v8/src/objects/transitions-inl.h index 048774f49b5..5694d66d948 100644 --- a/chromium/v8/src/objects/transitions-inl.h +++ b/chromium/v8/src/objects/transitions-inl.h @@ -64,6 +64,10 @@ Name TransitionArray::GetKey(int transition_number) { Get(ToKeyIndex(transition_number))->GetHeapObjectAssumeStrong()); } +Name TransitionArray::GetKey(InternalIndex index) { + return GetKey(index.as_int()); +} + Name TransitionsAccessor::GetKey(int transition_number) { switch (encoding()) { case kPrototypeInfo: @@ -95,7 +99,7 @@ HeapObjectSlot TransitionArray::GetTargetSlot(int transition_number) { // static PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) { DCHECK(!IsSpecialTransition(name.GetReadOnlyRoots(), name)); - int descriptor = target.LastAdded(); + InternalIndex descriptor = target.LastAdded(); DescriptorArray descriptors = target.instance_descriptors(); // Transitions are allowed only for the last added property. 
DCHECK(descriptors.GetKey(descriptor).Equals(name)); @@ -108,7 +112,7 @@ PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) { // static Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) { - int descriptor = transition.LastAdded(); + InternalIndex descriptor = transition.LastAdded(); return transition.instance_descriptors().GetKey(descriptor); } diff --git a/chromium/v8/src/objects/transitions.cc b/chromium/v8/src/objects/transitions.cc index 843b790b7d7..e0ba40ce7d0 100644 --- a/chromium/v8/src/objects/transitions.cc +++ b/chromium/v8/src/objects/transitions.cc @@ -247,7 +247,7 @@ bool TransitionsAccessor::CanHaveMoreTransitions() { bool TransitionsAccessor::IsMatchingMap(Map target, Name name, PropertyKind kind, PropertyAttributes attributes) { - int descriptor = target.LastAdded(); + InternalIndex descriptor = target.LastAdded(); DescriptorArray descriptors = target.instance_descriptors(); Name key = descriptors.GetKey(descriptor); if (key != name) return false; @@ -296,8 +296,7 @@ Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray( new_capacity = Min(kMaxCachedPrototypeTransitions, new_capacity); DCHECK_GT(new_capacity, capacity); int grow_by = new_capacity - capacity; - array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by, - AllocationType::kOld); + array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by); if (capacity < 0) { // There was no prototype transitions array before, so the size // couldn't be copied. Initialize it explicitly. diff --git a/chromium/v8/src/objects/transitions.h b/chromium/v8/src/objects/transitions.h index f21e8cd54e5..5a7db13e516 100644 --- a/chromium/v8/src/objects/transitions.h +++ b/chromium/v8/src/objects/transitions.h @@ -221,6 +221,7 @@ class TransitionArray : public WeakFixedArray { Map* target); // Required for templatized Search interface. + inline Name GetKey(InternalIndex index); static constexpr int kNotFound = -1; inline Name GetSortedKey(int transition_number); diff --git a/chromium/v8/src/objects/value-serializer.cc b/chromium/v8/src/objects/value-serializer.cc index 3b3506fbb91..af5cdc57eaf 100644 --- a/chromium/v8/src/objects/value-serializer.cc +++ b/chromium/v8/src/objects/value-serializer.cc @@ -52,8 +52,6 @@ static const uint32_t kLatestVersion = 13; static_assert(kLatestVersion == v8::CurrentValueSerializerFormatVersion(), "Exported format version must match latest version."); -static const int kPretenureThreshold = 100 * KB; - template <typename T> static size_t BytesNeededForVarint(T value) { static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value, @@ -554,7 +552,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) { case JS_PRIMITIVE_WRAPPER_TYPE: return WriteJSPrimitiveWrapper( Handle<JSPrimitiveWrapper>::cast(receiver)); - case JS_REGEXP_TYPE: + case JS_REG_EXP_TYPE: WriteJSRegExp(JSRegExp::cast(*receiver)); return ThrowIfOutOfMemory(); case JS_MAP_TYPE: @@ -568,7 +566,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) { return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver)); case JS_ERROR_TYPE: return WriteJSError(Handle<JSObject>::cast(receiver)); - case WASM_MODULE_TYPE: { + case WASM_MODULE_OBJECT_TYPE: { auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_); if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) { // Only write WebAssembly modules if not disabled by a flag. 
@@ -576,7 +574,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) { } break; } - case WASM_MEMORY_TYPE: { + case WASM_MEMORY_OBJECT_TYPE: { auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_); if (enabled_features.threads) { return WriteWasmMemory(Handle<WasmMemoryObject>::cast(receiver)); @@ -604,7 +602,7 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) { // map doesn't change. uint32_t properties_written = 0; bool map_changed = false; - for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + for (InternalIndex i : map->IterateOwnDescriptors()) { Handle<Name> key(map->instance_descriptors().GetKey(i), isolate_); if (!key->IsString()) continue; PropertyDetails details = map->instance_descriptors().GetDetails(i); @@ -1025,8 +1023,8 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) { return Nothing<bool>(); } - isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared( - object, isolate_); + GlobalBackingStoreRegistry::Register( + object->array_buffer().GetBackingStore()); WriteTag(SerializationTag::kWasmMemoryTransfer); WriteZigZag<int32_t>(object->maximum_pages()); @@ -1112,8 +1110,6 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate, delegate_(delegate), position_(data.begin()), end_(data.begin() + data.length()), - allocation_(data.length() > kPretenureThreshold ? AllocationType::kOld - : AllocationType::kYoung), id_map_(isolate->global_handles()->Create( ReadOnlyRoots(isolate_).empty_fixed_array())) {} @@ -1302,19 +1298,17 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() { case SerializationTag::kInt32: { Maybe<int32_t> number = ReadZigZag<int32_t>(); if (number.IsNothing()) return MaybeHandle<Object>(); - return isolate_->factory()->NewNumberFromInt(number.FromJust(), - allocation_); + return isolate_->factory()->NewNumberFromInt(number.FromJust()); } case SerializationTag::kUint32: { Maybe<uint32_t> number = ReadVarint<uint32_t>(); if (number.IsNothing()) return MaybeHandle<Object>(); - return isolate_->factory()->NewNumberFromUint(number.FromJust(), - allocation_); + return isolate_->factory()->NewNumberFromUint(number.FromJust()); } case SerializationTag::kDouble: { Maybe<double> number = ReadDouble(); if (number.IsNothing()) return MaybeHandle<Object>(); - return isolate_->factory()->NewNumber(number.FromJust(), allocation_); + return isolate_->factory()->NewNumber(number.FromJust()); } case SerializationTag::kBigInt: return ReadBigInt(); @@ -1398,8 +1392,7 @@ MaybeHandle<BigInt> ValueDeserializer::ReadBigInt() { if (!ReadRawBytes(bytelength).To(&digits_storage)) { return MaybeHandle<BigInt>(); } - return BigInt::FromSerializedDigits(isolate_, bitfield, digits_storage, - allocation_); + return BigInt::FromSerializedDigits(isolate_, bitfield, digits_storage); } MaybeHandle<String> ValueDeserializer::ReadUtf8String() { @@ -1412,7 +1405,7 @@ MaybeHandle<String> ValueDeserializer::ReadUtf8String() { return MaybeHandle<String>(); } return isolate_->factory()->NewStringFromUtf8( - Vector<const char>::cast(utf8_bytes), allocation_); + Vector<const char>::cast(utf8_bytes)); } MaybeHandle<String> ValueDeserializer::ReadOneByteString() { @@ -1424,7 +1417,7 @@ MaybeHandle<String> ValueDeserializer::ReadOneByteString() { !ReadRawBytes(byte_length).To(&bytes)) { return MaybeHandle<String>(); } - return isolate_->factory()->NewStringFromOneByte(bytes, allocation_); + return isolate_->factory()->NewStringFromOneByte(bytes); } MaybeHandle<String> 
ValueDeserializer::ReadTwoByteString() { @@ -1443,7 +1436,7 @@ MaybeHandle<String> ValueDeserializer::ReadTwoByteString() { if (byte_length == 0) return isolate_->factory()->empty_string(); Handle<SeqTwoByteString> string; if (!isolate_->factory() - ->NewRawTwoByteString(byte_length / sizeof(uc16), allocation_) + ->NewRawTwoByteString(byte_length / sizeof(uc16)) .ToHandle(&string)) { return MaybeHandle<String>(); } @@ -1506,8 +1499,8 @@ MaybeHandle<JSObject> ValueDeserializer::ReadJSObject() { uint32_t id = next_id_++; HandleScope scope(isolate_); - Handle<JSObject> object = isolate_->factory()->NewJSObject( - isolate_->object_function(), allocation_); + Handle<JSObject> object = + isolate_->factory()->NewJSObject(isolate_->object_function()); AddObjectWithID(id, object); uint32_t num_properties; @@ -1532,8 +1525,8 @@ MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() { uint32_t id = next_id_++; HandleScope scope(isolate_); - Handle<JSArray> array = isolate_->factory()->NewJSArray( - 0, TERMINAL_FAST_ELEMENTS_KIND, allocation_); + Handle<JSArray> array = + isolate_->factory()->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND); JSArray::SetLength(array, length); AddObjectWithID(id, array); @@ -1569,8 +1562,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() { uint32_t id = next_id_++; HandleScope scope(isolate_); Handle<JSArray> array = isolate_->factory()->NewJSArray( - HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, - allocation_); + HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); AddObjectWithID(id, array); Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_); @@ -1631,22 +1623,21 @@ MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper( Handle<JSPrimitiveWrapper> value; switch (tag) { case SerializationTag::kTrueObject: - value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject( - isolate_->boolean_function(), allocation_)); + value = Handle<JSPrimitiveWrapper>::cast( + isolate_->factory()->NewJSObject(isolate_->boolean_function())); value->set_value(ReadOnlyRoots(isolate_).true_value()); break; case SerializationTag::kFalseObject: - value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject( - isolate_->boolean_function(), allocation_)); + value = Handle<JSPrimitiveWrapper>::cast( + isolate_->factory()->NewJSObject(isolate_->boolean_function())); value->set_value(ReadOnlyRoots(isolate_).false_value()); break; case SerializationTag::kNumberObject: { double number; if (!ReadDouble().To(&number)) return MaybeHandle<JSPrimitiveWrapper>(); - value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject( - isolate_->number_function(), allocation_)); - Handle<Object> number_object = - isolate_->factory()->NewNumber(number, allocation_); + value = Handle<JSPrimitiveWrapper>::cast( + isolate_->factory()->NewJSObject(isolate_->number_function())); + Handle<Object> number_object = isolate_->factory()->NewNumber(number); value->set_value(*number_object); break; } @@ -1654,8 +1645,8 @@ MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper( Handle<BigInt> bigint; if (!ReadBigInt().ToHandle(&bigint)) return MaybeHandle<JSPrimitiveWrapper>(); - value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject( - isolate_->bigint_function(), allocation_)); + value = Handle<JSPrimitiveWrapper>::cast( + isolate_->factory()->NewJSObject(isolate_->bigint_function())); value->set_value(*bigint); break; } @@ -1663,8 +1654,8 @@ 
MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper( Handle<String> string; if (!ReadString().ToHandle(&string)) return MaybeHandle<JSPrimitiveWrapper>(); - value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject( - isolate_->string_function(), allocation_)); + value = Handle<JSPrimitiveWrapper>::cast( + isolate_->factory()->NewJSObject(isolate_->string_function())); value->set_value(*string); break; } @@ -1801,13 +1792,12 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer( byte_length > static_cast<size_t>(end_ - position_)) { return MaybeHandle<JSArrayBuffer>(); } - const bool should_initialize = false; - Handle<JSArrayBuffer> array_buffer = isolate_->factory()->NewJSArrayBuffer( - SharedFlag::kNotShared, allocation_); - if (!JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length, - should_initialize)) { - return MaybeHandle<JSArrayBuffer>(); - } + MaybeHandle<JSArrayBuffer> result = + isolate_->factory()->NewJSArrayBufferAndBackingStore( + byte_length, InitializedFlag::kUninitialized); + Handle<JSArrayBuffer> array_buffer; + if (!result.ToHandle(&array_buffer)) return result; + if (byte_length > 0) { memcpy(array_buffer->backing_store(), position_, byte_length); } @@ -1871,8 +1861,7 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView( return MaybeHandle<JSArrayBufferView>(); } Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray( - external_array_type, buffer, byte_offset, byte_length / element_size, - allocation_); + external_array_type, buffer, byte_offset, byte_length / element_size); AddObjectWithID(id, typed_array); return typed_array; } @@ -2049,9 +2038,6 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() { Handle<WasmMemoryObject> result = WasmMemoryObject::New(isolate_, buffer, maximum_pages); - isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared( - result, isolate_); - AddObjectWithID(id, result); return result; } @@ -2081,9 +2067,10 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map, DisallowHeapAllocation no_gc; DescriptorArray descriptors = object->map().instance_descriptors(); - for (unsigned i = 0; i < properties.size(); i++) { + for (InternalIndex i : InternalIndex::Range(properties.size())) { // Initializing store. - object->WriteToField(i, descriptors.GetDetails(i), *properties[i]); + object->WriteToField(i, descriptors.GetDetails(i), + *properties[i.raw_value()]); } } @@ -2150,7 +2137,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties( // (though generalization may be required), store the property value so // that we can copy them all at once. Otherwise, stop transitioning. 
if (transitioning) { - int descriptor = static_cast<int>(properties.size()); + InternalIndex descriptor(properties.size()); PropertyDetails details = target->instance_descriptors().GetDetails(descriptor); Representation expected_representation = details.representation(); @@ -2316,8 +2303,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() { size_t begin_properties = stack.size() - 2 * static_cast<size_t>(num_properties); - Handle<JSObject> js_object = isolate_->factory()->NewJSObject( - isolate_->object_function(), allocation_); + Handle<JSObject> js_object = + isolate_->factory()->NewJSObject(isolate_->object_function()); if (num_properties && !SetPropertiesFromKeyValuePairs( isolate_, js_object, &stack[begin_properties], num_properties) @@ -2344,8 +2331,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() { return MaybeHandle<Object>(); } - Handle<JSArray> js_array = isolate_->factory()->NewJSArray( - 0, TERMINAL_FAST_ELEMENTS_KIND, allocation_); + Handle<JSArray> js_array = + isolate_->factory()->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND); JSArray::SetLength(js_array, length); size_t begin_properties = stack.size() - 2 * static_cast<size_t>(num_properties); diff --git a/chromium/v8/src/objects/value-serializer.h b/chromium/v8/src/objects/value-serializer.h index cc9bc1caea8..839636ceef8 100644 --- a/chromium/v8/src/objects/value-serializer.h +++ b/chromium/v8/src/objects/value-serializer.h @@ -298,7 +298,6 @@ class ValueDeserializer { v8::ValueDeserializer::Delegate* const delegate_; const uint8_t* position_; const uint8_t* const end_; - AllocationType allocation_; uint32_t version_ = 0; uint32_t next_id_ = 0; bool expect_inline_wasm_ = false; diff --git a/chromium/v8/src/parsing/expression-scope-reparenter.cc b/chromium/v8/src/parsing/expression-scope-reparenter.cc index 3f62616ebd3..2f4914398fd 100644 --- a/chromium/v8/src/parsing/expression-scope-reparenter.cc +++ b/chromium/v8/src/parsing/expression-scope-reparenter.cc @@ -54,7 +54,14 @@ void Reparenter::VisitClassLiteral(ClassLiteral* class_literal) { #if DEBUG // The same goes for the rest of the class, but we do some // sanity checking in debug mode. - for (ClassLiteralProperty* prop : *class_literal->properties()) { + for (ClassLiteralProperty* prop : *class_literal->private_members()) { + // No need to visit the values, since all values are functions with + // the class scope on their scope chain. + DCHECK(prop->value()->IsFunctionLiteral()); + DCHECK_EQ(prop->value()->AsFunctionLiteral()->scope()->outer_scope(), + class_literal->scope()); + } + for (ClassLiteralProperty* prop : *class_literal->public_members()) { // No need to visit the values, since all values are functions with // the class scope on their scope chain. DCHECK(prop->value()->IsFunctionLiteral()); diff --git a/chromium/v8/src/parsing/expression-scope.h b/chromium/v8/src/parsing/expression-scope.h index ba931d36dab..709231ebb0f 100644 --- a/chromium/v8/src/parsing/expression-scope.h +++ b/chromium/v8/src/parsing/expression-scope.h @@ -625,14 +625,8 @@ class AccumulationScope { if (!scope->CanBeExpression()) return; scope_ = scope->AsExpressionParsingScope(); for (int i = 0; i < kNumberOfErrors; i++) { - // If the underlying scope is already invalid at the start, stop - // accumulating. That means an error was found outside of an - // accumulating path. 
- if (!scope_->is_valid(i)) { - scope_ = nullptr; - break; - } copy(i); + scope_->clear(i); } } diff --git a/chromium/v8/src/parsing/parse-info.cc b/chromium/v8/src/parsing/parse-info.cc index e927c1a0d1a..b0a455e88d8 100644 --- a/chromium/v8/src/parsing/parse-info.cc +++ b/chromium/v8/src/parsing/parse-info.cc @@ -7,7 +7,6 @@ #include "src/ast/ast-source-ranges.h" #include "src/ast/ast-value-factory.h" #include "src/ast/ast.h" -#include "src/base/template-utils.h" #include "src/compiler-dispatcher/compiler-dispatcher.h" #include "src/heap/heap-inl.h" #include "src/logging/counters.h" @@ -21,7 +20,7 @@ namespace v8 { namespace internal { ParseInfo::ParseInfo(AccountingAllocator* zone_allocator) - : zone_(base::make_unique<Zone>(zone_allocator, ZONE_NAME)), + : zone_(std::make_unique<Zone>(zone_allocator, ZONE_NAME)), flags_(0), extension_(nullptr), script_scope_(nullptr), @@ -66,6 +65,7 @@ ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator) set_allow_harmony_optional_chaining(FLAG_harmony_optional_chaining); set_allow_harmony_nullish(FLAG_harmony_nullish); set_allow_harmony_private_methods(FLAG_harmony_private_methods); + set_allow_harmony_top_level_await(FLAG_harmony_top_level_await); } ParseInfo::ParseInfo(Isolate* isolate) @@ -129,7 +129,7 @@ std::unique_ptr<ParseInfo> ParseInfo::FromParent( const ParseInfo* outer_parse_info, AccountingAllocator* zone_allocator, const FunctionLiteral* literal, const AstRawString* function_name) { std::unique_ptr<ParseInfo> result = - base::make_unique<ParseInfo>(zone_allocator); + std::make_unique<ParseInfo>(zone_allocator); // Replicate shared state of the outer_parse_info. result->flags_ = outer_parse_info->flags_; diff --git a/chromium/v8/src/parsing/parse-info.h b/chromium/v8/src/parsing/parse-info.h index 8afb1241551..dde331b88a4 100644 --- a/chromium/v8/src/parsing/parse-info.h +++ b/chromium/v8/src/parsing/parse-info.h @@ -110,6 +110,8 @@ class V8_EXPORT_PRIVATE ParseInfo { set_collect_source_positions) FLAG_ACCESSOR(kAllowHarmonyNullish, allow_harmony_nullish, set_allow_harmony_nullish) + FLAG_ACCESSOR(kAllowHarmonyTopLevelAwait, allow_harmony_top_level_await, + set_allow_harmony_top_level_await) #undef FLAG_ACCESSOR @@ -319,6 +321,7 @@ class V8_EXPORT_PRIVATE ParseInfo { kIsOneshotIIFE = 1 << 27, kCollectSourcePositions = 1 << 28, kAllowHarmonyNullish = 1 << 29, + kAllowHarmonyTopLevelAwait = 1 << 30, }; //------------- Inputs to parsing and scope analysis ----------------------- diff --git a/chromium/v8/src/parsing/parser-base.h b/chromium/v8/src/parsing/parser-base.h index 1b3bd64cddf..847774910a9 100644 --- a/chromium/v8/src/parsing/parser-base.h +++ b/chromium/v8/src/parsing/parser-base.h @@ -267,6 +267,7 @@ class ParserBase { allow_harmony_dynamic_import_(false), allow_harmony_import_meta_(false), allow_harmony_private_methods_(false), + allow_harmony_top_level_await_(false), allow_eval_cache_(true) { pointer_buffer_.reserve(32); variable_buffer_.reserve(32); @@ -280,6 +281,7 @@ class ParserBase { ALLOW_ACCESSORS(harmony_dynamic_import) ALLOW_ACCESSORS(harmony_import_meta) ALLOW_ACCESSORS(harmony_private_methods) + ALLOW_ACCESSORS(harmony_top_level_await) ALLOW_ACCESSORS(eval_cache) #undef ALLOW_ACCESSORS @@ -527,9 +529,9 @@ class ParserBase { struct ClassInfo { public: explicit ClassInfo(ParserBase* parser) - : variable(nullptr), - extends(parser->impl()->NullExpression()), - properties(parser->impl()->NewClassPropertyList(4)), + : extends(parser->impl()->NullExpression()), + 
public_members(parser->impl()->NewClassPropertyList(4)), + private_members(parser->impl()->NewClassPropertyList(4)), static_fields(parser->impl()->NewClassPropertyList(4)), instance_fields(parser->impl()->NewClassPropertyList(4)), constructor(parser->impl()->NullExpression()), @@ -540,12 +542,13 @@ class ParserBase { has_instance_members(false), requires_brand(false), is_anonymous(false), + has_private_methods(false), static_fields_scope(nullptr), instance_members_scope(nullptr), computed_field_count(0) {} - Variable* variable; ExpressionT extends; - ClassPropertyListT properties; + ClassPropertyListT public_members; + ClassPropertyListT private_members; ClassPropertyListT static_fields; ClassPropertyListT instance_fields; FunctionLiteralT constructor; @@ -557,6 +560,7 @@ class ParserBase { bool has_instance_members; bool requires_brand; bool is_anonymous; + bool has_private_methods; DeclarationScope* static_fields_scope; DeclarationScope* instance_members_scope; int computed_field_count; @@ -670,8 +674,8 @@ class ParserBase { return new (zone()) DeclarationScope(zone(), parent, EVAL_SCOPE); } - ClassScope* NewClassScope(Scope* parent) const { - return new (zone()) ClassScope(zone(), parent); + ClassScope* NewClassScope(Scope* parent, bool is_anonymous) const { + return new (zone()) ClassScope(zone(), parent, is_anonymous); } Scope* NewScope(ScopeType scope_type) const { @@ -942,7 +946,10 @@ class ParserBase { bool is_resumable() const { return IsResumableFunction(function_state_->kind()); } - + bool is_await_allowed() const { + return is_async_function() || (allow_harmony_top_level_await() && + IsModule(function_state_->kind())); + } const PendingCompilationErrorHandler* pending_error_handler() const { return pending_error_handler_; } @@ -1456,6 +1463,7 @@ class ParserBase { bool allow_harmony_dynamic_import_; bool allow_harmony_import_meta_; bool allow_harmony_private_methods_; + bool allow_harmony_top_level_await_; bool allow_eval_cache_; }; @@ -1582,16 +1590,17 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() { // // Here, we check if this is a new private name reference in a top // level function and throw an error if so. 
- ClassScope* class_scope = scope()->GetClassScope(); + PrivateNameScopeIterator private_name_scope_iter(scope()); // Parse the identifier so that we can display it in the error message name = impl()->GetIdentifier(); - if (class_scope == nullptr) { + if (private_name_scope_iter.Done()) { impl()->ReportMessageAt(Scanner::Location(pos, pos + 1), MessageTemplate::kInvalidPrivateFieldResolution, impl()->GetRawNameFromIdentifier(name)); return impl()->FailureExpression(); } - key = impl()->ExpressionFromPrivateName(class_scope, name, pos); + key = + impl()->ExpressionFromPrivateName(&private_name_scope_iter, name, pos); } else { ReportUnexpectedToken(next); return impl()->FailureExpression(); @@ -3062,7 +3071,7 @@ ParserBase<Impl>::ParseUnaryExpression() { Token::Value op = peek(); if (Token::IsUnaryOrCountOp(op)) return ParseUnaryOrPrefixExpression(); - if (is_async_function() && op == Token::AWAIT) { + if (is_await_allowed() && op == Token::AWAIT) { return ParseAwaitExpression(); } return ParsePostfixExpression(); @@ -3577,7 +3586,19 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters) { auto declaration_end = scope()->declarations()->end(); int initializer_end = end_position(); for (; declaration_it != declaration_end; ++declaration_it) { - declaration_it->var()->set_initializer_position(initializer_end); + Variable* var = declaration_it->var(); + + // The first time a variable is initialized (i.e. when the initializer + // position is unset), clear its maybe_assigned flag as it is not a true + // assignment. Since this is done directly on the Variable objects, it has + // no effect on VariableProxy objects appearing on the left-hand side of + // true assignments, so x will be still be marked as maybe_assigned for: + // (x = 1, y = (x = 2)) => {} + // and even: + // (x = (x = 2)) => {}. 
+ if (var->initializer_position() == kNoSourcePosition) + var->clear_maybe_assigned(); + var->set_initializer_position(initializer_end); } impl()->AddFormalParameter(parameters, pattern, initializer, end_position(), @@ -4355,16 +4376,16 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral( } } - ClassScope* class_scope = NewClassScope(scope()); + ClassScope* class_scope = NewClassScope(scope(), is_anonymous); BlockState block_state(&scope_, class_scope); RaiseLanguageMode(LanguageMode::kStrict); ClassInfo class_info(this); class_info.is_anonymous = is_anonymous; - impl()->DeclareClassVariable(name, &class_info, class_token_pos); scope()->set_start_position(end_position()); if (Check(Token::EXTENDS)) { + ClassScope::HeritageParsingScope heritage(class_scope); FuncNameInferrerState fni_state(&fni_); ExpressionParsingScope scope(impl()); class_info.extends = ParseLeftHandSideExpression(); @@ -4399,7 +4420,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral( if (V8_UNLIKELY(prop_info.is_private)) { DCHECK(!is_constructor); - class_info.requires_brand |= !is_field; + class_info.requires_brand |= (!is_field && !prop_info.is_static); + class_info.has_private_methods |= + property_kind == ClassLiteralProperty::METHOD; impl()->DeclarePrivateClassMember(class_scope, prop_info.name, property, property_kind, prop_info.is_static, &class_info); @@ -4438,7 +4461,20 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral( } if (class_info.requires_brand) { - class_scope->DeclareBrandVariable(ast_value_factory(), kNoSourcePosition); + // TODO(joyee): implement static brand checking + class_scope->DeclareBrandVariable( + ast_value_factory(), IsStaticFlag::kNotStatic, kNoSourcePosition); + } + + bool should_save_class_variable_index = + class_scope->should_save_class_variable_index(); + if (!is_anonymous || should_save_class_variable_index) { + impl()->DeclareClassVariable(class_scope, name, &class_info, + class_token_pos); + if (should_save_class_variable_index) { + class_scope->class_variable()->set_is_used(); + class_scope->class_variable()->ForceContextAllocation(); + } } return impl()->RewriteClassLiteral(class_scope, name, &class_info, @@ -4861,7 +4897,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement( case Token::WHILE: return ParseWhileStatement(labels, own_labels); case Token::FOR: - if (V8_UNLIKELY(is_async_function() && PeekAhead() == Token::AWAIT)) { + if (V8_UNLIKELY(is_await_allowed() && PeekAhead() == Token::AWAIT)) { return ParseForAwaitStatement(labels, own_labels); } return ParseForStatement(labels, own_labels); @@ -5921,7 +5957,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement( ZonePtrList<const AstRawString>* labels, ZonePtrList<const AstRawString>* own_labels) { // for await '(' ForDeclaration of AssignmentExpression ')' - DCHECK(is_async_function()); + DCHECK(is_await_allowed()); typename FunctionState::LoopScope loop_scope(function_state_); int stmt_pos = peek_position(); diff --git a/chromium/v8/src/parsing/parser.cc b/chromium/v8/src/parsing/parser.cc index 3a61253db5a..edb9604bb5c 100644 --- a/chromium/v8/src/parsing/parser.cc +++ b/chromium/v8/src/parsing/parser.cc @@ -427,6 +427,7 @@ Parser::Parser(ParseInfo* info) set_allow_harmony_nullish(info->allow_harmony_nullish()); set_allow_harmony_optional_chaining(info->allow_harmony_optional_chaining()); set_allow_harmony_private_methods(info->allow_harmony_private_methods()); + 
set_allow_harmony_top_level_await(info->allow_harmony_top_level_await()); for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount; ++feature) { use_counts_[feature] = 0; @@ -576,8 +577,32 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) { BuildInitialYield(kNoSourcePosition, kGeneratorFunction); body.Add( factory()->NewExpressionStatement(initial_yield, kNoSourcePosition)); - - ParseModuleItemList(&body); + if (allow_harmony_top_level_await()) { + // First parse statements into a buffer. Then, if there was a + // top level await, create an inner block and rewrite the body of the + // module as an async function. Otherwise merge the statements back + // into the main body. + BlockT block = impl()->NullBlock(); + { + StatementListT statements(pointer_buffer()); + ParseModuleItemList(&statements); + // Modules will always have an initial yield. If there are any + // additional suspends, i.e. awaits, then we treat the module as an + // AsyncModule. + if (function_state.suspend_count() > 1) { + scope->set_is_async_module(); + block = factory()->NewBlock(true, statements); + } else { + statements.MergeInto(&body); + } + } + if (IsAsyncModule(scope->function_kind())) { + impl()->RewriteAsyncFunctionBody( + &body, block, factory()->NewUndefinedLiteral(kNoSourcePosition)); + } + } else { + ParseModuleItemList(&body); + } if (!has_error() && !module()->Validate(this->scope()->AsModuleScope(), pending_error_handler(), zone())) { @@ -705,8 +730,17 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info, info->set_function_name(ast_value_factory()->GetString(name)); scanner_.Initialize(); - FunctionLiteral* result = - DoParseFunction(isolate, info, info->function_name()); + FunctionLiteral* result; + if (V8_UNLIKELY(shared_info->private_name_lookup_skips_outer_class() && + original_scope_->is_class_scope())) { + // If the function skips the outer class and the outer scope is a class, the + // function is in heritage position. Otherwise the function scope's skip bit + // will be correctly inherited from the outer scope. + ClassScope::HeritageParsingScope heritage(original_scope_->AsClassScope()); + result = DoParseFunction(isolate, info, info->function_name()); + } else { + result = DoParseFunction(isolate, info, info->function_name()); + } MaybeResetCharacterStream(info, result); MaybeProcessSourceRanges(info, result, stack_limit_); if (result != nullptr) { @@ -2484,10 +2518,10 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind, bookmark.Set(function_scope->start_position()); UnresolvedList::Iterator unresolved_private_tail; - ClassScope* closest_class_scope = function_scope->GetClassScope(); - if (closest_class_scope != nullptr) { + PrivateNameScopeIterator private_name_scope_iter(function_scope); + if (!private_name_scope_iter.Done()) { unresolved_private_tail = - closest_class_scope->GetUnresolvedPrivateNameTail(); + private_name_scope_iter.GetScope()->GetUnresolvedPrivateNameTail(); } // With no cached data, we partially parse the function, without building an @@ -2511,8 +2545,8 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind, // the state before preparsing. The caller may then fully parse the function // to identify the actual error. 
bookmark.Apply(); - if (closest_class_scope != nullptr) { - closest_class_scope->ResetUnresolvedPrivateNameTail( + if (!private_name_scope_iter.Done()) { + private_name_scope_iter.GetScope()->ResetUnresolvedPrivateNameTail( unresolved_private_tail); } function_scope->ResetAfterPreparsing(ast_value_factory_, true); @@ -2533,8 +2567,8 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind, *num_parameters = logger->num_parameters(); *function_length = logger->function_length(); SkipFunctionLiterals(logger->num_inner_functions()); - if (closest_class_scope != nullptr) { - closest_class_scope->MigrateUnresolvedPrivateNameTail( + if (!private_name_scope_iter.Done()) { + private_name_scope_iter.GetScope()->MigrateUnresolvedPrivateNameTail( factory(), unresolved_private_tail); } function_scope->AnalyzePartially(this, factory(), MaybeParsingArrowhead()); @@ -2739,17 +2773,20 @@ void Parser::ParseFunction( *suspend_count = function_state.suspend_count(); } -void Parser::DeclareClassVariable(const AstRawString* name, +void Parser::DeclareClassVariable(ClassScope* scope, const AstRawString* name, ClassInfo* class_info, int class_token_pos) { #ifdef DEBUG - scope()->SetScopeName(name); + scope->SetScopeName(name); #endif - if (name != nullptr) { - VariableProxy* proxy = - DeclareBoundVariable(name, VariableMode::kConst, class_token_pos); - class_info->variable = proxy->var(); - } + DCHECK_IMPLIES(name == nullptr, class_info->is_anonymous); + // Declare a special class variable for anonymous classes with the dot + // if we need to save it for static private method access. + Variable* class_variable = + scope->DeclareClassVariable(ast_value_factory(), name, class_token_pos); + Declaration* declaration = factory()->NewVariableDeclaration(class_token_pos); + scope->declarations()->Add(declaration); + declaration->set_var(class_variable); } // TODO(gsathya): Ideally, this should just bypass scope analysis and @@ -2764,13 +2801,15 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name) { Variable* Parser::CreatePrivateNameVariable(ClassScope* scope, VariableMode mode, + IsStaticFlag is_static_flag, const AstRawString* name) { DCHECK_NOT_NULL(name); int begin = position(); int end = end_position(); bool was_added = false; DCHECK(IsConstVariableMode(mode)); - Variable* var = scope->DeclarePrivateName(name, mode, &was_added); + Variable* var = + scope->DeclarePrivateName(name, mode, is_static_flag, &was_added); if (!was_added) { Scanner::Location loc(begin, end); ReportMessageAt(loc, MessageTemplate::kVarRedeclaration, var->raw_name()); @@ -2796,7 +2835,7 @@ void Parser::DeclarePublicClassField(ClassScope* scope, CreateSyntheticContextVariable(ClassFieldVariableName( ast_value_factory(), class_info->computed_field_count)); property->set_computed_name_var(computed_name_var); - class_info->properties->Add(property, zone()); + class_info->public_members->Add(property, zone()); } } @@ -2816,15 +2855,17 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope, } } - Variable* private_name_var = - CreatePrivateNameVariable(scope, GetVariableMode(kind), property_name); + Variable* private_name_var = CreatePrivateNameVariable( + scope, GetVariableMode(kind), + is_static ? 
IsStaticFlag::kStatic : IsStaticFlag::kNotStatic, + property_name); int pos = property->value()->position(); if (pos == kNoSourcePosition) { pos = property->key()->position(); } private_name_var->set_initializer_position(pos); property->set_private_name_var(private_name_var); - class_info->properties->Add(property, zone()); + class_info->private_members->Add(property, zone()); } // This method declares a property of the given class. It updates the @@ -2845,7 +2886,7 @@ void Parser::DeclarePublicClassMethod(const AstRawString* class_name, return; } - class_info->properties->Add(property, zone()); + class_info->public_members->Add(property, zone()); } FunctionLiteral* Parser::CreateInitializerFunction( @@ -2894,8 +2935,8 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope, } if (name != nullptr) { - DCHECK_NOT_NULL(class_info->variable); - class_info->variable->set_initializer_position(end_pos); + DCHECK_NOT_NULL(block_scope->class_variable()); + block_scope->class_variable()->set_initializer_position(end_pos); } FunctionLiteral* static_fields_initializer = nullptr; @@ -2916,11 +2957,12 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope, } ClassLiteral* class_literal = factory()->NewClassLiteral( - block_scope, class_info->variable, class_info->extends, - class_info->constructor, class_info->properties, + block_scope, class_info->extends, class_info->constructor, + class_info->public_members, class_info->private_members, static_fields_initializer, instance_members_initializer_function, pos, end_pos, class_info->has_name_static_property, - class_info->has_static_computed_names, class_info->is_anonymous); + class_info->has_static_computed_names, class_info->is_anonymous, + class_info->has_private_methods); AddFunctionForNameInference(class_info->constructor); return class_literal; @@ -3241,7 +3283,7 @@ void Parser::RewriteAsyncFunctionBody(ScopedPtrList<Statement>* body, // }) // } - block->statements()->Add(factory()->NewAsyncReturnStatement( + block->statements()->Add(factory()->NewSyntheticAsyncReturnStatement( return_value, return_value->position()), zone()); block = BuildRejectPromiseOnException(block); diff --git a/chromium/v8/src/parsing/parser.h b/chromium/v8/src/parsing/parser.h index 2bd555e8814..6f570b8751b 100644 --- a/chromium/v8/src/parsing/parser.h +++ b/chromium/v8/src/parsing/parser.h @@ -173,8 +173,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) { parsing::ReportErrorsAndStatisticsMode stats_mode); bool AllowsLazyParsingWithoutUnresolvedVariables() const { - return scope()->AllowsLazyParsingWithoutUnresolvedVariables( - original_scope_); + return !MaybeParsingArrowhead() && + scope()->AllowsLazyParsingWithoutUnresolvedVariables( + original_scope_); } bool parse_lazily() const { return mode_ == PARSE_LAZILY; } @@ -301,6 +302,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) { ZonePtrList<const AstRawString>* names); Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name); Variable* CreatePrivateNameVariable(ClassScope* scope, VariableMode mode, + IsStaticFlag is_static_flag, const AstRawString* name); FunctionLiteral* CreateInitializerFunction( const char* name, DeclarationScope* scope, @@ -314,8 +316,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) { Statement* DeclareClass(const AstRawString* variable_name, Expression* value, ZonePtrList<const AstRawString>* names, int class_token_pos, int end_pos); - void 
DeclareClassVariable(const AstRawString* name, ClassInfo* class_info, - int class_token_pos); + void DeclareClassVariable(ClassScope* scope, const AstRawString* name, + ClassInfo* class_info, int class_token_pos); void DeclareClassBrandVariable(ClassScope* scope, ClassInfo* class_info, int class_token_pos); void DeclarePrivateClassMember(ClassScope* scope, @@ -779,12 +781,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) { Expression* ExpressionFromLiteral(Token::Value token, int pos); - V8_INLINE VariableProxy* ExpressionFromPrivateName(ClassScope* class_scope, - const AstRawString* name, - int start_position) { + V8_INLINE VariableProxy* ExpressionFromPrivateName( + PrivateNameScopeIterator* private_name_scope, const AstRawString* name, + int start_position) { VariableProxy* proxy = factory()->ast_node_factory()->NewVariableProxy( name, NORMAL_VARIABLE, start_position); - class_scope->AddUnresolvedPrivateName(proxy); + private_name_scope->AddUnresolvedPrivateName(proxy); return proxy; } diff --git a/chromium/v8/src/parsing/preparse-data-impl.h b/chromium/v8/src/parsing/preparse-data-impl.h index 0bc8027266c..a993fdf93fd 100644 --- a/chromium/v8/src/parsing/preparse-data-impl.h +++ b/chromium/v8/src/parsing/preparse-data-impl.h @@ -7,6 +7,8 @@ #include "src/parsing/preparse-data.h" +#include <memory> + #include "src/common/assert-scope.h" namespace v8 { @@ -155,16 +157,18 @@ class BaseConsumedPreparseData : public ConsumedPreparseData { int* function_length, int* num_inner_functions, bool* uses_super_property, LanguageMode* language_mode) final; - void RestoreScopeAllocationData(DeclarationScope* scope) final; + void RestoreScopeAllocationData(DeclarationScope* scope, + AstValueFactory* ast_value_factory) final; #ifdef DEBUG bool VerifyDataStart(); #endif private: - void RestoreDataForScope(Scope* scope); + void RestoreDataForScope(Scope* scope, AstValueFactory* ast_value_factory); void RestoreDataForVariable(Variable* var); - void RestoreDataForInnerScopes(Scope* scope); + void RestoreDataForInnerScopes(Scope* scope, + AstValueFactory* ast_value_factory); std::unique_ptr<ByteData> scope_data_; // When consuming the data, these indexes point to the data we're going to diff --git a/chromium/v8/src/parsing/preparse-data.cc b/chromium/v8/src/parsing/preparse-data.cc index 8743732ea2c..460ae65a306 100644 --- a/chromium/v8/src/parsing/preparse-data.cc +++ b/chromium/v8/src/parsing/preparse-data.cc @@ -24,6 +24,10 @@ namespace { using ScopeSloppyEvalCanExtendVarsField = BitField8<bool, 0, 1>; using InnerScopeCallsEvalField = ScopeSloppyEvalCanExtendVarsField::Next<bool, 1>; +using NeedsPrivateNameContextChainRecalcField = + InnerScopeCallsEvalField::Next<bool, 1>; +using ShouldSaveClassVariableIndexField = + NeedsPrivateNameContextChainRecalcField::Next<bool, 1>; using VariableMaybeAssignedField = BitField8<bool, 0, 1>; using VariableContextAllocatedField = VariableMaybeAssignedField::Next<bool, 1>; @@ -322,7 +326,7 @@ void PreparseDataBuilder::SaveScopeAllocationData(DeclarationScope* scope, if (SaveDataForSkippableFunction(builder)) num_inner_with_data_++; } - // Don't save imcoplete scope information when bailed out. + // Don't save incomplete scope information when bailed out. if (!bailed_out_) { #ifdef DEBUG // function data items, kSkippableMinFunctionDataSize each. 
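[Editor's note] The hunk below packs two new per-scope flags (needs_private_name_context_chain_recalc and should_save_class_variable_index) into the same byte as the existing sloppy-eval and inner-scope-calls-eval bits, using the BitField8/Next<> chain declared above. As a rough illustration of that encode/decode pattern, here is a self-contained sketch; the Field template is a simplified stand-in for V8's BitField8, not the real header, and only the field names are taken from the declarations above.

#include <cstdint>
#include <cstdio>

// Simplified stand-in for V8's BitField8 (not the real header): a field of
// `size` bits starting at bit `shift` of a uint8_t, with Next<> appending the
// following field directly after it.
template <typename T, int shift, int size>
struct Field {
  static constexpr uint8_t kMask =
      static_cast<uint8_t>(((1u << size) - 1u) << shift);
  template <typename U, int next_size>
  using Next = Field<U, shift + size, next_size>;
  static constexpr uint8_t encode(T value) {
    return static_cast<uint8_t>((static_cast<unsigned>(value) << shift) & kMask);
  }
  static constexpr T decode(uint8_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

// Field names mirror the chain declared in the hunk above.
using ScopeSloppyEvalCanExtendVarsField = Field<bool, 0, 1>;
using InnerScopeCallsEvalField = ScopeSloppyEvalCanExtendVarsField::Next<bool, 1>;
using NeedsPrivateNameContextChainRecalcField =
    InnerScopeCallsEvalField::Next<bool, 1>;
using ShouldSaveClassVariableIndexField =
    NeedsPrivateNameContextChainRecalcField::Next<bool, 1>;

int main() {
  // Producer side: pack all four per-scope flags into one byte, as
  // SaveDataForScope does in the following hunk before writing it out.
  uint8_t scope_data_flags = static_cast<uint8_t>(
      ScopeSloppyEvalCanExtendVarsField::encode(false) |
      InnerScopeCallsEvalField::encode(true) |
      NeedsPrivateNameContextChainRecalcField::encode(true) |
      ShouldSaveClassVariableIndexField::encode(false));

  // Consumer side: RestoreDataForScope reads the byte back and reacts to
  // each flag, e.g. recording the private-name context chain recalc.
  bool needs_recalc =
      NeedsPrivateNameContextChainRecalcField::decode(scope_data_flags);
  std::printf("needs_private_name_context_chain_recalc: %d\n", needs_recalc);
  return 0;
}

Keeping all of these per-scope booleans in a single byte keeps the serialized preparse data compact: the producer writes one uint8 per scope and the consumer reads it back with the matching decode calls.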
@@ -352,13 +356,20 @@ void PreparseDataBuilder::SaveDataForScope(Scope* scope) { byte_data_.WriteUint8(scope->scope_type()); #endif - uint8_t eval = + uint8_t eval_and_private_recalc = ScopeSloppyEvalCanExtendVarsField::encode( scope->is_declaration_scope() && scope->AsDeclarationScope()->sloppy_eval_can_extend_vars()) | - InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval()); + InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval()) | + NeedsPrivateNameContextChainRecalcField::encode( + scope->is_function_scope() && + scope->AsDeclarationScope() + ->needs_private_name_context_chain_recalc()) | + ShouldSaveClassVariableIndexField::encode( + scope->is_class_scope() && + scope->AsClassScope()->should_save_class_variable_index()); byte_data_.Reserve(kUint8Size); - byte_data_.WriteUint8(eval); + byte_data_.WriteUint8(eval_and_private_recalc); if (scope->is_function_scope()) { Variable* function = scope->AsDeclarationScope()->function_var(); @@ -562,7 +573,7 @@ BaseConsumedPreparseData<Data>::GetDataForSkippableFunction( template <class Data> void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData( - DeclarationScope* scope) { + DeclarationScope* scope, AstValueFactory* ast_value_factory) { DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE); typename ByteData::ReadingScope reading_scope(this); @@ -577,14 +588,15 @@ void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData( DCHECK_EQ(end_position_from_data, scope->end_position()); #endif - RestoreDataForScope(scope); + RestoreDataForScope(scope, ast_value_factory); // Check that we consumed all scope data. DCHECK_EQ(scope_data_->RemainingBytes(), 0); } template <typename Data> -void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) { +void BaseConsumedPreparseData<Data>::RestoreDataForScope( + Scope* scope, AstValueFactory* ast_value_factory) { if (scope->is_declaration_scope() && scope->AsDeclarationScope()->is_skipped_function()) { return; @@ -599,20 +611,48 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(Scope* scope) { DCHECK_EQ(scope_data_->ReadUint8(), scope->scope_type()); CHECK(scope_data_->HasRemainingBytes(ByteData::kUint8Size)); - uint32_t eval = scope_data_->ReadUint8(); - if (ScopeSloppyEvalCanExtendVarsField::decode(eval)) scope->RecordEvalCall(); - if (InnerScopeCallsEvalField::decode(eval)) scope->RecordInnerScopeEvalCall(); + uint32_t scope_data_flags = scope_data_->ReadUint8(); + if (ScopeSloppyEvalCanExtendVarsField::decode(scope_data_flags)) { + scope->RecordEvalCall(); + } + if (InnerScopeCallsEvalField::decode(scope_data_flags)) { + scope->RecordInnerScopeEvalCall(); + } + if (NeedsPrivateNameContextChainRecalcField::decode(scope_data_flags)) { + scope->AsDeclarationScope()->RecordNeedsPrivateNameContextChainRecalc(); + } + if (ShouldSaveClassVariableIndexField::decode(scope_data_flags)) { + Variable* var; + // An anonymous class whose class variable needs to be saved do not + // have the class variable created during reparse since we skip parsing + // the inner scopes that contain potential access to static private + // methods. So create it now. 
+ if (scope->AsClassScope()->is_anonymous_class()) { + var = scope->AsClassScope()->DeclareClassVariable( + ast_value_factory, nullptr, kNoSourcePosition); + AstNodeFactory factory(ast_value_factory, ast_value_factory->zone()); + Declaration* declaration = + factory.NewVariableDeclaration(kNoSourcePosition); + scope->declarations()->Add(declaration); + declaration->set_var(var); + } else { + var = scope->AsClassScope()->class_variable(); + DCHECK_NOT_NULL(var); + } + var->set_is_used(); + var->ForceContextAllocation(); + scope->AsClassScope()->set_should_save_class_variable_index(); + } if (scope->is_function_scope()) { Variable* function = scope->AsDeclarationScope()->function_var(); if (function != nullptr) RestoreDataForVariable(function); } - for (Variable* var : *scope->locals()) { if (IsSerializableVariableMode(var->mode())) RestoreDataForVariable(var); } - RestoreDataForInnerScopes(scope); + RestoreDataForInnerScopes(scope, ast_value_factory); } template <typename Data> @@ -651,10 +691,11 @@ void BaseConsumedPreparseData<Data>::RestoreDataForVariable(Variable* var) { } template <typename Data> -void BaseConsumedPreparseData<Data>::RestoreDataForInnerScopes(Scope* scope) { +void BaseConsumedPreparseData<Data>::RestoreDataForInnerScopes( + Scope* scope, AstValueFactory* ast_value_factory) { for (Scope* inner = scope->inner_scope(); inner != nullptr; inner = inner->sibling()) { - RestoreDataForScope(inner); + RestoreDataForScope(inner, ast_value_factory); } } @@ -731,13 +772,13 @@ ProducedPreparseData* ZoneConsumedPreparseData::GetChildData(Zone* zone, std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For( Isolate* isolate, Handle<PreparseData> data) { DCHECK(!data.is_null()); - return base::make_unique<OnHeapConsumedPreparseData>(isolate, data); + return std::make_unique<OnHeapConsumedPreparseData>(isolate, data); } std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For( Zone* zone, ZonePreparseData* data) { if (data == nullptr) return {}; - return base::make_unique<ZoneConsumedPreparseData>(zone, data); + return std::make_unique<ZoneConsumedPreparseData>(zone, data); } } // namespace internal diff --git a/chromium/v8/src/parsing/preparse-data.h b/chromium/v8/src/parsing/preparse-data.h index 613f13bc82e..581adfa1d5b 100644 --- a/chromium/v8/src/parsing/preparse-data.h +++ b/chromium/v8/src/parsing/preparse-data.h @@ -5,6 +5,8 @@ #ifndef V8_PARSING_PREPARSE_DATA_H_ #define V8_PARSING_PREPARSE_DATA_H_ +#include <memory> + #include "src/common/globals.h" #include "src/handles/handles.h" #include "src/handles/maybe-handles.h" @@ -22,6 +24,7 @@ class Parser; class PreParser; class PreparseData; class ZonePreparseData; +class AstValueFactory; /* @@ -286,7 +289,8 @@ class ConsumedPreparseData { // Restores the information needed for allocating the Scope's (and its // subscopes') variables. 
- virtual void RestoreScopeAllocationData(DeclarationScope* scope) = 0; + virtual void RestoreScopeAllocationData( + DeclarationScope* scope, AstValueFactory* ast_value_factory) = 0; protected: ConsumedPreparseData() = default; diff --git a/chromium/v8/src/parsing/preparser.h b/chromium/v8/src/parsing/preparser.h index b4d66d726fd..adc3d09cac1 100644 --- a/chromium/v8/src/parsing/preparser.h +++ b/chromium/v8/src/parsing/preparser.h @@ -1108,9 +1108,10 @@ class PreParser : public ParserBase<PreParser> { Variable* DeclarePrivateVariableName(const AstRawString* name, ClassScope* scope, VariableMode mode, + IsStaticFlag is_static_flag, bool* was_added) { DCHECK(IsConstVariableMode(mode)); - return scope->DeclarePrivateName(name, mode, was_added); + return scope->DeclarePrivateName(name, mode, is_static_flag, was_added); } Variable* DeclareVariableName(const AstRawString* name, VariableMode mode, @@ -1226,14 +1227,15 @@ class PreParser : public ParserBase<PreParser> { &was_added); return PreParserStatement::Default(); } - V8_INLINE void DeclareClassVariable(const PreParserIdentifier& name, + V8_INLINE void DeclareClassVariable(ClassScope* scope, + const PreParserIdentifier& name, ClassInfo* class_info, int class_token_pos) { - if (!IsNull(name)) { - bool was_added; - DeclareVariableName(name.string_, VariableMode::kConst, scope(), - &was_added); - } + DCHECK_IMPLIES(IsNull(name), class_info->is_anonymous); + // Declare a special class variable for anonymous classes with the dot + // if we need to save it for static private method access. + scope->DeclareClassVariable(ast_value_factory(), name.string_, + class_token_pos); } V8_INLINE void DeclarePublicClassMethod(const PreParserIdentifier& class_name, const PreParserExpression& property, @@ -1258,8 +1260,10 @@ class PreParser : public ParserBase<PreParser> { bool is_static, ClassInfo* class_info) { bool was_added; - DeclarePrivateVariableName(property_name.string_, scope, - GetVariableMode(kind), &was_added); + DeclarePrivateVariableName( + property_name.string_, scope, GetVariableMode(kind), + is_static ? 
IsStaticFlag::kStatic : IsStaticFlag::kNotStatic, + &was_added); if (!was_added) { Scanner::Location loc(property.position(), property.position() + 1); ReportMessageAt(loc, MessageTemplate::kVarRedeclaration, @@ -1591,12 +1595,12 @@ class PreParser : public ParserBase<PreParser> { return PreParserExpression::StringLiteral(); } - PreParserExpression ExpressionFromPrivateName(ClassScope* class_scope, - const PreParserIdentifier& name, - int start_position) { + PreParserExpression ExpressionFromPrivateName( + PrivateNameScopeIterator* private_name_scope, + const PreParserIdentifier& name, int start_position) { VariableProxy* proxy = factory()->ast_node_factory()->NewVariableProxy( name.string_, NORMAL_VARIABLE, start_position); - class_scope->AddUnresolvedPrivateName(proxy); + private_name_scope->AddUnresolvedPrivateName(proxy); return PreParserExpression::FromIdentifier(name); } @@ -1636,11 +1640,11 @@ class PreParser : public ParserBase<PreParser> { return PreParserStatement::Jump(); } - V8_INLINE void AddFormalParameter( - PreParserFormalParameters* parameters, - PreParserExpression& pattern, // NOLINT(runtime/references) - const PreParserExpression& initializer, int initializer_end_position, - bool is_rest) { + V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters, + const PreParserExpression& pattern, + const PreParserExpression& initializer, + int initializer_end_position, + bool is_rest) { DeclarationScope* scope = parameters->scope; scope->RecordParameter(is_rest); parameters->UpdateArityAndFunctionLength(!initializer.IsNull(), is_rest); diff --git a/chromium/v8/src/parsing/scanner-character-streams.cc b/chromium/v8/src/parsing/scanner-character-streams.cc index 0cd295fd292..49c7e1f7933 100644 --- a/chromium/v8/src/parsing/scanner-character-streams.cc +++ b/chromium/v8/src/parsing/scanner-character-streams.cc @@ -265,7 +265,7 @@ class BufferedCharacterStream : public Utf16CharacterStream { } size_t length = Min(kBufferSize, range.length()); - i::CopyCharsUnsigned(buffer_, range.start, length); + i::CopyChars(buffer_, range.start, length); buffer_end_ = &buffer_[length]; return true; } diff --git a/chromium/v8/src/parsing/scanner-character-streams.h b/chromium/v8/src/parsing/scanner-character-streams.h index 4b855674803..c4c7064013f 100644 --- a/chromium/v8/src/parsing/scanner-character-streams.h +++ b/chromium/v8/src/parsing/scanner-character-streams.h @@ -5,6 +5,8 @@ #ifndef V8_PARSING_SCANNER_CHARACTER_STREAMS_H_ #define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_ +#include <memory> + #include "include/v8.h" // for v8::ScriptCompiler #include "src/common/globals.h" diff --git a/chromium/v8/src/parsing/scanner.h b/chromium/v8/src/parsing/scanner.h index c40d8f4ba39..d9216f222a0 100644 --- a/chromium/v8/src/parsing/scanner.h +++ b/chromium/v8/src/parsing/scanner.h @@ -8,6 +8,7 @@ #define V8_PARSING_SCANNER_H_ #include <algorithm> +#include <memory> #include "src/base/logging.h" #include "src/common/globals.h" @@ -443,7 +444,8 @@ class V8_EXPORT_PRIVATE Scanner { #ifdef DEBUG bool CanAccessLiteral() const { return token == Token::PRIVATE_NAME || token == Token::ILLEGAL || - token == Token::UNINITIALIZED || token == Token::REGEXP_LITERAL || + token == Token::ESCAPED_KEYWORD || token == Token::UNINITIALIZED || + token == Token::REGEXP_LITERAL || IsInRange(token, Token::NUMBER, Token::STRING) || Token::IsAnyIdentifier(token) || Token::IsKeyword(token) || IsInRange(token, Token::TEMPLATE_SPAN, Token::TEMPLATE_TAIL); @@ -585,15 +587,18 @@ class V8_EXPORT_PRIVATE Scanner { // 
token as a one-byte literal. E.g. Token::FUNCTION pretends to have a // literal "function". Vector<const uint8_t> literal_one_byte_string() const { - DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token)); + DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token) || + current().token == Token::ESCAPED_KEYWORD); return current().literal_chars.one_byte_literal(); } Vector<const uint16_t> literal_two_byte_string() const { - DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token)); + DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token) || + current().token == Token::ESCAPED_KEYWORD); return current().literal_chars.two_byte_literal(); } bool is_literal_one_byte() const { - DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token)); + DCHECK(current().CanAccessLiteral() || Token::IsKeyword(current().token) || + current().token == Token::ESCAPED_KEYWORD); return current().literal_chars.is_one_byte(); } // Returns the literal string for the next token (the token that diff --git a/chromium/v8/src/parsing/token.cc b/chromium/v8/src/parsing/token.cc index 4dbae2d3f97..ec4b623775a 100644 --- a/chromium/v8/src/parsing/token.cc +++ b/chromium/v8/src/parsing/token.cc @@ -34,7 +34,8 @@ const int8_t Token::precedence_[2][NUM_TOKENS] = {{TOKEN_LIST(T1, T1)}, #undef T2 #undef T1 -#define KT(a, b, c) IsPropertyNameBits::encode(Token::IsAnyIdentifier(a)), +#define KT(a, b, c) \ + IsPropertyNameBits::encode(Token::IsAnyIdentifier(a) || a == ESCAPED_KEYWORD), #define KK(a, b, c) \ IsKeywordBits::encode(true) | IsPropertyNameBits::encode(true), const uint8_t Token::token_flags[] = {TOKEN_LIST(KT, KK)}; diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.cc b/chromium/v8/src/profiler/heap-snapshot-generator.cc index 75b6aa7b77e..42e72207020 100644 --- a/chromium/v8/src/profiler/heap-snapshot-generator.cc +++ b/chromium/v8/src/profiler/heap-snapshot-generator.cc @@ -1306,8 +1306,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj, Isolate* isolate = js_obj.GetIsolate(); if (js_obj.HasFastProperties()) { DescriptorArray descs = js_obj.map().instance_descriptors(); - int real_size = js_obj.map().NumberOfOwnDescriptors(); - for (int i = 0; i < real_size; i++) { + for (InternalIndex i : js_obj.map().IterateOwnDescriptors()) { PropertyDetails details = descs.GetDetails(i); switch (details.location()) { case kField: { diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.h b/chromium/v8/src/profiler/heap-snapshot-generator.h index 360ed1f0092..e6c72ffcf99 100644 --- a/chromium/v8/src/profiler/heap-snapshot-generator.h +++ b/chromium/v8/src/profiler/heap-snapshot-generator.h @@ -6,6 +6,7 @@ #define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_ #include <deque> +#include <memory> #include <unordered_map> #include <unordered_set> #include <vector> diff --git a/chromium/v8/src/profiler/profile-generator-inl.h b/chromium/v8/src/profiler/profile-generator-inl.h index bb5ef0da5b7..e3dc193db27 100644 --- a/chromium/v8/src/profiler/profile-generator-inl.h +++ b/chromium/v8/src/profiler/profile-generator-inl.h @@ -7,6 +7,8 @@ #include "src/profiler/profile-generator.h" +#include <memory> + namespace v8 { namespace internal { diff --git a/chromium/v8/src/profiler/profile-generator.cc b/chromium/v8/src/profiler/profile-generator.cc index f5f71846136..c8fe890b583 100644 --- a/chromium/v8/src/profiler/profile-generator.cc +++ b/chromium/v8/src/profiler/profile-generator.cc @@ -517,7 +517,7 @@ 
CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title, DisallowHeapAllocation no_gc; i::Address raw_filter_context = reinterpret_cast<i::Address>(options_.raw_filter_context()); - context_filter_ = base::make_unique<ContextFilter>(raw_filter_context); + context_filter_ = std::make_unique<ContextFilter>(raw_filter_context); } } diff --git a/chromium/v8/src/profiler/profiler-listener.cc b/chromium/v8/src/profiler/profiler-listener.cc index b00c1f5cfd7..68b3a19912f 100644 --- a/chromium/v8/src/profiler/profiler-listener.cc +++ b/chromium/v8/src/profiler/profiler-listener.cc @@ -28,6 +28,7 @@ ProfilerListener::ProfilerListener(Isolate* isolate, ProfilerListener::~ProfilerListener() = default; void ProfilerListener::CallbackEvent(Name name, Address entry_point) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->instruction_start = entry_point; @@ -38,6 +39,7 @@ void ProfilerListener::CallbackEvent(Name name, Address entry_point) { void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, AbstractCode code, const char* name) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->instruction_start = code.InstructionStart(); @@ -51,6 +53,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, AbstractCode code, Name name) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->instruction_start = code.InstructionStart(); @@ -66,6 +69,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, AbstractCode code, SharedFunctionInfo shared, Name script_name) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->instruction_start = code.InstructionStart(); @@ -96,43 +100,51 @@ CodeEntry* GetOrInsertCachedEntry( } // namespace void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, - AbstractCode abstract_code, - SharedFunctionInfo shared, - Name script_name, int line, int column) { + AbstractCode abstract_code_unsafe, + SharedFunctionInfo shared_unsafe, + Name script_name_unsafe, int line, + int column) { + HandleScope scope(isolate_); + Handle<AbstractCode> abstract_code_handle = + handle(abstract_code_unsafe, isolate_); + Handle<SharedFunctionInfo> shared_handle = handle(shared_unsafe, isolate_); + Handle<Name> script_name_handle = handle(script_name_unsafe, isolate_); + CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; - rec->instruction_start = abstract_code.InstructionStart(); + rec->instruction_start = abstract_code_handle->InstructionStart(); std::unique_ptr<SourcePositionTable> line_table; std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks; std::unordered_set<std::unique_ptr<CodeEntry>, CodeEntry::Hasher, CodeEntry::Equals> cached_inline_entries; bool is_shared_cross_origin = false; - if (shared.script().IsScript()) { - Script script = Script::cast(shared.script()); + if (shared_handle->script().IsScript()) { + Handle<Script> script = + handle(Script::cast(shared_handle->script()), 
isolate_); line_table.reset(new SourcePositionTable()); - HandleScope scope(isolate_); - is_shared_cross_origin = script.origin_options().IsSharedCrossOrigin(); + is_shared_cross_origin = script->origin_options().IsSharedCrossOrigin(); // Add each position to the source position table and store inlining stacks // for inline positions. We store almost the same information in the // profiler as is stored on the code object, except that we transform source // positions to line numbers here, because we only care about attributing // ticks to a given line. - for (SourcePositionTableIterator it(abstract_code.source_position_table()); + for (SourcePositionTableIterator it( + abstract_code_handle->source_position_table()); !it.done(); it.Advance()) { int position = it.source_position().ScriptOffset(); int inlining_id = it.source_position().InliningId(); if (inlining_id == SourcePosition::kNotInlined) { - int line_number = script.GetLineNumber(position) + 1; + int line_number = script->GetLineNumber(position) + 1; line_table->SetPosition(it.code_offset(), line_number, inlining_id); } else { - DCHECK(abstract_code.IsCode()); - Code code = abstract_code.GetCode(); + DCHECK(abstract_code_handle->IsCode()); + Handle<Code> code = handle(abstract_code_handle->GetCode(), isolate_); std::vector<SourcePositionInfo> stack = - it.source_position().InliningStack(handle(code, isolate_)); + it.source_position().InliningStack(code); DCHECK(!stack.empty()); // When we have an inlining id and we are doing cross-script inlining, @@ -165,11 +177,10 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, SourcePosition(pos_info.shared->StartPosition()), pos_info.shared); - std::unique_ptr<CodeEntry> inline_entry = - base::make_unique<CodeEntry>( - tag, GetFunctionName(*pos_info.shared), resource_name, - start_pos_info.line + 1, start_pos_info.column + 1, nullptr, - code.InstructionStart(), inline_is_shared_cross_origin); + std::unique_ptr<CodeEntry> inline_entry = std::make_unique<CodeEntry>( + tag, GetFunctionName(*pos_info.shared), resource_name, + start_pos_info.line + 1, start_pos_info.column + 1, nullptr, + code->InstructionStart(), inline_is_shared_cross_origin); inline_entry->FillFunctionInfo(*pos_info.shared); // Create a canonical CodeEntry for each inlined frame and then re-use @@ -184,24 +195,25 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, } } } - rec->entry = - new CodeEntry(tag, GetFunctionName(shared), - GetName(InferScriptName(script_name, shared)), line, column, - std::move(line_table), abstract_code.InstructionStart(), - is_shared_cross_origin); + rec->entry = new CodeEntry( + tag, GetFunctionName(*shared_handle), + GetName(InferScriptName(*script_name_handle, *shared_handle)), line, + column, std::move(line_table), abstract_code_handle->InstructionStart(), + is_shared_cross_origin); if (!inline_stacks.empty()) { rec->entry->SetInlineStacks(std::move(cached_inline_entries), std::move(inline_stacks)); } - rec->entry->FillFunctionInfo(shared); - rec->instruction_size = abstract_code.InstructionSize(); + rec->entry->FillFunctionInfo(*shared_handle); + rec->instruction_size = abstract_code_handle->InstructionSize(); DispatchCodeEvent(evt_rec); } void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, const wasm::WasmCode* code, wasm::WasmName name) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; 
rec->instruction_start = code->instruction_start(); @@ -214,6 +226,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag, } void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE); CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_; rec->from_instruction_start = from.InstructionStart(); @@ -223,6 +236,7 @@ void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) { void ProfilerListener::CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT); CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_; rec->instruction_start = code.InstructionStart(); @@ -232,6 +246,7 @@ void ProfilerListener::CodeDisableOptEvent(AbstractCode code, void ProfilerListener::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc, int fp_to_sp_delta) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT); CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_; Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc); @@ -248,6 +263,7 @@ void ProfilerListener::CodeDeoptEvent(Code code, DeoptimizeKind kind, } void ProfilerListener::GetterCallbackEvent(Name name, Address entry_point) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->instruction_start = entry_point; @@ -258,6 +274,7 @@ void ProfilerListener::GetterCallbackEvent(Name name, Address entry_point) { } void ProfilerListener::RegExpCodeCreateEvent(AbstractCode code, String source) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->instruction_start = code.InstructionStart(); @@ -270,6 +287,7 @@ void ProfilerListener::RegExpCodeCreateEvent(AbstractCode code, String source) { } void ProfilerListener::SetterCallbackEvent(Name name, Address entry_point) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; rec->instruction_start = entry_point; @@ -280,6 +298,7 @@ void ProfilerListener::SetterCallbackEvent(Name name, Address entry_point) { } void ProfilerListener::NativeContextMoveEvent(Address from, Address to) { + DisallowHeapAllocation no_gc; CodeEventsContainer evt_rec(CodeEventRecord::NATIVE_CONTEXT_MOVE); evt_rec.NativeContextMoveEventRecord_.from_address = from; evt_rec.NativeContextMoveEventRecord_.to_address = to; @@ -287,6 +306,7 @@ void ProfilerListener::NativeContextMoveEvent(Address from, Address to) { } Name ProfilerListener::InferScriptName(Name name, SharedFunctionInfo info) { + DisallowHeapAllocation no_gc; if (name.IsString() && String::cast(name).length()) return name; if (!info.script().IsScript()) return name; Object source_url = Script::cast(info.script()).source_url(); @@ -305,8 +325,10 @@ const char* ProfilerListener::GetFunctionName(SharedFunctionInfo shared) { } } -void ProfilerListener::AttachDeoptInlinedFrames(Code code, +void ProfilerListener::AttachDeoptInlinedFrames(Code code_unsafe, CodeDeoptEventRecord* rec) { + HandleScope scope(isolate_); + Handle<Code> code_handle = handle(code_unsafe, isolate_); int deopt_id = rec->deopt_id; SourcePosition last_position = SourcePosition::Unknown(); 
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID) | @@ -316,7 +338,7 @@ void ProfilerListener::AttachDeoptInlinedFrames(Code code, rec->deopt_frames = nullptr; rec->deopt_frame_count = 0; - for (RelocIterator it(code, mask); !it.done(); it.next()) { + for (RelocIterator it(*code_handle, mask); !it.done(); it.next()) { RelocInfo* info = it.rinfo(); if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) { int script_offset = static_cast<int>(info->data()); @@ -335,7 +357,7 @@ void ProfilerListener::AttachDeoptInlinedFrames(Code code, // scope limits their lifetime. HandleScope scope(isolate_); std::vector<SourcePositionInfo> stack = - last_position.InliningStack(handle(code, isolate_)); + last_position.InliningStack(code_handle); CpuProfileDeoptFrame* deopt_frames = new CpuProfileDeoptFrame[stack.size()]; diff --git a/chromium/v8/src/profiler/sampling-heap-profiler.cc b/chromium/v8/src/profiler/sampling-heap-profiler.cc index de19d39eba6..f5aa1dc3a05 100644 --- a/chromium/v8/src/profiler/sampling-heap-profiler.cc +++ b/chromium/v8/src/profiler/sampling-heap-profiler.cc @@ -9,7 +9,6 @@ #include "src/api/api-inl.h" #include "src/base/ieee754.h" -#include "src/base/template-utils.h" #include "src/base/utils/random-number-generator.h" #include "src/execution/frames-inl.h" #include "src/execution/isolate.h" @@ -89,7 +88,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) { AllocationNode* node = AddStack(); node->allocations_[size]++; auto sample = - base::make_unique<Sample>(size, node, loc, this, next_sample_id()); + std::make_unique<Sample>(size, node, loc, this, next_sample_id()); sample->global.SetWeak(sample.get(), OnWeakCallback, WeakCallbackType::kParameter); samples_.emplace(sample.get(), std::move(sample)); @@ -126,7 +125,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode( DCHECK_EQ(strcmp(child->name_, name), 0); return child; } - auto new_child = base::make_unique<AllocationNode>( + auto new_child = std::make_unique<AllocationNode>( parent, name, script_id, start_position, next_node_id()); return parent->AddChildNode(id, std::move(new_child)); } diff --git a/chromium/v8/src/profiler/tracing-cpu-profiler.h b/chromium/v8/src/profiler/tracing-cpu-profiler.h index d5888f54a35..7a8fabe9581 100644 --- a/chromium/v8/src/profiler/tracing-cpu-profiler.h +++ b/chromium/v8/src/profiler/tracing-cpu-profiler.h @@ -5,6 +5,8 @@ #ifndef V8_PROFILER_TRACING_CPU_PROFILER_H_ #define V8_PROFILER_TRACING_CPU_PROFILER_H_ +#include <memory> + #include "include/v8-platform.h" #include "src/base/atomic-utils.h" #include "src/base/macros.h" diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc index 9e00063487f..62a055e2a20 100644 --- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc +++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc @@ -170,8 +170,11 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) { } case CACHED_MSW: { Register to_advance = GetCachedRegister(reg); - __ Add(to_advance, to_advance, - static_cast<int64_t>(by) << kWRegSizeInBits); + // Sign-extend to int64, shift as uint64, cast back to int64. 
+ __ Add( + to_advance, to_advance, + static_cast<int64_t>(static_cast<uint64_t>(static_cast<int64_t>(by)) + << kWRegSizeInBits)); break; } default: diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc index 13b5c85605e..8babb204dd1 100644 --- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc +++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc @@ -37,7 +37,10 @@ namespace internal { * The remaining registers are free for computations. * Each call to a public method should retain this convention. * - * The stack will have the following structure: + * The stack will have the following structure + * - fp[44] Address regexp (address of the JSRegExp object; unused in + * native code, passed to match signature of + * the interpreter): * - fp[40] Isolate* isolate (address of the current isolate) * - fp[36] lr save area (currently unused) * - fp[32] backchain (currently unused) @@ -83,7 +86,8 @@ namespace internal { * int num_capture_registers, * byte* stack_area_base, * bool direct_call = false, - * Isolate* isolate); + * Isolate* isolate, + * Address regexp); * The call is performed by NativeRegExpMacroAssembler::Execute() * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper. */ diff --git a/chromium/v8/src/regexp/regexp-ast.h b/chromium/v8/src/regexp/regexp-ast.h index aab67cad154..3de29512ea1 100644 --- a/chromium/v8/src/regexp/regexp-ast.h +++ b/chromium/v8/src/regexp/regexp-ast.h @@ -477,7 +477,7 @@ class RegExpCapture final : public RegExpTree { int max_match() override { return body_->max_match(); } RegExpTree* body() { return body_; } void set_body(RegExpTree* body) { body_ = body; } - int index() { return index_; } + int index() const { return index_; } const ZoneVector<uc16>* name() const { return name_; } void set_name(const ZoneVector<uc16>* name) { name_ = name; } static int StartRegister(int index) { return index * 2; } diff --git a/chromium/v8/src/regexp/regexp-bytecode-generator.cc b/chromium/v8/src/regexp/regexp-bytecode-generator.cc index 85b144438ec..0dcc288d3cf 100644 --- a/chromium/v8/src/regexp/regexp-bytecode-generator.cc +++ b/chromium/v8/src/regexp/regexp-bytecode-generator.cc @@ -7,6 +7,7 @@ #include "src/ast/ast.h" #include "src/objects/objects-inl.h" #include "src/regexp/regexp-bytecode-generator-inl.h" +#include "src/regexp/regexp-bytecode-peephole.h" #include "src/regexp/regexp-bytecodes.h" #include "src/regexp/regexp-macro-assembler.h" @@ -18,6 +19,7 @@ RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone) buffer_(Vector<byte>::New(1024)), pc_(0), advance_current_end_(kInvalidPC), + jump_edges_(zone), isolate_(isolate) {} RegExpBytecodeGenerator::~RegExpBytecodeGenerator() { @@ -39,6 +41,7 @@ void RegExpBytecodeGenerator::Bind(Label* l) { int fixup = pos; pos = *reinterpret_cast<int32_t*>(buffer_.begin() + fixup); *reinterpret_cast<uint32_t*>(buffer_.begin() + fixup) = pc_; + jump_edges_.emplace(fixup, pc_); } } l->bind_to(pc_); @@ -46,16 +49,17 @@ void RegExpBytecodeGenerator::Bind(Label* l) { void RegExpBytecodeGenerator::EmitOrLink(Label* l) { if (l == nullptr) l = &backtrack_; + int pos = 0; if (l->is_bound()) { - Emit32(l->pos()); + pos = l->pos(); + jump_edges_.emplace(pc_, pos); } else { - int pos = 0; if (l->is_linked()) { pos = l->pos(); } l->link_to(pc_); - Emit32(pos); } + Emit32(pos); } void RegExpBytecodeGenerator::PopRegister(int register_index) { @@ -365,8 +369,16 @@ void RegExpBytecodeGenerator::IfRegisterEqPos(int 
register_index, Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) { Bind(&backtrack_); Emit(BC_POP_BT, 0); - Handle<ByteArray> array = isolate_->factory()->NewByteArray(length()); - Copy(array->GetDataStartAddress()); + + Handle<ByteArray> array; + if (FLAG_regexp_peephole_optimization) { + array = RegExpBytecodePeepholeOptimization::OptimizeBytecode( + isolate_, zone(), source, buffer_.begin(), length(), jump_edges_); + } else { + array = isolate_->factory()->NewByteArray(length()); + Copy(array->GetDataStartAddress()); + } + return array; } diff --git a/chromium/v8/src/regexp/regexp-bytecode-generator.h b/chromium/v8/src/regexp/regexp-bytecode-generator.h index 84b7ce361c8..dfcc2ca5f8a 100644 --- a/chromium/v8/src/regexp/regexp-bytecode-generator.h +++ b/chromium/v8/src/regexp/regexp-bytecode-generator.h @@ -100,6 +100,12 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler { int advance_current_offset_; int advance_current_end_; + // Stores jump edges emitted for the bytecode (used by + // RegExpBytecodePeepholeOptimization). + // Key: jump source (offset in buffer_ where jump destination is stored). + // Value: jump destination (offset in buffer_ to jump to). + ZoneUnorderedMap<int, int> jump_edges_; + Isolate* isolate_; static const int kInvalidPC = -1; diff --git a/chromium/v8/src/regexp/regexp-bytecode-peephole.cc b/chromium/v8/src/regexp/regexp-bytecode-peephole.cc new file mode 100644 index 00000000000..8f1f1d95a90 --- /dev/null +++ b/chromium/v8/src/regexp/regexp-bytecode-peephole.cc @@ -0,0 +1,1037 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/regexp/regexp-bytecode-peephole.h" + +#include "src/execution/isolate.h" +#include "src/flags/flags.h" +#include "src/objects/fixed-array.h" +#include "src/objects/objects-inl.h" +#include "src/regexp/regexp-bytecodes.h" +#include "src/utils/memcopy.h" +#include "src/utils/utils.h" +#include "src/zone/zone-containers.h" +#include "src/zone/zone.h" + +namespace v8 { +namespace internal { + +namespace { + +struct BytecodeArgument { + int offset; + int length; + + BytecodeArgument(int offset, int length) : offset(offset), length(length) {} +}; + +struct BytecodeArgumentMapping : BytecodeArgument { + int new_length; + + BytecodeArgumentMapping(int offset, int length, int new_length) + : BytecodeArgument(offset, length), new_length(new_length) {} +}; + +struct BytecodeArgumentCheck : BytecodeArgument { + enum CheckType { kCheckAddress = 0, kCheckValue }; + CheckType type; + int check_offset; + int check_length; + + BytecodeArgumentCheck(int offset, int length, int check_offset) + : BytecodeArgument(offset, length), + type(kCheckAddress), + check_offset(check_offset) {} + BytecodeArgumentCheck(int offset, int length, int check_offset, + int check_length) + : BytecodeArgument(offset, length), + type(kCheckValue), + check_offset(check_offset), + check_length(check_length) {} +}; + +// Trie-Node for storing bytecode sequences we want to optimize. +class BytecodeSequenceNode { + public: + // Dummy bytecode used when we need to store/return a bytecode but it's not a + // valid bytecode in the current context. + static constexpr int kDummyBytecode = -1; + + BytecodeSequenceNode(int bytecode, Zone* zone); + // Adds a new node as child of the current node if it isn't a child already. 
+ BytecodeSequenceNode& FollowedBy(int bytecode); + // Marks the end of a sequence and sets optimized bytecode to replace all + // bytecodes of the sequence with. + BytecodeSequenceNode& ReplaceWith(int bytecode); + // Maps arguments of bytecodes in the sequence to the optimized bytecode. + // Order of invocation determines order of arguments in the optimized + // bytecode. + // Invoking this method is only allowed on nodes that mark the end of a valid + // sequence (i.e. after ReplaceWith()). + // bytecode_index_in_sequence: Zero-based index of the referred bytecode + // within the sequence (e.g. the bytecode passed to CreateSequence() has + // index 0). + // argument_offset: Zero-based offset to the argument within the bytecode + // (e.g. the first argument that's not packed with the bytecode has offset 4). + // argument_byte_length: Length of the argument. + // new_argument_byte_length: Length of the argument in the new bytecode + // (= argument_byte_length if omitted). + BytecodeSequenceNode& MapArgument(int bytecode_index_in_sequence, + int argument_offset, + int argument_byte_length, + int new_argument_byte_length = 0); + // Adds a check to the sequence node making it only a valid sequence when the + // argument of the current bytecode at the specified offset matches the offset + // to check against. + // argument_offset: Zero-based offset to the argument within the bytecode + // (e.g. the first argument that's not packed with the bytecode has offset 4). + // argument_byte_length: Length of the argument. + // check_byte_offset: Zero-based offset relative to the beginning of the + // sequence that needs to match the value given by argument_offset. (e.g. + // check_byte_offset 0 matches the address of the first bytecode in the + // sequence). + BytecodeSequenceNode& IfArgumentEqualsOffset(int argument_offset, + int argument_byte_length, + int check_byte_offset); + // Adds a check to the sequence node making it only a valid sequence when the + // argument of the current bytecode at the specified offset matches the + // argument of another bytecode in the sequence. + // This is similar to IfArgumentEqualsOffset, except that this method matches + // the values of both arguments. + BytecodeSequenceNode& IfArgumentEqualsValueAtOffset( + int argument_offset, int argument_byte_length, + int other_bytecode_index_in_sequence, int other_argument_offset, + int other_argument_byte_length); + // Marks an argument as unused. + // All arguments that are not mapped explicitly have to be marked as unused. + // bytecode_index_in_sequence: Zero-based index of the referred bytecode + // within the sequence (e.g. the bytecode passed to CreateSequence() has + // index 0). + // argument_offset: Zero-based offset to the argument within the bytecode + // (e.g. the first argument that's not packed with the bytecode has offset 4). + // argument_byte_length: Length of the argument. + BytecodeSequenceNode& IgnoreArgument(int bytecode_index_in_sequence, + int argument_offset, + int argument_byte_length); + // Checks if the current node is valid for the sequence. I.e. all conditions + // set by IfArgumentEqualsOffset and IfArgumentEquals are fulfilled by this + // node for the actual bytecode sequence. + bool CheckArguments(const byte* bytecode, int pc); + // Returns whether this node marks the end of a valid sequence (i.e. can be + // replaced with an optimized bytecode). + bool IsSequence() const; + // Returns the length of the sequence in bytes. 
+ int SequenceLength() const; + // Returns the optimized bytecode for the node or kDummyBytecode if it is not + // the end of a valid sequence. + int OptimizedBytecode() const; + // Returns the child of the current node matching the given bytecode or + // nullptr if no such child is found. + BytecodeSequenceNode* Find(int bytecode) const; + // Returns number of arguments mapped to the current node. + // Invoking this method is only allowed on nodes that mark the end of a valid + // sequence (i.e. if IsSequence()) + size_t ArgumentSize() const; + // Returns the argument-mapping of the argument at index. + // Invoking this method is only allowed on nodes that mark the end of a valid + // sequence (i.e. if IsSequence()) + BytecodeArgumentMapping ArgumentMapping(size_t index) const; + // Returns an iterator to begin of ignored arguments. + // Invoking this method is only allowed on nodes that mark the end of a valid + // sequence (i.e. if IsSequence()) + ZoneLinkedList<BytecodeArgument>::iterator ArgumentIgnoredBegin() const; + // Returns an iterator to end of ignored arguments. + // Invoking this method is only allowed on nodes that mark the end of a valid + // sequence (i.e. if IsSequence()) + ZoneLinkedList<BytecodeArgument>::iterator ArgumentIgnoredEnd() const; + // Returns whether the current node has ignored argument or not. + bool HasIgnoredArguments() const; + + private: + // Returns a node in the sequence specified by its index within the sequence. + BytecodeSequenceNode& GetNodeByIndexInSequence(int index_in_sequence); + Zone* zone() const; + + int bytecode_; + int bytecode_replacement_; + int index_in_sequence_; + int start_offset_; + BytecodeSequenceNode* parent_; + ZoneUnorderedMap<int, BytecodeSequenceNode*> children_; + ZoneVector<BytecodeArgumentMapping>* argument_mapping_; + ZoneLinkedList<BytecodeArgumentCheck>* argument_check_; + ZoneLinkedList<BytecodeArgument>* argument_ignored_; + + Zone* zone_; +}; + +class RegExpBytecodePeephole { + public: + RegExpBytecodePeephole(Zone* zone, size_t buffer_size, + const ZoneUnorderedMap<int, int>& jump_edges); + + // Parses bytecode and fills the internal buffer with the potentially + // optimized bytecode. Returns true when optimizations were performed, false + // otherwise. + bool OptimizeBytecode(const byte* bytecode, int length); + // Copies the internal bytecode buffer to another buffer. The caller is + // responsible for allocating/freeing the memory. + void CopyOptimizedBytecode(byte* to_address) const; + int Length() const; + + private: + // Sets up all sequences that are going to be used. + void DefineStandardSequences(); + // Starts a new bytecode sequence. + BytecodeSequenceNode& CreateSequence(int bytecode); + // Checks for optimization candidates at pc and emits optimized bytecode to + // the internal buffer. Returns the length of replaced bytecodes in bytes. + int TryOptimizeSequence(const byte* bytecode, int start_pc); + // Emits optimized bytecode to the internal buffer. start_pc points to the + // start of the sequence in bytecode and last_node is the last + // BytecodeSequenceNode of the matching sequence found. + void EmitOptimization(int start_pc, const byte* bytecode, + const BytecodeSequenceNode& last_node); + // Adds a relative jump source fixup at pos. + // Jump source fixups are used to find offsets in the new bytecode that + // contain jump sources. + void AddJumpSourceFixup(int fixup, int pos); + // Adds a relative jump destination fixup at pos. 
+ // Jump destination fixups are used to find offsets in the new bytecode that + // can be jumped to. + void AddJumpDestinationFixup(int fixup, int pos); + // Sets an absolute jump destination fixup at pos. + void SetJumpDestinationFixup(int fixup, int pos); + // Prepare internal structures used to fixup jumps. + void PrepareJumpStructures(const ZoneUnorderedMap<int, int>& jump_edges); + // Updates all jump targets in the new bytecode. + void FixJumps(); + // Update a single jump. + void FixJump(int jump_source, int jump_destination); + void AddSentinelFixups(int pos); + template <typename T> + void EmitValue(T value); + template <typename T> + void OverwriteValue(int offset, T value); + void CopyRangeToOutput(const byte* orig_bytecode, int start, int length); + void SetRange(byte value, int count); + void EmitArgument(int start_pc, const byte* bytecode, + BytecodeArgumentMapping arg); + int pc() const; + Zone* zone() const; + + ZoneVector<byte> optimized_bytecode_buffer_; + BytecodeSequenceNode* sequences_; + // Jumps used in old bytecode. + // Key: Jump source (offset where destination is stored in old bytecode) + // Value: Destination + ZoneMap<int, int> jump_edges_; + // Jumps used in new bytecode. + // Key: Jump source (offset where destination is stored in new bytecode) + // Value: Destination + ZoneMap<int, int> jump_edges_mapped_; + // Number of times a jump destination is used within the bytecode. + // Key: Jump destination (offset in old bytecode). + // Value: Number of times jump destination is used. + ZoneMap<int, int> jump_usage_counts_; + // Maps offsets in old bytecode to fixups of sources (delta to new bytecode). + // Key: Offset in old bytecode from where the fixup is valid. + // Value: Delta to map jump source from old bytecode to new bytecode in bytes. + ZoneMap<int, int> jump_source_fixups_; + // Maps offsets in old bytecode to fixups of destinations (delta to new + // bytecode). + // Key: Offset in old bytecode from where the fixup is valid. + // Value: Delta to map jump destinations from old bytecode to new bytecode in + // bytes. 
+ ZoneMap<int, int> jump_destination_fixups_; + + Zone* zone_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpBytecodePeephole); +}; + +template <typename T> +T GetValue(const byte* buffer, int pos) { + DCHECK(IsAligned(reinterpret_cast<Address>(buffer + pos), alignof(T))); + return *reinterpret_cast<const T*>(buffer + pos); +} + +int32_t GetArgumentValue(const byte* bytecode, int offset, int length) { + switch (length) { + case 1: + return GetValue<byte>(bytecode, offset); + break; + case 2: + return GetValue<int16_t>(bytecode, offset); + break; + case 4: + return GetValue<int32_t>(bytecode, offset); + break; + default: + UNREACHABLE(); + } +} + +BytecodeSequenceNode::BytecodeSequenceNode(int bytecode, Zone* zone) + : bytecode_(bytecode), + bytecode_replacement_(kDummyBytecode), + index_in_sequence_(0), + start_offset_(0), + parent_(nullptr), + children_(ZoneUnorderedMap<int, BytecodeSequenceNode*>(zone)), + argument_mapping_(new (zone->New(sizeof(*argument_mapping_))) + ZoneVector<BytecodeArgumentMapping>(zone)), + argument_check_(new (zone->New(sizeof(*argument_check_))) + ZoneLinkedList<BytecodeArgumentCheck>(zone)), + argument_ignored_(new (zone->New(sizeof(*argument_ignored_))) + ZoneLinkedList<BytecodeArgument>(zone)), + zone_(zone) {} + +BytecodeSequenceNode& BytecodeSequenceNode::FollowedBy(int bytecode) { + DCHECK(0 <= bytecode && bytecode < kRegExpBytecodeCount); + + if (children_.find(bytecode) == children_.end()) { + BytecodeSequenceNode* new_node = + new (zone()->New(sizeof(BytecodeSequenceNode))) + BytecodeSequenceNode(bytecode, zone()); + // If node is not the first in the sequence, set offsets and parent. + if (bytecode_ != kDummyBytecode) { + new_node->start_offset_ = start_offset_ + RegExpBytecodeLength(bytecode_); + new_node->index_in_sequence_ = index_in_sequence_ + 1; + new_node->parent_ = this; + } + children_[bytecode] = new_node; + } + + return *children_[bytecode]; +} + +BytecodeSequenceNode& BytecodeSequenceNode::ReplaceWith(int bytecode) { + DCHECK(0 <= bytecode && bytecode < kRegExpBytecodeCount); + + bytecode_replacement_ = bytecode; + + return *this; +} + +BytecodeSequenceNode& BytecodeSequenceNode::MapArgument( + int bytecode_index_in_sequence, int argument_offset, + int argument_byte_length, int new_argument_byte_length) { + DCHECK(IsSequence()); + DCHECK_LE(bytecode_index_in_sequence, index_in_sequence_); + + BytecodeSequenceNode& ref_node = + GetNodeByIndexInSequence(bytecode_index_in_sequence); + DCHECK_LT(argument_offset, RegExpBytecodeLength(ref_node.bytecode_)); + + int absolute_offset = ref_node.start_offset_ + argument_offset; + if (new_argument_byte_length == 0) { + new_argument_byte_length = argument_byte_length; + } + + argument_mapping_->push_back(BytecodeArgumentMapping{ + absolute_offset, argument_byte_length, new_argument_byte_length}); + + return *this; +} + +BytecodeSequenceNode& BytecodeSequenceNode::IfArgumentEqualsOffset( + int argument_offset, int argument_byte_length, int check_byte_offset) { + DCHECK_LT(argument_offset, RegExpBytecodeLength(bytecode_)); + DCHECK(argument_byte_length == 1 || argument_byte_length == 2 || + argument_byte_length == 4); + + int absolute_offset = start_offset_ + argument_offset; + + argument_check_->push_back(BytecodeArgumentCheck{ + absolute_offset, argument_byte_length, check_byte_offset}); + + return *this; +} + +BytecodeSequenceNode& BytecodeSequenceNode::IfArgumentEqualsValueAtOffset( + int argument_offset, int argument_byte_length, + int other_bytecode_index_in_sequence, int other_argument_offset, + int 
other_argument_byte_length) { + DCHECK_LT(argument_offset, RegExpBytecodeLength(bytecode_)); + DCHECK_LE(other_bytecode_index_in_sequence, index_in_sequence_); + DCHECK_EQ(argument_byte_length, other_argument_byte_length); + + BytecodeSequenceNode& ref_node = + GetNodeByIndexInSequence(other_bytecode_index_in_sequence); + DCHECK_LT(other_argument_offset, RegExpBytecodeLength(ref_node.bytecode_)); + + int absolute_offset = start_offset_ + argument_offset; + int other_absolute_offset = ref_node.start_offset_ + other_argument_offset; + + argument_check_->push_back( + BytecodeArgumentCheck{absolute_offset, argument_byte_length, + other_absolute_offset, other_argument_byte_length}); + + return *this; +} + +BytecodeSequenceNode& BytecodeSequenceNode::IgnoreArgument( + int bytecode_index_in_sequence, int argument_offset, + int argument_byte_length) { + DCHECK(IsSequence()); + DCHECK_LE(bytecode_index_in_sequence, index_in_sequence_); + + BytecodeSequenceNode& ref_node = + GetNodeByIndexInSequence(bytecode_index_in_sequence); + DCHECK_LT(argument_offset, RegExpBytecodeLength(ref_node.bytecode_)); + + int absolute_offset = ref_node.start_offset_ + argument_offset; + + argument_ignored_->push_back( + BytecodeArgument{absolute_offset, argument_byte_length}); + + return *this; +} + +bool BytecodeSequenceNode::CheckArguments(const byte* bytecode, int pc) { + bool is_valid = true; + for (auto check_iter = argument_check_->begin(); + check_iter != argument_check_->end() && is_valid; check_iter++) { + auto value = + GetArgumentValue(bytecode, pc + check_iter->offset, check_iter->length); + if (check_iter->type == BytecodeArgumentCheck::kCheckAddress) { + is_valid &= value == pc + check_iter->check_offset; + } else if (check_iter->type == BytecodeArgumentCheck::kCheckValue) { + auto other_value = GetArgumentValue( + bytecode, pc + check_iter->check_offset, check_iter->check_length); + is_valid &= value == other_value; + } else { + UNREACHABLE(); + } + } + return is_valid; +} + +bool BytecodeSequenceNode::IsSequence() const { + return bytecode_replacement_ != kDummyBytecode; +} + +int BytecodeSequenceNode::SequenceLength() const { + return start_offset_ + RegExpBytecodeLength(bytecode_); +} + +int BytecodeSequenceNode::OptimizedBytecode() const { + return bytecode_replacement_; +} + +BytecodeSequenceNode* BytecodeSequenceNode::Find(int bytecode) const { + auto found = children_.find(bytecode); + if (found == children_.end()) return nullptr; + return found->second; +} + +size_t BytecodeSequenceNode::ArgumentSize() const { + DCHECK(IsSequence()); + return argument_mapping_->size(); +} + +BytecodeArgumentMapping BytecodeSequenceNode::ArgumentMapping( + size_t index) const { + DCHECK(IsSequence()); + DCHECK(argument_mapping_ != nullptr); + DCHECK_GE(index, 0); + DCHECK_LT(index, argument_mapping_->size()); + + return argument_mapping_->at(index); +} + +ZoneLinkedList<BytecodeArgument>::iterator +BytecodeSequenceNode::ArgumentIgnoredBegin() const { + DCHECK(IsSequence()); + DCHECK(argument_ignored_ != nullptr); + return argument_ignored_->begin(); +} + +ZoneLinkedList<BytecodeArgument>::iterator +BytecodeSequenceNode::ArgumentIgnoredEnd() const { + DCHECK(IsSequence()); + DCHECK(argument_ignored_ != nullptr); + return argument_ignored_->end(); +} + +bool BytecodeSequenceNode::HasIgnoredArguments() const { + return argument_ignored_ != nullptr; +} + +BytecodeSequenceNode& BytecodeSequenceNode::GetNodeByIndexInSequence( + int index_in_sequence) { + DCHECK_LE(index_in_sequence, index_in_sequence_); + + if 
(index_in_sequence < index_in_sequence_) { + DCHECK(parent_ != nullptr); + return parent_->GetNodeByIndexInSequence(index_in_sequence); + } else { + return *this; + } +} + +Zone* BytecodeSequenceNode::zone() const { return zone_; } + +RegExpBytecodePeephole::RegExpBytecodePeephole( + Zone* zone, size_t buffer_size, + const ZoneUnorderedMap<int, int>& jump_edges) + : optimized_bytecode_buffer_(zone), + sequences_(new (zone->New(sizeof(*sequences_))) BytecodeSequenceNode( + BytecodeSequenceNode::kDummyBytecode, zone)), + jump_edges_(zone), + jump_edges_mapped_(zone), + jump_usage_counts_(zone), + jump_source_fixups_(zone), + jump_destination_fixups_(zone), + zone_(zone) { + optimized_bytecode_buffer_.reserve(buffer_size); + PrepareJumpStructures(jump_edges); + DefineStandardSequences(); + // Sentinel fixups at beginning of bytecode (position -1) so we don't have to + // check for end of iterator inside the fixup loop. + // In general fixups are deltas of original offsets of jump + // sources/destinations (in the old bytecode) to find them in the new + // bytecode. All jump targets are fixed after the new bytecode is fully + // emitted in the internal buffer. + AddSentinelFixups(-1); + // Sentinel fixups at end of (old) bytecode so we don't have to check for + // end of iterator inside the fixup loop. + DCHECK_LE(buffer_size, std::numeric_limits<int>::max()); + AddSentinelFixups(static_cast<int>(buffer_size)); +} + +void RegExpBytecodePeephole::DefineStandardSequences() { + // Commonly used sequences can be found by creating regexp bytecode traces + // (--trace-regexp-bytecodes) and using v8/tools/regexp-sequences.py. + CreateSequence(BC_LOAD_CURRENT_CHAR) + .FollowedBy(BC_CHECK_BIT_IN_TABLE) + .FollowedBy(BC_ADVANCE_CP_AND_GOTO) + // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the + // first bytecode in this sequence. + .IfArgumentEqualsOffset(4, 4, 0) + .ReplaceWith(BC_SKIP_UNTIL_BIT_IN_TABLE) + .MapArgument(0, 1, 3) // load offset + .MapArgument(2, 1, 3, 4) // advance by + .MapArgument(1, 8, 16) // bit table + .MapArgument(1, 4, 4) // goto when match + .MapArgument(0, 4, 4) // goto on failure + .IgnoreArgument(2, 4, 4); // loop jump + + CreateSequence(BC_CHECK_CURRENT_POSITION) + .FollowedBy(BC_LOAD_CURRENT_CHAR_UNCHECKED) + .FollowedBy(BC_CHECK_CHAR) + .FollowedBy(BC_ADVANCE_CP_AND_GOTO) + // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the + // first bytecode in this sequence. + .IfArgumentEqualsOffset(4, 4, 0) + .ReplaceWith(BC_SKIP_UNTIL_CHAR_POS_CHECKED) + .MapArgument(1, 1, 3) // load offset + .MapArgument(3, 1, 3, 2) // advance_by + .MapArgument(2, 1, 3, 2) // c + .MapArgument(0, 1, 3, 4) // eats at least + .MapArgument(2, 4, 4) // goto when match + .MapArgument(0, 4, 4) // goto on failure + .IgnoreArgument(3, 4, 4); // loop jump + + CreateSequence(BC_CHECK_CURRENT_POSITION) + .FollowedBy(BC_LOAD_CURRENT_CHAR_UNCHECKED) + .FollowedBy(BC_AND_CHECK_CHAR) + .FollowedBy(BC_ADVANCE_CP_AND_GOTO) + // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the + // first bytecode in this sequence. 
+ .IfArgumentEqualsOffset(4, 4, 0) + .ReplaceWith(BC_SKIP_UNTIL_CHAR_AND) + .MapArgument(1, 1, 3) // load offset + .MapArgument(3, 1, 3, 2) // advance_by + .MapArgument(2, 1, 3, 2) // c + .MapArgument(2, 4, 4) // mask + .MapArgument(0, 1, 3, 4) // eats at least + .MapArgument(2, 8, 4) // goto when match + .MapArgument(0, 4, 4) // goto on failure + .IgnoreArgument(3, 4, 4); // loop jump + + // TODO(pthier): It might make sense for short sequences like this one to only + // optimize them if the resulting optimization is not longer than the current + // one. This could be the case if there are jumps inside the sequence and we + // have to replicate parts of the sequence. A method to mark such sequences + // might be useful. + CreateSequence(BC_LOAD_CURRENT_CHAR) + .FollowedBy(BC_CHECK_CHAR) + .FollowedBy(BC_ADVANCE_CP_AND_GOTO) + // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the + // first bytecode in this sequence. + .IfArgumentEqualsOffset(4, 4, 0) + .ReplaceWith(BC_SKIP_UNTIL_CHAR) + .MapArgument(0, 1, 3) // load offset + .MapArgument(2, 1, 3, 2) // advance by + .MapArgument(1, 1, 3, 2) // character + .MapArgument(1, 4, 4) // goto when match + .MapArgument(0, 4, 4) // goto on failure + .IgnoreArgument(2, 4, 4); // loop jump + + CreateSequence(BC_LOAD_CURRENT_CHAR) + .FollowedBy(BC_CHECK_CHAR) + .FollowedBy(BC_CHECK_CHAR) + // Sequence is only valid if the jump targets of both CHECK_CHAR bytecodes + // are equal. + .IfArgumentEqualsValueAtOffset(4, 4, 1, 4, 4) + .FollowedBy(BC_ADVANCE_CP_AND_GOTO) + // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the + // first bytecode in this sequence. + .IfArgumentEqualsOffset(4, 4, 0) + .ReplaceWith(BC_SKIP_UNTIL_CHAR_OR_CHAR) + .MapArgument(0, 1, 3) // load offset + .MapArgument(3, 1, 3, 4) // advance by + .MapArgument(1, 1, 3, 2) // character 1 + .MapArgument(2, 1, 3, 2) // character 2 + .MapArgument(1, 4, 4) // goto when match + .MapArgument(0, 4, 4) // goto on failure + .IgnoreArgument(2, 4, 4) // goto when match 2 + .IgnoreArgument(3, 4, 4); // loop jump + + CreateSequence(BC_LOAD_CURRENT_CHAR) + .FollowedBy(BC_CHECK_GT) + // Sequence is only valid if the jump target of CHECK_GT is the first + // bytecode AFTER the whole sequence. + .IfArgumentEqualsOffset(4, 4, 56) + .FollowedBy(BC_CHECK_BIT_IN_TABLE) + // Sequence is only valid if the jump target of CHECK_BIT_IN_TABLE is + // the ADVANCE_CP_AND_GOTO bytecode at the end of the sequence. + .IfArgumentEqualsOffset(4, 4, 48) + .FollowedBy(BC_GOTO) + // Sequence is only valid if the jump target of GOTO is the same as the + // jump target of CHECK_GT (i.e. both jump to the first bytecode AFTER the + // whole sequence. + .IfArgumentEqualsValueAtOffset(4, 4, 1, 4, 4) + .FollowedBy(BC_ADVANCE_CP_AND_GOTO) + // Sequence is only valid if the jump target of ADVANCE_CP_AND_GOTO is the + // first bytecode in this sequence. 
+ .IfArgumentEqualsOffset(4, 4, 0) + .ReplaceWith(BC_SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE) + .MapArgument(0, 1, 3) // load offset + .MapArgument(4, 1, 3, 2) // advance by + .MapArgument(1, 1, 3, 2) // character + .MapArgument(2, 8, 16) // bit table + .MapArgument(1, 4, 4) // goto when match + .MapArgument(0, 4, 4) // goto on failure + .IgnoreArgument(2, 4, 4) // indirect loop jump + .IgnoreArgument(3, 4, 4) // jump out of loop + .IgnoreArgument(4, 4, 4); // loop jump +} + +bool RegExpBytecodePeephole::OptimizeBytecode(const byte* bytecode, + int length) { + int old_pc = 0; + bool did_optimize = false; + + while (old_pc < length) { + int replaced_len = TryOptimizeSequence(bytecode, old_pc); + if (replaced_len > 0) { + old_pc += replaced_len; + did_optimize = true; + } else { + int bc = bytecode[old_pc]; + int bc_len = RegExpBytecodeLength(bc); + CopyRangeToOutput(bytecode, old_pc, bc_len); + old_pc += bc_len; + } + } + + if (did_optimize) { + FixJumps(); + } + + return did_optimize; +} + +void RegExpBytecodePeephole::CopyOptimizedBytecode(byte* to_address) const { + MemCopy(to_address, &(*optimized_bytecode_buffer_.begin()), Length()); +} + +int RegExpBytecodePeephole::Length() const { return pc(); } + +BytecodeSequenceNode& RegExpBytecodePeephole::CreateSequence(int bytecode) { + DCHECK(sequences_ != nullptr); + DCHECK(0 <= bytecode && bytecode < kRegExpBytecodeCount); + + return sequences_->FollowedBy(bytecode); +} + +int RegExpBytecodePeephole::TryOptimizeSequence(const byte* bytecode, + int start_pc) { + BytecodeSequenceNode* seq_node = sequences_; + BytecodeSequenceNode* valid_seq_end = nullptr; + + int current_pc = start_pc; + + // Check for the longest valid sequence matching any of the pre-defined + // sequences in the Trie data structure. + while ((seq_node = seq_node->Find(bytecode[current_pc]))) { + if (!seq_node->CheckArguments(bytecode, start_pc)) { + break; + } + if (seq_node->IsSequence()) { + valid_seq_end = seq_node; + } + current_pc += RegExpBytecodeLength(bytecode[current_pc]); + } + + if (valid_seq_end) { + EmitOptimization(start_pc, bytecode, *valid_seq_end); + return valid_seq_end->SequenceLength(); + } + + return 0; +} + +void RegExpBytecodePeephole::EmitOptimization( + int start_pc, const byte* bytecode, const BytecodeSequenceNode& last_node) { +#ifdef DEBUG + int optimized_start_pc = pc(); +#endif + // Jump sources that are mapped or marked as unused will be deleted at the end + // of this method. We don't delete them immediately as we might need the + // information when we have to preserve bytecodes at the end. + // TODO(pthier): Replace with a stack-allocated data structure. + ZoneLinkedList<int> delete_jumps = ZoneLinkedList<int>(zone()); + + uint32_t bc = last_node.OptimizedBytecode(); + EmitValue(bc); + + for (size_t arg = 0; arg < last_node.ArgumentSize(); arg++) { + BytecodeArgumentMapping arg_map = last_node.ArgumentMapping(arg); + int arg_pos = start_pc + arg_map.offset; + // If we map any jump source we mark the old source for deletion and insert + // a new jump. + auto jump_edge_iter = jump_edges_.find(arg_pos); + if (jump_edge_iter != jump_edges_.end()) { + int jump_source = jump_edge_iter->first; + int jump_destination = jump_edge_iter->second; + // Add new jump edge add current position. + jump_edges_mapped_.emplace(Length(), jump_destination); + // Mark old jump edge for deletion. + delete_jumps.push_back(jump_source); + // Decrement usage count of jump destination. 
+ auto jump_count_iter = jump_usage_counts_.find(jump_destination); + DCHECK(jump_count_iter != jump_usage_counts_.end()); + int& usage_count = jump_count_iter->second; + --usage_count; + } + // TODO(pthier): DCHECK that mapped arguments are never sources of jumps + // to destinations inside the sequence. + EmitArgument(start_pc, bytecode, arg_map); + } + DCHECK_EQ(pc(), optimized_start_pc + + RegExpBytecodeLength(last_node.OptimizedBytecode())); + + // Remove jumps from arguments we ignore. + if (last_node.HasIgnoredArguments()) { + for (auto ignored_arg = last_node.ArgumentIgnoredBegin(); + ignored_arg != last_node.ArgumentIgnoredEnd(); ignored_arg++) { + auto jump_edge_iter = jump_edges_.find(start_pc + ignored_arg->offset); + if (jump_edge_iter != jump_edges_.end()) { + int jump_source = jump_edge_iter->first; + int jump_destination = jump_edge_iter->second; + // Mark old jump edge for deletion. + delete_jumps.push_back(jump_source); + // Decrement usage count of jump destination. + auto jump_count_iter = jump_usage_counts_.find(jump_destination); + DCHECK(jump_count_iter != jump_usage_counts_.end()); + int& usage_count = jump_count_iter->second; + --usage_count; + } + } + } + + int fixup_length = RegExpBytecodeLength(bc) - last_node.SequenceLength(); + + // Check if there are any jumps inside the old sequence. + // If so we have to keep the bytecodes that are jumped to around. + auto jump_destination_candidate = jump_usage_counts_.upper_bound(start_pc); + int jump_candidate_destination = jump_destination_candidate->first; + int jump_candidate_count = jump_destination_candidate->second; + // Jump destinations only jumped to from inside the sequence will be ignored. + while (jump_destination_candidate != jump_usage_counts_.end() && + jump_candidate_count == 0) { + ++jump_destination_candidate; + jump_candidate_destination = jump_destination_candidate->first; + jump_candidate_count = jump_destination_candidate->second; + } + + int preserve_from = start_pc + last_node.SequenceLength(); + if (jump_destination_candidate != jump_usage_counts_.end() && + jump_candidate_destination < start_pc + last_node.SequenceLength()) { + preserve_from = jump_candidate_destination; + // Check if any jump in the sequence we are preserving has a jump + // destination inside the optimized sequence before the current position we + // want to preserve. If so we have to preserve all bytecodes starting at + // this jump destination. + for (auto jump_iter = jump_edges_.lower_bound(preserve_from); + jump_iter != jump_edges_.end() && + jump_iter->first /* jump source */ < + start_pc + last_node.SequenceLength(); + ++jump_iter) { + int jump_destination = jump_iter->second; + if (jump_destination > start_pc && jump_destination < preserve_from) { + preserve_from = jump_destination; + } + } + + // We preserve everything to the end of the sequence. This is conservative + // since it would be enough to preserve all bytecudes up to an unconditional + // jump. + int preserve_length = start_pc + last_node.SequenceLength() - preserve_from; + fixup_length += preserve_length; + // Jumps after the start of the preserved sequence need fixup. + AddJumpSourceFixup(fixup_length, + start_pc + last_node.SequenceLength() - preserve_length); + // All jump targets after the start of the optimized sequence need to be + // fixed relative to the length of the optimized sequence including + // bytecodes we preserved. 
+ AddJumpDestinationFixup(fixup_length, start_pc + 1); + // Jumps to the sequence we preserved need absolute fixup as they could + // occur before or after the sequence. + SetJumpDestinationFixup(pc() - preserve_from, preserve_from); + CopyRangeToOutput(bytecode, preserve_from, preserve_length); + } else { + AddJumpDestinationFixup(fixup_length, start_pc + 1); + // Jumps after the end of the old sequence need fixup. + AddJumpSourceFixup(fixup_length, start_pc + last_node.SequenceLength()); + } + + // Delete jumps we definitely don't need anymore + for (int del : delete_jumps) { + if (del < preserve_from) { + jump_edges_.erase(del); + } + } +} + +void RegExpBytecodePeephole::AddJumpSourceFixup(int fixup, int pos) { + auto previous_fixup = jump_source_fixups_.lower_bound(pos); + DCHECK(previous_fixup != jump_source_fixups_.end()); + DCHECK(previous_fixup != jump_source_fixups_.begin()); + + int previous_fixup_value = (--previous_fixup)->second; + jump_source_fixups_[pos] = previous_fixup_value + fixup; +} + +void RegExpBytecodePeephole::AddJumpDestinationFixup(int fixup, int pos) { + auto previous_fixup = jump_destination_fixups_.lower_bound(pos); + DCHECK(previous_fixup != jump_destination_fixups_.end()); + DCHECK(previous_fixup != jump_destination_fixups_.begin()); + + int previous_fixup_value = (--previous_fixup)->second; + jump_destination_fixups_[pos] = previous_fixup_value + fixup; +} + +void RegExpBytecodePeephole::SetJumpDestinationFixup(int fixup, int pos) { + auto previous_fixup = jump_destination_fixups_.lower_bound(pos); + DCHECK(previous_fixup != jump_destination_fixups_.end()); + DCHECK(previous_fixup != jump_destination_fixups_.begin()); + + int previous_fixup_value = (--previous_fixup)->second; + jump_destination_fixups_.emplace(pos, fixup); + jump_destination_fixups_.emplace(pos + 1, previous_fixup_value); +} + +void RegExpBytecodePeephole::PrepareJumpStructures( + const ZoneUnorderedMap<int, int>& jump_edges) { + for (auto jump_edge : jump_edges) { + int jump_source = jump_edge.first; + int jump_destination = jump_edge.second; + + jump_edges_.emplace(jump_source, jump_destination); + jump_usage_counts_[jump_destination]++; + } +} + +void RegExpBytecodePeephole::FixJumps() { + int position_fixup = 0; + // Next position where fixup changes. + auto next_source_fixup = jump_source_fixups_.lower_bound(0); + int next_source_fixup_offset = next_source_fixup->first; + int next_source_fixup_value = next_source_fixup->second; + + for (auto jump_edge : jump_edges_) { + int jump_source = jump_edge.first; + int jump_destination = jump_edge.second; + while (jump_source >= next_source_fixup_offset) { + position_fixup = next_source_fixup_value; + ++next_source_fixup; + next_source_fixup_offset = next_source_fixup->first; + next_source_fixup_value = next_source_fixup->second; + } + jump_source += position_fixup; + + FixJump(jump_source, jump_destination); + } + + // Mapped jump edges don't need source fixups, as the position already is an + // offset in the new bytecode. 
+ for (auto jump_edge : jump_edges_mapped_) { + int jump_source = jump_edge.first; + int jump_destination = jump_edge.second; + + FixJump(jump_source, jump_destination); + } +} + +void RegExpBytecodePeephole::FixJump(int jump_source, int jump_destination) { + int fixed_jump_destination = + jump_destination + + (--jump_destination_fixups_.upper_bound(jump_destination))->second; + DCHECK_LT(fixed_jump_destination, Length()); +#ifdef DEBUG + // TODO(pthier): This check could be better if we track the bytecodes + // actually used and check if we jump to one of them. + byte jump_bc = optimized_bytecode_buffer_[fixed_jump_destination]; + DCHECK_GT(jump_bc, 0); + DCHECK_LT(jump_bc, kRegExpBytecodeCount); +#endif + + if (jump_destination != fixed_jump_destination) { + OverwriteValue<uint32_t>(jump_source, fixed_jump_destination); + } +} + +void RegExpBytecodePeephole::AddSentinelFixups(int pos) { + jump_source_fixups_.emplace(pos, 0); + jump_destination_fixups_.emplace(pos, 0); +} + +template <typename T> +void RegExpBytecodePeephole::EmitValue(T value) { + DCHECK(optimized_bytecode_buffer_.begin() + pc() == + optimized_bytecode_buffer_.end()); + byte* value_byte_iter = reinterpret_cast<byte*>(&value); + optimized_bytecode_buffer_.insert(optimized_bytecode_buffer_.end(), + value_byte_iter, + value_byte_iter + sizeof(T)); +} + +template <typename T> +void RegExpBytecodePeephole::OverwriteValue(int offset, T value) { + byte* value_byte_iter = reinterpret_cast<byte*>(&value); + byte* value_byte_iter_end = value_byte_iter + sizeof(T); + while (value_byte_iter < value_byte_iter_end) { + optimized_bytecode_buffer_[offset++] = *value_byte_iter++; + } +} + +void RegExpBytecodePeephole::CopyRangeToOutput(const byte* orig_bytecode, + int start, int length) { + DCHECK(optimized_bytecode_buffer_.begin() + pc() == + optimized_bytecode_buffer_.end()); + optimized_bytecode_buffer_.insert(optimized_bytecode_buffer_.end(), + orig_bytecode + start, + orig_bytecode + start + length); +} + +void RegExpBytecodePeephole::SetRange(byte value, int count) { + DCHECK(optimized_bytecode_buffer_.begin() + pc() == + optimized_bytecode_buffer_.end()); + optimized_bytecode_buffer_.insert(optimized_bytecode_buffer_.end(), count, + value); +} + +void RegExpBytecodePeephole::EmitArgument(int start_pc, const byte* bytecode, + BytecodeArgumentMapping arg) { + int arg_pos = start_pc + arg.offset; + switch (arg.length) { + case 1: + DCHECK_EQ(arg.new_length, arg.length); + EmitValue(GetValue<byte>(bytecode, arg_pos)); + break; + case 2: + DCHECK_EQ(arg.new_length, arg.length); + EmitValue(GetValue<uint16_t>(bytecode, arg_pos)); + break; + case 3: { + // Length 3 only occurs in 'packed' arguments where the lowermost byte is + // the current bytecode, and the remaining 3 bytes are the packed value. + // + // We load 4 bytes from position - 1 and shift out the bytecode. +#ifdef V8_TARGET_BIG_ENDIAN + UNIMPLEMENTED(); + int32_t val = 0; +#else + int32_t val = GetValue<int32_t>(bytecode, arg_pos - 1) >> kBitsPerByte; +#endif // V8_TARGET_BIG_ENDIAN + + switch (arg.new_length) { + case 2: + EmitValue<uint16_t>(val); + break; + case 3: { + // Pack with previously emitted value. 
+ auto prev_val = + GetValue<int32_t>(&(*optimized_bytecode_buffer_.begin()), + Length() - sizeof(uint32_t)); +#ifdef V8_TARGET_BIG_ENDIAN + UNIMPLEMENTED(); + USE(prev_val); +#else + DCHECK_EQ(prev_val & 0xFFFFFF00, 0); + OverwriteValue<uint32_t>( + pc() - sizeof(uint32_t), + (static_cast<uint32_t>(val) << 8) | (prev_val & 0xFF)); +#endif // V8_TARGET_BIG_ENDIAN + break; + } + case 4: + EmitValue<uint32_t>(val); + break; + } + break; + } + case 4: + DCHECK_EQ(arg.new_length, arg.length); + EmitValue(GetValue<uint32_t>(bytecode, arg_pos)); + break; + case 8: + DCHECK_EQ(arg.new_length, arg.length); + EmitValue(GetValue<uint64_t>(bytecode, arg_pos)); + break; + default: + CopyRangeToOutput(bytecode, arg_pos, Min(arg.length, arg.new_length)); + if (arg.length < arg.new_length) { + SetRange(0x00, arg.new_length - arg.length); + } + break; + } +} + +int RegExpBytecodePeephole::pc() const { + DCHECK_LE(optimized_bytecode_buffer_.size(), std::numeric_limits<int>::max()); + return static_cast<int>(optimized_bytecode_buffer_.size()); +} + +Zone* RegExpBytecodePeephole::zone() const { return zone_; } + +} // namespace + +// static +Handle<ByteArray> RegExpBytecodePeepholeOptimization::OptimizeBytecode( + Isolate* isolate, Zone* zone, Handle<String> source, const byte* bytecode, + int length, const ZoneUnorderedMap<int, int>& jump_edges) { + RegExpBytecodePeephole peephole(zone, length, jump_edges); + bool did_optimize = peephole.OptimizeBytecode(bytecode, length); + Handle<ByteArray> array = isolate->factory()->NewByteArray(peephole.Length()); + peephole.CopyOptimizedBytecode(array->GetDataStartAddress()); + + if (did_optimize && FLAG_trace_regexp_peephole_optimization) { + PrintF("Original Bytecode:\n"); + RegExpBytecodeDisassemble(bytecode, length, source->ToCString().get()); + PrintF("Optimized Bytecode:\n"); + RegExpBytecodeDisassemble(array->GetDataStartAddress(), peephole.Length(), + source->ToCString().get()); + } + + return array; +} + +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/regexp/regexp-bytecode-peephole.h b/chromium/v8/src/regexp/regexp-bytecode-peephole.h new file mode 100644 index 00000000000..f116e1ac418 --- /dev/null +++ b/chromium/v8/src/regexp/regexp-bytecode-peephole.h @@ -0,0 +1,31 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_REGEXP_REGEXP_BYTECODE_PEEPHOLE_H_ +#define V8_REGEXP_REGEXP_BYTECODE_PEEPHOLE_H_ + +#include "src/common/globals.h" +#include "src/zone/zone-containers.h" + +namespace v8 { +namespace internal { + +class ByteArray; + +// Peephole optimization for regexp interpreter bytecode. +// Pre-defined bytecode sequences occuring in the bytecode generated by the +// RegExpBytecodeGenerator can be optimized into a single bytecode. +class RegExpBytecodePeepholeOptimization : public AllStatic { + public: + // Performs peephole optimization on the given bytecode and returns the + // optimized bytecode. 
+ static Handle<ByteArray> OptimizeBytecode( + Isolate* isolate, Zone* zone, Handle<String> source, const byte* bytecode, + int length, const ZoneUnorderedMap<int, int>& jump_edges); +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_REGEXP_REGEXP_BYTECODE_PEEPHOLE_H_ diff --git a/chromium/v8/src/regexp/regexp-bytecodes.cc b/chromium/v8/src/regexp/regexp-bytecodes.cc new file mode 100644 index 00000000000..fbf8273ab4f --- /dev/null +++ b/chromium/v8/src/regexp/regexp-bytecodes.cc @@ -0,0 +1,46 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/regexp/regexp-bytecodes.h" + +#include <cctype> + +#include "src/utils/utils.h" + +namespace v8 { +namespace internal { + +void RegExpBytecodeDisassembleSingle(const byte* code_base, const byte* pc) { + PrintF("%s", RegExpBytecodeName(*pc)); + + // Args and the bytecode as hex. + for (int i = 0; i < RegExpBytecodeLength(*pc); i++) { + PrintF(", %02x", pc[i]); + } + PrintF(" "); + + // Args as ascii. + for (int i = 1; i < RegExpBytecodeLength(*pc); i++) { + unsigned char b = pc[i]; + PrintF("%c", std::isprint(b) ? b : '.'); + } + PrintF("\n"); +} + +void RegExpBytecodeDisassemble(const byte* code_base, int length, + const char* pattern) { + PrintF("[generated bytecode for regexp pattern: '%s']\n", pattern); + + ptrdiff_t offset = 0; + + while (offset < length) { + const byte* const pc = code_base + offset; + PrintF("%p %4" V8PRIxPTRDIFF " ", pc, offset); + RegExpBytecodeDisassembleSingle(code_base, pc); + offset += RegExpBytecodeLength(*pc); + } +} + +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/regexp/regexp-bytecodes.h b/chromium/v8/src/regexp/regexp-bytecodes.h index 3dd7637b88c..e25945d0a03 100644 --- a/chromium/v8/src/regexp/regexp-bytecodes.h +++ b/chromium/v8/src/regexp/regexp-bytecodes.h @@ -6,17 +6,27 @@ #define V8_REGEXP_REGEXP_BYTECODES_H_ #include "src/base/macros.h" +#include "src/common/globals.h" namespace v8 { namespace internal { -const int BYTECODE_MASK = 0xff; +// Maximum number of bytecodes that will be used (next power of 2 of actually +// defined bytecodes). +// All slots between the last actually defined bytecode and maximum id will be +// filled with BREAKs, indicating an invalid operation. This way using +// BYTECODE_MASK guarantees no OOB access to the dispatch table. +constexpr int kRegExpPaddedBytecodeCount = 1 << 6; +constexpr int BYTECODE_MASK = kRegExpPaddedBytecodeCount - 1; // The first argument is packed in with the byte code in one word, but so it // has 24 bits, but it can be positive and negative so only use 23 bits for // positive values. const unsigned int MAX_FIRST_ARG = 0x7fffffu; const int BYTECODE_SHIFT = 8; +STATIC_ASSERT(1 << BYTECODE_SHIFT > BYTECODE_MASK); +// TODO(pthier): Argument offsets of bytecodes should be easily accessible by +// name or at least by position. #define BYTECODE_ITERATOR(V) \ V(BREAK, 0, 4) /* bc8 */ \ V(PUSH_CP, 1, 4) /* bc8 pad24 */ \ @@ -34,25 +44,61 @@ const int BYTECODE_SHIFT = 8; V(FAIL, 13, 4) /* bc8 pad24 */ \ V(SUCCEED, 14, 4) /* bc8 pad24 */ \ V(ADVANCE_CP, 15, 4) /* bc8 offset24 */ \ - V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \ + /* Jump to another bytecode given its offset. 
*/ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x10 (fixed) Bytecode */ \ + /* 0x08 - 0x1F: 0x00 (unused) Padding */ \ + /* 0x20 - 0x3F: Address of bytecode to jump to */ \ + V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \ + /* Check if offset is in range and load character at given offset. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x11 (fixed) Bytecode */ \ + /* 0x08 - 0x1F: Offset from current position */ \ + /* 0x20 - 0x3F: Address of bytecode when load is out of range */ \ V(LOAD_CURRENT_CHAR, 17, 8) /* bc8 offset24 addr32 */ \ + /* Load character at given offset without range checks. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x12 (fixed) Bytecode */ \ + /* 0x08 - 0x1F: Offset from current position */ \ V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24 */ \ V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32 */ \ V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24 */ \ V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32 */ \ V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24 */ \ V(CHECK_4_CHARS, 23, 12) /* bc8 pad24 uint32 addr32 */ \ - V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \ + /* Check if current character is equal to a given character */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x19 (fixed) Bytecode */ \ + /* 0x08 - 0x0F: 0x00 (unused) Padding */ \ + /* 0x10 - 0x1F: Character to check */ \ + /* 0x20 - 0x3F: Address of bytecode when matched */ \ + V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \ V(CHECK_NOT_4_CHARS, 25, 12) /* bc8 pad24 uint32 addr32 */ \ V(CHECK_NOT_CHAR, 26, 8) /* bc8 pad8 uint16 addr32 */ \ V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \ - V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \ + /* Checks if the current character combined with mask (bitwise and) */ \ + /* matches a character (e.g. used when two characters in a disjunction */ \ + /* differ by only a single bit */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x1c (fixed) Bytecode */ \ + /* 0x08 - 0x0F: 0x00 (unused) Padding */ \ + /* 0x10 - 0x1F: Character to match against (after mask aplied) */ \ + /* 0x20 - 0x3F: Bitmask bitwise and combined with current character */ \ + /* 0x40 - 0x5F: Address of bytecode when matched */ \ + V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \ V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \ V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \ V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \ V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \ V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \ - V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \ + /* Checks if the current character matches any of the characters encoded */ \ + /* in a bit table. 
Similar to/inspired by boyer moore string search */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x22 (fixed) Bytecode */ \ + /* 0x08 - 0x1F: 0x00 (unused) Padding */ \ + /* 0x20 - 0x3F: Address of bytecode when bit is set */ \ + /* 0x40 - 0xBF: Bit table */ \ + V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \ V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \ V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \ V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \ @@ -67,10 +113,99 @@ const int BYTECODE_SHIFT = 8; V(CHECK_REGISTER_EQ_POS, 46, 8) /* bc8 reg_idx24 addr32 */ \ V(CHECK_AT_START, 47, 8) /* bc8 pad24 addr32 */ \ V(CHECK_NOT_AT_START, 48, 8) /* bc8 offset24 addr32 */ \ + /* Checks if the current position matches top of backtrack stack */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x31 (fixed) Bytecode */ \ + /* 0x08 - 0x1F: 0x00 (unused) Padding */ \ + /* 0x20 - 0x3F: Address of bytecode when current matches tos */ \ V(CHECK_GREEDY, 49, 8) /* bc8 pad24 addr32 */ \ - V(ADVANCE_CP_AND_GOTO, 50, 8) /* bc8 offset24 addr32 */ \ + /* Advance character pointer by given offset and jump to another bytecode.*/ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x32 (fixed) Bytecode */ \ + /* 0x08 - 0x1F: Number of characters to advance */ \ + /* 0x20 - 0x3F: Address of bytecode to jump to */ \ + V(ADVANCE_CP_AND_GOTO, 50, 8) /* bc8 offset24 addr32 */ \ V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */ \ - V(CHECK_CURRENT_POSITION, 52, 8) /* bc8 idx24 addr32 */ + /* Checks if current position + given offset is in range. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07: 0x34 (fixed) Bytecode */ \ + /* 0x08 - 0x1F: Offset from current position */ \ + /* 0x20 - 0x3F: Address of bytecode when position is out of range */ \ + V(CHECK_CURRENT_POSITION, 52, 8) /* bc8 idx24 addr32 */ \ + /* Combination of: */ \ + /* LOAD_CURRENT_CHAR, CHECK_BIT_IN_TABLE and ADVANCE_CP_AND_GOTO */ \ + /* Emitted by RegExpBytecodePeepholeOptimization. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07 0x35 (fixed) Bytecode */ \ + /* 0x08 - 0x1F Load character offset from current position */ \ + /* 0x20 - 0x3F Number of characters to advance */ \ + /* 0x40 - 0xBF Bit Table */ \ + /* 0xC0 - 0xDF Address of bytecode when character is matched */ \ + /* 0xE0 - 0xFF Address of bytecode when no match */ \ + V(SKIP_UNTIL_BIT_IN_TABLE, 53, 32) \ + /* Combination of: */ \ + /* CHECK_CURRENT_POSITION, LOAD_CURRENT_CHAR_UNCHECKED, AND_CHECK_CHAR */ \ + /* and ADVANCE_CP_AND_GOTO */ \ + /* Emitted by RegExpBytecodePeepholeOptimization. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07 0x36 (fixed) Bytecode */ \ + /* 0x08 - 0x1F Load character offset from current position */ \ + /* 0x20 - 0x2F Number of characters to advance */ \ + /* 0x30 - 0x3F Character to match against (after mask applied) */ \ + /* 0x40 - 0x5F: Bitmask bitwise and combined with current character */ \ + /* 0x60 - 0x7F Minimum number of characters this pattern consumes */ \ + /* 0x80 - 0x9F Address of bytecode when character is matched */ \ + /* 0xA0 - 0xBF Address of bytecode when no match */ \ + V(SKIP_UNTIL_CHAR_AND, 54, 24) \ + /* Combination of: */ \ + /* LOAD_CURRENT_CHAR, CHECK_CHAR and ADVANCE_CP_AND_GOTO */ \ + /* Emitted by RegExpBytecodePeepholeOptimization. 
*/ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07 0x37 (fixed) Bytecode */ \ + /* 0x08 - 0x1F Load character offset from current position */ \ + /* 0x20 - 0x2F Number of characters to advance */ \ + /* 0x30 - 0x3F Character to match */ \ + /* 0x40 - 0x5F Address of bytecode when character is matched */ \ + /* 0x60 - 0x7F Address of bytecode when no match */ \ + V(SKIP_UNTIL_CHAR, 55, 16) \ + /* Combination of: */ \ + /* CHECK_CURRENT_POSITION, LOAD_CURRENT_CHAR_UNCHECKED, CHECK_CHAR */ \ + /* and ADVANCE_CP_AND_GOTO */ \ + /* Emitted by RegExpBytecodePeepholeOptimization. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07 0x38 (fixed) Bytecode */ \ + /* 0x08 - 0x1F Load character offset from current position */ \ + /* 0x20 - 0x2F Number of characters to advance */ \ + /* 0x30 - 0x3F Character to match */ \ + /* 0x40 - 0x5F Minimum number of characters this pattern consumes */ \ + /* 0x60 - 0x7F Address of bytecode when character is matched */ \ + /* 0x80 - 0x9F Address of bytecode when no match */ \ + V(SKIP_UNTIL_CHAR_POS_CHECKED, 56, 20) \ + /* Combination of: */ \ + /* LOAD_CURRENT_CHAR, CHECK_CHAR, CHECK_CHAR and ADVANCE_CP_AND_GOTO */ \ + /* Emitted by RegExpBytecodePeepholeOptimization. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07 0x39 (fixed) Bytecode */ \ + /* 0x08 - 0x1F Load character offset from current position */ \ + /* 0x20 - 0x3F Number of characters to advance */ \ + /* 0x40 - 0x4F Character to match */ \ + /* 0x50 - 0x5F Other Character to match */ \ + /* 0x60 - 0x7F Address of bytecode when either character is matched */ \ + /* 0x80 - 0x9F Address of bytecode when no match */ \ + V(SKIP_UNTIL_CHAR_OR_CHAR, 57, 20) \ + /* Combination of: */ \ + /* LOAD_CURRENT_CHAR, CHECK_GT, CHECK_BIT_IN_TABLE, GOTO and */ \ + /* and ADVANCE_CP_AND_GOTO */ \ + /* Emitted by RegExpBytecodePeepholeOptimization. */ \ + /* Bit Layout: */ \ + /* 0x00 - 0x07 0x3A (fixed) Bytecode */ \ + /* 0x08 - 0x1F Load character offset from current position */ \ + /* 0x20 - 0x2F Number of characters to advance */ \ + /* 0x30 - 0x3F Character to check if it is less than current char */ \ + /* 0x40 - 0xBF Bit Table */ \ + /* 0xC0 - 0xDF Address of bytecode when character is matched */ \ + /* 0xE0 - 0xFF Address of bytecode when no match */ \ + V(SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE, 58, 32) #define COUNT(...) +1 static constexpr int kRegExpBytecodeCount = BYTECODE_ITERATOR(COUNT); @@ -80,7 +215,7 @@ static constexpr int kRegExpBytecodeCount = BYTECODE_ITERATOR(COUNT); // contiguous, strictly increasing, and start at 0. // TODO(jgruber): Do not explicitly assign values, instead generate them // implicitly from the list order. 
-STATIC_ASSERT(kRegExpBytecodeCount == 53); +STATIC_ASSERT(kRegExpBytecodeCount == 59); #define DECLARE_BYTECODES(name, code, length) \ static constexpr int BC_##name = code; @@ -107,6 +242,10 @@ inline const char* RegExpBytecodeName(int bytecode) { return kRegExpBytecodeNames[bytecode]; } +void RegExpBytecodeDisassembleSingle(const byte* code_base, const byte* pc); +void RegExpBytecodeDisassemble(const byte* code_base, int length, + const char* pattern); + } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/regexp/regexp-compiler.cc b/chromium/v8/src/regexp/regexp-compiler.cc index 85da69f308b..d141f3c490d 100644 --- a/chromium/v8/src/regexp/regexp-compiler.cc +++ b/chromium/v8/src/regexp/regexp-compiler.cc @@ -725,6 +725,11 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character, unibrow::uchar* letters, int letter_length) { #ifdef V8_INTL_SUPPORT + // Special case for U+017F which has upper case in ASCII range. + if (character == 0x017f) { + letters[0] = character; + return 1; + } icu::UnicodeSet set; set.add(character); set = set.closeOver(USET_CASE_INSENSITIVE); @@ -734,10 +739,18 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character, UChar32 start = set.getRangeStart(i); UChar32 end = set.getRangeEnd(i); CHECK(end - start + items <= letter_length); - while (start <= end) { - if (one_byte_subject && start > String::kMaxOneByteCharCode) break; - letters[items++] = (unibrow::uchar)(start); - start++; + // Only add to the output if character is not in ASCII range + // or the case equivalent character is in ASCII range. + // #sec-runtime-semantics-canonicalize-ch + // 3.g If the numeric value of ch ≥ 128 and the numeric value of cu < 128, + // return ch. + if (!((start >= 128) && (character < 128))) { + // No range have start and end span across code point 128. + DCHECK((start >= 128) == (end >= 128)); + for (UChar32 cu = start; cu <= end; cu++) { + if (one_byte_subject && cu > String::kMaxOneByteCharCode) break; + letters[items++] = (unibrow::uchar)(cu); + } } } return items; diff --git a/chromium/v8/src/regexp/regexp-interpreter.cc b/chromium/v8/src/regexp/regexp-interpreter.cc index cf2fb55e4a8..df72951afbb 100644 --- a/chromium/v8/src/regexp/regexp-interpreter.cc +++ b/chromium/v8/src/regexp/regexp-interpreter.cc @@ -12,6 +12,7 @@ #include "src/objects/objects-inl.h" #include "src/regexp/regexp-bytecodes.h" #include "src/regexp/regexp-macro-assembler.h" +#include "src/regexp/regexp-stack.h" // For kMaximumStackSize. #include "src/regexp/regexp.h" #include "src/strings/unicode.h" #include "src/utils/utils.h" @@ -63,23 +64,6 @@ bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len, return true; } -void DisassembleSingleBytecode(const byte* code_base, const byte* pc) { - PrintF("%s", RegExpBytecodeName(*pc)); - - // Args and the bytecode as hex. - for (int i = 0; i < RegExpBytecodeLength(*pc); i++) { - PrintF(", %02x", pc[i]); - } - PrintF(" "); - - // Args as ascii. - for (int i = 1; i < RegExpBytecodeLength(*pc); i++) { - unsigned char b = pc[i]; - PrintF("%c", std::isprint(b) ? b : '.'); - } - PrintF("\n"); -} - #ifdef DEBUG void MaybeTraceInterpreter(const byte* code_base, const byte* pc, int stack_depth, int current_position, @@ -94,7 +78,7 @@ void MaybeTraceInterpreter(const byte* code_base, const byte* pc, PrintF(format, pc - code_base, stack_depth, current_position, current_char, printable ? 
current_char : '.'); - DisassembleSingleBytecode(code_base, pc); + RegExpBytecodeDisassembleSingle(code_base, pc); } } #endif // DEBUG @@ -118,7 +102,10 @@ class BacktrackStack { public: BacktrackStack() = default; - void push(int v) { data_.emplace_back(v); } + V8_WARN_UNUSED_RESULT bool push(int v) { + data_.emplace_back(v); + return (static_cast<int>(data_.size()) <= kMaxSize); + } int peek() const { DCHECK(!data_.empty()); return data_.back(); @@ -141,13 +128,17 @@ class BacktrackStack { // static stack-allocated backing store, but small enough not to waste space. static constexpr int kStaticCapacity = 64; - base::SmallVector<int, kStaticCapacity> data_; + using ValueT = int; + base::SmallVector<ValueT, kStaticCapacity> data_; + + static constexpr int kMaxSize = + RegExpStack::kMaximumStackSize / sizeof(ValueT); DISALLOW_COPY_AND_ASSIGN(BacktrackStack); }; -IrregexpInterpreter::Result StackOverflow(Isolate* isolate, - RegExp::CallOrigin call_origin) { +IrregexpInterpreter::Result ThrowStackOverflow(Isolate* isolate, + RegExp::CallOrigin call_origin) { CHECK(call_origin == RegExp::CallOrigin::kFromRuntime); // We abort interpreter execution after the stack overflow is thrown, and thus // allow allocation here despite the outer DisallowHeapAllocationScope. @@ -156,6 +147,17 @@ IrregexpInterpreter::Result StackOverflow(Isolate* isolate, return IrregexpInterpreter::EXCEPTION; } +// Only throws if called from the runtime, otherwise just returns the EXCEPTION +// status code. +IrregexpInterpreter::Result MaybeThrowStackOverflow( + Isolate* isolate, RegExp::CallOrigin call_origin) { + if (call_origin == RegExp::CallOrigin::kFromRuntime) { + return ThrowStackOverflow(isolate, call_origin); + } else { + return IrregexpInterpreter::EXCEPTION; + } +} + template <typename Char> void UpdateCodeAndSubjectReferences( Isolate* isolate, Handle<ByteArray> code_array, @@ -208,7 +210,7 @@ IrregexpInterpreter::Result HandleInterrupts( Handle<String> subject_handle(*subject_string_out, isolate); if (js_has_overflowed) { - return StackOverflow(isolate, call_origin); + return ThrowStackOverflow(isolate, call_origin); } else if (check.InterruptRequested()) { const bool was_one_byte = String::IsOneByteRepresentationUnderneath(*subject_string_out); @@ -238,6 +240,13 @@ IrregexpInterpreter::Result HandleInterrupts( return IrregexpInterpreter::SUCCESS; } +bool CheckBitInTable(const uint32_t current_char, const byte* const table) { + int mask = RegExpMacroAssembler::kTableMask; + int b = table[(current_char & mask) >> kBitsPerByteLog2]; + int bit = (current_char & (kBitsPerByte - 1)); + return (b & (1 << bit)) != 0; +} + // If computed gotos are supported by the compiler, we can get addresses to // labels directly in C/C++. Every bytecode handler has its own label and we // store the addresses in a dispatch table indexed by bytecode. 
To execute the @@ -262,7 +271,7 @@ IrregexpInterpreter::Result HandleInterrupts( #define DISPATCH() \ pc = next_pc; \ insn = next_insn; \ - break + goto switch_dispatch_continuation #endif // V8_USE_COMPUTED_GOTO // ADVANCE/SET_PC_FROM_OFFSET are separated from DISPATCH, because ideally some @@ -297,11 +306,52 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array, DisallowHeapAllocation no_gc; #if V8_USE_COMPUTED_GOTO -#define DECLARE_DISPATCH_TABLE_ENTRY(name, code, length) &&BC_##name, - static const void* const dispatch_table[] = { - BYTECODE_ITERATOR(DECLARE_DISPATCH_TABLE_ENTRY)}; + +// We have to make sure that no OOB access to the dispatch table is possible and +// all values are valid label addresses. +// Otherwise jumps to arbitrary addresses could potentially happen. +// This is ensured as follows: +// Every index to the dispatch table gets masked using BYTECODE_MASK in +// DECODE(). This way we can only get values between 0 (only the least +// significant byte of an integer is used) and kRegExpPaddedBytecodeCount - 1 +// (BYTECODE_MASK is defined to be exactly this value). +// All entries from kRegExpBytecodeCount to kRegExpPaddedBytecodeCount have to +// be filled with BREAKs (invalid operation). + +// Fill dispatch table from last defined bytecode up to the next power of two +// with BREAK (invalid operation). +// TODO(pthier): Find a way to fill up automatically (at compile time) +// 59 real bytecodes -> 5 fillers +#define BYTECODE_FILLER_ITERATOR(V) \ + V(BREAK) /* 1 */ \ + V(BREAK) /* 2 */ \ + V(BREAK) /* 3 */ \ + V(BREAK) /* 4 */ \ + V(BREAK) /* 5 */ + +#define COUNT(...) +1 + static constexpr int kRegExpBytecodeFillerCount = + BYTECODE_FILLER_ITERATOR(COUNT); +#undef COUNT + + // Make sure kRegExpPaddedBytecodeCount is actually the closest possible power + // of two. + DCHECK_EQ(kRegExpPaddedBytecodeCount, + base::bits::RoundUpToPowerOfTwo32(kRegExpBytecodeCount)); + + // Make sure every bytecode we get by using BYTECODE_MASK is well defined. + STATIC_ASSERT(kRegExpBytecodeCount <= kRegExpPaddedBytecodeCount); + STATIC_ASSERT(kRegExpBytecodeCount + kRegExpBytecodeFillerCount == + kRegExpPaddedBytecodeCount); + +#define DECLARE_DISPATCH_TABLE_ENTRY(name, ...) 
&&BC_##name, + static const void* const dispatch_table[kRegExpPaddedBytecodeCount] = { + BYTECODE_ITERATOR(DECLARE_DISPATCH_TABLE_ENTRY) + BYTECODE_FILLER_ITERATOR(DECLARE_DISPATCH_TABLE_ENTRY)}; #undef DECLARE_DISPATCH_TABLE_ENTRY -#endif +#undef BYTECODE_FILLER_ITERATOR + +#endif // V8_USE_COMPUTED_GOTO const byte* pc = code_array.GetDataStartAddress(); const byte* code_base = pc; @@ -329,17 +379,23 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array, BYTECODE(BREAK) { UNREACHABLE(); } BYTECODE(PUSH_CP) { ADVANCE(PUSH_CP); - backtrack_stack.push(current); + if (!backtrack_stack.push(current)) { + return MaybeThrowStackOverflow(isolate, call_origin); + } DISPATCH(); } BYTECODE(PUSH_BT) { ADVANCE(PUSH_BT); - backtrack_stack.push(Load32Aligned(pc + 4)); + if (!backtrack_stack.push(Load32Aligned(pc + 4))) { + return MaybeThrowStackOverflow(isolate, call_origin); + } DISPATCH(); } BYTECODE(PUSH_REGISTER) { ADVANCE(PUSH_REGISTER); - backtrack_stack.push(registers[insn >> BYTECODE_SHIFT]); + if (!backtrack_stack.push(registers[insn >> BYTECODE_SHIFT])) { + return MaybeThrowStackOverflow(isolate, call_origin); + } DISPATCH(); } BYTECODE(SET_REGISTER) { @@ -580,10 +636,7 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array, DISPATCH(); } BYTECODE(CHECK_BIT_IN_TABLE) { - int mask = RegExpMacroAssembler::kTableMask; - byte b = pc[8 + ((current_char & mask) >> kBitsPerByteLog2)]; - int bit = (current_char & (kBitsPerByte - 1)); - if ((b & (1 << bit)) != 0) { + if (CheckBitInTable(current_char, pc + 8)) { SET_PC_FROM_OFFSET(Load32Aligned(pc + 4)); } else { ADVANCE(CHECK_BIT_IN_TABLE); @@ -762,6 +815,118 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array, } DISPATCH(); } + BYTECODE(SKIP_UNTIL_CHAR) { + int load_offset = (insn >> BYTECODE_SHIFT); + uint32_t advance = Load16Aligned(pc + 4); + uint32_t c = Load16Aligned(pc + 6); + while (static_cast<uintptr_t>(current + load_offset) < + static_cast<uintptr_t>(subject.length())) { + current_char = subject[current + load_offset]; + if (c == current_char) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 8)); + DISPATCH(); + } + current += advance; + } + SET_PC_FROM_OFFSET(Load32Aligned(pc + 12)); + DISPATCH(); + } + BYTECODE(SKIP_UNTIL_CHAR_AND) { + int load_offset = (insn >> BYTECODE_SHIFT); + uint16_t advance = Load16Aligned(pc + 4); + uint16_t c = Load16Aligned(pc + 6); + uint32_t mask = Load32Aligned(pc + 8); + int32_t maximum_offset = Load32Aligned(pc + 12); + while (static_cast<uintptr_t>(current + maximum_offset) <= + static_cast<uintptr_t>(subject.length())) { + current_char = subject[current + load_offset]; + if (c == (current_char & mask)) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 16)); + DISPATCH(); + } + current += advance; + } + SET_PC_FROM_OFFSET(Load32Aligned(pc + 20)); + DISPATCH(); + } + BYTECODE(SKIP_UNTIL_CHAR_POS_CHECKED) { + int load_offset = (insn >> BYTECODE_SHIFT); + uint16_t advance = Load16Aligned(pc + 4); + uint16_t c = Load16Aligned(pc + 6); + int32_t maximum_offset = Load32Aligned(pc + 8); + while (static_cast<uintptr_t>(current + maximum_offset) <= + static_cast<uintptr_t>(subject.length())) { + current_char = subject[current + load_offset]; + if (c == current_char) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 12)); + DISPATCH(); + } + current += advance; + } + SET_PC_FROM_OFFSET(Load32Aligned(pc + 16)); + DISPATCH(); + } + BYTECODE(SKIP_UNTIL_BIT_IN_TABLE) { + int load_offset = (insn >> BYTECODE_SHIFT); + uint32_t advance = Load16Aligned(pc + 4); + const 
byte* table = pc + 8; + while (static_cast<uintptr_t>(current + load_offset) < + static_cast<uintptr_t>(subject.length())) { + current_char = subject[current + load_offset]; + if (CheckBitInTable(current_char, table)) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 24)); + DISPATCH(); + } + current += advance; + } + SET_PC_FROM_OFFSET(Load32Aligned(pc + 28)); + DISPATCH(); + } + BYTECODE(SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE) { + int load_offset = (insn >> BYTECODE_SHIFT); + uint16_t advance = Load16Aligned(pc + 4); + uint16_t limit = Load16Aligned(pc + 6); + const byte* table = pc + 8; + while (static_cast<uintptr_t>(current + load_offset) < + static_cast<uintptr_t>(subject.length())) { + current_char = subject[current + load_offset]; + if (current_char > limit) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 24)); + DISPATCH(); + } + if (!CheckBitInTable(current_char, table)) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 24)); + DISPATCH(); + } + current += advance; + } + SET_PC_FROM_OFFSET(Load32Aligned(pc + 28)); + DISPATCH(); + } + BYTECODE(SKIP_UNTIL_CHAR_OR_CHAR) { + int load_offset = (insn >> BYTECODE_SHIFT); + uint32_t advance = Load32Aligned(pc + 4); + uint16_t c = Load16Aligned(pc + 8); + uint16_t c2 = Load16Aligned(pc + 10); + while (static_cast<uintptr_t>(current + load_offset) < + static_cast<uintptr_t>(subject.length())) { + current_char = subject[current + load_offset]; + // The two if-statements below are split up intentionally, as combining + // them seems to result in register allocation behaving quite + // differently and slowing down the resulting code. + if (c == current_char) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 12)); + DISPATCH(); + } + if (c2 == current_char) { + SET_PC_FROM_OFFSET(Load32Aligned(pc + 12)); + DISPATCH(); + } + current += advance; + } + SET_PC_FROM_OFFSET(Load32Aligned(pc + 16)); + DISPATCH(); + } #if V8_USE_COMPUTED_GOTO // Lint gets confused a lot if we just use !V8_USE_COMPUTED_GOTO or ifndef // V8_USE_COMPUTED_GOTO here. @@ -769,6 +934,9 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array, default: UNREACHABLE(); } + // Label we jump to in DISPATCH(). There must be no instructions between the + // end of the switch, this label and the end of the loop. 
+ switch_dispatch_continuation : {} #endif // V8_USE_COMPUTED_GOTO } } @@ -784,30 +952,11 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array, } // namespace // static -void IrregexpInterpreter::Disassemble(ByteArray byte_array, - const std::string& pattern) { - DisallowHeapAllocation no_gc; - - PrintF("[generated bytecode for regexp pattern: '%s']\n", pattern.c_str()); - - const byte* const code_base = byte_array.GetDataStartAddress(); - const int byte_array_length = byte_array.length(); - ptrdiff_t offset = 0; - - while (offset < byte_array_length) { - const byte* const pc = code_base + offset; - PrintF("%p %4" V8PRIxPTRDIFF " ", pc, offset); - DisassembleSingleBytecode(code_base, pc); - offset += RegExpBytecodeLength(*pc); - } -} - -// static IrregexpInterpreter::Result IrregexpInterpreter::Match( Isolate* isolate, JSRegExp regexp, String subject_string, int* registers, int registers_length, int start_position, RegExp::CallOrigin call_origin) { if (FLAG_regexp_tier_up) { - regexp.MarkTierUpForNextExec(); + regexp.TierUpTick(); } bool is_one_byte = String::IsOneByteRepresentationUnderneath(subject_string); @@ -869,6 +1018,12 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs( String subject_string = String::cast(Object(subject)); JSRegExp regexp_obj = JSRegExp::cast(Object(regexp)); + if (regexp_obj.MarkedForTierUp()) { + // Returning RETRY will re-enter through runtime, where actual recompilation + // for tier-up takes place. + return IrregexpInterpreter::RETRY; + } + return Match(isolate, regexp_obj, subject_string, registers, registers_length, start_position, call_origin); } diff --git a/chromium/v8/src/regexp/regexp-interpreter.h b/chromium/v8/src/regexp/regexp-interpreter.h index fbc5a3b2906..2d0b74f1360 100644 --- a/chromium/v8/src/regexp/regexp-interpreter.h +++ b/chromium/v8/src/regexp/regexp-interpreter.h @@ -31,6 +31,8 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic { // In case a StackOverflow occurs, EXCEPTION is returned. The caller is // responsible for creating the exception. + // RETRY is returned if a retry through the runtime is needed (e.g. when + // interrupts have been scheduled or the regexp is marked for tier-up). // Arguments input_start, input_end and backtrack_stack are // unused. They are only passed to match the signature of the native irregex // code. 
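The dispatch-table hardening added above is easiest to see in isolation: the table is padded with BREAK handlers up to a power of two and every decoded opcode is masked with BYTECODE_MASK, so an out-of-range opcode can only ever land on the invalid-instruction entry. Below is a minimal, self-contained sketch of that scheme; it uses a plain function table instead of V8's computed-goto label addresses, and kOpCount, kPaddedOpCount and the handler names are invented for illustration.

#include <cstdint>
#include <cstdio>

namespace {

// Six "real" opcodes, padded to the next power of two with an invalid-op
// handler so that a masked index can never reach an uninitialized entry.
constexpr uint32_t kOpCount = 6;
constexpr uint32_t kPaddedOpCount = 8;  // power of two
constexpr uint32_t kOpMask = kPaddedOpCount - 1;
static_assert((kPaddedOpCount & kOpMask) == 0, "padding must be a power of two");
static_assert(kOpCount <= kPaddedOpCount, "real opcodes must fit in the table");

void Push() { std::puts("push"); }
void Pop() { std::puts("pop"); }
void Add() { std::puts("add"); }
void Jump() { std::puts("jump"); }
void Load() { std::puts("load"); }
void Store() { std::puts("store"); }
void Break() { std::puts("invalid opcode"); }

using Handler = void (*)();
constexpr Handler kDispatchTable[kPaddedOpCount] = {
    Push, Pop, Add, Jump, Load, Store,  // real opcodes
    Break, Break};                      // fillers

void Dispatch(uint8_t insn) {
  // Masking bounds the index to [0, kPaddedOpCount), mirroring DECODE()'s use
  // of BYTECODE_MASK in the interpreter loop.
  kDispatchTable[insn & kOpMask]();
}

}  // namespace

int main() {
  Dispatch(2);     // "add"
  Dispatch(0xFF);  // masked to 7 -> "invalid opcode", never out of bounds
}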
@@ -46,8 +48,6 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic { int registers_length, int start_position, RegExp::CallOrigin call_origin); - static void Disassemble(ByteArray byte_array, const std::string& pattern); - private: static Result Match(Isolate* isolate, JSRegExp regexp, String subject_string, int* registers, int registers_length, int start_position, diff --git a/chromium/v8/src/regexp/regexp-parser.cc b/chromium/v8/src/regexp/regexp-parser.cc index d6e421cafa3..951f8153747 100644 --- a/chromium/v8/src/regexp/regexp-parser.cc +++ b/chromium/v8/src/regexp/regexp-parser.cc @@ -84,6 +84,9 @@ void RegExpParser::Advance() { ReportError(CStrVector( MessageFormatter::TemplateString(MessageTemplate::kStackOverflow))); } else if (zone()->excess_allocation()) { + if (FLAG_correctness_fuzzer_suppressions) { + FATAL("Aborting on excess zone allocation"); + } ReportError(CStrVector("Regular expression too large")); } else { current_ = ReadNext<true>(); @@ -984,18 +987,39 @@ RegExpCapture* RegExpParser::GetCapture(int index) { return captures_->at(index - 1); } +namespace { + +struct RegExpCaptureIndexLess { + bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const { + DCHECK_NOT_NULL(lhs); + DCHECK_NOT_NULL(rhs); + return lhs->index() < rhs->index(); + } +}; + +} // namespace + Handle<FixedArray> RegExpParser::CreateCaptureNameMap() { if (named_captures_ == nullptr || named_captures_->empty()) { return Handle<FixedArray>(); } + // Named captures are sorted by name (because the set is used to ensure + // name uniqueness). But the capture name map must to be sorted by index. + + ZoneVector<RegExpCapture*> sorted_named_captures( + named_captures_->begin(), named_captures_->end(), zone()); + std::sort(sorted_named_captures.begin(), sorted_named_captures.end(), + RegExpCaptureIndexLess{}); + DCHECK_EQ(sorted_named_captures.size(), named_captures_->size()); + Factory* factory = isolate()->factory(); - int len = static_cast<int>(named_captures_->size()) * 2; + int len = static_cast<int>(sorted_named_captures.size()) * 2; Handle<FixedArray> array = factory->NewFixedArray(len); int i = 0; - for (const auto& capture : *named_captures_) { + for (const auto& capture : sorted_named_captures) { Vector<const uc16> capture_name(capture->name()->data(), capture->name()->size()); // CSA code in ConstructNewResultFromMatchInfo requires these strings to be diff --git a/chromium/v8/src/regexp/regexp-stack.h b/chromium/v8/src/regexp/regexp-stack.h index 7ecaa40b819..d3c5415f1f3 100644 --- a/chromium/v8/src/regexp/regexp-stack.h +++ b/chromium/v8/src/regexp/regexp-stack.h @@ -73,6 +73,9 @@ class RegExpStack { char* RestoreStack(char* from); void FreeThreadResources() { thread_local_.Free(); } + // Maximal size of allocated stack area. + static constexpr size_t kMaximumStackSize = 64 * MB; + private: RegExpStack(); ~RegExpStack(); @@ -84,9 +87,6 @@ class RegExpStack { // Minimal size of allocated stack area. static const size_t kMinimumStackSize = 1 * KB; - // Maximal size of allocated stack area. - static const size_t kMaximumStackSize = 64 * MB; - // Structure holding the allocated memory, size and limit. 
struct ThreadLocal { ThreadLocal() { Clear(); } diff --git a/chromium/v8/src/regexp/regexp-utils.cc b/chromium/v8/src/regexp/regexp-utils.cc index c9194d5170c..73c2015dd91 100644 --- a/chromium/v8/src/regexp/regexp-utils.cc +++ b/chromium/v8/src/regexp/regexp-utils.cc @@ -171,12 +171,11 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) { // Check that the "exec" method is unmodified. // Check that the index refers to "exec" method (this has to be consistent // with the init order in the bootstrapper). + InternalIndex kExecIndex(JSRegExp::kExecFunctionDescriptorIndex); DCHECK_EQ(*(isolate->factory()->exec_string()), - proto_map.instance_descriptors().GetKey( - JSRegExp::kExecFunctionDescriptorIndex)); - if (proto_map.instance_descriptors() - .GetDetails(JSRegExp::kExecFunctionDescriptorIndex) - .constness() != PropertyConstness::kConst) { + proto_map.instance_descriptors().GetKey(kExecIndex)); + if (proto_map.instance_descriptors().GetDetails(kExecIndex).constness() != + PropertyConstness::kConst) { return false; } diff --git a/chromium/v8/src/regexp/regexp.cc b/chromium/v8/src/regexp/regexp.cc index e0bc4b8e323..a4ab48ed0ed 100644 --- a/chromium/v8/src/regexp/regexp.cc +++ b/chromium/v8/src/regexp/regexp.cc @@ -9,6 +9,7 @@ #include "src/heap/heap-inl.h" #include "src/objects/js-regexp-inl.h" #include "src/regexp/regexp-bytecode-generator.h" +#include "src/regexp/regexp-bytecodes.h" #include "src/regexp/regexp-compiler.h" #include "src/regexp/regexp-dotprinter.h" #include "src/regexp/regexp-interpreter.h" @@ -574,7 +575,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp, // match. // We need to reset the tier up to start over with compilation. if (FLAG_regexp_tier_up) { - regexp->ResetTierUp(); + regexp->ResetLastTierUpTick(); } is_one_byte = String::IsOneByteRepresentationUnderneath(*subject); EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte); @@ -600,6 +601,20 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec( } #endif + // For very long subject strings, the regexp interpreter is currently much + // slower than the jitted code execution. If the tier-up strategy is turned + // on, we want to avoid this performance penalty so we eagerly tier-up if the + // subject string length is equal or greater than the given heuristic value. + if (FLAG_regexp_tier_up && + subject->length() >= JSRegExp::kTierUpForSubjectLengthValue) { + regexp->MarkTierUpForNextExec(); + if (FLAG_trace_regexp_tier_up) { + PrintF( + "Forcing tier-up for very long strings in " + "RegExpImpl::IrregexpExec\n"); + } + } + // Prepare space for the return values. 
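Taken together, the tier-up pieces in this patch (TierUpTick() on each interpreted Match, ResetLastTierUpTick() when a match must restart after recompilation, MarkTierUpForNextExec() for very long subjects, and the MarkedForTierUp() check that makes MatchForCallFromJs return RETRY) form a small state machine. The sketch below is a hedged composite of how those calls interact; the TieringState struct, the tick threshold and the length constant are stand-ins, since the real bookkeeping lives on JSRegExp and the concrete threshold is not part of these hunks.

// Stand-in for the tiering bookkeeping the patch adds to JSRegExp.
struct TieringState {
  int ticks = 0;
  bool marked_for_tier_up = false;
};

constexpr int kAssumedTickThreshold = 10;        // illustrative only
constexpr int kAssumedLongSubjectLength = 4096;  // stands in for
                                                 // JSRegExp::kTierUpForSubjectLengthValue

enum class InterpreterResult { kSuccess, kRetry };

InterpreterResult Interpret(TieringState* state, int subject_length) {
  // Mirrors RegExpImpl::IrregexpExec: very long subjects skip the slow ramp
  // and request native compilation for the next execution.
  if (subject_length >= kAssumedLongSubjectLength) state->marked_for_tier_up = true;

  // Mirrors MatchForCallFromJs: once marked, bail out to the runtime, which
  // recompiles to native code and re-runs the match.
  if (state->marked_for_tier_up) return InterpreterResult::kRetry;

  // Mirrors Match(): every interpreted execution advances the tick counter;
  // presumably enough ticks eventually flip MarkedForTierUp() as well.
  if (++state->ticks >= kAssumedTickThreshold) state->marked_for_tier_up = true;

  return InterpreterResult::kSuccess;  // run the bytecode as usual
}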
int required_registers = RegExp::IrregexpPrepare(isolate, regexp, subject); if (required_registers < 0) { @@ -860,14 +875,15 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data, OFStream os(trace_scope.file()); Handle<Code> c(Code::cast(result.code), isolate); auto pattern_cstring = pattern->ToCString(); - c->Disassemble(pattern_cstring.get(), os); + c->Disassemble(pattern_cstring.get(), os, isolate); } #endif if (FLAG_print_regexp_bytecode && data->compilation_target == RegExpCompilationTarget::kBytecode) { Handle<ByteArray> bytecode(ByteArray::cast(result.code), isolate); auto pattern_cstring = pattern->ToCString(); - IrregexpInterpreter::Disassemble(*bytecode, pattern_cstring.get()); + RegExpBytecodeDisassemble(bytecode->GetDataStartAddress(), + bytecode->length(), pattern_cstring.get()); } } diff --git a/chromium/v8/src/regexp/regexp.h b/chromium/v8/src/regexp/regexp.h index 8ccc9789a30..6625b063bcf 100644 --- a/chromium/v8/src/regexp/regexp.h +++ b/chromium/v8/src/regexp/regexp.h @@ -55,10 +55,7 @@ struct RegExpCompileData { class RegExp final : public AllStatic { public: - // Whether the irregexp engine generates native code or interpreter bytecode. - static bool CanGenerateNativeCode() { - return !FLAG_regexp_interpret_all || FLAG_regexp_tier_up; - } + // Whether the irregexp engine generates interpreter bytecode. static bool CanGenerateBytecode() { return FLAG_regexp_interpret_all || FLAG_regexp_tier_up; } diff --git a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc index d4144e7e640..853d8b2815c 100644 --- a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc +++ b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc @@ -38,7 +38,10 @@ namespace internal { * The remaining registers are free for computations. * Each call to a public method should retain this convention. * - * The stack will have the following structure: + * The stack will have the following structure + * - fp[112] Address regexp (address of the JSRegExp object; unused in + * native code, passed to match signature of + * the interpreter) * - fp[108] Isolate* isolate (address of the current isolate) * - fp[104] direct_call (if 1, direct call from JavaScript code, * if 0, call through the runtime system). @@ -85,7 +88,8 @@ namespace internal { * int num_capture_registers, * byte* stack_area_base, * bool direct_call = false, - * Isolate* isolate); + * Isolate* isolate, + * Address regexp); * The call is performed by NativeRegExpMacroAssembler::Execute() * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper. 
*/ @@ -204,7 +208,7 @@ void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) { Label backtrack_non_equal; __ CmpP(current_input_offset(), MemOperand(backtrack_stackpointer(), 0)); __ bne(&backtrack_non_equal); - __ AddP(backtrack_stackpointer(), Operand(kPointerSize)); + __ AddP(backtrack_stackpointer(), Operand(kSystemPointerSize)); BranchOrBacktrack(al, on_equal); __ bind(&backtrack_non_equal); @@ -635,11 +639,11 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) { // Requires us to save the callee-preserved registers r6-r13 // General convention is to also save r14 (return addr) and // sp/r15 as well in a single STM/STMG - __ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize)); + __ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kSystemPointerSize)); // Load stack parameters from caller stack frame - __ LoadMultipleP(r7, r9, - MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); + __ LoadMultipleP( + r7, r9, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize)); // r7 = capture array size // r8 = stack area base // r9 = direct call @@ -654,7 +658,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) { // Set frame pointer in space for it if this is not a direct call // from generated code. __ LoadRR(frame_pointer(), sp); - __ lay(sp, MemOperand(sp, -10 * kPointerSize)); + __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize)); __ mov(r1, Operand::Zero()); // success counter __ LoadRR(r0, r1); // offset of location __ StoreMultipleP(r0, r9, MemOperand(sp, 0)); @@ -672,7 +676,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) { __ ble(&stack_limit_hit); // Check if there is room for the variable number of registers above // the stack limit. - __ CmpLogicalP(r2, Operand(num_registers_ * kPointerSize)); + __ CmpLogicalP(r2, Operand(num_registers_ * kSystemPointerSize)); __ bge(&stack_ok); // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. @@ -688,7 +692,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) { __ bind(&stack_ok); // Allocate space on stack for registers. - __ lay(sp, MemOperand(sp, (-num_registers_ * kPointerSize))); + __ lay(sp, MemOperand(sp, (-num_registers_ * kSystemPointerSize))); // Load string end. __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); // Load input start. @@ -731,12 +735,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) { // Fill saved registers with initial value = start offset - 1 if (num_saved_registers_ > 8) { // One slot beyond address of register 0. - __ lay(r3, MemOperand(frame_pointer(), kRegisterZero + kPointerSize)); + __ lay(r3, + MemOperand(frame_pointer(), kRegisterZero + kSystemPointerSize)); __ Load(r4, Operand(num_saved_registers_)); Label init_loop; __ bind(&init_loop); - __ StoreP(r1, MemOperand(r3, -kPointerSize)); - __ lay(r3, MemOperand(r3, -kPointerSize)); + __ StoreP(r1, MemOperand(r3, -kSystemPointerSize)); + __ lay(r3, MemOperand(r3, -kSystemPointerSize)); __ BranchOnCount(r4, &init_loop); } else { for (int i = 0; i < num_saved_registers_; i++) { @@ -871,7 +876,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) { // Skip sp past regexp registers and local variables.. __ LoadRR(sp, frame_pointer()); // Restore registers r6..r15. 
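The kPointerSize to kSystemPointerSize renames in this file are not cosmetic: these frame slots hold raw machine words (saved registers, return addresses, C arguments), and with pointer compression a tagged slot (kTaggedSize) is narrower than a system pointer, so offsets must be expressed in the full word width. A small sketch of the distinction, using local stand-in constants rather than V8's real globals.h definitions, for the pointer-compressed 64-bit configuration:

#include <cstddef>

// Stand-ins, illustrative of a 64-bit build with pointer compression.
constexpr std::size_t kSystemPointerSize = 8;  // raw pointers, stack slots
constexpr std::size_t kTaggedSize = 4;         // compressed tagged values

// A frame area holding ten saved registers must be sized in machine words;
// sizing it by tagged width would corrupt the frame layout.
constexpr std::size_t kSavedRegisterAreaSize = 10 * kSystemPointerSize;
static_assert(kSavedRegisterAreaSize == 80, "ten machine words");
static_assert(kTaggedSize < kSystemPointerSize,
              "with pointer compression, tagged slots are narrower than system pointers");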
- __ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize)); + __ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kSystemPointerSize)); __ b(r14); @@ -1087,17 +1092,19 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) { // Code of self. __ mov(r3, Operand(masm_->CodeObject())); // r2 becomes return address pointer. - __ lay(r2, MemOperand(sp, kStackFrameRASlot * kPointerSize)); + __ lay(r2, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize)); ExternalReference stack_guard_check = ExternalReference::re_check_stack_guard_state(isolate()); __ mov(ip, Operand(stack_guard_check)); __ StoreReturnAddressAndCall(ip); - if (base::OS::ActivationFrameAlignment() > kPointerSize) { - __ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize))); + if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { + __ LoadP( + sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize))); } else { - __ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize))); + __ la(sp, + MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize))); } __ mov(code_pointer(), Operand(masm_->CodeObject())); @@ -1106,7 +1113,7 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) { // Helper function for reading a value out of a stack frame. template <typename T> static T& frame_entry(Address re_frame, int frame_offset) { - DCHECK_EQ(kPointerSize, sizeof(T)); + DCHECK_EQ(kSystemPointerSize, sizeof(T)); #ifdef V8_TARGET_ARCH_S390X return reinterpret_cast<T&>(Memory<uint64_t>(re_frame + frame_offset)); #else @@ -1140,7 +1147,7 @@ MemOperand RegExpMacroAssemblerS390::register_location(int register_index) { num_registers_ = register_index + 1; } return MemOperand(frame_pointer(), - kRegisterZero - register_index * kPointerSize); + kRegisterZero - register_index * kSystemPointerSize); } void RegExpMacroAssemblerS390::CheckPosition(int cp_offset, @@ -1200,7 +1207,7 @@ void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) { void RegExpMacroAssemblerS390::Push(Register source) { DCHECK(source != backtrack_stackpointer()); __ lay(backtrack_stackpointer(), - MemOperand(backtrack_stackpointer(), -kPointerSize)); + MemOperand(backtrack_stackpointer(), -kSystemPointerSize)); __ StoreP(source, MemOperand(backtrack_stackpointer())); } @@ -1208,7 +1215,7 @@ void RegExpMacroAssemblerS390::Pop(Register target) { DCHECK(target != backtrack_stackpointer()); __ LoadP(target, MemOperand(backtrack_stackpointer())); __ la(backtrack_stackpointer(), - MemOperand(backtrack_stackpointer(), kPointerSize)); + MemOperand(backtrack_stackpointer(), kSystemPointerSize)); } void RegExpMacroAssemblerS390::CheckPreemption() { @@ -1235,13 +1242,15 @@ void RegExpMacroAssemblerS390::CallCFunctionUsingStub( __ mov(code_pointer(), Operand(function)); Label ret; __ larl(r14, &ret); - __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize)); + __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize)); __ b(code_pointer()); __ bind(&ret); - if (base::OS::ActivationFrameAlignment() > kPointerSize) { - __ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize))); + if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { + __ LoadP( + sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize))); } else { - __ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize))); + __ la(sp, + MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize))); } __ mov(code_pointer(), Operand(masm_->CodeObject())); } diff 
--git a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h index 3a6a915263c..4811ac7382a 100644 --- a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h +++ b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h @@ -95,26 +95,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390 kStoredRegisters + kCalleeRegisterSaveAreaSize; // Stack parameters placed by caller. static const int kCaptureArraySize = kCallerFrame; - static const int kStackAreaBase = kCallerFrame + kPointerSize; + static const int kStackAreaBase = kCallerFrame + kSystemPointerSize; // kDirectCall again - static const int kIsolate = kStackAreaBase + 2 * kPointerSize; + static const int kIsolate = kStackAreaBase + 2 * kSystemPointerSize; // Below the frame pointer. // Register parameters stored by setup code. - static const int kDirectCall = kFramePointer - kPointerSize; - static const int kStackHighEnd = kDirectCall - kPointerSize; - static const int kNumOutputRegisters = kStackHighEnd - kPointerSize; - static const int kRegisterOutput = kNumOutputRegisters - kPointerSize; - static const int kInputEnd = kRegisterOutput - kPointerSize; - static const int kInputStart = kInputEnd - kPointerSize; - static const int kStartIndex = kInputStart - kPointerSize; - static const int kInputString = kStartIndex - kPointerSize; + static const int kDirectCall = kFramePointer - kSystemPointerSize; + static const int kStackHighEnd = kDirectCall - kSystemPointerSize; + static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize; + static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize; + static const int kInputEnd = kRegisterOutput - kSystemPointerSize; + static const int kInputStart = kInputEnd - kSystemPointerSize; + static const int kStartIndex = kInputStart - kSystemPointerSize; + static const int kInputString = kStartIndex - kSystemPointerSize; // When adding local variables remember to push space for them in // the frame in GetCode. - static const int kSuccessfulCaptures = kInputString - kPointerSize; - static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize; + static const int kSuccessfulCaptures = kInputString - kSystemPointerSize; + static const int kStringStartMinusOne = + kSuccessfulCaptures - kSystemPointerSize; // First register address. Following registers are below it on the stack. - static const int kRegisterZero = kStringStartMinusOne - kPointerSize; + static const int kRegisterZero = kStringStartMinusOne - kSystemPointerSize; // Initial size of code buffer. static const int kRegExpCodeSize = 1024; diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc index 42ba13c4ee8..4352c3f67f3 100644 --- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc +++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc @@ -296,7 +296,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( } else { DCHECK(mode_ == UC16); // Save important/volatile registers before calling C function. -#ifndef _WIN64 +#ifndef V8_TARGET_OS_WIN // Caller save on Linux and callee save in Windows. __ pushq(rsi); __ pushq(rdi); @@ -311,7 +311,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Address byte_offset2 - Address of current character position. // size_t byte_length - length of capture in bytes(!) // Isolate* isolate or 0 if unicode flag. 
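Swapping _WIN64 for V8_TARGET_OS_WIN matters because _WIN64 describes the toolchain host, while the generated regexp code must follow the ABI of the target; presumably this keeps cross-builds that target Windows on the Microsoft x64 convention. A short sketch of the two conventions the #ifdef now selects between (the register assignments are the standard ABIs, only rcx/rdx/r8/r9 are mentioned in these hunks):

// Which integer registers carry the first C-function arguments depends on the
// target OS, not on the toolchain that compiled V8:
#ifdef V8_TARGET_OS_WIN
// Microsoft x64: rcx, rdx, r8, r9, plus 32 bytes of caller-reserved shadow
// space on the stack for spilling those registers.
constexpr const char* kArgRegs[] = {"rcx", "rdx", "r8", "r9"};
#else
// System V AMD64 (Linux, macOS): rdi, rsi, rdx, rcx, r8, r9.
constexpr const char* kArgRegs[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};
#endif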
-#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN DCHECK(rcx == arg_reg_1); DCHECK(rdx == arg_reg_2); // Compute and set byte_offset1 (start of capture). @@ -333,7 +333,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( if (read_backward) { __ subq(rsi, rbx); } -#endif // _WIN64 +#endif // V8_TARGET_OS_WIN // Set byte_length. __ movq(arg_reg_3, rbx); @@ -358,7 +358,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Restore original values before reacting on result value. __ Move(code_object_pointer(), masm_.CodeObject()); __ popq(backtrack_stackpointer()); -#ifndef _WIN64 +#ifndef V8_TARGET_OS_WIN __ popq(rdi); __ popq(rsi); #endif @@ -683,7 +683,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { __ movq(rbp, rsp); // Save parameters and callee-save registers. Order here should correspond // to order of kBackup_ebx etc. -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots. // Store register parameters in pre-allocated stack slots, __ movq(Operand(rbp, kInputString), rcx); @@ -890,7 +890,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { } __ bind(&return_rax); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Restore callee save registers. __ leaq(rsp, Operand(rbp, kLastCalleeSaveRegister)); __ popq(rbx); @@ -943,7 +943,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { // Reached if the backtrack-stack limit has been hit. // Save registers before calling C function -#ifndef _WIN64 +#ifndef V8_TARGET_OS_WIN // Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI. __ pushq(rsi); __ pushq(rdi); @@ -952,7 +952,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { // Call GrowStack(backtrack_stackpointer()) static const int num_arguments = 3; __ PrepareCallCFunction(num_arguments); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Microsoft passes parameters in rcx, rdx, r8. // First argument, backtrack stackpointer, is already in rcx. __ leaq(rdx, Operand(rbp, kStackHighEnd)); // Second argument @@ -974,7 +974,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { __ movq(backtrack_stackpointer(), rax); // Restore saved registers and continue. __ Move(code_object_pointer(), masm_.CodeObject()); -#ifndef _WIN64 +#ifndef V8_TARGET_OS_WIN __ popq(rdi); __ popq(rsi); #endif @@ -1159,7 +1159,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() { // store anything volatile in a C call or overwritten by this function. static const int num_arguments = 3; __ PrepareCallCFunction(num_arguments); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Second argument: Code of self. (Do this before overwriting r8). __ movq(rdx, code_object_pointer()); // Third argument: RegExp code frame pointer. diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h index 9d011dcd467..1d95a2718fb 100644 --- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h +++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h @@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64 static const int kReturn_eip = kFramePointer + kSystemPointerSize; static const int kFrameAlign = kReturn_eip + kSystemPointerSize; -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Parameters (first four passed as registers, but with room on stack). 
// In Microsoft 64-bit Calling Convention, there is room on the callers // stack (before the return address) to spill parameter registers. We @@ -131,7 +131,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64 static const int kIsolate = kDirectCall + kSystemPointerSize; #endif -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Microsoft calling convention has three callee-saved registers // (that we are using). We push these after the frame pointer. static const int kBackup_rsi = kFramePointer - kSystemPointerSize; diff --git a/chromium/v8/src/roots/roots.h b/chromium/v8/src/roots/roots.h index c82ec6d04f8..c262f639282 100644 --- a/chromium/v8/src/roots/roots.h +++ b/chromium/v8/src/roots/roots.h @@ -206,37 +206,37 @@ class Symbol; // Mutable roots that are known to be immortal immovable, for which we can // safely skip write barriers. -#define STRONG_MUTABLE_IMMOVABLE_ROOT_LIST(V) \ - ACCESSOR_INFO_ROOT_LIST(V) \ - /* Maps */ \ - V(Map, external_map, ExternalMap) \ - V(Map, message_object_map, JSMessageObjectMap) \ - /* Canonical empty values */ \ - V(Script, empty_script, EmptyScript) \ - V(FeedbackCell, many_closures_cell, ManyClosuresCell) \ - V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \ - /* Protectors */ \ - V(Cell, array_constructor_protector, ArrayConstructorProtector) \ - V(PropertyCell, no_elements_protector, NoElementsProtector) \ - V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \ - V(PropertyCell, array_species_protector, ArraySpeciesProtector) \ - V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \ - V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \ - V(Cell, string_length_protector, StringLengthProtector) \ - V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \ - V(PropertyCell, array_buffer_detaching_protector, \ - ArrayBufferDetachingProtector) \ - V(PropertyCell, promise_hook_protector, PromiseHookProtector) \ - V(Cell, promise_resolve_protector, PromiseResolveProtector) \ - V(PropertyCell, map_iterator_protector, MapIteratorProtector) \ - V(PropertyCell, promise_then_protector, PromiseThenProtector) \ - V(PropertyCell, set_iterator_protector, SetIteratorProtector) \ - V(PropertyCell, string_iterator_protector, StringIteratorProtector) \ - /* Caches */ \ - V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ - V(FixedArray, string_split_cache, StringSplitCache) \ - V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \ - /* Indirection lists for isolate-independent builtins */ \ +#define STRONG_MUTABLE_IMMOVABLE_ROOT_LIST(V) \ + ACCESSOR_INFO_ROOT_LIST(V) \ + /* Maps */ \ + V(Map, external_map, ExternalMap) \ + V(Map, message_object_map, JSMessageObjectMap) \ + /* Canonical empty values */ \ + V(Script, empty_script, EmptyScript) \ + V(FeedbackCell, many_closures_cell, ManyClosuresCell) \ + V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \ + /* Protectors */ \ + V(PropertyCell, array_constructor_protector, ArrayConstructorProtector) \ + V(PropertyCell, no_elements_protector, NoElementsProtector) \ + V(PropertyCell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \ + V(PropertyCell, array_species_protector, ArraySpeciesProtector) \ + V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \ + V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \ + V(PropertyCell, string_length_protector, StringLengthProtector) \ + V(PropertyCell, array_iterator_protector, 
ArrayIteratorProtector) \ + V(PropertyCell, array_buffer_detaching_protector, \ + ArrayBufferDetachingProtector) \ + V(PropertyCell, promise_hook_protector, PromiseHookProtector) \ + V(PropertyCell, promise_resolve_protector, PromiseResolveProtector) \ + V(PropertyCell, map_iterator_protector, MapIteratorProtector) \ + V(PropertyCell, promise_then_protector, PromiseThenProtector) \ + V(PropertyCell, set_iterator_protector, SetIteratorProtector) \ + V(PropertyCell, string_iterator_protector, StringIteratorProtector) \ + /* Caches */ \ + V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ + V(FixedArray, string_split_cache, StringSplitCache) \ + V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \ + /* Indirection lists for isolate-independent builtins */ \ V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) // These root references can be updated by the mutator. @@ -255,8 +255,6 @@ class Symbol; /* Feedback vectors that we need for code coverage or type profile */ \ V(Object, feedback_vectors_for_profiling_tools, \ FeedbackVectorsForProfilingTools) \ - V(WeakArrayList, noscript_shared_function_infos, \ - NoScriptSharedFunctionInfos) \ V(FixedArray, serialized_objects, SerializedObjects) \ V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \ V(TemplateList, message_listeners, MessageListeners) \ @@ -268,7 +266,9 @@ class Symbol; V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \ V(HeapObject, interpreter_entry_trampoline_for_profiling, \ InterpreterEntryTrampolineForProfiling) \ - V(Object, pending_optimize_for_test_bytecode, PendingOptimizeForTestBytecode) + V(Object, pending_optimize_for_test_bytecode, \ + PendingOptimizeForTestBytecode) \ + V(WeakArrayList, shared_wasm_memories, SharedWasmMemories) // Entries in this list are limited to Smis and are not visited during GC. #define SMI_ROOT_LIST(V) \ diff --git a/chromium/v8/src/runtime/runtime-array.cc b/chromium/v8/src/runtime/runtime-array.cc index 6190b16cff1..34a8b2b9378 100644 --- a/chromium/v8/src/runtime/runtime-array.cc +++ b/chromium/v8/src/runtime/runtime-array.cc @@ -5,6 +5,7 @@ #include "src/debug/debug.h" #include "src/execution/arguments-inl.h" #include "src/execution/isolate-inl.h" +#include "src/execution/protectors-inl.h" #include "src/heap/factory.h" #include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop. #include "src/heap/heap-write-barrier-inl.h" @@ -136,8 +137,8 @@ RUNTIME_FUNCTION(Runtime_NewArray) { // just flip the bit on the global protector cell instead. // TODO(bmeurer): Find a better way to mark this. Global protectors // tend to back-fire over time... 
- if (isolate->IsArrayConstructorIntact()) { - isolate->InvalidateArrayConstructorProtector(); + if (Protectors::IsArrayConstructorIntact(isolate)) { + Protectors::InvalidateArrayConstructor(isolate); } } } diff --git a/chromium/v8/src/runtime/runtime-classes.cc b/chromium/v8/src/runtime/runtime-classes.cc index 522e93da3f2..a4e9680a1f5 100644 --- a/chromium/v8/src/runtime/runtime-classes.cc +++ b/chromium/v8/src/runtime/runtime-classes.cc @@ -130,7 +130,8 @@ Handle<Name> KeyToName<NumberDictionary>(Isolate* isolate, Handle<Object> key) { inline void SetHomeObject(Isolate* isolate, JSFunction method, JSObject home_object) { if (method.shared().needs_home_object()) { - const int kPropertyIndex = JSFunction::kMaybeHomeObjectDescriptorIndex; + const InternalIndex kPropertyIndex( + JSFunction::kMaybeHomeObjectDescriptorIndex); CHECK_EQ(method.map().instance_descriptors().GetKey(kPropertyIndex), ReadOnlyRoots(isolate).home_object_symbol()); @@ -303,7 +304,7 @@ bool AddDescriptorsByTemplate( // Count the number of properties that must be in the instance and // create the property array to hold the constants. int count = 0; - for (int i = 0; i < nof_descriptors; i++) { + for (InternalIndex i : InternalIndex::Range(nof_descriptors)) { PropertyDetails details = descriptors_template->GetDetails(i); if (details.location() == kDescriptor && details.kind() == kData) { count++; @@ -315,7 +316,7 @@ bool AddDescriptorsByTemplate( // Read values from |descriptors_template| and store possibly post-processed // values into "instantiated" |descriptors| array. int field_index = 0; - for (int i = 0; i < nof_descriptors; i++) { + for (InternalIndex i : InternalIndex::Range(nof_descriptors)) { Object value = descriptors_template->GetStrongValue(i); if (value.IsAccessorPair()) { Handle<AccessorPair> pair = AccessorPair::Copy( diff --git a/chromium/v8/src/runtime/runtime-compiler.cc b/chromium/v8/src/runtime/runtime-compiler.cc index 4364c55775e..c7f3201eacd 100644 --- a/chromium/v8/src/runtime/runtime-compiler.cc +++ b/chromium/v8/src/runtime/runtime-compiler.cc @@ -157,6 +157,9 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) { TimerEventScope<TimerEventDeoptimizeCode> timer(isolate); TRACE_EVENT0("v8", "V8.DeoptimizeCode"); Handle<JSFunction> function = deoptimizer->function(); + // For OSR the optimized code isn't installed on the function, so get the + // code object from deoptimizer. + Handle<Code> optimized_code = deoptimizer->compiled_code(); DeoptimizeKind type = deoptimizer->deopt_kind(); // TODO(turbofan): We currently need the native context to materialize @@ -174,7 +177,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) { // Invalidate the underlying optimized code on non-lazy deopts. if (type != DeoptimizeKind::kLazy) { - Deoptimizer::DeoptimizeFunction(*function); + Deoptimizer::DeoptimizeFunction(*function, *optimized_code); } return ReadOnlyRoots(isolate).undefined_value(); @@ -224,8 +227,7 @@ BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) { RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) { HandleScope scope(isolate); - DCHECK_EQ(1, args.length()); - CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + DCHECK_EQ(0, args.length()); // Only reachable when OST is enabled. CHECK(FLAG_use_osr); @@ -233,7 +235,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) { // Determine frame triggering OSR request. 
JavaScriptFrameIterator it(isolate); JavaScriptFrame* frame = it.frame(); - DCHECK_EQ(frame->function(), *function); DCHECK(frame->is_interpreted()); // Determine the entry point for which this OSR request has been fired and @@ -242,6 +243,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) { DCHECK(!ast_id.IsNone()); MaybeHandle<Code> maybe_result; + Handle<JSFunction> function(frame->function(), isolate); if (IsSuitableForOnStackReplacement(isolate, function)) { if (FLAG_trace_osr) { PrintF("[OSR - Compiling: "); diff --git a/chromium/v8/src/runtime/runtime-debug.cc b/chromium/v8/src/runtime/runtime-debug.cc index 0fbea6a193a..09dd4f8132d 100644 --- a/chromium/v8/src/runtime/runtime-debug.cc +++ b/chromium/v8/src/runtime/runtime-debug.cc @@ -491,8 +491,7 @@ int ScriptLinePosition(Handle<Script> script, int line) { if (line < 0) return -1; if (script->type() == Script::TYPE_WASM) { - return WasmModuleObject::cast(script->wasm_module_object()) - .GetFunctionOffset(line); + return GetWasmFunctionOffset(script->wasm_native_module()->module(), line); } Script::InitLineEnds(script); @@ -827,19 +826,6 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) { return ReadOnlyRoots(isolate).undefined_value(); } -RUNTIME_FUNCTION(Runtime_PerformSideEffectCheckForObject) { - HandleScope scope(isolate); - DCHECK_EQ(1, args.length()); - CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0); - - DCHECK_EQ(isolate->debug_execution_mode(), DebugInfo::kSideEffects); - if (!isolate->debug()->PerformSideEffectCheckForObject(object)) { - DCHECK(isolate->has_pending_exception()); - return ReadOnlyRoots(isolate).exception(); - } - return ReadOnlyRoots(isolate).undefined_value(); -} - RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) { HandleScope scope(isolate); DCHECK_EQ(0, args.length()); diff --git a/chromium/v8/src/runtime/runtime-forin.cc b/chromium/v8/src/runtime/runtime-forin.cc index 6042a867c92..0d7e1dc30b3 100644 --- a/chromium/v8/src/runtime/runtime-forin.cc +++ b/chromium/v8/src/runtime/runtime-forin.cc @@ -33,7 +33,10 @@ MaybeHandle<HeapObject> Enumerate(Isolate* isolate, if (!accumulator.is_receiver_simple_enum()) { Handle<FixedArray> keys; ASSIGN_RETURN_ON_EXCEPTION( - isolate, keys, accumulator.GetKeys(GetKeysConversion::kConvertToString), + isolate, keys, + accumulator.GetKeys(accumulator.may_have_elements() + ? GetKeysConversion::kConvertToString + : GetKeysConversion::kNoNumbers), HeapObject); // Test again, since cache may have been built by GetKeys() calls above. if (!accumulator.is_receiver_simple_enum()) return keys; diff --git a/chromium/v8/src/runtime/runtime-internal.cc b/chromium/v8/src/runtime/runtime-internal.cc index 80f9baa48d3..03c9e582d80 100644 --- a/chromium/v8/src/runtime/runtime-internal.cc +++ b/chromium/v8/src/runtime/runtime-internal.cc @@ -14,6 +14,7 @@ #include "src/execution/frames-inl.h" #include "src/execution/isolate-inl.h" #include "src/execution/messages.h" +#include "src/execution/runtime-profiler.h" #include "src/handles/maybe-handles.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" @@ -296,10 +297,11 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) { function->feedback_vector().set_invocation_count(1); return ReadOnlyRoots(isolate).undefined_value(); } - // Handle interrupts. 
{ SealHandleScope shs(isolate); - return isolate->stack_guard()->HandleInterrupts(); + isolate->counters()->runtime_profiler_ticks()->Increment(); + isolate->runtime_profiler()->MarkCandidatesForOptimization(); + return ReadOnlyRoots(isolate).undefined_value(); } } diff --git a/chromium/v8/src/runtime/runtime-literals.cc b/chromium/v8/src/runtime/runtime-literals.cc index 0ffc6e932ef..497a27dbb90 100644 --- a/chromium/v8/src/runtime/runtime-literals.cc +++ b/chromium/v8/src/runtime/runtime-literals.cc @@ -111,8 +111,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk( if (copy->HasFastProperties(isolate)) { Handle<DescriptorArray> descriptors( copy->map(isolate).instance_descriptors(isolate), isolate); - int limit = copy->map(isolate).NumberOfOwnDescriptors(); - for (int i = 0; i < limit; i++) { + for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) { PropertyDetails details = descriptors->GetDetails(i); DCHECK_EQ(kField, details.location()); DCHECK_EQ(kData, details.kind()); @@ -595,10 +594,11 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) { CONVERT_SMI_ARG_CHECKED(literals_index, 1); CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2); CONVERT_SMI_ARG_CHECKED(flags, 3); - Handle<FeedbackVector> vector = Handle<FeedbackVector>(); - if (!maybe_vector->IsUndefined()) { - DCHECK(maybe_vector->IsFeedbackVector()); + Handle<FeedbackVector> vector; + if (maybe_vector->IsFeedbackVector()) { vector = Handle<FeedbackVector>::cast(maybe_vector); + } else { + DCHECK(maybe_vector->IsUndefined()); } RETURN_RESULT_OR_FAILURE( isolate, CreateLiteral<ObjectLiteralHelper>( @@ -632,10 +632,11 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) { CONVERT_SMI_ARG_CHECKED(literals_index, 1); CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2); CONVERT_SMI_ARG_CHECKED(flags, 3); - Handle<FeedbackVector> vector = Handle<FeedbackVector>(); - if (!maybe_vector->IsUndefined()) { - DCHECK(maybe_vector->IsFeedbackVector()); + Handle<FeedbackVector> vector; + if (maybe_vector->IsFeedbackVector()) { vector = Handle<FeedbackVector>::cast(maybe_vector); + } else { + DCHECK(maybe_vector->IsUndefined()); } RETURN_RESULT_OR_FAILURE( isolate, CreateLiteral<ArrayLiteralHelper>( @@ -649,11 +650,12 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) { CONVERT_SMI_ARG_CHECKED(index, 1); CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2); CONVERT_SMI_ARG_CHECKED(flags, 3); - FeedbackSlot literal_slot(FeedbackVector::ToSlot(index)); - Handle<FeedbackVector> vector = Handle<FeedbackVector>(); - if (!maybe_vector->IsUndefined()) { - DCHECK(maybe_vector->IsFeedbackVector()); + + Handle<FeedbackVector> vector; + if (maybe_vector->IsFeedbackVector()) { vector = Handle<FeedbackVector>::cast(maybe_vector); + } else { + DCHECK(maybe_vector->IsUndefined()); } if (vector.is_null()) { Handle<JSRegExp> new_regexp; @@ -663,20 +665,21 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) { return *new_regexp; } - // Check if boilerplate exists. If not, create it first. - Handle<JSRegExp> boilerplate; + // This function assumes that the boilerplate does not yet exist. 
+ FeedbackSlot literal_slot(FeedbackVector::ToSlot(index)); Handle<Object> literal_site(vector->Get(literal_slot)->cast<Object>(), isolate); - if (!HasBoilerplate(literal_site)) { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, boilerplate, - JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags))); - if (IsUninitializedLiteralSite(*literal_site)) { - PreInitializeLiteralSite(vector, literal_slot); - return *boilerplate; - } - vector->Set(literal_slot, *boilerplate); + CHECK(!HasBoilerplate(literal_site)); + + Handle<JSRegExp> boilerplate; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, boilerplate, + JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags))); + if (IsUninitializedLiteralSite(*literal_site)) { + PreInitializeLiteralSite(vector, literal_slot); + return *boilerplate; } + vector->Set(literal_slot, *boilerplate); return *JSRegExp::Copy(boilerplate); } diff --git a/chromium/v8/src/runtime/runtime-object.cc b/chromium/v8/src/runtime/runtime-object.cc index 310cdaab420..e07d91dd314 100644 --- a/chromium/v8/src/runtime/runtime-object.cc +++ b/chromium/v8/src/runtime/runtime-object.cc @@ -91,7 +91,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver, // (2) The property to be deleted must be the last property. int nof = receiver_map->NumberOfOwnDescriptors(); if (nof == 0) return false; - int descriptor = nof - 1; + InternalIndex descriptor(nof - 1); Handle<DescriptorArray> descriptors(receiver_map->instance_descriptors(), isolate); if (descriptors->GetKey(descriptor) != *key) return false; @@ -132,8 +132,12 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver, // for properties stored in the descriptor array. if (details.location() == kField) { DisallowHeapAllocation no_allocation; - isolate->heap()->NotifyObjectLayoutChange( - *receiver, receiver_map->instance_size(), no_allocation); + + // Invalidate slots manually later in case we delete an in-object tagged + // property. In this case we might later store an untagged value in the + // recorded slot. + isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation, + InvalidateRecordedSlots::kNo); FieldIndex index = FieldIndex::ForPropertyIndex(*receiver_map, details.field_index()); // Special case deleting the last out-of object property. @@ -149,8 +153,13 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver, // Slot clearing is the reason why this entire function cannot currently // be implemented in the DeleteProperty stub. if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) { + // We need to clear the recorded slot in this case because in-object + // slack tracking might not be finished. This ensures that we don't + // have recorded slots in free space. isolate->heap()->ClearRecordedSlot(*receiver, receiver->RawField(index.offset())); + MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver); + chunk->InvalidateRecordedSlots(*receiver); } } } diff --git a/chromium/v8/src/runtime/runtime-regexp.cc b/chromium/v8/src/runtime/runtime-regexp.cc index e197e16e112..980339ee5e5 100644 --- a/chromium/v8/src/runtime/runtime-regexp.cc +++ b/chromium/v8/src/runtime/runtime-regexp.cc @@ -613,20 +613,6 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString( JSRegExp::Type typeTag = regexp->TypeTag(); if (typeTag == JSRegExp::IRREGEXP) { - // Force tier up to native code for global replaces. 
The global replace is - // implemented differently for native code and bytecode execution, where the - // native code expects an array to store all the matches, and the bytecode - // matches one at a time, so it's easier to tier-up to native code from the - // start. - if (FLAG_regexp_tier_up) { - regexp->MarkTierUpForNextExec(); - if (FLAG_trace_regexp_tier_up) { - PrintF( - "Forcing tier-up of JSRegExp object %p in " - "StringReplaceGlobalRegExpWithString\n", - reinterpret_cast<void*>(regexp->ptr())); - } - } // Ensure the RegExp is compiled so we can access the capture-name map. if (RegExp::IrregexpPrepare(isolate, regexp, subject) == -1) { DCHECK(isolate->has_pending_exception()); @@ -1349,6 +1335,19 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace( RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0), String); + // Force tier up to native code for global replaces. The global replace is + // implemented differently for native code and bytecode execution, where the + // native code expects an array to store all the matches, and the bytecode + // matches one at a time, so it's easier to tier-up to native code from the + // start. + if (FLAG_regexp_tier_up && regexp->TypeTag() == JSRegExp::IRREGEXP) { + regexp->MarkTierUpForNextExec(); + if (FLAG_trace_regexp_tier_up) { + PrintF("Forcing tier-up of JSRegExp object %p in RegExpReplace\n", + reinterpret_cast<void*>(regexp->ptr())); + } + } + if (replace->length() == 0) { if (string->IsOneByteRepresentation()) { Object result = diff --git a/chromium/v8/src/runtime/runtime-test.cc b/chromium/v8/src/runtime/runtime-test.cc index a766dd5db29..a58b28ce52f 100644 --- a/chromium/v8/src/runtime/runtime-test.cc +++ b/chromium/v8/src/runtime/runtime-test.cc @@ -32,6 +32,7 @@ #include "src/utils/ostreams.h" #include "src/wasm/memory-tracing.h" #include "src/wasm/module-compiler.h" +#include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-objects-inl.h" @@ -676,6 +677,47 @@ RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) { return ReadOnlyRoots(isolate).undefined_value(); } +namespace { + +int FixedArrayLenFromSize(int size) { + return Min((size - FixedArray::kHeaderSize) / kTaggedSize, + FixedArray::kMaxRegularLength); +} + +void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) { + NewSpace* space = heap->new_space(); + int space_remaining = static_cast<int>(*space->allocation_limit_address() - + *space->allocation_top_address()); + while (space_remaining > 0) { + int length = FixedArrayLenFromSize(space_remaining); + if (length > 0) { + Handle<FixedArray> padding = + isolate->factory()->NewFixedArray(length, AllocationType::kYoung); + DCHECK(heap->new_space()->Contains(*padding)); + space_remaining -= padding->Size(); + } else { + // Not enough room to create another fixed array. Create a filler. 
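Runtime_SimulateNewspaceFull works by converting however many bytes remain on the current new-space page into the longest FixedArray that fits, then plugging any unusable remainder with a filler object. The length arithmetic is simple enough to check by hand; here is a stand-alone version with illustrative layout constants (the real kHeaderSize, kTaggedSize and kMaxRegularLength come from V8's object layout and differ per configuration):

#include <algorithm>
#include <cassert>

// Illustrative layout: a FixedArray header is two tagged words (map + length);
// assume uncompressed 8-byte tagged values here.
constexpr int kTaggedSize = 8;
constexpr int kFixedArrayHeaderSize = 2 * kTaggedSize;
constexpr int kMaxRegularLength = 1 << 16;  // stand-in value

// Mirrors FixedArrayLenFromSize(): how many elements fit in `size` bytes.
int FixedArrayLenFromSize(int size) {
  return std::min((size - kFixedArrayHeaderSize) / kTaggedSize, kMaxRegularLength);
}

int main() {
  // 4096 bytes left on the page -> (4096 - 16) / 8 = 510 elements.
  assert(FixedArrayLenFromSize(4096) == 510);
  // Less than a header's worth of space -> non-positive length, so the caller
  // creates a filler object instead of another array.
  assert(FixedArrayLenFromSize(8) <= 0);
}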
+ heap->CreateFillerObjectAt(*heap->new_space()->allocation_top_address(), + space_remaining, ClearRecordedSlots::kNo); + break; + } + } +} + +} // namespace + +RUNTIME_FUNCTION(Runtime_SimulateNewspaceFull) { + HandleScope scope(isolate); + Heap* heap = isolate->heap(); + NewSpace* space = heap->new_space(); + PauseAllocationObserversScope pause_observers(heap); + AlwaysAllocateScope always_allocate(heap); + do { + FillUpOneNewSpacePage(isolate, heap); + } while (space->AddFreshPage()); + + return ReadOnlyRoots(isolate).undefined_value(); +} RUNTIME_FUNCTION(Runtime_DebugPrint) { SealHandleScope shs(isolate); @@ -1008,7 +1050,7 @@ RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) { RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) { HandleScope scope(isolate); DCHECK_EQ(2, args.length()); - CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0); + CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0); CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1); Handle<Object> tag = WasmExceptionPackage::GetExceptionTag(isolate, exception); @@ -1024,7 +1066,7 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) { RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) { HandleScope scope(isolate); DCHECK_EQ(1, args.length()); - CONVERT_ARG_HANDLE_CHECKED(JSReceiver, exception, 0); + CONVERT_ARG_HANDLE_CHECKED(WasmExceptionPackage, exception, 0); Handle<Object> values_obj = WasmExceptionPackage::GetExceptionValues(isolate, exception); CHECK(values_obj->IsFixedArray()); // Only called with correct input. @@ -1107,20 +1149,22 @@ RUNTIME_FUNCTION(Runtime_ArraySpeciesProtector) { RUNTIME_FUNCTION(Runtime_MapIteratorProtector) { SealHandleScope shs(isolate); DCHECK_EQ(0, args.length()); - return isolate->heap()->ToBoolean(isolate->IsMapIteratorLookupChainIntact()); + return isolate->heap()->ToBoolean( + Protectors::IsMapIteratorLookupChainIntact(isolate)); } RUNTIME_FUNCTION(Runtime_SetIteratorProtector) { SealHandleScope shs(isolate); DCHECK_EQ(0, args.length()); - return isolate->heap()->ToBoolean(isolate->IsSetIteratorLookupChainIntact()); + return isolate->heap()->ToBoolean( + Protectors::IsSetIteratorLookupChainIntact(isolate)); } RUNTIME_FUNCTION(Runtime_StringIteratorProtector) { SealHandleScope shs(isolate); DCHECK_EQ(0, args.length()); return isolate->heap()->ToBoolean( - isolate->IsStringIteratorLookupChainIntact()); + Protectors::IsStringIteratorLookupChainIntact(isolate)); } // Take a compiled wasm module and serialize it into an array buffer, which is @@ -1132,17 +1176,22 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) { wasm::NativeModule* native_module = module_obj->native_module(); wasm::WasmSerializer wasm_serializer(native_module); - size_t compiled_size = wasm_serializer.GetSerializedNativeModuleSize(); - void* array_data = isolate->array_buffer_allocator()->Allocate(compiled_size); - Handle<JSArrayBuffer> array_buffer = - isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared); - JSArrayBuffer::Setup(array_buffer, isolate, false, array_data, compiled_size); - if (!array_data || - !wasm_serializer.SerializeNativeModule( - {reinterpret_cast<uint8_t*>(array_data), compiled_size})) { - return ReadOnlyRoots(isolate).undefined_value(); + size_t byte_length = wasm_serializer.GetSerializedNativeModuleSize(); + + MaybeHandle<JSArrayBuffer> result = + isolate->factory()->NewJSArrayBufferAndBackingStore( + byte_length, InitializedFlag::kUninitialized); + + Handle<JSArrayBuffer> array_buffer; + if (result.ToHandle(&array_buffer) && + wasm_serializer.SerializeNativeModule( + 
{reinterpret_cast<uint8_t*>(array_buffer->backing_store()), + byte_length})) { + return *array_buffer; } - return *array_buffer; + + // Error. Return undefined. + return ReadOnlyRoots(isolate).undefined_value(); } // Take an array buffer and attempt to reconstruct a compiled wasm module. @@ -1210,7 +1259,8 @@ RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) { DCHECK_EQ(1, args.length()); CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0); int instance_count = 0; - WeakArrayList weak_instance_list = module_obj->weak_instance_list(); + WeakArrayList weak_instance_list = + module_obj->script().wasm_weak_instance_list(); for (int i = 0; i < weak_instance_list.length(); ++i) { if (weak_instance_list.Get(i)->IsWeak()) instance_count++; } @@ -1226,6 +1276,22 @@ RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) { return *isolate->factory()->NewNumberFromSize(static_cast<size_t>(num)); } +RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) { + DCHECK_EQ(1, args.length()); + HandleScope scope(isolate); + CONVERT_ARG_HANDLE_CHECKED(JSObject, argument, 0); + Handle<WasmModuleObject> module; + if (argument->IsWasmInstanceObject()) { + module = handle(Handle<WasmInstanceObject>::cast(argument)->module_object(), + isolate); + } else if (argument->IsWasmModuleObject()) { + module = Handle<WasmModuleObject>::cast(argument); + } + size_t num_spaces = + module->native_module()->GetNumberOfCodeSpacesForTesting(); + return *isolate->factory()->NewNumberFromSize(num_spaces); +} + RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) { DCHECK_EQ(2, args.length()); HandleScope scope(isolate); diff --git a/chromium/v8/src/runtime/runtime-typedarray.cc b/chromium/v8/src/runtime/runtime-typedarray.cc index 7fab051cbf6..327c1022388 100644 --- a/chromium/v8/src/runtime/runtime-typedarray.cc +++ b/chromium/v8/src/runtime/runtime-typedarray.cc @@ -27,22 +27,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferDetach) { isolate, NewTypeError(MessageTemplate::kNotTypedArray)); } Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(argument); - if (!array_buffer->is_detachable()) { - return ReadOnlyRoots(isolate).undefined_value(); - } - if (array_buffer->backing_store() == nullptr) { - CHECK_EQ(0, array_buffer->byte_length()); - return ReadOnlyRoots(isolate).undefined_value(); - } - // Shared array buffers should never be detached. - CHECK(!array_buffer->is_shared()); - DCHECK(!array_buffer->is_external()); - void* backing_store = array_buffer->backing_store(); - size_t byte_length = array_buffer->byte_length(); - array_buffer->set_is_external(true); - isolate->heap()->UnregisterArrayBuffer(*array_buffer); array_buffer->Detach(); - isolate->array_buffer_allocator()->Free(backing_store, byte_length); return ReadOnlyRoots(isolate).undefined_value(); } diff --git a/chromium/v8/src/runtime/runtime-wasm.cc b/chromium/v8/src/runtime/runtime-wasm.cc index 57e59c07be7..b0153b78286 100644 --- a/chromium/v8/src/runtime/runtime-wasm.cc +++ b/chromium/v8/src/runtime/runtime-wasm.cc @@ -150,7 +150,12 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetTag) { CONVERT_ARG_CHECKED(Object, except_obj_raw, 0); // TODO(mstarzinger): Manually box because parameters are not visited yet. 
Handle<Object> except_obj(except_obj_raw, isolate); - return *WasmExceptionPackage::GetExceptionTag(isolate, except_obj); + if (!except_obj->IsWasmExceptionPackage(isolate)) { + return ReadOnlyRoots(isolate).undefined_value(); + } + Handle<WasmExceptionPackage> exception = + Handle<WasmExceptionPackage>::cast(except_obj); + return *WasmExceptionPackage::GetExceptionTag(isolate, exception); } RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) { @@ -162,7 +167,12 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) { CONVERT_ARG_CHECKED(Object, except_obj_raw, 0); // TODO(mstarzinger): Manually box because parameters are not visited yet. Handle<Object> except_obj(except_obj_raw, isolate); - return *WasmExceptionPackage::GetExceptionValues(isolate, except_obj); + if (!except_obj->IsWasmExceptionPackage(isolate)) { + return ReadOnlyRoots(isolate).undefined_value(); + } + Handle<WasmExceptionPackage> exception = + Handle<WasmExceptionPackage>::cast(except_obj); + return *WasmExceptionPackage::GetExceptionValues(isolate, exception); } RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) { diff --git a/chromium/v8/src/runtime/runtime.h b/chromium/v8/src/runtime/runtime.h index d705b05752c..8319aabe2c8 100644 --- a/chromium/v8/src/runtime/runtime.h +++ b/chromium/v8/src/runtime/runtime.h @@ -99,7 +99,7 @@ namespace internal { F(WeakCollectionSet, 4, 1) #define FOR_EACH_INTRINSIC_COMPILER(F, I) \ - F(CompileForOnStackReplacement, 1, 1) \ + F(CompileForOnStackReplacement, 0, 1) \ F(CompileLazy, 1, 1) \ F(CompileOptimized_Concurrent, 1, 1) \ F(CompileOptimized_NotConcurrent, 1, 1) \ @@ -319,7 +319,6 @@ namespace internal { F(ObjectValues, 1, 1) \ F(ObjectValuesSkipFastPath, 1, 1) \ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \ - F(PerformSideEffectCheckForObject, 1, 1) \ F(SetDataProperties, 2, 1) \ F(SetKeyedProperty, 3, 1) \ F(SetNamedProperty, 3, 1) \ @@ -515,6 +514,7 @@ namespace internal { F(SetWasmCompileControls, 2, 1) \ F(SetWasmInstantiateControls, 0, 1) \ F(SetWasmThreadsEnabled, 1, 1) \ + F(SimulateNewspaceFull, 0, 1) \ F(StringIteratorProtector, 0, 1) \ F(SystemBreak, 0, 1) \ F(TraceEnter, 0, 1) \ @@ -523,6 +523,7 @@ namespace internal { F(UnblockConcurrentRecompilation, 0, 1) \ F(WasmGetNumberOfInstances, 1, 1) \ F(WasmNumInterpretedCalls, 1, 1) \ + F(WasmNumCodeSpaces, 1, 1) \ F(WasmTierUpFunction, 2, 1) \ F(WasmTraceMemory, 1, 1) \ I(DeoptimizeNow, 0, 1) diff --git a/chromium/v8/src/sanitizer/OWNERS b/chromium/v8/src/sanitizer/OWNERS index 96c9d10c122..29f827d160e 100644 --- a/chromium/v8/src/sanitizer/OWNERS +++ b/chromium/v8/src/sanitizer/OWNERS @@ -1,3 +1,3 @@ file:../../INFRA_OWNERS -clemensh@chromium.org +clemensb@chromium.org diff --git a/chromium/v8/src/snapshot/deserializer.cc b/chromium/v8/src/snapshot/deserializer.cc index 25e32e2cc0b..e477817d20b 100644 --- a/chromium/v8/src/snapshot/deserializer.cc +++ b/chromium/v8/src/snapshot/deserializer.cc @@ -291,23 +291,30 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, data_view.byte_offset()); } else if (obj.IsJSTypedArray()) { JSTypedArray typed_array = JSTypedArray::cast(obj); - // Only fixup for the off-heap case. - if (!typed_array.is_on_heap()) { - Smi store_index( - reinterpret_cast<Address>(typed_array.external_pointer())); - byte* backing_store = off_heap_backing_stores_[store_index.value()] + - typed_array.byte_offset(); - typed_array.set_external_pointer(backing_store); + // Fixup typed array pointers. 
+ if (typed_array.is_on_heap()) { + typed_array.SetOnHeapDataPtr(HeapObject::cast(typed_array.base_pointer()), + typed_array.external_pointer()); + } else { + // Serializer writes backing store ref as a DataPtr() value. + size_t store_index = reinterpret_cast<size_t>(typed_array.DataPtr()); + auto backing_store = backing_stores_[store_index]; + auto start = backing_store + ? reinterpret_cast<byte*>(backing_store->buffer_start()) + : nullptr; + typed_array.SetOffHeapDataPtr(start, typed_array.byte_offset()); } } else if (obj.IsJSArrayBuffer()) { JSArrayBuffer buffer = JSArrayBuffer::cast(obj); // Only fixup for the off-heap case. if (buffer.backing_store() != nullptr) { - Smi store_index(reinterpret_cast<Address>(buffer.backing_store())); - void* backing_store = off_heap_backing_stores_[store_index.value()]; - - buffer.set_backing_store(backing_store); - isolate_->heap()->RegisterNewArrayBuffer(buffer); + // Serializer writes backing store ref in |backing_store| field. + size_t store_index = reinterpret_cast<size_t>(buffer.backing_store()); + auto backing_store = backing_stores_[store_index]; + SharedFlag shared = backing_store && backing_store->is_shared() + ? SharedFlag::kShared + : SharedFlag::kNotShared; + buffer.Setup(shared, backing_store); } } else if (obj.IsBytecodeArray()) { // TODO(mythria): Remove these once we store the default values for these @@ -523,9 +530,10 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, // Write barrier support costs around 1% in startup time. In fact there // are no new space objects in current boot snapshots, so it's not needed, // but that may change. - bool write_barrier_needed = (current_object_address != kNullAddress && - source_space != SnapshotSpace::kNew && - source_space != SnapshotSpace::kCode); + bool write_barrier_needed = + (current_object_address != kNullAddress && + source_space != SnapshotSpace::kNew && + source_space != SnapshotSpace::kCode && !FLAG_disable_write_barriers); while (current < limit) { byte data = source_.Get(); switch (data) { @@ -669,12 +677,12 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, case kOffHeapBackingStore: { int byte_length = source_.GetInt(); - byte* backing_store = static_cast<byte*>( - isolate->array_buffer_allocator()->AllocateUninitialized( - byte_length)); + std::unique_ptr<BackingStore> backing_store = + BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared, + InitializedFlag::kUninitialized); CHECK_NOT_NULL(backing_store); - source_.CopyRaw(backing_store, byte_length); - off_heap_backing_stores_.push_back(backing_store); + source_.CopyRaw(backing_store->buffer_start(), byte_length); + backing_stores_.push_back(std::move(backing_store)); break; } @@ -842,6 +850,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current, // Don't update current pointer here as it may be needed for write barrier. 
Write(current, heap_object_ref); if (emit_write_barrier && write_barrier_needed) { + DCHECK_IMPLIES(FLAG_disable_write_barriers, !write_barrier_needed); HeapObject host_object = HeapObject::FromAddress(current_object_address); SLOW_DCHECK(isolate->heap()->Contains(host_object)); GenerationalBarrier(host_object, MaybeObjectSlot(current.address()), diff --git a/chromium/v8/src/snapshot/deserializer.h b/chromium/v8/src/snapshot/deserializer.h index 8dce1b3f3fe..9f66c37ac56 100644 --- a/chromium/v8/src/snapshot/deserializer.h +++ b/chromium/v8/src/snapshot/deserializer.h @@ -10,6 +10,7 @@ #include "src/objects/allocation-site.h" #include "src/objects/api-callbacks.h" +#include "src/objects/backing-store.h" #include "src/objects/code.h" #include "src/objects/js-array.h" #include "src/objects/map.h" @@ -56,7 +57,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer { allocator()->DecodeReservation(data->Reservations()); // We start the indices here at 1, so that we can distinguish between an // actual index and a nullptr in a deserialized object requiring fix-up. - off_heap_backing_stores_.push_back(nullptr); + backing_stores_.push_back({}); } void Initialize(Isolate* isolate); @@ -173,7 +174,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer { std::vector<CallHandlerInfo> call_handler_infos_; std::vector<Handle<String>> new_internalized_strings_; std::vector<Handle<Script>> new_scripts_; - std::vector<byte*> off_heap_backing_stores_; + std::vector<std::shared_ptr<BackingStore>> backing_stores_; DeserializerAllocator allocator_; const bool deserializing_user_code_; diff --git a/chromium/v8/src/snapshot/embedded/embedded-file-writer.h b/chromium/v8/src/snapshot/embedded/embedded-file-writer.h index e487b9be9bc..75fdb2eac39 100644 --- a/chromium/v8/src/snapshot/embedded/embedded-file-writer.h +++ b/chromium/v8/src/snapshot/embedded/embedded-file-writer.h @@ -8,6 +8,7 @@ #include <cinttypes> #include <cstdio> #include <cstring> +#include <memory> #include "src/common/globals.h" #include "src/snapshot/embedded/embedded-data.h" diff --git a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc index a17f039fa29..7a04a9dfabf 100644 --- a/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc +++ b/chromium/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc @@ -136,16 +136,16 @@ std::unique_ptr<PlatformEmbeddedFileWriterBase> NewPlatformEmbeddedFileWriter( auto embedded_target_os = ToEmbeddedTargetOs(target_os); if (embedded_target_os == EmbeddedTargetOs::kAIX) { - return base::make_unique<PlatformEmbeddedFileWriterAIX>( - embedded_target_arch, embedded_target_os); + return std::make_unique<PlatformEmbeddedFileWriterAIX>(embedded_target_arch, + embedded_target_os); } else if (embedded_target_os == EmbeddedTargetOs::kMac) { - return base::make_unique<PlatformEmbeddedFileWriterMac>( - embedded_target_arch, embedded_target_os); + return std::make_unique<PlatformEmbeddedFileWriterMac>(embedded_target_arch, + embedded_target_os); } else if (embedded_target_os == EmbeddedTargetOs::kWin) { - return base::make_unique<PlatformEmbeddedFileWriterWin>( - embedded_target_arch, embedded_target_os); + return std::make_unique<PlatformEmbeddedFileWriterWin>(embedded_target_arch, + embedded_target_os); } else { - return base::make_unique<PlatformEmbeddedFileWriterGeneric>( + return std::make_unique<PlatformEmbeddedFileWriterGeneric>( embedded_target_arch, 
embedded_target_os); } diff --git a/chromium/v8/src/snapshot/natives-external.cc b/chromium/v8/src/snapshot/natives-external.cc index fe67f330872..4aa411dd92e 100644 --- a/chromium/v8/src/snapshot/natives-external.cc +++ b/chromium/v8/src/snapshot/natives-external.cc @@ -61,9 +61,10 @@ class NativesStore { // We expect the libraries in the following format: // int: # of sources. // 2N blobs: N pairs of source name + actual source. - int library_count = source->GetInt(); - for (int i = 0; i < library_count; ++i) + int library_count = source->GetIntSlow(); + for (int i = 0; i < library_count; ++i) { store->ReadNameAndContentPair(source); + } return store; } diff --git a/chromium/v8/src/snapshot/object-deserializer.cc b/chromium/v8/src/snapshot/object-deserializer.cc index 63a0cfca175..daada252ba2 100644 --- a/chromium/v8/src/snapshot/object-deserializer.cc +++ b/chromium/v8/src/snapshot/object-deserializer.cc @@ -60,7 +60,9 @@ void ObjectDeserializer::FlushICache() { DCHECK(deserializing_user_code()); for (Code code : new_code_objects()) { // Record all references to embedded objects in the new code object. +#ifndef V8_DISABLE_WRITE_BARRIERS WriteBarrierForCode(code); +#endif FlushInstructionCache(code.raw_instruction_start(), code.raw_instruction_size()); } diff --git a/chromium/v8/src/snapshot/partial-serializer.cc b/chromium/v8/src/snapshot/partial-serializer.cc index 7b4ffbb2bfe..c362fdb0ce4 100644 --- a/chromium/v8/src/snapshot/partial-serializer.cc +++ b/chromium/v8/src/snapshot/partial-serializer.cc @@ -176,7 +176,8 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) { } else { // If no serializer is provided and the field was empty, we serialize it // by default to nullptr. - if (serialize_embedder_fields_.callback == nullptr && object.ptr() == 0) { + if (serialize_embedder_fields_.callback == nullptr && + object == Smi::zero()) { serialized_data.push_back({nullptr, 0}); } else { DCHECK_NOT_NULL(serialize_embedder_fields_.callback); diff --git a/chromium/v8/src/snapshot/serializer.cc b/chromium/v8/src/snapshot/serializer.cc index 5b68aaa87bc..f009f08fc74 100644 --- a/chromium/v8/src/snapshot/serializer.cc +++ b/chromium/v8/src/snapshot/serializer.cc @@ -342,7 +342,7 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space, serializer_->SerializeObject(map); } -int32_t Serializer::ObjectSerializer::SerializeBackingStore( +uint32_t Serializer::ObjectSerializer::SerializeBackingStore( void* backing_store, int32_t byte_length) { SerializerReference reference = serializer_->reference_map()->LookupReference(backing_store); @@ -358,13 +358,15 @@ int32_t Serializer::ObjectSerializer::SerializeBackingStore( serializer_->reference_map()->Add(backing_store, reference); } - return static_cast<int32_t>(reference.off_heap_backing_store_index()); + return reference.off_heap_backing_store_index(); } void Serializer::ObjectSerializer::SerializeJSTypedArray() { JSTypedArray typed_array = JSTypedArray::cast(object_); - if (!typed_array.WasDetached()) { - if (!typed_array.is_on_heap()) { + if (typed_array.is_on_heap()) { + typed_array.RemoveExternalPointerCompensationForSerialization(); + } else { + if (!typed_array.WasDetached()) { // Explicitly serialize the backing store now. 
JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array.buffer()); CHECK_LE(buffer.byte_length(), Smi::kMaxValue); @@ -372,21 +374,20 @@ void Serializer::ObjectSerializer::SerializeJSTypedArray() { int32_t byte_length = static_cast<int32_t>(buffer.byte_length()); int32_t byte_offset = static_cast<int32_t>(typed_array.byte_offset()); - // We need to calculate the backing store from the external pointer + // We need to calculate the backing store from the data pointer // because the ArrayBuffer may already have been serialized. void* backing_store = reinterpret_cast<void*>( - reinterpret_cast<intptr_t>(typed_array.external_pointer()) - - byte_offset); - int32_t ref = SerializeBackingStore(backing_store, byte_length); - - // The external_pointer is the backing_store + typed_array->byte_offset. - // To properly share the buffer, we set the backing store ref here. On - // deserialization we re-add the byte_offset to external_pointer. - typed_array.set_external_pointer( - reinterpret_cast<void*>(Smi::FromInt(ref).ptr())); + reinterpret_cast<Address>(typed_array.DataPtr()) - byte_offset); + + uint32_t ref = SerializeBackingStore(backing_store, byte_length); + // To properly share the buffer, we set the backing store ref as an + // off-heap offset from nullptr. On deserialization we re-set data + // pointer to proper value. + typed_array.SetOffHeapDataPtr(nullptr, ref); + DCHECK_EQ(ref, reinterpret_cast<Address>(typed_array.DataPtr())); + } else { + typed_array.SetOffHeapDataPtr(nullptr, 0); } - } else { - typed_array.set_external_pointer(nullptr); } SerializeObject(); } @@ -400,8 +401,11 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() { // The embedder-allocated backing store only exists for the off-heap case. if (backing_store != nullptr) { - int32_t ref = SerializeBackingStore(backing_store, byte_length); - buffer.set_backing_store(reinterpret_cast<void*>(Smi::FromInt(ref).ptr())); + uint32_t ref = SerializeBackingStore(backing_store, byte_length); + // To properly share the buffer, we set the backing store ref as an + // a backing store address. On deserialization we re-set data pointer + // to proper value. + buffer.set_backing_store(reinterpret_cast<void*>(static_cast<size_t>(ref))); } SerializeObject(); buffer.set_backing_store(backing_store); diff --git a/chromium/v8/src/snapshot/serializer.h b/chromium/v8/src/snapshot/serializer.h index fad2ec8a88f..73a9a1eaac4 100644 --- a/chromium/v8/src/snapshot/serializer.h +++ b/chromium/v8/src/snapshot/serializer.h @@ -328,7 +328,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor { void SerializeContent(Map map, int size); void OutputRawData(Address up_to); void OutputCode(int size); - int32_t SerializeBackingStore(void* backing_store, int32_t byte_length); + uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length); void SerializeJSTypedArray(); void SerializeJSArrayBuffer(); void SerializeExternalString(); diff --git a/chromium/v8/src/snapshot/snapshot-source-sink.h b/chromium/v8/src/snapshot/snapshot-source-sink.h index f20f2ad33f5..9cdb85089e1 100644 --- a/chromium/v8/src/snapshot/snapshot-source-sink.h +++ b/chromium/v8/src/snapshot/snapshot-source-sink.h @@ -63,6 +63,24 @@ class SnapshotByteSource final { return answer; } + int GetIntSlow() { + // Unlike GetInt, this reads only up to the end of the blob, even if less + // than 4 bytes are remaining. + // TODO(jgruber): Remove once the use in MakeFromScriptsSource is gone. 
+ DCHECK(position_ < length_); + uint32_t answer = data_[position_]; + if (position_ + 1 < length_) answer |= data_[position_ + 1] << 8; + if (position_ + 2 < length_) answer |= data_[position_ + 2] << 16; + if (position_ + 3 < length_) answer |= data_[position_ + 3] << 24; + int bytes = (answer & 3) + 1; + Advance(bytes); + uint32_t mask = 0xffffffffu; + mask >>= 32 - (bytes << 3); + answer &= mask; + answer >>= 2; + return answer; + } + // Returns length. int GetBlob(const byte** data); diff --git a/chromium/v8/src/strings/string-hasher-inl.h b/chromium/v8/src/strings/string-hasher-inl.h index b547d0a78da..0c69e6c7baa 100644 --- a/chromium/v8/src/strings/string-hasher-inl.h +++ b/chromium/v8/src/strings/string-hasher-inl.h @@ -34,32 +34,63 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) { uint32_t StringHasher::GetTrivialHash(int length) { DCHECK_GT(length, String::kMaxHashCalcLength); // String hash of a large string is simply the length. - return (length << String::kHashShift) | String::kIsNotArrayIndexMask; + return (static_cast<uint32_t>(length) << String::kHashShift) | + String::kIsNotArrayIndexMask | String::kIsNotIntegerIndexMask; } template <typename schar> uint32_t StringHasher::HashSequentialString(const schar* chars, int length, uint64_t seed) { - // Check whether the string is a valid array index. In that case, compute the - // array index hash. It'll fall through to compute a regular string hash from - // the start if it turns out that the string isn't a valid array index. - if (IsInRange(length, 1, String::kMaxArrayIndexSize)) { + DCHECK_LE(0, length); + DCHECK_IMPLIES(0 < length, chars != nullptr); + if (length >= 1) { if (IsDecimalDigit(chars[0]) && (length == 1 || chars[0] != '0')) { - uint32_t index = chars[0] - '0'; - int i = 1; - do { - if (i == length) { - return MakeArrayIndexHash(index, length); + uint32_t index = 0; + if (length <= String::kMaxArrayIndexSize) { + // Possible array index; try to compute the array index hash. + index = chars[0] - '0'; + int i = 1; + do { + if (i == length) { + return MakeArrayIndexHash(index, length); + } + } while (TryAddIndexChar(&index, chars[i++])); + } + // The following block wouldn't do anything on 32-bit platforms, + // because kMaxArrayIndexSize == kMaxIntegerIndexSize there, and + // if we wanted to compile it everywhere, then {index_big} would + // have to be a {size_t}, which the Mac compiler doesn't like to + // implicitly cast to uint64_t for the {TryAddIndexChar} call. +#if V8_HOST_ARCH_64_BIT + // No "else" here: if the block above was entered and fell through, + // we'll have to take this branch. + if (length <= String::kMaxIntegerIndexSize) { + // Not an array index, but it could still be an integer index. + // Perform a regular hash computation, and additionally check + // if there are non-digit characters. + uint32_t is_integer_index = 0; + uint32_t running_hash = static_cast<uint32_t>(seed); + uint64_t index_big = index; + const schar* end = &chars[length]; + while (chars != end) { + if (is_integer_index == 0 && !TryAddIndexChar(&index_big, *chars)) { + is_integer_index = String::kIsNotIntegerIndexMask; + } + running_hash = AddCharacterCore(running_hash, *chars++); } - } while (TryAddIndexChar(&index, chars[i++])); + return (GetHashCore(running_hash) << String::kHashShift) | + String::kIsNotArrayIndexMask | is_integer_index; + } +#endif + } + // No "else" here: if the first character was a decimal digit, we might + // still have to take this branch. 
+ if (length > String::kMaxHashCalcLength) { + return GetTrivialHash(length); } - } else if (length > String::kMaxHashCalcLength) { - return GetTrivialHash(length); } - // Non-array-index hash. - DCHECK_LE(0, length); - DCHECK_IMPLIES(0 < length, chars != nullptr); + // Non-index hash. uint32_t running_hash = static_cast<uint32_t>(seed); const schar* end = &chars[length]; while (chars != end) { @@ -67,7 +98,7 @@ uint32_t StringHasher::HashSequentialString(const schar* chars, int length, } return (GetHashCore(running_hash) << String::kHashShift) | - String::kIsNotArrayIndexMask; + String::kIsNotArrayIndexMask | String::kIsNotIntegerIndexMask; } std::size_t SeededStringHasher::operator()(const char* name) const { diff --git a/chromium/v8/src/strings/string-stream.cc b/chromium/v8/src/strings/string-stream.cc index 25a8ffc3c1a..5747f66bba1 100644 --- a/chromium/v8/src/strings/string-stream.cc +++ b/chromium/v8/src/strings/string-stream.cc @@ -298,9 +298,8 @@ void StringStream::PrintName(Object name) { void StringStream::PrintUsingMap(JSObject js_object) { Map map = js_object.map(); - int real_size = map.NumberOfOwnDescriptors(); DescriptorArray descs = map.instance_descriptors(); - for (int i = 0; i < real_size; i++) { + for (InternalIndex i : map.IterateOwnDescriptors()) { PropertyDetails details = descs.GetDetails(i); if (details.location() == kField) { DCHECK_EQ(kData, details.kind()); diff --git a/chromium/v8/src/strings/string-stream.h b/chromium/v8/src/strings/string-stream.h index d7b616c6ff7..3a2ba0dd354 100644 --- a/chromium/v8/src/strings/string-stream.h +++ b/chromium/v8/src/strings/string-stream.h @@ -5,6 +5,8 @@ #ifndef V8_STRINGS_STRING_STREAM_H_ #define V8_STRINGS_STRING_STREAM_H_ +#include <memory> + #include "src/base/small-vector.h" #include "src/handles/handles.h" #include "src/objects/heap-object.h" diff --git a/chromium/v8/src/strings/uri.cc b/chromium/v8/src/strings/uri.cc index 430c8dd0ebe..de4e339b392 100644 --- a/chromium/v8/src/strings/uri.cc +++ b/chromium/v8/src/strings/uri.cc @@ -195,10 +195,14 @@ MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri, String); DisallowHeapAllocation no_gc; - CopyChars(result->GetChars(no_gc), one_byte_buffer.data(), - one_byte_buffer.size()); - CopyChars(result->GetChars(no_gc) + one_byte_buffer.size(), - two_byte_buffer.data(), two_byte_buffer.size()); + uc16* chars = result->GetChars(no_gc); + if (!one_byte_buffer.empty()) { + CopyChars(chars, one_byte_buffer.data(), one_byte_buffer.size()); + chars += one_byte_buffer.size(); + } + if (!two_byte_buffer.empty()) { + CopyChars(chars, two_byte_buffer.data(), two_byte_buffer.size()); + } return result; } diff --git a/chromium/v8/src/tasks/OWNERS b/chromium/v8/src/tasks/OWNERS index 2c6630da0c3..d31f346b03f 100644 --- a/chromium/v8/src/tasks/OWNERS +++ b/chromium/v8/src/tasks/OWNERS @@ -1,5 +1,5 @@ ahaas@chromium.org -clemensh@chromium.org +clemensb@chromium.org mlippautz@chromium.org mstarzinger@chromium.org rmcilroy@chromium.org diff --git a/chromium/v8/src/tasks/task-utils.cc b/chromium/v8/src/tasks/task-utils.cc index 2b75c4549c4..50edeccf148 100644 --- a/chromium/v8/src/tasks/task-utils.cc +++ b/chromium/v8/src/tasks/task-utils.cc @@ -42,22 +42,22 @@ class CancelableIdleFuncTask final : public CancelableIdleTask { std::unique_ptr<CancelableTask> MakeCancelableTask(Isolate* isolate, std::function<void()> func) { - return base::make_unique<CancelableFuncTask>(isolate, std::move(func)); + return std::make_unique<CancelableFuncTask>(isolate, std::move(func)); } 
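The task-utils.cc hunk above is one instance of a repo-wide switch from base::make_unique to the C++14 std::make_unique. As a rough standalone illustration of the pattern (the names below are placeholders for this sketch, not the actual V8 classes):

#include <functional>
#include <memory>
#include <utility>

// Placeholder standing in for a task type such as CancelableFuncTask.
class FuncTask {
 public:
  explicit FuncTask(std::function<void()> func) : func_(std::move(func)) {}
  void Run() { func_(); }

 private:
  std::function<void()> func_;
};

// Factory in the spirit of MakeCancelableTask: std::make_unique (C++14)
// constructs the task and wraps it in a std::unique_ptr in one step, which
// is what the local base::make_unique helper used to emulate.
std::unique_ptr<FuncTask> MakeFuncTask(std::function<void()> func) {
  return std::make_unique<FuncTask>(std::move(func));
}

Once the toolchain guarantees C++14, the local helper becomes redundant, which is presumably why these call sites now use the standard-library version directly.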
std::unique_ptr<CancelableTask> MakeCancelableTask( CancelableTaskManager* manager, std::function<void()> func) { - return base::make_unique<CancelableFuncTask>(manager, std::move(func)); + return std::make_unique<CancelableFuncTask>(manager, std::move(func)); } std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask( Isolate* isolate, std::function<void(double)> func) { - return base::make_unique<CancelableIdleFuncTask>(isolate, std::move(func)); + return std::make_unique<CancelableIdleFuncTask>(isolate, std::move(func)); } std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask( CancelableTaskManager* manager, std::function<void(double)> func) { - return base::make_unique<CancelableIdleFuncTask>(manager, std::move(func)); + return std::make_unique<CancelableIdleFuncTask>(manager, std::move(func)); } } // namespace internal diff --git a/chromium/v8/src/torque/ast.h b/chromium/v8/src/torque/ast.h index 5ce25cf13ab..fcbb02124d4 100644 --- a/chromium/v8/src/torque/ast.h +++ b/chromium/v8/src/torque/ast.h @@ -90,7 +90,8 @@ namespace torque { AST_STATEMENT_NODE_KIND_LIST(V) \ AST_DECLARATION_NODE_KIND_LIST(V) \ V(Identifier) \ - V(LabelBlock) + V(LabelBlock) \ + V(ClassBody) struct AstNode { public: @@ -792,6 +793,12 @@ struct TypeDeclaration : Declaration { Identifier* name; }; +struct InstanceTypeConstraints { + InstanceTypeConstraints() : value(-1), num_flags_bits(-1) {} + int value; + int num_flags_bits; +}; + struct AbstractTypeDeclaration : TypeDeclaration { DEFINE_AST_NODE_LEAF_BOILERPLATE(AbstractTypeDeclaration) AbstractTypeDeclaration(SourcePosition pos, Identifier* name, bool transient, @@ -1069,24 +1076,38 @@ struct StructDeclaration : TypeDeclaration { bool IsGeneric() const { return !generic_parameters.empty(); } }; +struct ClassBody : AstNode { + DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassBody) + ClassBody(SourcePosition pos, std::vector<Declaration*> methods, + std::vector<ClassFieldExpression> fields) + : AstNode(kKind, pos), + methods(std::move(methods)), + fields(std::move(fields)) {} + std::vector<Declaration*> methods; + std::vector<ClassFieldExpression> fields; +}; + struct ClassDeclaration : TypeDeclaration { DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassDeclaration) ClassDeclaration(SourcePosition pos, Identifier* name, ClassFlags flags, base::Optional<TypeExpression*> super, base::Optional<std::string> generates, std::vector<Declaration*> methods, - std::vector<ClassFieldExpression> fields) + std::vector<ClassFieldExpression> fields, + InstanceTypeConstraints instance_type_constraints) : TypeDeclaration(kKind, pos, name), flags(flags), super(super), generates(std::move(generates)), methods(std::move(methods)), - fields(std::move(fields)) {} + fields(std::move(fields)), + instance_type_constraints(std::move(instance_type_constraints)) {} ClassFlags flags; base::Optional<TypeExpression*> super; base::Optional<std::string> generates; std::vector<Declaration*> methods; std::vector<ClassFieldExpression> fields; + InstanceTypeConstraints instance_type_constraints; }; struct CppIncludeDeclaration : Declaration { diff --git a/chromium/v8/src/torque/class-debug-reader-generator.cc b/chromium/v8/src/torque/class-debug-reader-generator.cc index 6abdffcc91f..fca24099596 100644 --- a/chromium/v8/src/torque/class-debug-reader-generator.cc +++ b/chromium/v8/src/torque/class-debug-reader-generator.cc @@ -10,12 +10,20 @@ namespace v8 { namespace internal { namespace torque { +const char* tq_object_override_decls = + R"( std::vector<std::unique_ptr<ObjectProperty>> GetProperties( + 
d::MemoryAccessor accessor) const override; + const char* GetName() const override; + void Visit(TqObjectVisitor* visitor) const override; + bool IsSuperclassOf(const TqObject* other) const override; +)"; + namespace { void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, std::ostream& cc_contents, std::ostream& visitor, std::unordered_set<const ClassType*>* done) { // Make sure each class only gets generated once. - if (!type.IsExtern() || !done->insert(&type).second) return; + if (!done->insert(&type).second) return; const ClassType* super_type = type.GetSuperClass(); // We must emit the classes in dependency order. If the super class hasn't @@ -25,6 +33,10 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, done); } + // Classes with undefined layout don't grant any particular value here and may + // not correspond with actual C++ classes, so skip them. + if (type.HasUndefinedLayout()) return; + const std::string name = type.name(); const std::string super_name = super_type == nullptr ? "Object" : super_type->name(); @@ -32,10 +44,7 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, h_contents << " public:\n"; h_contents << " inline Tq" << name << "(uintptr_t address) : Tq" << super_name << "(address) {}\n"; - h_contents << " std::vector<std::unique_ptr<ObjectProperty>> " - "GetProperties(d::MemoryAccessor accessor) const override;\n"; - h_contents << " const char* GetName() const override;\n"; - h_contents << " void Visit(TqObjectVisitor* visitor) const override;\n"; + h_contents << tq_object_override_decls; cc_contents << "\nconst char* Tq" << name << "::GetName() const {\n"; cc_contents << " return \"v8::internal::" << name << "\";\n"; @@ -46,6 +55,13 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, cc_contents << " visitor->Visit" << name << "(this);\n"; cc_contents << "}\n"; + cc_contents << "\nbool Tq" << name + << "::IsSuperclassOf(const TqObject* other) const {\n"; + cc_contents + << " return GetName() != other->GetName() && dynamic_cast<const Tq" + << name << "*>(other) != nullptr;\n"; + cc_contents << "}\n"; + visitor << " virtual void Visit" << name << "(const Tq" << name << "* object) {\n"; visitor << " Visit" << super_name << "(object);\n"; @@ -71,9 +87,10 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, if (is_field_tagged) { field_value_type = "uintptr_t"; field_value_type_compressed = "i::Tagged_t"; - field_cc_type = "v8::internal::" + (field_class_type.has_value() - ? (*field_class_type)->name() - : "Object"); + field_cc_type = "v8::internal::" + + (field_class_type.has_value() + ? (*field_class_type)->GetGeneratedTNodeTypeName() + : "Object"); field_cc_type_compressed = COMPRESS_POINTERS_BOOL ? 
"v8::internal::TaggedValue" : field_cc_type; } else { @@ -107,7 +124,7 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, std::string index_param; std::string index_offset; if (field.index) { - const Type* index_type = (*field.index)->name_and_type.type; + const Type* index_type = field.index->type; std::string index_type_name; std::string index_value; if (index_type == TypeOracle::GetSmiType()) { @@ -129,18 +146,17 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, } get_props_impl << " Value<" << index_type_name << "> indexed_field_count = Get" - << CamelifyString((*field.index)->name_and_type.name) + << CamelifyString(field.index->name) << "Value(accessor);\n"; indexed_field_info = ", " + index_value + ", GetArrayKind(indexed_field_count.validity)"; index_param = ", size_t offset"; index_offset = " + offset * sizeof(value)"; } - get_props_impl - << " result.push_back(v8::base::make_unique<ObjectProperty>(\"" - << field_name << "\", \"" << field_cc_type_compressed << "\", \"" - << field_cc_type << "\", " << address_getter << "()" - << indexed_field_info << "));\n"; + get_props_impl << " result.push_back(std::make_unique<ObjectProperty>(\"" + << field_name << "\", \"" << field_cc_type_compressed + << "\", \"" << field_cc_type << "\", " << address_getter + << "()" << indexed_field_info << "));\n"; h_contents << " uintptr_t " << address_getter << "() const;\n"; h_contents << " Value<" << field_value_type << "> " << field_getter @@ -158,7 +174,8 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents, << address_getter << "()" << index_offset << ", reinterpret_cast<uint8_t*>(&value), sizeof(value));\n"; cc_contents << " return {validity, " - << (is_field_tagged ? "Decompress(value, address_)" : "value") + << (is_field_tagged ? 
"EnsureDecompressed(value, address_)" + : "value") << "};\n"; cc_contents << "}\n"; } @@ -192,6 +209,11 @@ void ImplementationVisitor::GenerateClassDebugReaders( h_contents << "\n#include \"tools/debug_helper/debug-helper-internal.h\"\n\n"; + h_contents << "// Unset a windgi.h macro that causes conflicts.\n"; + h_contents << "#ifdef GetBValue\n"; + h_contents << "#undef GetBValue\n"; + h_contents << "#endif\n\n"; + cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n"; cc_contents << "#include \"include/v8-internal.h\"\n\n"; cc_contents << "namespace i = v8::internal;\n\n"; diff --git a/chromium/v8/src/torque/constants.h b/chromium/v8/src/torque/constants.h index efbbf9588ee..ebfbab0cba5 100644 --- a/chromium/v8/src/torque/constants.h +++ b/chromium/v8/src/torque/constants.h @@ -55,6 +55,25 @@ static const char* const REFERENCE_TYPE_STRING = "Reference"; static const char* const SLICE_TYPE_STRING = "Slice"; static const char* const STRUCT_NAMESPACE_STRING = "_struct"; +static const char* const ANNOTATION_GENERATE_PRINT = "@generatePrint"; +static const char* const ANNOTATION_NO_VERIFIER = "@noVerifier"; +static const char* const ANNOTATION_ABSTRACT = "@abstract"; +static const char* const ANNOTATION_INSTANTIATED_ABSTRACT_CLASS = + "@dirtyInstantiatedAbstractClass"; +static const char* const ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT = + "@hasSameInstanceTypeAsParent"; +static const char* const ANNOTATION_GENERATE_CPP_CLASS = "@generateCppClass"; +static const char* const ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT = + "@highestInstanceTypeWithinParentClassRange"; +static const char* const ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT = + "@lowestInstanceTypeWithinParentClassRange"; +static const char* const ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE = + "@reserveBitsInInstanceType"; +static const char* const ANNOTATION_INSTANCE_TYPE_VALUE = + "@apiExposedInstanceTypeValue"; +static const char* const ANNOTATION_IF = "@if"; +static const char* const ANNOTATION_IFNOT = "@ifnot"; + inline bool IsConstexprName(const std::string& name) { return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) == CONSTEXPR_TYPE_PREFIX; @@ -80,7 +99,10 @@ enum class ClassFlag { kInstantiatedAbstractClass = 1 << 5, kHasSameInstanceTypeAsParent = 1 << 6, kGenerateCppClassDefinitions = 1 << 7, - kHasIndexedField = 1 << 8 + kHasIndexedField = 1 << 8, + kHighestInstanceTypeWithinParent = 1 << 9, + kLowestInstanceTypeWithinParent = 1 << 10, + kUndefinedLayout = 1 << 11, }; using ClassFlags = base::Flags<ClassFlag>; diff --git a/chromium/v8/src/torque/csa-generator.cc b/chromium/v8/src/torque/csa-generator.cc index 0c49033955b..2a10e4f3530 100644 --- a/chromium/v8/src/torque/csa-generator.cc +++ b/chromium/v8/src/torque/csa-generator.cc @@ -41,7 +41,7 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) { Stack<std::string> stack; for (const Type* t : block->InputTypes()) { stack.Push(FreshNodeName()); - out_ << " compiler::TNode<" << t->GetGeneratedTNodeTypeName() << "> " + out_ << " TNode<" << t->GetGeneratedTNodeTypeName() << "> " << stack.Top() << ";\n"; } out_ << " ca_.Bind(&" << BlockName(block); @@ -119,8 +119,8 @@ void CSAGenerator::EmitInstruction( for (const Type* lowered : LowerType(type)) { results.push_back(FreshNodeName()); stack->Push(results.back()); - out_ << " compiler::TNode<" << lowered->GetGeneratedTNodeTypeName() - << "> " << stack->Top() << ";\n"; + out_ << " TNode<" << lowered->GetGeneratedTNodeTypeName() << "> " + << stack->Top() << ";\n"; out_ << " USE(" << 
stack->Top() << ");\n"; } out_ << " "; @@ -175,7 +175,7 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction, for (const Type* type : LowerType(return_type)) { results.push_back(FreshNodeName()); stack->Push(results.back()); - out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName() << "> " + out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> " << stack->Top() << ";\n"; out_ << " USE(" << stack->Top() << ");\n"; } @@ -298,7 +298,7 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction, for (const Type* type : LowerType(return_type)) { results.push_back(FreshNodeName()); stack->Push(results.back()); - out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName() << "> " + out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> " << stack->Top() << ";\n"; out_ << " USE(" << stack->Top() << ");\n"; } @@ -350,8 +350,8 @@ void CSAGenerator::EmitInstruction( for (const Type* type : LowerType(instruction.macro->signature().return_type)) { results.push_back(FreshNodeName()); - out_ << " compiler::TNode<" << type->GetGeneratedTNodeTypeName() - << "> " << results.back() << ";\n"; + out_ << " TNode<" << type->GetGeneratedTNodeTypeName() << "> " + << results.back() << ";\n"; out_ << " USE(" << results.back() << ");\n"; } } @@ -449,9 +449,8 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction, } else { std::string result_name = FreshNodeName(); if (result_types.size() == 1) { - out_ << " compiler::TNode<" - << result_types[0]->GetGeneratedTNodeTypeName() << "> " - << result_name << ";\n"; + out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName() + << "> " << result_name << ";\n"; } std::string catch_name = PreCallableExceptionPreparation(instruction.catch_block); @@ -499,8 +498,7 @@ void CSAGenerator::EmitInstruction( stack->Push(FreshNodeName()); std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName(); - out_ << " compiler::TNode<" << generated_type << "> " << stack->Top() - << " = "; + out_ << " TNode<" << generated_type << "> " << stack->Top() << " = "; if (generated_type != "Object") out_ << "TORQUE_CAST("; out_ << "CodeStubAssembler(state_).CallBuiltinPointer(Builtins::" "CallableFor(ca_." 
@@ -539,8 +537,7 @@ void CSAGenerator::PostCallableExceptionPreparation( if (!return_type->IsNever()) { out_ << " ca_.Goto(&" << catch_name << "_skip);\n"; } - out_ << " compiler::TNode<Object> " << catch_name - << "_exception_object;\n"; + out_ << " TNode<Object> " << catch_name << "_exception_object;\n"; out_ << " ca_.Bind(&" << catch_name << "__label, &" << catch_name << "_exception_object);\n"; out_ << " ca_.Goto(&" << block_name; @@ -575,9 +572,8 @@ void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction, } else { std::string result_name = FreshNodeName(); if (result_types.size() == 1) { - out_ << " compiler::TNode<" - << result_types[0]->GetGeneratedTNodeTypeName() << "> " - << result_name << ";\n"; + out_ << " TNode<" << result_types[0]->GetGeneratedTNodeTypeName() + << "> " << result_name << ";\n"; } std::string catch_name = PreCallableExceptionPreparation(instruction.catch_block); @@ -718,10 +714,9 @@ void CSAGenerator::EmitInstruction( std::string offset_name = FreshNodeName(); stack->Push(offset_name); - out_ << " compiler::TNode<IntPtrT> " << offset_name - << " = ca_.IntPtrConstant("; - out_ << field.aggregate->GetGeneratedTNodeTypeName() << "::k" - << CamelifyString(field.name_and_type.name) << "Offset"; + out_ << " TNode<IntPtrT> " << offset_name << " = ca_.IntPtrConstant("; + out_ << field.aggregate->GetGeneratedTNodeTypeName() << "::k" + << CamelifyString(field.name_and_type.name) << "Offset"; out_ << ");\n" << " USE(" << stack->Top() << ");\n"; } @@ -772,8 +767,8 @@ void CSAGenerator::EmitCSAValue(VisitResult result, out << "}"; } else { DCHECK_EQ(1, result.stack_range().Size()); - out << "compiler::TNode<" << result.type()->GetGeneratedTNodeTypeName() - << ">{" << values.Peek(result.stack_range().begin()) << "}"; + out << "TNode<" << result.type()->GetGeneratedTNodeTypeName() << ">{" + << values.Peek(result.stack_range().begin()) << "}"; } } diff --git a/chromium/v8/src/torque/declaration-visitor.cc b/chromium/v8/src/torque/declaration-visitor.cc index f762337463c..c2fa1af98e8 100644 --- a/chromium/v8/src/torque/declaration-visitor.cc +++ b/chromium/v8/src/torque/declaration-visitor.cc @@ -93,20 +93,6 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl, } } - if (TorqueBuiltinDeclaration::DynamicCast(decl)) { - for (size_t i = 0; i < signature.types().size(); ++i) { - const Type* type = signature.types()[i]; - if (!type->IsSubtypeOf(TypeOracle::GetTaggedType())) { - const Identifier* id = signature.parameter_names.size() > i - ? signature.parameter_names[i] - : nullptr; - Error("Untagged argument ", id ? (id->value + " ") : "", "at position ", - i, " to builtin ", decl->name, " is not supported.") - .Position(id ? 
id->pos : decl->pos); - } - } - } - if (const StructType* struct_type = StructType::DynamicCast(signature.return_type)) { Error("Builtins ", decl->name, " cannot return structs ", diff --git a/chromium/v8/src/torque/declarations.h b/chromium/v8/src/torque/declarations.h index 240680fa1e1..f3d2544ae1c 100644 --- a/chromium/v8/src/torque/declarations.h +++ b/chromium/v8/src/torque/declarations.h @@ -5,6 +5,7 @@ #ifndef V8_TORQUE_DECLARATIONS_H_ #define V8_TORQUE_DECLARATIONS_H_ +#include <memory> #include <string> #include "src/torque/declarable.h" diff --git a/chromium/v8/src/torque/earley-parser.h b/chromium/v8/src/torque/earley-parser.h index 9f7ba6a7aee..43ad6eb4523 100644 --- a/chromium/v8/src/torque/earley-parser.h +++ b/chromium/v8/src/torque/earley-parser.h @@ -6,6 +6,7 @@ #define V8_TORQUE_EARLEY_PARSER_H_ #include <map> +#include <memory> #include <vector> #include "src/base/optional.h" @@ -82,6 +83,7 @@ enum class ParseResultHolderBase::TypeId { kTypeswitchCase, kStdVectorOfTypeswitchCase, kStdVectorOfIdentifierPtr, + kOptionalClassBody, kJsonValue, kJsonMember, @@ -248,7 +250,7 @@ class Symbol { size_t rule_number() const { return rules_.size(); } void AddRule(const Rule& rule) { - rules_.push_back(base::make_unique<Rule>(rule)); + rules_.push_back(std::make_unique<Rule>(rule)); rules_.back()->SetLeftHandSide(this); } diff --git a/chromium/v8/src/torque/global-context.cc b/chromium/v8/src/torque/global-context.cc index 13503038c55..e236de5a93e 100644 --- a/chromium/v8/src/torque/global-context.cc +++ b/chromium/v8/src/torque/global-context.cc @@ -19,7 +19,7 @@ GlobalContext::GlobalContext(Ast ast) CurrentSourcePosition::Scope current_source_position( SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}}); default_namespace_ = - RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName)); + RegisterDeclarable(std::make_unique<Namespace>(kBaseNamespaceName)); } TargetArchitecture::TargetArchitecture(bool force_32bit) diff --git a/chromium/v8/src/torque/global-context.h b/chromium/v8/src/torque/global-context.h index e1106adbd1c..940325b51aa 100644 --- a/chromium/v8/src/torque/global-context.h +++ b/chromium/v8/src/torque/global-context.h @@ -6,6 +6,7 @@ #define V8_TORQUE_GLOBAL_CONTEXT_H_ #include <map> +#include <memory> #include "src/common/globals.h" #include "src/torque/ast.h" diff --git a/chromium/v8/src/torque/implementation-visitor.cc b/chromium/v8/src/torque/implementation-visitor.cc index 8f36afd020f..0c50a700990 100644 --- a/chromium/v8/src/torque/implementation-visitor.cc +++ b/chromium/v8/src/torque/implementation-visitor.cc @@ -526,7 +526,6 @@ void ImplementationVisitor::Visit(Builtin* builtin) { source_out() << " USE(" << parameter0 << ");\n"; for (size_t i = 1; i < signature.parameter_names.size(); ++i) { - const std::string& parameter_name = signature.parameter_names[i]->value; const Type* type = signature.types()[i]; const bool mark_as_used = signature.implicit_count > i; std::string var = AddParameter(i, builtin, ¶meters, ¶meter_types, @@ -534,8 +533,8 @@ void ImplementationVisitor::Visit(Builtin* builtin) { source_out() << " " << type->GetGeneratedTypeName() << " " << var << " = " << "UncheckedCast<" << type->GetGeneratedTNodeTypeName() - << ">(Parameter(Descriptor::k" - << CamelifyString(parameter_name) << "));\n"; + << ">(Parameter(Descriptor::ParameterIndex<" << (i - 1) + << ">()));\n"; source_out() << " USE(" << var << ");\n"; } } @@ -1008,48 +1007,40 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) { #if defined(DEBUG) 
do_check = true; #endif - if (do_check) { - // CSA_ASSERT & co. are not used here on purpose for two reasons. First, - // Torque allows and handles two types of expressions in the if protocol - // automagically, ones that return TNode<BoolT> and those that use the - // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to - // handle this is embedded in the expression handling and to it's not - // possible to make the decision to use CSA_ASSERT or CSA_ASSERT_BRANCH - // isn't trivial up-front. Secondly, on failure, the assert text should be - // the corresponding Torque code, not the -gen.cc code, which would be the - // case when using CSA_ASSERT_XXX. - Block* true_block = assembler().NewBlock(assembler().CurrentStack()); - Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true); - GenerateExpressionBranch(stmt->expression, true_block, false_block); + Block* resume_block; + + if (!do_check) { + Block* unreachable_block = assembler().NewBlock(assembler().CurrentStack()); + resume_block = assembler().NewBlock(assembler().CurrentStack()); + assembler().Goto(resume_block); + assembler().Bind(unreachable_block); + } + + // CSA_ASSERT & co. are not used here on purpose for two reasons. First, + // Torque allows and handles two types of expressions in the if protocol + // automagically, ones that return TNode<BoolT> and those that use the + // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to + // handle this is embedded in the expression handling and to it's not + // possible to make the decision to use CSA_ASSERT or CSA_ASSERT_BRANCH + // isn't trivial up-front. Secondly, on failure, the assert text should be + // the corresponding Torque code, not the -gen.cc code, which would be the + // case when using CSA_ASSERT_XXX. + Block* true_block = assembler().NewBlock(assembler().CurrentStack()); + Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true); + GenerateExpressionBranch(stmt->expression, true_block, false_block); - assembler().Bind(false_block); + assembler().Bind(false_block); - assembler().Emit(AbortInstruction{ - AbortInstruction::Kind::kAssertionFailure, - "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"}); + assembler().Emit(AbortInstruction{ + AbortInstruction::Kind::kAssertionFailure, + "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"}); - assembler().Bind(true_block); - } else { - // Visit the expression so bindings only used in asserts are marked - // as such. Otherwise they might be wrongly reported as unused bindings - // in release builds. - stmt->expression->VisitAllSubExpressions([](Expression* expression) { - if (auto id = IdentifierExpression::DynamicCast(expression)) { - ValueBindingsManager::Get().TryLookup(id->name->value); - } else if (auto call = CallExpression::DynamicCast(expression)) { - for (Identifier* label : call->labels) { - LabelBindingsManager::Get().TryLookup(label->value); - } - // TODO(szuend): In case the call expression resolves to a macro - // callable, mark the macro as used as well. - } else if (auto call = CallMethodExpression::DynamicCast(expression)) { - for (Identifier* label : call->labels) { - LabelBindingsManager::Get().TryLookup(label->value); - } - // TODO(szuend): Mark the underlying macro as used. 
- } - }); + assembler().Bind(true_block); + + if (!do_check) { + assembler().Bind(resume_block); } + return TypeOracle::GetVoidType(); } @@ -1214,16 +1205,16 @@ InitializerResults ImplementationVisitor::VisitInitializerResults( result.names.push_back(initializer.name); Expression* e = initializer.expression; const Field& field = class_type->LookupField(initializer.name->value); - auto field_index = field.index; + bool has_index = field.index.has_value(); if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) { - if (!field_index) { + if (!has_index) { ReportError( "spread expressions can only be used to initialize indexed class " "fields ('", initializer.name->value, "' is not)"); } e = s->spreadee; - } else if (field_index) { + } else if (has_index) { ReportError("the indexed class field '", initializer.name->value, "' must be initialized with a spread operator"); } @@ -1261,7 +1252,7 @@ void ImplementationVisitor::InitializeClass( void ImplementationVisitor::InitializeFieldFromSpread( VisitResult object, const Field& field, const InitializerResults& initializer_results) { - NameAndType index = (*field.index)->name_and_type; + const NameAndType& index = *field.index; VisitResult iterator = initializer_results.field_value_map.at(field.name_and_type.name); VisitResult length = initializer_results.field_value_map.at(index.name); @@ -1289,15 +1280,14 @@ VisitResult ImplementationVisitor::AddVariableObjectSize( } VisitResult index_field_size = VisitResult(TypeOracle::GetConstInt31Type(), "kTaggedSize"); - VisitResult initializer_value = initializer_results.field_value_map.at( - (*current_field->index)->name_and_type.name); + VisitResult initializer_value = + initializer_results.field_value_map.at(current_field->index->name); Arguments args; args.parameters.push_back(object_size); args.parameters.push_back(initializer_value); args.parameters.push_back(index_field_size); - object_size = - GenerateCall("%AddIndexedFieldSizeToObjectSize", args, - {(*current_field->index)->name_and_type.type}, false); + object_size = GenerateCall("%AddIndexedFieldSizeToObjectSize", args, + {current_field->index->type}, false); } ++current_field; } @@ -1860,12 +1850,12 @@ LocationReference ImplementationVisitor::GetLocationReference( { StackScope length_scope(this); // Get a reference to the length - const Field* index_field = field.index.value(); + const NameAndType& index_field = field.index.value(); GenerateCopy(object_result); - assembler().Emit(CreateFieldReferenceInstruction{ - object_result.type(), index_field->name_and_type.name}); + assembler().Emit(CreateFieldReferenceInstruction{object_result.type(), + index_field.name}); VisitResult length_reference( - TypeOracle::GetReferenceType(index_field->name_and_type.type), + TypeOracle::GetReferenceType(index_field.type), assembler().TopRange(2)); // Load the length from the reference and convert it to intptr @@ -2670,13 +2660,34 @@ void ImplementationVisitor::Visit(Declarable* declarable) { } } -void ImplementationVisitor::GenerateBuiltinDefinitions( +std::string MachineTypeString(const Type* type) { + if (type->IsSubtypeOf(TypeOracle::GetSmiType())) { + return "MachineType::TaggedSigned()"; + } + if (type->IsSubtypeOf(TypeOracle::GetHeapObjectType())) { + return "MachineType::TaggedPointer()"; + } + if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) { + return "MachineType::AnyTagged()"; + } + return "MachineTypeOf<" + type->GetGeneratedTNodeTypeName() + ">::value"; +} + +void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors( const 
std::string& output_directory) { - std::stringstream new_contents_stream; - std::string file_name = "builtin-definitions-tq.h"; + std::stringstream builtin_definitions; + std::string builtin_definitions_file_name = "builtin-definitions-tq.h"; + + // This file contains plain interface descriptor definitions and has to be + // included in the middle of interface-descriptors.h. Thus it is not a normal + // header file and uses the .inc suffix instead of the .h suffix. + std::stringstream interface_descriptors; + std::string interface_descriptors_file_name = "interface-descriptors-tq.inc"; { - IncludeGuardScope include_guard(new_contents_stream, file_name); - new_contents_stream + IncludeGuardScope builtin_definitions_include_guard( + builtin_definitions, builtin_definitions_file_name); + + builtin_definitions << "\n" "#define BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, " "ASM) " @@ -2684,40 +2695,67 @@ void ImplementationVisitor::GenerateBuiltinDefinitions( for (auto& declarable : GlobalContext::AllDeclarables()) { Builtin* builtin = Builtin::DynamicCast(declarable.get()); if (!builtin || builtin->IsExternal()) continue; - size_t firstParameterIndex = 1; - bool declareParameters = true; if (builtin->IsStub()) { - new_contents_stream << "TFS(" << builtin->ExternalName(); + builtin_definitions << "TFC(" << builtin->ExternalName() << ", " + << builtin->ExternalName(); + std::string descriptor_name = builtin->ExternalName() + "Descriptor"; + constexpr size_t kFirstNonContextParameter = 1; + size_t parameter_count = + builtin->parameter_names().size() - kFirstNonContextParameter; + + interface_descriptors << "class " << descriptor_name + << " : public TorqueInterfaceDescriptor<" + << parameter_count << "> {\n"; + interface_descriptors << " DECLARE_DESCRIPTOR_WITH_BASE(" + << descriptor_name + << ", TorqueInterfaceDescriptor)\n"; + + interface_descriptors << " MachineType ReturnType() override {\n"; + interface_descriptors + << " return " + << MachineTypeString(builtin->signature().return_type) << ";\n"; + interface_descriptors << " }\n"; + + interface_descriptors << " std::array<MachineType, " << parameter_count + << "> ParameterTypes() override {\n"; + interface_descriptors << " return {"; + for (size_t i = kFirstNonContextParameter; + i < builtin->parameter_names().size(); ++i) { + bool last = i + 1 == builtin->parameter_names().size(); + const Type* type = builtin->signature().parameter_types.types[i]; + interface_descriptors << MachineTypeString(type) + << (last ? "" : ", "); + } + interface_descriptors << "};\n"; + + interface_descriptors << " }\n"; + interface_descriptors << "};\n\n"; } else { - new_contents_stream << "TFJ(" << builtin->ExternalName(); + builtin_definitions << "TFJ(" << builtin->ExternalName(); if (builtin->IsVarArgsJavaScript()) { - new_contents_stream + builtin_definitions << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel"; - declareParameters = false; } else { DCHECK(builtin->IsFixedArgsJavaScript()); // FixedArg javascript builtins need to offer the parameter // count. int parameter_count = static_cast<int>(builtin->signature().ExplicitCount()); - new_contents_stream << ", " << parameter_count; + builtin_definitions << ", " << parameter_count; // And the receiver is explicitly declared. 
- new_contents_stream << ", kReceiver"; - firstParameterIndex = builtin->signature().implicit_count; - } - } - if (declareParameters) { - for (size_t i = firstParameterIndex; - i < builtin->parameter_names().size(); ++i) { - Identifier* parameter = builtin->parameter_names()[i]; - new_contents_stream << ", k" << CamelifyString(parameter->value); + builtin_definitions << ", kReceiver"; + for (size_t i = builtin->signature().implicit_count; + i < builtin->parameter_names().size(); ++i) { + Identifier* parameter = builtin->parameter_names()[i]; + builtin_definitions << ", k" << CamelifyString(parameter->value); + } } } - new_contents_stream << ") \\\n"; + builtin_definitions << ") \\\n"; } - new_contents_stream << "\n"; + builtin_definitions << "\n"; - new_contents_stream + builtin_definitions << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n"; for (const BuiltinPointerType* type : TypeOracle::AllBuiltinPointerTypes()) { @@ -2728,13 +2766,15 @@ void ImplementationVisitor::GenerateBuiltinDefinitions( SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}}); ReportError("unable to find any builtin with type \"", *type, "\""); } - new_contents_stream << " V(" << type->function_pointer_type_id() << "," + builtin_definitions << " V(" << type->function_pointer_type_id() << "," << example_builtin->ExternalName() << ")\\\n"; } - new_contents_stream << "\n"; + builtin_definitions << "\n"; } - std::string new_contents(new_contents_stream.str()); - WriteFile(output_directory + "/" + file_name, new_contents); + WriteFile(output_directory + "/" + builtin_definitions_file_name, + builtin_definitions.str()); + WriteFile(output_directory + "/" + interface_descriptors_file_name, + interface_descriptors.str()); } namespace { @@ -2894,40 +2934,8 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator { private: std::ostream& out_; }; -} // namespace - -void ImplementationVisitor::GenerateInstanceTypes( - const std::string& output_directory) { - std::stringstream header; - std::string file_name = "instance-types-tq.h"; - { - IncludeGuardScope(header, file_name); - header << "#define TORQUE_DEFINED_INSTANCE_TYPES(V) \\\n"; - for (const TypeAlias* alias : GlobalContext::GetClasses()) { - const ClassType* type = ClassType::DynamicCast(alias->type()); - if (type->IsExtern()) continue; - std::string type_name = - CapifyStringWithUnderscores(type->name()) + "_TYPE"; - header << " V(" << type_name << ") \\\n"; - } - header << "\n\n"; - - header << "#define TORQUE_STRUCT_LIST_GENERATOR(V, _) \\\n"; - for (const TypeAlias* alias : GlobalContext::GetClasses()) { - const ClassType* type = ClassType::DynamicCast(alias->type()); - if (type->IsExtern()) continue; - std::string type_name = - CapifyStringWithUnderscores(type->name()) + "_TYPE"; - std::string variable_name = SnakeifyString(type->name()); - header << " V(_, " << type_name << ", " << type->name() << ", " - << variable_name << ") \\\n"; - } - header << "\n"; - } - std::string output_header_path = output_directory + "/" + file_name; - WriteFile(output_header_path, header.str()); -} +} // namespace void ImplementationVisitor::GenerateCppForInternalClasses( const std::string& output_directory) { @@ -3148,7 +3156,7 @@ void CppClassGenerator::GenerateClassConstructors() { if (type_->IsInstantiatedAbstractClass()) { // This is a hack to prevent wrong instance type checks. 
inl_ << " // Instance check omitted because class is annotated with " - "@dirtyInstantiatedAbstractClass.\n"; + << ANNOTATION_INSTANTIATED_ABSTRACT_CLASS << ".\n"; } else { inl_ << " SLOW_DCHECK(this->Is" << name_ << "());\n"; } @@ -3241,7 +3249,8 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) { const std::string offset = "k" + CamelifyString(name) + "Offset"; base::Optional<const ClassType*> class_type = field_type->ClassSupertype(); - std::string type = class_type ? (*class_type)->name() : "Object"; + std::string type = + class_type ? (*class_type)->GetGeneratedTNodeTypeName() : "Object"; // Generate declarations in header. if (!class_type && field_type != TypeOracle::GetObjectType()) { @@ -3302,7 +3311,6 @@ void ImplementationVisitor::GenerateClassDefinitions( { IncludeGuardScope header_guard(header, basename + ".h"); - header << "#include \"src/objects/heap-number.h\"\n"; header << "#include \"src/objects/objects.h\"\n"; header << "#include \"src/objects/smi.h\"\n"; header << "#include \"torque-generated/field-offsets-tq.h\"\n"; @@ -3314,9 +3322,11 @@ void ImplementationVisitor::GenerateClassDefinitions( IncludeGuardScope inline_header_guard(inline_header, basename + "-inl.h"); inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n\n"; inline_header << "#include \"src/objects/js-promise.h\"\n"; + inline_header << "#include \"src/objects/js-weak-refs.h\"\n"; inline_header << "#include \"src/objects/module.h\"\n"; inline_header << "#include \"src/objects/objects-inl.h\"\n"; - inline_header << "#include \"src/objects/script.h\"\n\n"; + inline_header << "#include \"src/objects/script.h\"\n"; + inline_header << "#include \"src/objects/shared-function-info.h\"\n\n"; IncludeObjectMacrosScope inline_header_macros(inline_header); NamespaceScope inline_header_namespaces(inline_header, {"v8", "internal"}); @@ -3328,6 +3338,7 @@ void ImplementationVisitor::GenerateClassDefinitions( implementation << "#include \"src/objects/embedder-data-array-inl.h\"\n"; implementation << "#include \"src/objects/js-generator-inl.h\"\n"; implementation << "#include \"src/objects/js-regexp-inl.h\"\n"; + implementation << "#include \"src/objects/js-weak-refs-inl.h\"\n"; implementation << "#include \"src/objects/js-regexp-string-iterator-inl.h\"\n"; implementation << "#include \"src/objects/literal-objects-inl.h\"\n"; @@ -3346,7 +3357,7 @@ void ImplementationVisitor::GenerateClassDefinitions( // Generate forward declarations for every class. for (const TypeAlias* alias : GlobalContext::GetClasses()) { const ClassType* type = ClassType::DynamicCast(alias->type()); - header << "class " << type->name() << ";\n"; + header << "class " << type->GetGeneratedTNodeTypeName() << ";\n"; } for (const TypeAlias* alias : GlobalContext::GetClasses()) { @@ -3439,13 +3450,13 @@ void GenerateClassFieldVerifier(const std::string& class_name, if (!field_type->IsSubtypeOf(TypeOracle::GetObjectType())) return; if (f.index) { - if ((*f.index)->name_and_type.type != TypeOracle::GetSmiType()) { + if (f.index->type != TypeOracle::GetSmiType()) { ReportError("Non-SMI values are not (yet) supported as indexes."); } // We already verified the index field because it was listed earlier, so we // can assume it's safe to read here. 
cc_contents << " for (int i = 0; i < TaggedField<Smi, " << class_name - << "::k" << CamelifyString((*f.index)->name_and_type.name) + << "::k" << CamelifyString(f.index->name) << "Offset>::load(o).value(); ++i) {\n"; } else { cc_contents << " {\n"; diff --git a/chromium/v8/src/torque/implementation-visitor.h b/chromium/v8/src/torque/implementation-visitor.h index eb1a6c4452f..e1ebfeeb176 100644 --- a/chromium/v8/src/torque/implementation-visitor.h +++ b/chromium/v8/src/torque/implementation-visitor.h @@ -5,6 +5,7 @@ #ifndef V8_TORQUE_IMPLEMENTATION_VISITOR_H_ #define V8_TORQUE_IMPLEMENTATION_VISITOR_H_ +#include <memory> #include <string> #include "src/base/macros.h" @@ -260,7 +261,7 @@ class BlockBindings { void Add(std::string name, T value, bool mark_as_used = false) { ReportErrorIfAlreadyBound(name); auto binding = - base::make_unique<Binding<T>>(manager_, name, std::move(value)); + std::make_unique<Binding<T>>(manager_, name, std::move(value)); if (mark_as_used) binding->SetUsed(); bindings_.push_back(std::move(binding)); } @@ -268,7 +269,7 @@ class BlockBindings { void Add(const Identifier* name, T value, bool mark_as_used = false) { ReportErrorIfAlreadyBound(name->value); auto binding = - base::make_unique<Binding<T>>(manager_, name, std::move(value)); + std::make_unique<Binding<T>>(manager_, name, std::move(value)); if (mark_as_used) binding->SetUsed(); bindings_.push_back(std::move(binding)); } @@ -342,7 +343,8 @@ bool IsCompatibleSignature(const Signature& sig, const TypeVector& types, class ImplementationVisitor { public: - void GenerateBuiltinDefinitions(const std::string& output_directory); + void GenerateBuiltinDefinitionsAndInterfaceDescriptors( + const std::string& output_directory); void GenerateClassFieldOffsets(const std::string& output_directory); void GeneratePrintDefinitions(const std::string& output_directory); void GenerateClassDefinitions(const std::string& output_directory); diff --git a/chromium/v8/src/torque/instance-type-generator.cc b/chromium/v8/src/torque/instance-type-generator.cc new file mode 100644 index 00000000000..275e7064852 --- /dev/null +++ b/chromium/v8/src/torque/instance-type-generator.cc @@ -0,0 +1,376 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/torque/implementation-visitor.h" + +namespace v8 { +namespace internal { +namespace torque { + +namespace { + +// Contains all necessary state for a single class type during the process of +// assigning instance types, and provides a convenient way to access the list of +// types that inherit from this one. +struct InstanceTypeTree { + explicit InstanceTypeTree(const ClassType* type) + : type(type), + parent(nullptr), + start(INT_MAX), + end(INT_MIN), + value(-1), + num_values(0), + num_own_values(0) {} + const ClassType* type; + InstanceTypeTree* parent; + std::vector<std::unique_ptr<InstanceTypeTree>> children; + int start; // Start of range for this and subclasses, or INT_MAX. + int end; // End of range for this and subclasses, or INT_MIN. + int value; // Assigned value for this class itself, or -1 when unassigned. + int num_values; // Number of values assigned for this and subclasses. + int num_own_values; // How many values this needs (not including subclasses). +}; + +// Assembles all class types into a tree, but doesn't yet attempt to assign +// instance types for them. 
+std::unique_ptr<InstanceTypeTree> BuildInstanceTypeTree() { + // First, build InstanceTypeTree instances for every class but don't try to + // attach them to their subclasses yet. + std::unordered_map<const ClassType*, InstanceTypeTree*> map_by_type; + std::vector<std::unique_ptr<InstanceTypeTree>> unparented_types; + for (auto& p : GlobalContext::AllDeclarables()) { + if (const TypeAlias* alias = TypeAlias::DynamicCast(p.get())) { + const Type* type = alias->type(); + const ClassType* class_type = ClassType::DynamicCast(type); + if (class_type == nullptr) { + continue; + } + auto& map_slot = map_by_type[class_type]; + if (map_slot != nullptr) { + continue; // We already encountered this type. + } + std::unique_ptr<InstanceTypeTree> type_tree = + std::make_unique<InstanceTypeTree>(class_type); + map_slot = type_tree.get(); + unparented_types.push_back(std::move(type_tree)); + } + } + + // Second, assemble them all into a tree following the inheritance hierarchy. + std::unique_ptr<InstanceTypeTree> root; + for (auto& type_tree : unparented_types) { + const ClassType* parent = type_tree->type->GetSuperClass(); + if (parent == nullptr) { + if (root != nullptr) + Error("Expected only one root class type. Found: ", root->type->name(), + " and ", type_tree->type->name()) + .Position(type_tree->type->GetPosition()); + root = std::move(type_tree); + } else { + map_by_type[parent]->children.push_back(std::move(type_tree)); + } + } + return root; +} + +// Propagates constraints about instance types from children to their parents. +void PropagateInstanceTypeConstraints(InstanceTypeTree* root) { + for (auto& child : root->children) { + PropagateInstanceTypeConstraints(child.get()); + if (child->start < root->start) root->start = child->start; + if (child->end > root->end) root->end = child->end; + root->num_values += child->num_values; + } + const InstanceTypeConstraints& constraints = + root->type->GetInstanceTypeConstraints(); + if ((!root->type->IsAbstract() || + root->type->IsInstantiatedAbstractClass()) && + !root->type->HasSameInstanceTypeAsParent()) { + root->num_own_values = 1; + } + root->num_values += root->num_own_values; + if (constraints.num_flags_bits != -1) { + // Children won't get any types assigned; must be done manually in C++. + root->children.clear(); + root->num_values = 1 << constraints.num_flags_bits; + root->num_own_values = root->num_values; + root->start = 0; + root->end = root->num_values - 1; + } + if (constraints.value != -1) { + if (root->num_own_values != 1) { + Error("Instance type value requested for abstract class ", + root->type->name()) + .Position(root->type->GetPosition()); + } + root->value = constraints.value; + if (constraints.value < root->start) root->start = constraints.value; + if (constraints.value > root->end) root->end = constraints.value; + } +} + +// Assigns values for the type itself, not including any children. Returns the +// next available value. +int SelectOwnValues(InstanceTypeTree* root, int start_value) { + if (root->value == -1) { + root->value = start_value; + } else if (root->value < start_value) { + Error("Failed to assign instance type ", root->value, " to ", + root->type->name()) + .Position(root->type->GetPosition()); + } + return root->value + root->num_own_values; +} + +// Sorting function for types that don't have specific values they must include. +// Prioritizes bigger type ranges (those with more subtypes) first, and +// then sorts alphabetically within each size category. 
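The comparator described in the comment above orders unconstrained subtrees by descending range size, breaking ties alphabetically. A minimal, self-contained sketch of that ordering rule; the Node struct and the class names/counts are illustrative stand-ins, not the patch's types:

#include <iostream>
#include <set>
#include <string>

struct Node { int num_values; std::string name; };

struct BiggerRangeFirst {
  bool operator()(const Node& a, const Node& b) const {
    if (a.num_values != b.num_values) return a.num_values > b.num_values;
    return a.name < b.name;  // alphabetical within one size class
  }
};

int main() {
  std::set<Node, BiggerRangeFirst> nodes{{1, "Oddball"}, {12, "String"}, {1, "HeapNumber"}};
  for (const Node& n : nodes) std::cout << n.name << " (" << n.num_values << ")\n";
  // Prints: String (12), HeapNumber (1), Oddball (1)
}
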
+struct CompareUnconstrainedTypes { + constexpr bool operator()(const InstanceTypeTree* a, + const InstanceTypeTree* b) const { + return (a->num_values > b->num_values) + ? true + : (a->num_values < b->num_values) + ? false + : std::less<std::string>()(a->type->name(), + b->type->name()); + } +}; + +// Assigns concrete values for every instance type range, and sorts the children +// at each layer of the tree into increasing order. Appends the newly-assigned +// tree to the destination vector. Returns the first unassigned value after +// those that have been used. +int SolveInstanceTypeConstraints( + std::unique_ptr<InstanceTypeTree> root, int start_value, + std::vector<std::unique_ptr<InstanceTypeTree>>* destination) { + if (root->start < start_value) { + Error("Failed to assign instance type ", root->start, " to ", + root->type->name()) + .Position(root->type->GetPosition()); + } + + // First, separate the children into four groups: + // - The one child that must go first, if it exists; + // - Children with specific value requirements ("constrained"); + // - Children without specific value requirements ("unconstrained"); + // - The one child that must go last, if it exists. + std::unique_ptr<InstanceTypeTree> lowest_child; + std::unique_ptr<InstanceTypeTree> highest_child; + std::multimap<int, std::unique_ptr<InstanceTypeTree>> + constrained_children_by_start; + // Using std::map because you can't std::move out of a std::set until C++17. + std::map<InstanceTypeTree*, std::unique_ptr<InstanceTypeTree>, + CompareUnconstrainedTypes> + unconstrained_children_by_size; + for (auto& child : root->children) { + if (child->type->IsHighestInstanceTypeWithinParent()) { + if (highest_child) { + Error("Two classes requested to be the highest instance type: ", + highest_child->type->name(), " and ", child->type->name(), + " within range for parent class ", root->type->name()) + .Position(child->type->GetPosition()); + } + if (child->type->IsLowestInstanceTypeWithinParent()) { + Error( + "Class requested to be both highest and lowest instance type " + "within its parent range: ", + child->type->name()) + .Position(child->type->GetPosition()); + } + highest_child = std::move(child); + } else if (child->type->IsLowestInstanceTypeWithinParent()) { + if (lowest_child) { + Error("Two classes requested to be the lowest instance type: ", + lowest_child->type->name(), " and ", child->type->name(), + " within range for parent class ", root->type->name()) + .Position(child->type->GetPosition()); + } + lowest_child = std::move(child); + } else if (child->start > child->end) { + unconstrained_children_by_size.insert( + std::make_pair(child.get(), std::move(child))); + } else { + constrained_children_by_start.insert( + std::make_pair(child->start, std::move(child))); + } + } + root->children.clear(); + + bool own_type_pending = root->num_own_values > 0; + + // Second, iterate and place the children in ascending order. + if (lowest_child != nullptr) { + start_value = SolveInstanceTypeConstraints(std::move(lowest_child), + start_value, &root->children); + } + for (auto& constrained_child_pair : constrained_children_by_start) { + // Select the next constrained child type in ascending order. + std::unique_ptr<InstanceTypeTree> constrained_child = + std::move(constrained_child_pair.second); + + // Try to place the root type before the constrained child type if it fits. 
+ if (own_type_pending) { + if ((root->value != -1 && root->value < constrained_child->start) || + (root->value == -1 && + start_value + root->num_own_values <= constrained_child->start)) { + start_value = SelectOwnValues(root.get(), start_value); + own_type_pending = false; + } + } + + // Try to find any unconstrained children that fit before the constrained + // one. This simple greedy algorithm just puts the biggest unconstrained + // children in first, which might not fill the space as efficiently as + // possible but is good enough for our needs. + for (auto it = unconstrained_children_by_size.begin(); + it != unconstrained_children_by_size.end();) { + if (it->second->num_values + start_value <= constrained_child->start) { + start_value = SolveInstanceTypeConstraints( + std::move(it->second), start_value, &root->children); + it = unconstrained_children_by_size.erase(it); + } else { + ++it; + } + } + + // Place the constrained child type. + start_value = SolveInstanceTypeConstraints(std::move(constrained_child), + start_value, &root->children); + } + if (own_type_pending) { + start_value = SelectOwnValues(root.get(), start_value); + own_type_pending = false; + } + for (auto& child_pair : unconstrained_children_by_size) { + start_value = SolveInstanceTypeConstraints(std::move(child_pair.second), + start_value, &root->children); + } + if (highest_child != nullptr) { + start_value = SolveInstanceTypeConstraints(std::move(highest_child), + start_value, &root->children); + } + + // Finally, set the range for this class to include all placed subclasses. + root->end = start_value - 1; + root->start = + root->children.empty() ? start_value : root->children.front()->start; + if (root->value != -1 && root->value < root->start) { + root->start = root->value; + } + root->num_values = root->end - root->start + 1; + + if (root->num_values > 0) { + destination->push_back(std::move(root)); + } + return start_value; +} + +std::unique_ptr<InstanceTypeTree> SolveInstanceTypeConstraints( + std::unique_ptr<InstanceTypeTree> root) { + std::vector<std::unique_ptr<InstanceTypeTree>> destination; + SolveInstanceTypeConstraints(std::move(root), 0, &destination); + return destination.empty() ? nullptr : std::move(destination.front()); +} + +std::unique_ptr<InstanceTypeTree> AssignInstanceTypes() { + std::unique_ptr<InstanceTypeTree> root = BuildInstanceTypeTree(); + if (root != nullptr) { + PropagateInstanceTypeConstraints(root.get()); + root = SolveInstanceTypeConstraints(std::move(root)); + } + return root; +} + +// Prints items in macro lists for the given type and its descendants. +// - definitions: This list is pairs of instance type name and assigned value, +// such as V(ODDBALL_TYPE, 67). It includes FIRST_* and LAST_* items for each +// type that has more than one associated InstanceType. Items within those +// ranges are indented for readability. +// - values: This list is just instance type names, like V(ODDBALL_TYPE). It +// does not include any FIRST_* and LAST_* range markers. 
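To make the two lists described in the comment above concrete, here is a hedged sketch of the kind of header the function below emits for a tiny, made-up hierarchy. Only V(ODDBALL_TYPE, 67) comes from the comment itself; the other names, values and the nesting are illustrative, not the real assignments:

// Definitions list: name/value pairs, with FIRST_/LAST_ markers around any
// type that spans more than one value; nested ranges are indented.
#define TORQUE_ASSIGNED_INSTANCE_TYPES(V) \
  V(FIRST_OBJECT_TYPE, 0) \
    V(ODDBALL_TYPE, 67) \
    V(FIRST_STRING_TYPE, 70) \
      V(SEQ_STRING_TYPE, 70) \
    V(LAST_STRING_TYPE, 71) \
  V(LAST_OBJECT_TYPE, 200)

// Values list: just the concrete type names, no range markers.
#define TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(V) \
  V(ODDBALL_TYPE) \
  V(SEQ_STRING_TYPE)
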
+void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions, + std::ostream& values, const std::string& indent) { + std::string type_name = + CapifyStringWithUnderscores(root->type->name()) + "_TYPE"; + std::string inner_indent = indent; + + if (root->num_values > 1) { + definitions << indent << "V(FIRST_" << type_name << ", " << root->start + << ") \\\n"; + inner_indent += " "; + } + if (root->num_own_values == 1) { + definitions << inner_indent << "V(" << type_name << ", " << root->value + << ") \\\n"; + values << " V(" << type_name << ") \\\n"; + } + for (auto& child : root->children) { + PrintInstanceTypes(child.get(), definitions, values, inner_indent); + } + // We can't emit LAST_STRING_TYPE because it's not a valid flags combination. + // So if the class type has multiple own values, which only happens when using + // ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE, then omit the end marker. + if (root->num_values > 1 && root->num_own_values <= 1) { + definitions << indent << "V(LAST_" << type_name << ", " << root->end + << ") \\\n"; + } +} + +} // namespace + +void ImplementationVisitor::GenerateInstanceTypes( + const std::string& output_directory) { + std::stringstream header; + std::string file_name = "instance-types-tq.h"; + { + IncludeGuardScope guard(header, file_name); + + header << "// Instance types for all classes except for those that use " + "InstanceType as flags.\n"; + header << "#define TORQUE_ASSIGNED_INSTANCE_TYPES(V) \\\n"; + std::unique_ptr<InstanceTypeTree> instance_types = AssignInstanceTypes(); + std::stringstream values_list; + if (instance_types != nullptr) { + PrintInstanceTypes(instance_types.get(), header, values_list, " "); + } + header << "\n\n"; + + header << "// Instance types for all classes except for those that use " + "InstanceType as flags.\n"; + header << "#define TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(V) \\\n"; + header << values_list.str(); + header << "\n\n"; + + header << "// Instance types for Torque-internal classes.\n"; + header << "#define TORQUE_INTERNAL_INSTANCE_TYPES(V) \\\n"; + for (const TypeAlias* alias : GlobalContext::GetClasses()) { + const ClassType* type = ClassType::DynamicCast(alias->type()); + if (type->IsExtern()) continue; + std::string type_name = + CapifyStringWithUnderscores(type->name()) + "_TYPE"; + header << " V(" << type_name << ") \\\n"; + } + header << "\n\n"; + + header << "// Struct list entries for Torque-internal classes.\n"; + header << "#define TORQUE_STRUCT_LIST_GENERATOR(V, _) \\\n"; + for (const TypeAlias* alias : GlobalContext::GetClasses()) { + const ClassType* type = ClassType::DynamicCast(alias->type()); + if (type->IsExtern()) continue; + std::string type_name = + CapifyStringWithUnderscores(type->name()) + "_TYPE"; + std::string variable_name = SnakeifyString(type->name()); + header << " V(_, " << type_name << ", " << type->name() << ", " + << variable_name << ") \\\n"; + } + header << "\n"; + } + std::string output_header_path = output_directory + "/" + file_name; + WriteFile(output_header_path, header.str()); +} + +} // namespace torque +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/torque/ls/json.h b/chromium/v8/src/torque/ls/json.h index 43d9f7ab112..1a033cf6141 100644 --- a/chromium/v8/src/torque/ls/json.h +++ b/chromium/v8/src/torque/ls/json.h @@ -6,11 +6,11 @@ #define V8_TORQUE_LS_JSON_H_ #include <map> +#include <memory> #include <string> #include <vector> #include "src/base/logging.h" -#include "src/base/template-utils.h" namespace v8 { namespace internal { @@ 
-44,7 +44,7 @@ struct JsonValue { static JsonValue From(JsonObject object) { JsonValue result; result.tag = JsonValue::OBJECT; - result.object_ = base::make_unique<JsonObject>(std::move(object)); + result.object_ = std::make_unique<JsonObject>(std::move(object)); return result; } @@ -65,7 +65,7 @@ struct JsonValue { static JsonValue From(JsonArray array) { JsonValue result; result.tag = JsonValue::ARRAY; - result.array_ = base::make_unique<JsonArray>(std::move(array)); + result.array_ = std::make_unique<JsonArray>(std::move(array)); return result; } diff --git a/chromium/v8/src/torque/ls/message-handler.cc b/chromium/v8/src/torque/ls/message-handler.cc index 6ec124b5a26..becc97c9dc8 100644 --- a/chromium/v8/src/torque/ls/message-handler.cc +++ b/chromium/v8/src/torque/ls/message-handler.cc @@ -87,6 +87,8 @@ void ResetCompilationErrorDiagnostics(MessageWriter writer) { class DiagnosticCollector { public: void AddTorqueMessage(const TorqueMessage& message) { + if (!ShouldAddMessageOfKind(message.kind)) return; + SourceId id = message.position ? message.position->source : SourceId::Invalid(); auto& notification = GetOrCreateNotificationForSource(id); @@ -120,6 +122,20 @@ class DiagnosticCollector { return notification; } + bool ShouldAddMessageOfKind(TorqueMessage::Kind kind) { + // An error can easily cause a lot of false positive lint messages, due to + // unused variables, macros, etc. Thus we suppress subsequent lint messages + // when there are errors. + switch (kind) { + case TorqueMessage::Kind::kError: + suppress_lint_messages_ = true; + return true; + case TorqueMessage::Kind::kLint: + if (suppress_lint_messages_) return false; + return true; + } + } + void PopulateRangeFromSourcePosition(Range range, const SourcePosition& position) { range.start().set_line(position.start.line); @@ -138,6 +154,7 @@ class DiagnosticCollector { } std::map<SourceId, PublishDiagnosticsNotification> notifications_; + bool suppress_lint_messages_ = false; }; void SendCompilationDiagnostics(const TorqueCompilerResult& result, diff --git a/chromium/v8/src/torque/server-data.h b/chromium/v8/src/torque/server-data.h index 04cd0b317f8..b80d1b67f47 100644 --- a/chromium/v8/src/torque/server-data.h +++ b/chromium/v8/src/torque/server-data.h @@ -6,6 +6,7 @@ #define V8_TORQUE_SERVER_DATA_H_ #include <map> +#include <memory> #include <vector> #include "src/base/macros.h" @@ -47,12 +48,12 @@ class LanguageServerData : public ContextualClass<LanguageServerData> { static void SetGlobalContext(GlobalContext global_context) { Get().global_context_ = - base::make_unique<GlobalContext>(std::move(global_context)); + std::make_unique<GlobalContext>(std::move(global_context)); Get().PrepareAllDeclarableSymbols(); } static void SetTypeOracle(TypeOracle type_oracle) { - Get().type_oracle_ = base::make_unique<TypeOracle>(std::move(type_oracle)); + Get().type_oracle_ = std::make_unique<TypeOracle>(std::move(type_oracle)); } static const Symbols& SymbolsForSourceId(SourceId id) { diff --git a/chromium/v8/src/torque/torque-compiler.cc b/chromium/v8/src/torque/torque-compiler.cc index 3968b001fb4..6d2b14fc187 100644 --- a/chromium/v8/src/torque/torque-compiler.cc +++ b/chromium/v8/src/torque/torque-compiler.cc @@ -79,7 +79,8 @@ void CompileCurrentAst(TorqueCompilerOptions options) { ReportAllUnusedMacros(); - implementation_visitor.GenerateBuiltinDefinitions(output_directory); + implementation_visitor.GenerateBuiltinDefinitionsAndInterfaceDescriptors( + output_directory); 
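The DiagnosticCollector change above starts suppressing lint diagnostics once an error has been seen, because errors tend to cascade into spurious lint warnings. A hedged, standalone sketch of that filtering policy; the enum and class names are simplified stand-ins for the Torque types:

#include <iostream>
#include <vector>

enum class Kind { kError, kLint };

class MessageFilter {
 public:
  // Returns true if the message should be forwarded to the client.
  bool ShouldReport(Kind kind) {
    switch (kind) {
      case Kind::kError:
        suppress_lints_ = true;  // later lints are likely noise caused by this error
        return true;
      case Kind::kLint:
        return !suppress_lints_;
    }
    return true;
  }

 private:
  bool suppress_lints_ = false;
};

int main() {
  MessageFilter filter;
  std::vector<Kind> stream = {Kind::kLint, Kind::kError, Kind::kLint};
  for (Kind k : stream) std::cout << filter.ShouldReport(k) << "\n";  // 1 1 0
}
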
implementation_visitor.GenerateClassFieldOffsets(output_directory); implementation_visitor.GeneratePrintDefinitions(output_directory); implementation_visitor.GenerateClassDefinitions(output_directory); diff --git a/chromium/v8/src/torque/torque-parser.cc b/chromium/v8/src/torque/torque-parser.cc index d9973dde3c8..3639bef97cc 100644 --- a/chromium/v8/src/torque/torque-parser.cc +++ b/chromium/v8/src/torque/torque-parser.cc @@ -214,6 +214,10 @@ template <> V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<std::vector<Identifier*>>::id = ParseResultTypeId::kStdVectorOfIdentifierPtr; +template <> +V8_EXPORT_PRIVATE const ParseResultTypeId + ParseResultHolder<base::Optional<ClassBody*>>::id = + ParseResultTypeId::kOptionalClassBody; namespace { @@ -675,7 +679,9 @@ class AnnotationSet { Lint("Annotation ", a.name->value, error_message) .Position(a.name->pos); } - map_[a.name->value].push_back(*a.param); + if (!map_.insert({a.name->value, {*a.param, a.name->pos}}).second) { + Lint("Duplicate annotation ", a.name->value).Position(a.name->pos); + } } else { if (allowed_without_param.find(a.name->value) == allowed_without_param.end()) { @@ -693,41 +699,104 @@ class AnnotationSet { } } - bool Contains(const std::string& s) { return set_.find(s) != set_.end(); } - const std::vector<std::string>& GetParams(const std::string& s) { - return map_[s]; + bool Contains(const std::string& s) const { + return set_.find(s) != set_.end(); + } + base::Optional<std::pair<std::string, SourcePosition>> GetParam( + const std::string& s) const { + auto it = map_.find(s); + return it == map_.end() + ? base::Optional<std::pair<std::string, SourcePosition>>() + : it->second; } private: std::set<std::string> set_; - std::map<std::string, std::vector<std::string>> map_; + std::map<std::string, std::pair<std::string, SourcePosition>> map_; }; +int GetAnnotationValue(const AnnotationSet& annotations, const char* name, + int default_value) { + auto value_and_pos = annotations.GetParam(name); + if (!value_and_pos.has_value()) return default_value; + const std::string& value = value_and_pos->first; + SourcePosition pos = value_and_pos->second; + if (value.empty()) { + Error("Annotation ", name, " requires an integer parameter").Position(pos); + } + size_t num_chars_converted = 0; + int result = default_value; + try { + result = std::stoi(value, &num_chars_converted, 0); + } catch (const std::invalid_argument&) { + Error("Expected an integer for annotation ", name).Position(pos); + return result; + } catch (const std::out_of_range&) { + Error("Integer out of 32-bit range in annotation ", name).Position(pos); + return result; + } + if (num_chars_converted != value.size()) { + Error("Parameter for annotation ", name, + " must be an integer with no trailing characters") + .Position(pos); + } + return result; +} + +InstanceTypeConstraints MakeInstanceTypeConstraints( + const AnnotationSet& annotations) { + InstanceTypeConstraints result; + result.value = + GetAnnotationValue(annotations, ANNOTATION_INSTANCE_TYPE_VALUE, -1); + result.num_flags_bits = GetAnnotationValue( + annotations, ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE, -1); + return result; +} + +base::Optional<ParseResult> MakeClassBody(ParseResultIterator* child_results) { + auto methods = child_results->NextAs<std::vector<Declaration*>>(); + auto fields = child_results->NextAs<std::vector<ClassFieldExpression>>(); + base::Optional<ClassBody*> result = + MakeNode<ClassBody>(std::move(methods), std::move(fields)); + return ParseResult(result); +} + 
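GetAnnotationValue in the parser change above only accepts an annotation parameter if std::stoi consumes the entire string and the value fits in 32 bits, reporting a diagnostic otherwise. A minimal, hedged sketch of the same parse-and-validate pattern outside of Torque; the helper name, messages and fallback behaviour are illustrative:

#include <iostream>
#include <stdexcept>
#include <string>

// Returns default_value and prints a diagnostic if `value` is not a clean
// 32-bit integer (base auto-detected, so "0x10" and "010" are accepted).
int ParseAnnotationInt(const std::string& value, int default_value) {
  if (value.empty()) {
    std::cerr << "annotation requires an integer parameter\n";
    return default_value;
  }
  size_t consumed = 0;
  try {
    int result = std::stoi(value, &consumed, 0);
    if (consumed != value.size()) {
      std::cerr << "trailing characters after integer\n";
      return default_value;
    }
    return result;
  } catch (const std::invalid_argument&) {
    std::cerr << "expected an integer\n";
  } catch (const std::out_of_range&) {
    std::cerr << "integer out of 32-bit range\n";
  }
  return default_value;
}

int main() {
  std::cout << ParseAnnotationInt("0x40", -1) << "\n";   // 64
  std::cout << ParseAnnotationInt("12abc", -1) << "\n";  // -1 plus a diagnostic
}
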
base::Optional<ParseResult> MakeClassDeclaration( ParseResultIterator* child_results) { AnnotationSet annotations( child_results, - {"@generatePrint", "@noVerifier", "@abstract", - "@dirtyInstantiatedAbstractClass", "@hasSameInstanceTypeAsParent", - "@generateCppClass"}, - {}); + {ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT, + ANNOTATION_INSTANTIATED_ABSTRACT_CLASS, + ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT, + ANNOTATION_GENERATE_CPP_CLASS, + ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT, + ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT}, + {ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE, + ANNOTATION_INSTANCE_TYPE_VALUE}); ClassFlags flags = ClassFlag::kNone; - bool generate_print = annotations.Contains("@generatePrint"); + bool generate_print = annotations.Contains(ANNOTATION_GENERATE_PRINT); if (generate_print) flags |= ClassFlag::kGeneratePrint; - bool generate_verify = !annotations.Contains("@noVerifier"); + bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER); if (generate_verify) flags |= ClassFlag::kGenerateVerify; - if (annotations.Contains("@abstract")) { + if (annotations.Contains(ANNOTATION_ABSTRACT)) { flags |= ClassFlag::kAbstract; } - if (annotations.Contains("@dirtyInstantiatedAbstractClass")) { + if (annotations.Contains(ANNOTATION_INSTANTIATED_ABSTRACT_CLASS)) { flags |= ClassFlag::kInstantiatedAbstractClass; } - if (annotations.Contains("@hasSameInstanceTypeAsParent")) { + if (annotations.Contains(ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT)) { flags |= ClassFlag::kHasSameInstanceTypeAsParent; } - if (annotations.Contains("@generateCppClass")) { + if (annotations.Contains(ANNOTATION_GENERATE_CPP_CLASS)) { flags |= ClassFlag::kGenerateCppClassDefinitions; } + if (annotations.Contains(ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT)) { + flags |= ClassFlag::kHighestInstanceTypeWithinParent; + } + if (annotations.Contains(ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT)) { + flags |= ClassFlag::kLowestInstanceTypeWithinParent; + } + auto is_extern = child_results->NextAs<bool>(); if (is_extern) flags |= ClassFlag::kExtern; auto transient = child_results->NextAs<bool>(); @@ -741,8 +810,15 @@ base::Optional<ParseResult> MakeClassDeclaration( ReportError("Expected type name in extends clause."); } auto generates = child_results->NextAs<base::Optional<std::string>>(); - auto methods = child_results->NextAs<std::vector<Declaration*>>(); - auto fields_raw = child_results->NextAs<std::vector<ClassFieldExpression>>(); + auto body = child_results->NextAs<base::Optional<ClassBody*>>(); + std::vector<Declaration*> methods; + std::vector<ClassFieldExpression> fields_raw; + if (body.has_value()) { + methods = (*body)->methods; + fields_raw = (*body)->fields; + } else { + flags |= ClassFlag::kUndefinedLayout; + } // Filter to only include fields that should be present based on decoration. std::vector<ClassFieldExpression> fields; @@ -751,8 +827,9 @@ base::Optional<ParseResult> MakeClassDeclaration( [](const ClassFieldExpression& exp) { for (const ConditionalAnnotation& condition : exp.conditions) { if (condition.type == ConditionalAnnotationType::kPositive - ? !BuildFlags::GetFlag(condition.condition, "@if") - : BuildFlags::GetFlag(condition.condition, "@ifnot")) { + ? 
!BuildFlags::GetFlag(condition.condition, ANNOTATION_IF) + : BuildFlags::GetFlag(condition.condition, + ANNOTATION_IFNOT)) { return false; } } @@ -761,7 +838,7 @@ base::Optional<ParseResult> MakeClassDeclaration( Declaration* result = MakeNode<ClassDeclaration>( name, flags, std::move(extends), std::move(generates), std::move(methods), - fields); + fields, MakeInstanceTypeConstraints(annotations)); return ParseResult{result}; } @@ -1358,14 +1435,21 @@ base::Optional<ParseResult> MakeAnnotation(ParseResultIterator* child_results) { } base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) { - AnnotationSet annotations(child_results, {"@noVerifier"}, {"@if", "@ifnot"}); - bool generate_verify = !annotations.Contains("@noVerifier"); + AnnotationSet annotations(child_results, {ANNOTATION_NO_VERIFIER}, + {ANNOTATION_IF, ANNOTATION_IFNOT}); + bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER); std::vector<ConditionalAnnotation> conditions; - for (const std::string& condition : annotations.GetParams("@if")) { - conditions.push_back({condition, ConditionalAnnotationType::kPositive}); + base::Optional<std::pair<std::string, SourcePosition>> if_condition = + annotations.GetParam(ANNOTATION_IF); + base::Optional<std::pair<std::string, SourcePosition>> ifnot_condition = + annotations.GetParam(ANNOTATION_IFNOT); + if (if_condition.has_value()) { + conditions.push_back( + {if_condition->first, ConditionalAnnotationType::kPositive}); } - for (const std::string& condition : annotations.GetParams("@ifnot")) { - conditions.push_back({condition, ConditionalAnnotationType::kNegative}); + if (ifnot_condition.has_value()) { + conditions.push_back( + {ifnot_condition->first, ConditionalAnnotationType::kNegative}); } auto weak = child_results->NextAs<bool>(); auto const_qualified = child_results->NextAs<bool>(); @@ -1892,6 +1976,13 @@ struct TorqueGrammar : Grammar { &block}, MakeMethodDeclaration)}; + // Result: base::Optional<ClassBody*> + Symbol optionalClassBody = { + Rule({Token("{"), List<Declaration*>(&method), + List<ClassFieldExpression>(&classField), Token("}")}, + MakeClassBody), + Rule({Token(";")}, YieldDefaultValue<base::Optional<ClassBody*>>)}; + // Result: std::vector<Declaration*> Symbol declaration = { Rule({Token("const"), &name, Token(":"), &type, Token("="), expression, @@ -1905,8 +1996,7 @@ struct TorqueGrammar : Grammar { Optional<TypeExpression*>(Sequence({Token("extends"), &type})), Optional<std::string>( Sequence({Token("generates"), &externalString})), - Token("{"), List<Declaration*>(&method), - List<ClassFieldExpression>(&classField), Token("}")}, + &optionalClassBody}, AsSingletonVector<Declaration*, MakeClassDeclaration>()), Rule({Token("struct"), &name, TryOrDefault<GenericParameters>(&genericParameters), Token("{"), diff --git a/chromium/v8/src/torque/type-oracle.h b/chromium/v8/src/torque/type-oracle.h index 643c78c0306..03aad34d2ae 100644 --- a/chromium/v8/src/torque/type-oracle.h +++ b/chromium/v8/src/torque/type-oracle.h @@ -5,6 +5,8 @@ #ifndef V8_TORQUE_TYPE_ORACLE_H_ #define V8_TORQUE_TYPE_ORACLE_H_ +#include <memory> + #include "src/torque/contextual.h" #include "src/torque/declarable.h" #include "src/torque/declarations.h" diff --git a/chromium/v8/src/torque/type-visitor.cc b/chromium/v8/src/torque/type-visitor.cc index 9b5c96ee401..b45452058da 100644 --- a/chromium/v8/src/torque/type-visitor.cc +++ b/chromium/v8/src/torque/type-visitor.cc @@ -165,6 +165,12 @@ const ClassType* TypeVisitor::ComputeType(ClassDeclaration* decl) { 
"class \"", decl->name->value, "\" must extend either Tagged or an already declared class"); } + if (super_class->HasUndefinedLayout() && + !(decl->flags & ClassFlag::kUndefinedLayout)) { + Error("Class \"", decl->name->value, + "\" defines its layout but extends a class which does not") + .Position(decl->pos); + } } std::string generates = decl->name->value; @@ -282,8 +288,9 @@ void TypeVisitor::VisitClassFieldsAndMethods( "only one indexable field is currently supported per class"); } seen_indexed_field = true; - const Field* index_field = - &(class_type->LookupFieldInternal(*field_expression.index)); + const NameAndType& index_field = + class_type->LookupFieldInternal(*field_expression.index) + .name_and_type; class_type->RegisterField( {field_expression.name_and_type.name->pos, class_type, diff --git a/chromium/v8/src/torque/types.cc b/chromium/v8/src/torque/types.cc index fe792401f6c..022649e0657 100644 --- a/chromium/v8/src/torque/types.cc +++ b/chromium/v8/src/torque/types.cc @@ -50,7 +50,9 @@ bool Type::IsSubtypeOf(const Type* supertype) const { base::Optional<const ClassType*> Type::ClassSupertype() const { for (const Type* t = this; t != nullptr; t = t->parent()) { - if (auto* class_type = ClassType::DynamicCast(t)) return class_type; + if (auto* class_type = ClassType::DynamicCast(t)) { + return class_type; + } } return base::nullopt; } @@ -86,7 +88,7 @@ bool Type::IsAbstractName(const std::string& name) const { std::string Type::GetGeneratedTypeName() const { std::string result = GetGeneratedTypeNameImpl(); - if (result.empty() || result == "compiler::TNode<>") { + if (result.empty() || result == "TNode<>") { ReportError("Generated type is required for type '", ToString(), "'. Use 'generates' clause in definition."); } @@ -382,7 +384,7 @@ std::string ClassType::GetGeneratedTNodeTypeNameImpl() const { std::string ClassType::GetGeneratedTypeNameImpl() const { return IsConstexpr() ? GetGeneratedTNodeTypeName() - : "compiler::TNode<" + GetGeneratedTNodeTypeName() + ">"; + : "TNode<" + GetGeneratedTNodeTypeName() + ">"; } std::string ClassType::ToExplicitString() const { @@ -404,11 +406,11 @@ void ClassType::Finalize() const { if (const ClassType* super_class = ClassType::DynamicCast(parent())) { if (super_class->HasIndexedField()) flags_ |= ClassFlag::kHasIndexedField; if (!super_class->IsAbstract() && !HasSameInstanceTypeAsParent()) { - Error( - "Super class must either be abstract (annotate super class with " - "@abstract) " - "or this class must have the same instance type as the super class " - "(annotate this class with @hasSameInstanceTypeAsParent).") + Error("Super class must either be abstract (annotate super class with ", + ANNOTATION_ABSTRACT, + ") or this class must have the same instance type as the super " + "class (annotate this class with ", + ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT, ").") .Position(this->decl_->name->pos); } } diff --git a/chromium/v8/src/torque/types.h b/chromium/v8/src/torque/types.h index d2198d50c33..0102cf31d5c 100644 --- a/chromium/v8/src/torque/types.h +++ b/chromium/v8/src/torque/types.h @@ -156,7 +156,7 @@ struct Field { SourcePosition pos; const AggregateType* aggregate; - base::Optional<const Field*> index; + base::Optional<NameAndType> index; NameAndType name_and_type; size_t offset; bool is_weak; @@ -204,8 +204,7 @@ class AbstractType final : public Type { return "AT" + str; } std::string GetGeneratedTypeNameImpl() const override { - return IsConstexpr() ? 
generated_type_ - : "compiler::TNode<" + generated_type_ + ">"; + return IsConstexpr() ? generated_type_ : "TNode<" + generated_type_ + ">"; } std::string GetGeneratedTNodeTypeNameImpl() const override; bool IsConstexpr() const override { @@ -316,7 +315,7 @@ class V8_EXPORT_PRIVATE UnionType final : public Type { std::string ToExplicitString() const override; std::string MangledName() const override; std::string GetGeneratedTypeNameImpl() const override { - return "compiler::TNode<" + GetGeneratedTNodeTypeName() + ">"; + return "TNode<" + GetGeneratedTNodeTypeName() + ">"; } std::string GetGeneratedTNodeTypeNameImpl() const override; @@ -514,10 +513,12 @@ class ClassType final : public AggregateType { std::string GetGeneratedTNodeTypeNameImpl() const override; bool IsExtern() const { return flags_ & ClassFlag::kExtern; } bool ShouldGeneratePrint() const { - return flags_ & ClassFlag::kGeneratePrint || !IsExtern(); + return (flags_ & ClassFlag::kGeneratePrint || !IsExtern()) && + !HasUndefinedLayout(); } bool ShouldGenerateVerify() const { - return flags_ & ClassFlag::kGenerateVerify || !IsExtern(); + return (flags_ & ClassFlag::kGenerateVerify || !IsExtern()) && + !HasUndefinedLayout(); } bool IsTransient() const override { return flags_ & ClassFlag::kTransient; } bool IsAbstract() const { return flags_ & ClassFlag::kAbstract; } @@ -549,6 +550,20 @@ class ClassType final : public AggregateType { std::vector<Field> ComputeAllFields() const; + const InstanceTypeConstraints& GetInstanceTypeConstraints() const { + return decl_->instance_type_constraints; + } + bool IsHighestInstanceTypeWithinParent() const { + return flags_ & ClassFlag::kHighestInstanceTypeWithinParent; + } + bool IsLowestInstanceTypeWithinParent() const { + return flags_ & ClassFlag::kLowestInstanceTypeWithinParent; + } + bool HasUndefinedLayout() const { + return flags_ & ClassFlag::kUndefinedLayout; + } + SourcePosition GetPosition() const { return decl_->pos; } + private: friend class TypeOracle; friend class TypeVisitor; diff --git a/chromium/v8/src/torque/utils.cc b/chromium/v8/src/torque/utils.cc index 38862b31b0e..4e757ac9e86 100644 --- a/chromium/v8/src/torque/utils.cc +++ b/chromium/v8/src/torque/utils.cc @@ -212,19 +212,25 @@ bool IsValidTypeName(const std::string& s) { } std::string CapifyStringWithUnderscores(const std::string& camellified_string) { + // Special case: JSAbc yields JS_ABC, not JSABC, for any Abc. + size_t js_position = camellified_string.find("JS"); + std::string result; - bool previousWasLower = false; - for (auto current : camellified_string) { - if (previousWasLower && isupper(current)) { + bool previousWasLowerOrDigit = false; + for (size_t index = 0; index < camellified_string.size(); ++index) { + char current = camellified_string[index]; + if ((previousWasLowerOrDigit && isupper(current)) || + (js_position != std::string::npos && + index == js_position + strlen("JS"))) { result += "_"; } if (current == '.' 
|| current == '-') { result += "_"; - previousWasLower = false; + previousWasLowerOrDigit = false; continue; } result += toupper(current); - previousWasLower = (islower(current)); + previousWasLowerOrDigit = islower(current) || isdigit(current); } return result; } diff --git a/chromium/v8/src/utils/allocation.cc b/chromium/v8/src/utils/allocation.cc index f44b3c42ea9..c89f83ba85d 100644 --- a/chromium/v8/src/utils/allocation.cc +++ b/chromium/v8/src/utils/allocation.cc @@ -10,6 +10,7 @@ #include "src/base/logging.h" #include "src/base/page-allocator.h" #include "src/base/platform/platform.h" +#include "src/flags/flags.h" #include "src/init/v8.h" #include "src/sanitizer/lsan-page-allocator.h" #include "src/utils/memcopy.h" @@ -166,6 +167,9 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size, DCHECK_NOT_NULL(page_allocator); DCHECK_EQ(hint, AlignedAddress(hint, alignment)); DCHECK(IsAligned(size, page_allocator->AllocatePageSize())); + if (FLAG_randomize_all_allocations) { + hint = page_allocator->GetRandomMmapAddr(); + } void* result = nullptr; for (int i = 0; i < kAllocationTries; ++i) { result = page_allocator->AllocatePages(hint, size, alignment, access); diff --git a/chromium/v8/src/utils/memcopy.cc b/chromium/v8/src/utils/memcopy.cc index 1cac2189d08..c67d1d359a3 100644 --- a/chromium/v8/src/utils/memcopy.cc +++ b/chromium/v8/src/utils/memcopy.cc @@ -25,18 +25,8 @@ V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size) { (*memmove_function)(dest, src, size); } #elif V8_OS_POSIX && V8_HOST_ARCH_ARM -void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, - size_t chars) { - uint16_t* limit = dest + chars; - while (dest < limit) { - *dest++ = static_cast<uint16_t>(*src++); - } -} - V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper; -MemCopyUint16Uint8Function memcopy_uint16_uint8_function = - &MemCopyUint16Uint8Wrapper; #elif V8_OS_POSIX && V8_HOST_ARCH_MIPS V8_EXPORT_PRIVATE MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper; @@ -54,9 +44,6 @@ void init_memcopy_functions() { EmbeddedData d = EmbeddedData::FromBlob(); memcopy_uint8_function = reinterpret_cast<MemCopyUint8Function>( d.InstructionStartOfBuiltin(Builtins::kMemCopyUint8Uint8)); - memcopy_uint16_uint8_function = - reinterpret_cast<MemCopyUint16Uint8Function>( - d.InstructionStartOfBuiltin(Builtins::kMemCopyUint16Uint8)); } #elif V8_OS_POSIX && V8_HOST_ARCH_MIPS if (Isolate::CurrentEmbeddedBlobIsBinaryEmbedded()) { diff --git a/chromium/v8/src/utils/memcopy.h b/chromium/v8/src/utils/memcopy.h index c1a0afbcb49..7e1b8539df0 100644 --- a/chromium/v8/src/utils/memcopy.h +++ b/chromium/v8/src/utils/memcopy.h @@ -8,6 +8,7 @@ #include <stdint.h> #include <stdlib.h> #include <string.h> +#include <algorithm> #include "src/base/logging.h" #include "src/base/macros.h" @@ -55,17 +56,8 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, memmove(dest, src, size); } -using MemCopyUint16Uint8Function = void (*)(uint16_t* dest, const uint8_t* src, - size_t size); -extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function; -void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, - size_t chars); // For values < 12, the assembler function is slower than the inlined C code. 
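The CapifyStringWithUnderscores change earlier in this chunk adds two rules: a digit now counts like a lowercase letter for deciding whether to insert an underscore before the next capital, and a literal "JS" prefix is split off explicitly. A hedged sketch of the mappings those rules should produce; the input names are illustrative, not taken from the patch:

  CapifyStringWithUnderscores("JSArrayBuffer") == "JS_ARRAY_BUFFER"  // "JS" split off explicitly
  CapifyStringWithUnderscores("Float64Array")  == "FLOAT64_ARRAY"    // digit keeps the break before "Array"
  CapifyStringWithUnderscores("WeakCell")      == "WEAK_CELL"        // plain camel case unchanged
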
const int kMinComplexConvertMemCopy = 12; -V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src, - size_t size) { - (*memcopy_uint16_uint8_function)(dest, src, size); -} #elif defined(V8_HOST_ARCH_MIPS) using MemCopyUint8Function = void (*)(uint8_t* dest, const uint8_t* src, size_t size); @@ -109,6 +101,7 @@ inline void CopyImpl(T* dst_ptr, const T* src_ptr, size_t count) { DCHECK(((src <= dst) && ((src + count * kTWordSize) <= dst)) || ((dst <= src) && ((dst + count * kTWordSize) <= src))); #endif + if (count == 0) return; // Use block copying MemCopy if the segment we're copying is // enough to justify the extra call/setup overhead. @@ -204,308 +197,32 @@ inline void MemsetPointer(T** dest, U* value, size_t counter) { reinterpret_cast<Address>(value), counter); } -template <typename sourcechar, typename sinkchar> -V8_INLINE static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, - size_t chars); -#if defined(V8_HOST_ARCH_ARM) -V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, - size_t chars); -V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, - size_t chars); -V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, - size_t chars); -#elif defined(V8_HOST_ARCH_MIPS) -V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, - size_t chars); -V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, - size_t chars); -#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390) -V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, - size_t chars); -V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, - size_t chars); -#endif - -// Copy from 8bit/16bit chars to 8bit/16bit chars. -template <typename sourcechar, typename sinkchar> -V8_INLINE void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars); - -template <typename sourcechar, typename sinkchar> -void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) { - DCHECK_LE(sizeof(sourcechar), 2); - DCHECK_LE(sizeof(sinkchar), 2); - if (sizeof(sinkchar) == 1) { - if (sizeof(sourcechar) == 1) { - CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), - reinterpret_cast<const uint8_t*>(src), chars); - } else { - CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), - reinterpret_cast<const uint16_t*>(src), chars); - } - } else { - if (sizeof(sourcechar) == 1) { - CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), - reinterpret_cast<const uint8_t*>(src), chars); - } else { - CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), - reinterpret_cast<const uint16_t*>(src), chars); - } - } -} - -template <typename sourcechar, typename sinkchar> -void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) { - sinkchar* limit = dest + chars; - if ((sizeof(*dest) == sizeof(*src)) && - (chars >= kMinComplexMemCopy / sizeof(*dest))) { - MemCopy(dest, src, chars * sizeof(*dest)); - } else { - while (dest < limit) *dest++ = static_cast<sinkchar>(*src++); - } -} +// Copy from 8bit/16bit chars to 8bit/16bit chars. Values are zero-extended if +// needed. Ranges are not allowed to overlap. +// The separate declaration is needed for the V8_NONNULL, which is not allowed +// on a definition. 
+template <typename SrcType, typename DstType> +void CopyChars(DstType* dst, const SrcType* src, size_t count) V8_NONNULL(1, 2); -#if defined(V8_HOST_ARCH_ARM) -void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) { - switch (static_cast<unsigned>(chars)) { - case 0: - break; - case 1: - *dest = *src; - break; - case 2: - memcpy(dest, src, 2); - break; - case 3: - memcpy(dest, src, 3); - break; - case 4: - memcpy(dest, src, 4); - break; - case 5: - memcpy(dest, src, 5); - break; - case 6: - memcpy(dest, src, 6); - break; - case 7: - memcpy(dest, src, 7); - break; - case 8: - memcpy(dest, src, 8); - break; - case 9: - memcpy(dest, src, 9); - break; - case 10: - memcpy(dest, src, 10); - break; - case 11: - memcpy(dest, src, 11); - break; - case 12: - memcpy(dest, src, 12); - break; - case 13: - memcpy(dest, src, 13); - break; - case 14: - memcpy(dest, src, 14); - break; - case 15: - memcpy(dest, src, 15); - break; - default: - MemCopy(dest, src, chars); - break; - } -} - -void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, size_t chars) { - if (chars >= static_cast<size_t>(kMinComplexConvertMemCopy)) { - MemCopyUint16Uint8(dest, src, chars); - } else { - MemCopyUint16Uint8Wrapper(dest, src, chars); - } -} +template <typename SrcType, typename DstType> +void CopyChars(DstType* dst, const SrcType* src, size_t count) { + STATIC_ASSERT(std::is_integral<SrcType>::value); + STATIC_ASSERT(std::is_integral<DstType>::value); -void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) { - switch (static_cast<unsigned>(chars)) { - case 0: - break; - case 1: - *dest = *src; - break; - case 2: - memcpy(dest, src, 4); - break; - case 3: - memcpy(dest, src, 6); - break; - case 4: - memcpy(dest, src, 8); - break; - case 5: - memcpy(dest, src, 10); - break; - case 6: - memcpy(dest, src, 12); - break; - case 7: - memcpy(dest, src, 14); - break; - default: - MemCopy(dest, src, chars * sizeof(*dest)); - break; - } -} - -#elif defined(V8_HOST_ARCH_MIPS) -void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) { - if (chars < kMinComplexMemCopy) { - memcpy(dest, src, chars); - } else { - MemCopy(dest, src, chars); - } -} - -void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) { - if (chars < kMinComplexMemCopy) { - memcpy(dest, src, chars * sizeof(*dest)); - } else { - MemCopy(dest, src, chars * sizeof(*dest)); - } -} -#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390) -#define CASE(n) \ - case n: \ - memcpy(dest, src, n); \ - break -void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) { - switch (static_cast<unsigned>(chars)) { - case 0: - break; - case 1: - *dest = *src; - break; - CASE(2); - CASE(3); - CASE(4); - CASE(5); - CASE(6); - CASE(7); - CASE(8); - CASE(9); - CASE(10); - CASE(11); - CASE(12); - CASE(13); - CASE(14); - CASE(15); - CASE(16); - CASE(17); - CASE(18); - CASE(19); - CASE(20); - CASE(21); - CASE(22); - CASE(23); - CASE(24); - CASE(25); - CASE(26); - CASE(27); - CASE(28); - CASE(29); - CASE(30); - CASE(31); - CASE(32); - CASE(33); - CASE(34); - CASE(35); - CASE(36); - CASE(37); - CASE(38); - CASE(39); - CASE(40); - CASE(41); - CASE(42); - CASE(43); - CASE(44); - CASE(45); - CASE(46); - CASE(47); - CASE(48); - CASE(49); - CASE(50); - CASE(51); - CASE(52); - CASE(53); - CASE(54); - CASE(55); - CASE(56); - CASE(57); - CASE(58); - CASE(59); - CASE(60); - CASE(61); - CASE(62); - CASE(63); - CASE(64); - default: - memcpy(dest, src, chars); - break; - } -} -#undef CASE +#ifdef DEBUG + // Check 
for no overlap, otherwise {std::copy_n} cannot be used. + Address src_start = reinterpret_cast<Address>(src); + Address src_end = src_start + count * sizeof(SrcType); + Address dst_start = reinterpret_cast<Address>(dst); + Address dst_end = dst_start + count * sizeof(DstType); + DCHECK(src_end <= dst_start || dst_end <= src_start); +#endif -#define CASE(n) \ - case n: \ - memcpy(dest, src, n * 2); \ - break -void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) { - switch (static_cast<unsigned>(chars)) { - case 0: - break; - case 1: - *dest = *src; - break; - CASE(2); - CASE(3); - CASE(4); - CASE(5); - CASE(6); - CASE(7); - CASE(8); - CASE(9); - CASE(10); - CASE(11); - CASE(12); - CASE(13); - CASE(14); - CASE(15); - CASE(16); - CASE(17); - CASE(18); - CASE(19); - CASE(20); - CASE(21); - CASE(22); - CASE(23); - CASE(24); - CASE(25); - CASE(26); - CASE(27); - CASE(28); - CASE(29); - CASE(30); - CASE(31); - CASE(32); - default: - memcpy(dest, src, chars * 2); - break; - } + using SrcTypeUnsigned = typename std::make_unsigned<SrcType>::type; + using DstTypeUnsigned = typename std::make_unsigned<DstType>::type; + std::copy_n(reinterpret_cast<const SrcTypeUnsigned*>(src), count, + reinterpret_cast<DstTypeUnsigned*>(dst)); } -#undef CASE -#endif } // namespace internal } // namespace v8 diff --git a/chromium/v8/src/utils/utils-inl.h b/chromium/v8/src/utils/utils-inl.h index e88055023ec..0c8af7cb88a 100644 --- a/chromium/v8/src/utils/utils-inl.h +++ b/chromium/v8/src/utils/utils-inl.h @@ -36,13 +36,30 @@ template <typename Char> bool TryAddIndexChar(uint32_t* index, Char c) { if (!IsDecimalDigit(c)) return false; int d = c - '0'; + // The maximum index is 4294967294; for the computation below to not + // exceed that, the previous index value must be <= 429496729 if d <= 4, + // or <= 429496728 if d >= 5. The (d+3)>>3 computation is a branch-free + // way to express that. if (*index > 429496729U - ((d + 3) >> 3)) return false; *index = (*index) * 10 + d; return true; } -template <typename Stream> -bool StringToArrayIndex(Stream* stream, uint32_t* index) { +template <typename Char> +bool TryAddIndexChar(uint64_t* index, Char c) { + if (!IsDecimalDigit(c)) return false; + int d = c - '0'; + // The maximum uint64_t is 18446744073709551615; for the computation below to + // not exceed that, the previous index value must be <= 1844674407370955161 + // if d <= 5, or <= 1844674407370955160 if d >= 6. The (d+2)>>3 computation + // is a branch-free way to express that. + if (*index > 1844674407370955161ull - ((d + 2) >> 3)) return false; + *index = (*index) * 10 + d; + return true; +} + +template <typename Stream, typename index_t> +bool StringToArrayIndex(Stream* stream, index_t* index) { uint16_t ch = stream->GetNext(); // If the string begins with a '0' character, it must only consist @@ -55,9 +72,20 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index) { // Convert string to uint32 array index; character by character. if (!IsDecimalDigit(ch)) return false; int d = ch - '0'; - uint32_t result = d; + index_t result = d; while (stream->HasMore()) { - if (!TryAddIndexChar(&result, stream->GetNext())) return false; + // Clang on Mac doesn't think that size_t and uint*_t should be + // implicitly convertible. 
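The branch-free guard added to the 64-bit TryAddIndexChar above can be checked with a worked example. A tiny standalone sketch confirming the boundary; the constant is copied from the comment in the patch, the helper and the test harness are illustrative:

#include <cassert>
#include <cstdint>

// Mirrors the guard: appending digit d to index must not push it past 2^64 - 1.
bool CanAppendDigit(uint64_t index, int d) {
  return index <= 1844674407370955161ull - ((d + 2) >> 3);
}

int main() {
  // 18446744073709551615 == 1844674407370955161 * 10 + 5, so digit 5 still
  // fits at the limit while digit 6 does not.
  assert(CanAppendDigit(1844674407370955161ull, 5));
  assert(!CanAppendDigit(1844674407370955161ull, 6));
  assert(CanAppendDigit(1844674407370955160ull, 6));
}
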
+ if (sizeof(index_t) == 8) { + if (!TryAddIndexChar(reinterpret_cast<uint64_t*>(&result), + stream->GetNext())) { + return false; + } + } else { + if (!TryAddIndexChar(reinterpret_cast<uint32_t*>(&result), + stream->GetNext())) + return false; + } } *index = result; diff --git a/chromium/v8/src/utils/utils.h b/chromium/v8/src/utils/utils.h index 27d3d5ef217..b414a4c52b1 100644 --- a/chromium/v8/src/utils/utils.h +++ b/chromium/v8/src/utils/utils.h @@ -760,13 +760,8 @@ inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) { return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1); } -inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) { - return (x << (31 - msb)) >> (lsb + 31 - msb); -} - -inline int signed_bitextract_64(int msb, int lsb, int x) { - // TODO(jbramley): This is broken for big bitfields. - return (x << (63 - msb)) >> (lsb + 63 - msb); +inline int32_t signed_bitextract_32(int msb, int lsb, uint32_t x) { + return static_cast<int32_t>(x << (31 - msb)) >> (lsb + 31 - msb); } // Check number width. @@ -978,8 +973,8 @@ bool DoubleToBoolean(double d); template <typename Char> bool TryAddIndexChar(uint32_t* index, Char c); -template <typename Stream> -bool StringToArrayIndex(Stream* stream, uint32_t* index); +template <typename Stream, typename index_t> +bool StringToArrayIndex(Stream* stream, index_t* index); // Returns the current stack top. Works correctly with ASAN and SafeStack. // GetCurrentStackPosition() should not be inlined, because it works on stack diff --git a/chromium/v8/src/utils/vector.h b/chromium/v8/src/utils/vector.h index dd5c59e5538..e0c13afc901 100644 --- a/chromium/v8/src/utils/vector.h +++ b/chromium/v8/src/utils/vector.h @@ -8,6 +8,7 @@ #include <algorithm> #include <cstring> #include <iterator> +#include <memory> #include "src/common/checks.h" #include "src/common/globals.h" diff --git a/chromium/v8/src/wasm/DEPS b/chromium/v8/src/wasm/DEPS index eb0780f5e3d..2d310c631cd 100644 --- a/chromium/v8/src/wasm/DEPS +++ b/chromium/v8/src/wasm/DEPS @@ -1,4 +1,11 @@ specific_include_rules = { + "jump-table-assembler\.(cc|h)": [ + # The JumpTableAssembler should not depend on any wasm-specific headers. + # The only allowed include is 'src/codegen' for assembler headers. + "-src", + "+src/codegen", + "+src/wasm/jump-table-assembler.h", + ], "c-api\.cc": [ "+include/libplatform/libplatform.h", "+third_party/wasm-api/wasm.h", diff --git a/chromium/v8/src/wasm/OWNERS b/chromium/v8/src/wasm/OWNERS index 8aa6e247392..bc9ec357df4 100644 --- a/chromium/v8/src/wasm/OWNERS +++ b/chromium/v8/src/wasm/OWNERS @@ -1,7 +1,7 @@ ahaas@chromium.org bbudge@chromium.org binji@chromium.org -clemensh@chromium.org +clemensb@chromium.org gdeepti@chromium.org mstarzinger@chromium.org titzer@chromium.org diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h index 834eb181d83..e6c46e4a09d 100644 --- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h +++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h @@ -46,10 +46,12 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize; // Three instructions are required to sub a large constant, movw + movt + sub. 
constexpr int32_t kPatchInstructionsRequired = 3; +inline int GetStackSlotOffset(uint32_t index) { + return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; +} + inline MemOperand GetStackSlot(uint32_t index) { - int32_t offset = - kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; - return MemOperand(fp, -offset); + return MemOperand(fp, -GetStackSlotOffset(index)); } inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { @@ -635,6 +637,44 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index, ldr(reg, liftoff::GetHalfStackSlot(index, half)); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + // We need a zero reg. Always use r0 for that, and push it before to restore + // its value afterwards. + push(r0); + mov(r0, Operand(0)); + + if (count <= 5) { + // Special straight-line code for up to five slots. Generates two + // instructions per slot. + for (uint32_t offset = 0; offset < count; ++offset) { + str(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord)); + str(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord)); + } + } else { + // General case for bigger counts (9 instructions). + // Use r1 for start address (inclusive), r2 for end address (exclusive). + push(r1); + push(r2); + sub(r1, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot))); + sub(r2, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize)); + + Label loop; + bind(&loop); + str(r0, MemOperand(r1, /* offset */ kSystemPointerSize, PostIndex)); + cmp(r1, r2); + b(&loop, ne); + + pop(r2); + pop(r1); + } + + pop(r0); +} + #define I32_BINOP(name, instruction) \ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \ Register rhs) { \ diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index dc68267825c..dede53b7a48 100644 --- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -43,10 +43,12 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize; constexpr int32_t kConstantStackSpace = 0; +inline int GetStackSlotOffset(uint32_t index) { + return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; +} + inline MemOperand GetStackSlot(uint32_t index) { - int32_t offset = - kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; - return MemOperand(fp, -offset); + return MemOperand(fp, -GetStackSlotOffset(index)); } inline MemOperand GetInstanceOperand() { @@ -398,6 +400,38 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { UNREACHABLE(); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + int max_stp_offset = -liftoff::GetStackSlotOffset(index + count - 1); + if (count <= 20 && IsImmLSPair(max_stp_offset, kXRegSizeLog2)) { + // Special straight-line code for up to 20 slots. Generates one + // instruction per two slots (<= 10 instructions total). 
+ for (; count > 1; count -= 2) { + STATIC_ASSERT(kStackSlotSize == kSystemPointerSize); + stp(xzr, xzr, liftoff::GetStackSlot(index + count - 1)); + } + DCHECK(count == 0 || count == 1); + if (count) str(xzr, liftoff::GetStackSlot(index)); + } else { + // General case for bigger counts (7 instructions). + // Use x0 for start address (inclusive), x1 for end address (exclusive). + Push(x1, x0); + Sub(x0, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot))); + Sub(x1, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize)); + + Label loop; + bind(&loop); + str(xzr, MemOperand(x0, /* offset */ kSystemPointerSize, PostIndex)); + cmp(x0, x1); + b(&loop, ne); + + Pop(x0, x1); + } +} + #define I32_BINOP(name, instruction) \ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \ Register rhs) { \ diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index 7bc3596d2e7..fa88d20df63 100644 --- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -41,7 +41,7 @@ inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { return Operand(ebp, -kFirstStackSlotOffset - offset); } -// TODO(clemensh): Make this a constexpr variable once Operand is constexpr. +// TODO(clemensb): Make this a constexpr variable once Operand is constexpr. inline Operand GetInstanceOperand() { return Operand(ebp, -8); } static constexpr LiftoffRegList kByteRegs = @@ -511,6 +511,37 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index, mov(reg, liftoff::GetHalfStackSlot(index, half)); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + if (count <= 2) { + // Special straight-line code for up to two slots (6-9 bytes per word: + // C7 <1-4 bytes operand> <4 bytes imm>, makes 12-18 bytes per slot). + for (uint32_t offset = 0; offset < count; ++offset) { + mov(liftoff::GetHalfStackSlot(index + offset, kLowWord), Immediate(0)); + mov(liftoff::GetHalfStackSlot(index + offset, kHighWord), Immediate(0)); + } + } else { + // General case for bigger counts. + // This sequence takes 19-22 bytes (3 for pushes, 3-6 for lea, 2 for xor, 5 + // for mov, 3 for repstosq, 3 for pops). + // Note: rep_stos fills ECX doublewords at [EDI] with EAX. + push(eax); + push(ecx); + push(edi); + lea(edi, liftoff::GetStackSlot(last_stack_slot)); + xor_(eax, eax); + // Number of words is number of slots times two. + mov(ecx, Immediate(count * 2)); + rep_stos(); + pop(edi); + pop(ecx); + pop(eax); + } +} + void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) { if (lhs != dst) { lea(dst, Operand(lhs, rhs, times_1, 0)); diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc index 0fcfb8dbfcf..389c0655075 100644 --- a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc +++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc @@ -297,7 +297,7 @@ class StackTransferRecipe { // process all remaining moves in that cycle. Repeat for all cycles. uint32_t next_spill_slot = asm_->cache_state()->stack_height(); while (!move_dst_regs_.is_empty()) { - // TODO(clemensh): Use an unused register if available. + // TODO(clemensb): Use an unused register if available. 
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet(); RegisterMove* move = register_move(dst); LiftoffRegister spill_reg = move->src; @@ -412,7 +412,7 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state, } // namespace -// TODO(clemensh): Don't copy the full parent state (this makes us N^2). +// TODO(clemensb): Don't copy the full parent state (this makes us N^2). void LiftoffAssembler::CacheState::InitMerge(const CacheState& source, uint32_t num_locals, uint32_t arity, @@ -484,7 +484,7 @@ constexpr AssemblerOptions DefaultLiftoffOptions() { } // namespace -// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function +// TODO(clemensb): Provide a reasonably sized buffer, based on wasm function // size. LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer) : TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo, @@ -526,7 +526,7 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) { void LiftoffAssembler::MergeFullStackWith(const CacheState& target, const CacheState& source) { DCHECK_EQ(source.stack_height(), target.stack_height()); - // TODO(clemensh): Reuse the same StackTransferRecipe object to save some + // TODO(clemensb): Reuse the same StackTransferRecipe object to save some // allocations. StackTransferRecipe transfers(this); for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) { diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h index 766ce71db11..f0d49a87828 100644 --- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h +++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h @@ -228,7 +228,7 @@ class LiftoffAssembler : public TurboAssembler { return reg; } - // TODO(clemensh): Don't copy the full parent state (this makes us N^2). + // TODO(clemensb): Don't copy the full parent state (this makes us N^2). void InitMerge(const CacheState& source, uint32_t num_locals, uint32_t arity, uint32_t stack_depth); @@ -386,6 +386,7 @@ class LiftoffAssembler : public TurboAssembler { // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e. // 4 bytes on the stack holding half of a 64-bit value. inline void FillI64Half(Register, uint32_t index, RegPairHalf); + inline void FillStackSlotsWithZero(uint32_t index, uint32_t count); // i32 binops. inline void emit_i32_add(Register dst, Register lhs, Register rhs); diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc index 02de06763c1..997c8ff52b7 100644 --- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc +++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc @@ -6,7 +6,7 @@ #include "src/base/optional.h" #include "src/codegen/assembler-inl.h" -// TODO(clemensh): Remove dependences on compiler stuff. +// TODO(clemensb): Remove dependences on compiler stuff. #include "src/codegen/interface-descriptors.h" #include "src/codegen/macro-assembler-inl.h" #include "src/compiler/linkage.h" @@ -121,7 +121,7 @@ constexpr Vector<const ValueType> kSupportedTypes = class LiftoffCompiler { public: - // TODO(clemensh): Make this a template parameter. + // TODO(clemensb): Make this a template parameter. 
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate; using Value = ValueBase; @@ -341,6 +341,24 @@ class LiftoffCompiler { __ bind(ool.continuation.get()); } + bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) { + int actual_locals = __ num_locals() - num_params; + DCHECK_LE(0, actual_locals); + constexpr int kNumCacheRegisters = NumRegs(kLiftoffAssemblerGpCacheRegs); + // If we have many locals, we put them on the stack initially. This avoids + // having to spill them on merge points. Use of these initial values should + // be rare anyway. + if (actual_locals > kNumCacheRegisters / 2) return true; + // If there are locals which are not i32 or i64, we also spill all locals, + // because other types cannot be initialized to constants. + for (uint32_t param_idx = num_params; param_idx < __ num_locals(); + ++param_idx) { + ValueType type = decoder->GetLocalType(param_idx); + if (type != kWasmI32 && type != kWasmI64) return true; + } + return false; + } + void StartFunctionBody(FullDecoder* decoder, Control* block) { for (uint32_t i = 0; i < __ num_locals(); ++i) { if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i), @@ -373,6 +391,7 @@ class LiftoffCompiler { // LiftoffAssembler methods. if (DidAssemblerBailout(decoder)) return; + // Process parameters. __ SpillInstance(instance_reg); // Input 0 is the code target, 1 is the instance. First parameter at 2. uint32_t input_idx = kInstanceParameterIndex + 1; @@ -380,32 +399,20 @@ class LiftoffCompiler { input_idx += ProcessParameter(__ local_type(param_idx), input_idx); } DCHECK_EQ(input_idx, descriptor_->InputCount()); - // Set to a gp register, to mark this uninitialized. - LiftoffRegister zero_double_reg = kGpCacheRegList.GetFirstRegSet(); - DCHECK(zero_double_reg.is_gp()); - for (uint32_t param_idx = num_params; param_idx < __ num_locals(); - ++param_idx) { - ValueType type = decoder->GetLocalType(param_idx); - switch (type) { - case kWasmI32: - __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0}); - break; - case kWasmI64: - __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0}); - break; - case kWasmF32: - case kWasmF64: - if (zero_double_reg.is_gp()) { - // Note: This might spill one of the registers used to hold - // parameters. - zero_double_reg = __ GetUnusedRegister(kFpReg); - // Zero is represented by the bit pattern 0 for both f32 and f64. - __ LoadConstant(zero_double_reg, WasmValue(0.)); - } - __ PushRegister(type, zero_double_reg); - break; - default: - UNIMPLEMENTED(); + + // Initialize locals beyond parameters. + if (SpillLocalsInitially(decoder, num_params)) { + __ FillStackSlotsWithZero(num_params, __ num_locals() - num_params); + for (uint32_t param_idx = num_params; param_idx < __ num_locals(); + ++param_idx) { + ValueType type = decoder->GetLocalType(param_idx); + __ cache_state()->stack_state.emplace_back(type); + } + } else { + for (uint32_t param_idx = num_params; param_idx < __ num_locals(); + ++param_idx) { + ValueType type = decoder->GetLocalType(param_idx); + __ cache_state()->stack_state.emplace_back(type, int32_t{0}); } } @@ -488,7 +495,7 @@ class LiftoffCompiler { // Before entering a loop, spill all locals to the stack, in order to free // the cache registers, and to avoid unnecessarily reloading stack values // into registers at branches. - // TODO(clemensh): Come up with a better strategy here, involving + // TODO(clemensb): Come up with a better strategy here, involving // pre-analysis of the function. 
__ SpillLocals(); @@ -519,7 +526,7 @@ class LiftoffCompiler { } // Allocate the else state. - if_block->else_state = base::make_unique<ElseState>(); + if_block->else_state = std::make_unique<ElseState>(); // Test the condition, jump to else if zero. Register value = __ PopToRegister().gp(); @@ -617,8 +624,8 @@ class LiftoffCompiler { template <ValueType src_type, ValueType result_type, class EmitFn> void EmitUnOp(EmitFn fn) { - static RegClass src_rc = reg_class_for(src_type); - static RegClass result_rc = reg_class_for(result_type); + constexpr RegClass src_rc = reg_class_for(src_type); + constexpr RegClass result_rc = reg_class_for(result_type); LiftoffRegister src = __ PopToRegister(); LiftoffRegister dst = src_rc == result_rc ? __ GetUnusedRegister(result_rc, {src}) @@ -693,45 +700,44 @@ class LiftoffCompiler { void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value, Value* result) { #define CASE_I32_UNOP(opcode, fn) \ - case WasmOpcode::kExpr##opcode: \ + case kExpr##opcode: \ EmitUnOp<kWasmI32, kWasmI32>( \ [=](LiftoffRegister dst, LiftoffRegister src) { \ __ emit_##fn(dst.gp(), src.gp()); \ }); \ break; #define CASE_I32_SIGN_EXTENSION(opcode, fn) \ - case WasmOpcode::kExpr##opcode: \ + case kExpr##opcode: \ EmitUnOp<kWasmI32, kWasmI32>( \ [=](LiftoffRegister dst, LiftoffRegister src) { \ __ emit_##fn(dst.gp(), src.gp()); \ }); \ break; #define CASE_I64_SIGN_EXTENSION(opcode, fn) \ - case WasmOpcode::kExpr##opcode: \ + case kExpr##opcode: \ EmitUnOp<kWasmI64, kWasmI64>( \ [=](LiftoffRegister dst, LiftoffRegister src) { \ __ emit_##fn(dst, src); \ }); \ break; #define CASE_FLOAT_UNOP(opcode, type, fn) \ - case WasmOpcode::kExpr##opcode: \ + case kExpr##opcode: \ EmitUnOp<kWasm##type, kWasm##type>( \ [=](LiftoffRegister dst, LiftoffRegister src) { \ __ emit_##fn(dst.fp(), src.fp()); \ }); \ break; #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \ - case WasmOpcode::kExpr##opcode: \ + case kExpr##opcode: \ EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \ &ExternalReference::wasm_##fn); \ break; #define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \ - case WasmOpcode::kExpr##opcode: \ + case kExpr##opcode: \ EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \ kExpr##opcode, ext_ref, can_trap ? 
decoder->position() : 0); \ break; switch (opcode) { - CASE_I32_UNOP(I32Eqz, i32_eqz) CASE_I32_UNOP(I32Clz, i32_clz) CASE_I32_UNOP(I32Ctz, i32_ctz) CASE_FLOAT_UNOP(F32Abs, F32, f32_abs) @@ -786,29 +792,41 @@ class LiftoffCompiler { CASE_I64_SIGN_EXTENSION(I64SExtendI8, i64_signextend_i8) CASE_I64_SIGN_EXTENSION(I64SExtendI16, i64_signextend_i16) CASE_I64_SIGN_EXTENSION(I64SExtendI32, i64_signextend_i32) + case kExprI32Eqz: + DCHECK(decoder->lookahead(0, kExprI32Eqz)); + if (decoder->lookahead(1, kExprBrIf)) { + DCHECK(!has_outstanding_op()); + outstanding_op_ = kExprI32Eqz; + break; + } + EmitUnOp<kWasmI32, kWasmI32>( + [=](LiftoffRegister dst, LiftoffRegister src) { + __ emit_i32_eqz(dst.gp(), src.gp()); + }); + break; case kExprI32Popcnt: EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt, &ExternalReference::wasm_word32_popcnt); break; - case WasmOpcode::kExprI64Eqz: + case kExprI64Eqz: EmitUnOp<kWasmI64, kWasmI32>( [=](LiftoffRegister dst, LiftoffRegister src) { __ emit_i64_eqz(dst.gp(), src); }); break; - case WasmOpcode::kExprI64Clz: - case WasmOpcode::kExprI64Ctz: - case WasmOpcode::kExprI64Popcnt: + case kExprI64Clz: + case kExprI64Ctz: + case kExprI64Popcnt: return unsupported(decoder, kComplexOperation, WasmOpcodes::OpcodeName(opcode)); - case WasmOpcode::kExprI32SConvertSatF32: - case WasmOpcode::kExprI32UConvertSatF32: - case WasmOpcode::kExprI32SConvertSatF64: - case WasmOpcode::kExprI32UConvertSatF64: - case WasmOpcode::kExprI64SConvertSatF32: - case WasmOpcode::kExprI64UConvertSatF32: - case WasmOpcode::kExprI64SConvertSatF64: - case WasmOpcode::kExprI64UConvertSatF64: + case kExprI32SConvertSatF32: + case kExprI32UConvertSatF32: + case kExprI32SConvertSatF64: + case kExprI32UConvertSatF64: + case kExprI64SConvertSatF32: + case kExprI64UConvertSatF32: + case kExprI64SConvertSatF64: + case kExprI64UConvertSatF64: return unsupported(decoder, kNonTrappingFloatToInt, WasmOpcodes::OpcodeName(opcode)); default: @@ -1224,7 +1242,7 @@ class LiftoffCompiler { ReturnImpl(decoder); } - void GetLocal(FullDecoder* decoder, Value* result, + void LocalGet(FullDecoder* decoder, Value* result, const LocalIndexImmediate<validate>& imm) { auto& slot = __ cache_state()->stack_state[imm.index]; DCHECK_EQ(slot.type(), imm.type); @@ -1245,7 +1263,7 @@ class LiftoffCompiler { } } - void SetLocalFromStackSlot(LiftoffAssembler::VarState* dst_slot, + void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot, uint32_t local_index) { auto& state = *__ cache_state(); ValueType type = dst_slot->type(); @@ -1266,7 +1284,7 @@ class LiftoffCompiler { __ cache_state()->inc_used(dst_reg); } - void SetLocal(uint32_t local_index, bool is_tee) { + void LocalSet(uint32_t local_index, bool is_tee) { auto& state = *__ cache_state(); auto& source_slot = state.stack_state.back(); auto& target_slot = state.stack_state[local_index]; @@ -1281,20 +1299,20 @@ class LiftoffCompiler { target_slot = source_slot; break; case kStack: - SetLocalFromStackSlot(&target_slot, local_index); + LocalSetFromStackSlot(&target_slot, local_index); break; } if (!is_tee) __ cache_state()->stack_state.pop_back(); } - void SetLocal(FullDecoder* decoder, const Value& value, + void LocalSet(FullDecoder* decoder, const Value& value, const LocalIndexImmediate<validate>& imm) { - SetLocal(imm.index, false); + LocalSet(imm.index, false); } - void TeeLocal(FullDecoder* decoder, const Value& value, Value* result, + void LocalTee(FullDecoder* decoder, const Value& value, Value* result, const LocalIndexImmediate<validate>& imm) { - 
SetLocal(imm.index, true); + LocalSet(imm.index, true); } Register GetGlobalBaseAndOffset(const WasmGlobal* global, @@ -1312,7 +1330,7 @@ class LiftoffCompiler { return addr; } - void GetGlobal(FullDecoder* decoder, Value* result, + void GlobalGet(FullDecoder* decoder, Value* result, const GlobalIndexImmediate<validate>& imm) { const auto* global = &env_->module->globals[imm.index]; if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global")) @@ -1327,7 +1345,7 @@ class LiftoffCompiler { __ PushRegister(global->type, value); } - void SetGlobal(FullDecoder* decoder, const Value& value, + void GlobalSet(FullDecoder* decoder, const Value& value, const GlobalIndexImmediate<validate>& imm) { auto* global = &env_->module->globals[imm.index]; if (!CheckSupportedType(decoder, kSupportedTypes, global->type, "global")) @@ -1402,10 +1420,18 @@ class LiftoffCompiler { } } - void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) { + void BrIf(FullDecoder* decoder, const Value& /* cond */, uint32_t depth) { + Condition cond = kEqual; // Unary "equal" means "equals zero". + + if (has_outstanding_op()) { + DCHECK_EQ(kExprI32Eqz, outstanding_op_); + cond = kUnequal; // Unary "unequal" means "not equals zero". + outstanding_op_ = kNoOutstandingOp; + } + Label cont_false; Register value = __ PopToRegister().gp(); - __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value); + __ emit_cond_jump(cond, &cont_false, kWasmI32, value); BrOrRet(decoder, depth); __ bind(&cont_false); @@ -2056,7 +2082,14 @@ class LiftoffCompiler { } private: + static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable; + LiftoffAssembler asm_; + + // Used for merging code generation of subsequent operations (via look-ahead). + // Set by the first opcode, reset by the second. + WasmOpcode outstanding_op_ = kNoOutstandingOp; + compiler::CallDescriptor* const descriptor_; CompilationEnv* const env_; LiftoffBailoutReason bailout_reason_ = kSuccess; @@ -2072,6 +2105,10 @@ class LiftoffCompiler { // patch the actually needed stack size in the end. uint32_t pc_offset_stack_frame_construction_ = 0; + bool has_outstanding_op() const { + return outstanding_op_ != kNoOutstandingOp; + } + void TraceCacheState(FullDecoder* decoder) const { #ifdef DEBUG if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return; diff --git a/chromium/v8/src/wasm/baseline/liftoff-register.h b/chromium/v8/src/wasm/baseline/liftoff-register.h index 267a0055470..b322f7eb68d 100644 --- a/chromium/v8/src/wasm/baseline/liftoff-register.h +++ b/chromium/v8/src/wasm/baseline/liftoff-register.h @@ -32,15 +32,18 @@ static inline constexpr bool needs_reg_pair(ValueType type) { return kNeedI64RegPair && type == kWasmI64; } -// TODO(clemensh): Use a switch once we require C++14 support. static inline constexpr RegClass reg_class_for(ValueType type) { - return needs_reg_pair(type) // i64 on 32 bit - ? kGpRegPair - : type == kWasmI32 || type == kWasmI64 // int types - ? kGpReg - : type == kWasmF32 || type == kWasmF64 // float types - ? kFpReg - : kNoReg; // other (unsupported) types + switch (type) { + case kWasmF32: + case kWasmF64: + return kFpReg; + case kWasmI32: + return kGpReg; + case kWasmI64: + return kNeedI64RegPair ? kGpRegPair : kGpReg; + default: + return kNoReg; // unsupported type + } } // Maximum code of a gp cache register. 
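
[Editor's note] The FillStackSlotsWithZero implementations added above for arm, arm64 and ia32 (and below for mips, mips64, ppc, s390 and x64) all apply the same code-generation strategy: below a small per-port threshold the assembler emits one unrolled store per slot, and above it a short pointer-bumping loop (or rep stos on Intel). The following is only a minimal sketch of that decision, using a hypothetical string-based instruction list and a made-up kUnrollLimit; the real ports emit machine code through the TurboAssembler and pick their own thresholds and registers.

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical "emitted code" container, used only for illustration.
using Code = std::vector<std::string>;

Code EmitZeroStackSlots(uint32_t index, uint32_t count) {
  constexpr uint32_t kUnrollLimit = 5;  // assumed threshold; each port differs
  Code code;
  if (count <= kUnrollLimit) {
    // Straight-line path: one store per slot (two on ports that zero a
    // 64-bit slot as two 32-bit halves).
    for (uint32_t i = 0; i < count; ++i) {
      code.push_back("store zero -> slot " + std::to_string(index + i));
    }
  } else {
    // General path: set up start/end addresses once, then loop.
    code.push_back("start = addr_of(slot " + std::to_string(index + count - 1) + ")");
    code.push_back("end   = addr_of(slot " + std::to_string(index) + ") + slot_size");
    code.push_back("loop: store zero -> [start]; start += word; branch_if start != end");
  }
  return code;
}

The point of the threshold is code size: for a handful of slots the unrolled stores are smaller and faster than the loop setup, while for many slots the fixed-size loop (or rep stos) wins.
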
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h index e82ffe8f67d..4c69e423c1a 100644 --- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h +++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h @@ -13,6 +13,28 @@ namespace wasm { namespace liftoff { +// half +// slot Frame +// -----+--------------------+--------------------------- +// n+3 | parameter n | +// ... | ... | +// 4 | parameter 1 | or parameter 2 +// 3 | parameter 0 | or parameter 1 +// 2 | (result address) | or parameter 0 +// -----+--------------------+--------------------------- +// 1 | return addr (ra) | +// 0 | previous frame (fp)| +// -----+--------------------+ <-- frame ptr (fp) +// -1 | 0xa: WASM_COMPILED | +// -2 | instance | +// -----+--------------------+--------------------------- +// -3 | slot 0 (high) | ^ +// -4 | slot 0 (low) | | +// -5 | slot 1 (high) | Frame slots +// -6 | slot 1 (low) | | +// | | v +// -----+--------------------+ <-- stack ptr (sp) +// #if defined(V8_TARGET_BIG_ENDIAN) constexpr int32_t kLowWordOffset = 4; constexpr int32_t kHighWordOffset = 0; @@ -27,9 +49,12 @@ constexpr int32_t kConstantStackSpace = 8; constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + LiftoffAssembler::kStackSlotSize; +inline int GetStackSlotOffset(uint32_t index) { + return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; +} + inline MemOperand GetStackSlot(uint32_t index) { - int32_t offset = index * LiftoffAssembler::kStackSlotSize; - return MemOperand(fp, -kFirstStackSlotOffset - offset); + return MemOperand(fp, -GetStackSlotOffset(index)); } inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { @@ -583,6 +608,34 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index, lw(reg, liftoff::GetHalfStackSlot(index, half)); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + if (count <= 12) { + // Special straight-line code for up to 12 slots. Generates one + // instruction per slot (<=12 instructions total). + for (uint32_t offset = 0; offset < count; ++offset) { + Sw(zero_reg, liftoff::GetStackSlot(index + offset)); + } + } else { + // General case for bigger counts (12 instructions). + // Use a0 for start address (inclusive), a1 for end address (exclusive). + Push(a1, a0); + Addu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot))); + Addu(a1, fp, Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize)); + + Label loop; + bind(&loop); + Sw(zero_reg, MemOperand(a0, kSystemPointerSize)); + addiu(a0, a0, kSystemPointerSize); + BranchShort(&loop, ne, a0, Operand(a1)); + + Pop(a1, a0); + } +} + void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { TurboAssembler::Mul(dst, lhs, rhs); } diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h index 9c87dca7330..5314a65da57 100644 --- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h +++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h @@ -13,15 +13,44 @@ namespace wasm { namespace liftoff { +// Liftoff Frames. +// +// slot Frame +// +--------------------+--------------------------- +// n+4 | optional padding slot to keep the stack 16 byte aligned. +// n+3 | parameter n | +// ... | ... 
| +// 4 | parameter 1 | or parameter 2 +// 3 | parameter 0 | or parameter 1 +// 2 | (result address) | or parameter 0 +// -----+--------------------+--------------------------- +// 1 | return addr (ra) | +// 0 | previous frame (fp)| +// -----+--------------------+ <-- frame ptr (fp) +// -1 | 0xa: WASM_COMPILED | +// -2 | instance | +// -----+--------------------+--------------------------- +// -3 | slot 0 | ^ +// -4 | slot 1 | | +// | | Frame slots +// | | | +// | | v +// | optional padding slot to keep the stack 16 byte aligned. +// -----+--------------------+ <-- stack ptr (sp) +// + // fp-8 holds the stack marker, fp-16 is the instance parameter, first stack // slot is located at fp-24. constexpr int32_t kConstantStackSpace = 16; constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + LiftoffAssembler::kStackSlotSize; +inline int GetStackSlotOffset(uint32_t index) { + return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; +} + inline MemOperand GetStackSlot(uint32_t index) { - int32_t offset = index * LiftoffAssembler::kStackSlotSize; - return MemOperand(fp, -kFirstStackSlotOffset - offset); + return MemOperand(fp, -GetStackSlotOffset(index)); } inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); } @@ -498,6 +527,35 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { UNREACHABLE(); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + if (count <= 12) { + // Special straight-line code for up to 12 slots. Generates one + // instruction per slot (<= 12 instructions total). + for (uint32_t offset = 0; offset < count; ++offset) { + Sd(zero_reg, liftoff::GetStackSlot(index + offset)); + } + } else { + // General case for bigger counts (12 instructions). + // Use a0 for start address (inclusive), a1 for end address (exclusive). + Push(a1, a0); + Daddu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot))); + Daddu(a1, fp, + Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize)); + + Label loop; + bind(&loop); + Sd(zero_reg, MemOperand(a0, kSystemPointerSize)); + daddiu(a0, a0, kSystemPointerSize); + BranchShort(&loop, ne, a0, Operand(a1)); + + Pop(a1, a0); + } +} + void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { TurboAssembler::Mul(dst, lhs, rhs); } diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index a690a1c090d..3b436a96d51 100644 --- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -12,6 +12,49 @@ namespace v8 { namespace internal { namespace wasm { +namespace liftoff { + +// half +// slot Frame +// -----+--------------------+--------------------------- +// n+3 | parameter n | +// ... | ... 
| +// 4 | parameter 1 | or parameter 2 +// 3 | parameter 0 | or parameter 1 +// 2 | (result address) | or parameter 0 +// -----+--------------------+--------------------------- +// 1 | return addr (lr) | +// 0 | previous frame (fp)| +// -----+--------------------+ <-- frame ptr (fp) +// -1 | 0xa: WASM_COMPILED | +// -2 | instance | +// -----+--------------------+--------------------------- +// -3 | slot 0 (high) | ^ +// -4 | slot 0 (low) | | +// -5 | slot 1 (high) | Frame slots +// -6 | slot 1 (low) | | +// | | v +// -----+--------------------+ <-- stack ptr (sp) +// + +constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; +constexpr int32_t kFirstStackSlotOffset = + kInstanceOffset + 2 * kSystemPointerSize; + +inline int GetStackSlotOffset(uint32_t index) { + return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; +} + +inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { + int32_t half_offset = + half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; + int32_t offset = kFirstStackSlotOffset + + index * LiftoffAssembler::kStackSlotSize - half_offset; + return MemOperand(fp, -offset); +} + +} // namespace liftoff + int LiftoffAssembler::PrepareStackFrame() { bailout(kUnsupportedArchitecture, "PrepareStackFrame"); return 0; @@ -108,6 +151,45 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { bailout(kUnsupportedArchitecture, "FillI64Half"); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + // We need a zero reg. Always use r0 for that, and push it before to restore + // its value afterwards. + push(r0); + mov(r0, Operand(0)); + + if (count <= 5) { + // Special straight-line code for up to five slots. Generates two + // instructions per slot. + for (uint32_t offset = 0; offset < count; ++offset) { + StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord)); + StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord)); + } + } else { + // General case for bigger counts (9 instructions). + // Use r4 for start address (inclusive), r5 for end address (exclusive). + push(r4); + push(r5); + subi(r4, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot))); + subi(r5, fp, Operand(liftoff::GetStackSlotOffset(index) + kStackSlotSize)); + + Label loop; + bind(&loop); + StoreP(r0, MemOperand(r0)); + addi(r0, r0, Operand(kSystemPointerSize)); + cmp(r4, r5); + bne(&loop); + + pop(r4); + pop(r5); + } + + pop(r0); +} + #define UNIMPLEMENTED_I32_BINOP(name) \ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \ Register rhs) { \ diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h index d17c7dada1c..36267560dda 100644 --- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h +++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h @@ -12,6 +12,48 @@ namespace v8 { namespace internal { namespace wasm { +namespace liftoff { + +// half +// slot Frame +// -----+--------------------+--------------------------- +// n+3 | parameter n | +// ... | ... 
| +// 4 | parameter 1 | or parameter 2 +// 3 | parameter 0 | or parameter 1 +// 2 | (result address) | or parameter 0 +// -----+--------------------+--------------------------- +// 1 | return addr (lr) | +// 0 | previous frame (fp)| +// -----+--------------------+ <-- frame ptr (fp) +// -1 | 0xa: WASM_COMPILED | +// -2 | instance | +// -----+--------------------+--------------------------- +// -3 | slot 0 (high) | ^ +// -4 | slot 0 (low) | | +// -5 | slot 1 (high) | Frame slots +// -6 | slot 1 (low) | | +// | | v +// -----+--------------------+ <-- stack ptr (sp) +// +constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; +constexpr int32_t kFirstStackSlotOffset = + kInstanceOffset + 2 * kSystemPointerSize; + +inline int GetStackSlotOffset(uint32_t index) { + return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; +} + +inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { + int32_t half_offset = + half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; + int32_t offset = kFirstStackSlotOffset + + index * LiftoffAssembler::kStackSlotSize - half_offset; + return MemOperand(fp, -offset); +} + +} // namespace liftoff + int LiftoffAssembler::PrepareStackFrame() { bailout(kUnsupportedArchitecture, "PrepareStackFrame"); return 0; @@ -108,6 +150,45 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { bailout(kUnsupportedArchitecture, "FillI64Half"); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + // We need a zero reg. Always use r0 for that, and push it before to restore + // its value afterwards. + push(r0); + mov(r0, Operand(0)); + + if (count <= 5) { + // Special straight-line code for up to five slots. Generates two + // instructions per slot. + for (uint32_t offset = 0; offset < count; ++offset) { + StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord)); + StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord)); + } + } else { + // General case for bigger counts (9 instructions). + // Use r3 for start address (inclusive), r4 for end address (exclusive). + push(r3); + push(r4); + SubP(r3, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot))); + SubP(r4, fp, Operand(liftoff::GetStackSlotOffset(index) + kStackSlotSize)); + + Label loop; + bind(&loop); + StoreP(r0, MemOperand(r0)); + la(r0, MemOperand(r0, kSystemPointerSize)); + CmpLogicalP(r3, r4); + bne(&loop); + + pop(r4); + pop(r3); + } + + pop(r0); +} + #define UNIMPLEMENTED_I32_BINOP(name) \ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \ Register rhs) { \ diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h index 43637985d05..f4185de0700 100644 --- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -47,7 +47,7 @@ inline Operand GetStackSlot(uint32_t index) { return Operand(rbp, -kFirstStackSlotOffset - offset); } -// TODO(clemensh): Make this a constexpr variable once Operand is constexpr. +// TODO(clemensb): Make this a constexpr variable once Operand is constexpr. 
inline Operand GetInstanceOperand() { return Operand(rbp, -16); } inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset, @@ -452,6 +452,35 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { UNREACHABLE(); } +void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { + DCHECK_LT(0, count); + uint32_t last_stack_slot = index + count - 1; + RecordUsedSpillSlot(last_stack_slot); + + if (count <= 3) { + // Special straight-line code for up to three slots + // (7-10 bytes per slot: REX C7 <1-4 bytes op> <4 bytes imm>). + for (uint32_t offset = 0; offset < count; ++offset) { + movq(liftoff::GetStackSlot(index + offset), Immediate(0)); + } + } else { + // General case for bigger counts. + // This sequence takes 20-23 bytes (3 for pushes, 4-7 for lea, 2 for xor, 5 + // for mov, 3 for repstosq, 3 for pops). + // From intel manual: repstosq fills RCX quadwords at [RDI] with RAX. + pushq(rax); + pushq(rcx); + pushq(rdi); + leaq(rdi, liftoff::GetStackSlot(last_stack_slot)); + xorl(rax, rax); + movl(rcx, Immediate(count)); + repstosq(); + popq(rdi); + popq(rcx); + popq(rax); + } +} + void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) { if (lhs != dst) { leal(dst, Operand(lhs, rhs, times_1, 0)); diff --git a/chromium/v8/src/wasm/c-api.cc b/chromium/v8/src/wasm/c-api.cc index e812dd7994f..31b68e9cdc5 100644 --- a/chromium/v8/src/wasm/c-api.cc +++ b/chromium/v8/src/wasm/c-api.cc @@ -1692,17 +1692,17 @@ auto Global::type() const -> own<GlobalType> { auto Global::get() const -> Val { i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object(); - switch (type()->content()->kind()) { - case I32: + switch (v8_global->type()) { + case i::wasm::kWasmI32: return Val(v8_global->GetI32()); - case I64: + case i::wasm::kWasmI64: return Val(v8_global->GetI64()); - case F32: + case i::wasm::kWasmF32: return Val(v8_global->GetF32()); - case F64: + case i::wasm::kWasmF64: return Val(v8_global->GetF64()); - case ANYREF: - case FUNCREF: { + case i::wasm::kWasmAnyRef: + case i::wasm::kWasmFuncRef: { StoreImpl* store = impl(this)->store(); i::HandleScope scope(store->i_isolate()); return Val(V8RefValueToWasm(store, v8_global->GetRef())); @@ -1883,9 +1883,10 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory> { if (maximum < minimum) return nullptr; if (maximum > i::wasm::kSpecMaxWasmMemoryPages) return nullptr; } - bool is_shared = false; // TODO(wasm+): Support shared memory. + // TODO(wasm+): Support shared memory. + i::SharedFlag shared = i::SharedFlag::kNotShared; i::Handle<i::WasmMemoryObject> memory_obj; - if (!i::WasmMemoryObject::New(isolate, minimum, maximum, is_shared) + if (!i::WasmMemoryObject::New(isolate, minimum, maximum, shared) .ToHandle(&memory_obj)) { return own<Memory>(); } diff --git a/chromium/v8/src/wasm/decoder.h b/chromium/v8/src/wasm/decoder.h index abb7b8ee868..71c06467f11 100644 --- a/chromium/v8/src/wasm/decoder.h +++ b/chromium/v8/src/wasm/decoder.h @@ -267,6 +267,12 @@ class Decoder { } const byte* end() const { return end_; } + // Check if the byte at {offset} from the current pc equals {expected}. 
+ bool lookahead(int offset, byte expected) { + DCHECK_LE(pc_, end_); + return end_ - pc_ > offset && pc_[offset] == expected; + } + protected: const byte* start_; const byte* pc_; diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h index 582934e19f0..1f29571e406 100644 --- a/chromium/v8/src/wasm/function-body-decoder-impl.h +++ b/chromium/v8/src/wasm/function-body-decoder-impl.h @@ -714,12 +714,12 @@ struct ControlBase { F(RefFunc, uint32_t function_index, Value* result) \ F(Drop, const Value& value) \ F(DoReturn, Vector<Value> values) \ - F(GetLocal, Value* result, const LocalIndexImmediate<validate>& imm) \ - F(SetLocal, const Value& value, const LocalIndexImmediate<validate>& imm) \ - F(TeeLocal, const Value& value, Value* result, \ + F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \ + F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \ + F(LocalTee, const Value& value, Value* result, \ const LocalIndexImmediate<validate>& imm) \ - F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \ - F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \ + F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \ + F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \ F(TableGet, const Value& index, Value* result, \ const TableIndexImmediate<validate>& imm) \ F(TableSet, const Value& index, const Value& value, \ @@ -910,8 +910,8 @@ class WasmDecoder : public Decoder { length = OpcodeLength(decoder, pc); depth++; break; - case kExprSetLocal: // fallthru - case kExprTeeLocal: { + case kExprLocalSet: // fallthru + case kExprLocalTee: { LocalIndexImmediate<validate> imm(decoder, pc); if (assigned->length() > 0 && imm.index < static_cast<uint32_t>(assigned->length())) { @@ -1045,8 +1045,8 @@ class WasmDecoder : public Decoder { bool Validate(const byte* pc, BranchTableImmediate<validate>& imm, size_t block_depth) { - if (!VALIDATE(imm.table_count < kV8MaxWasmFunctionSize)) { - errorf(pc + 1, "invalid table count (> max function size): %u", + if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) { + errorf(pc + 1, "invalid table count (> max br_table size): %u", imm.table_count); return false; } @@ -1069,11 +1069,13 @@ class WasmDecoder : public Decoder { case kExprI32x4ReplaceLane: num_lanes = 4; break; - case kExprI16x8ExtractLane: + case kExprI16x8ExtractLaneS: + case kExprI16x8ExtractLaneU: case kExprI16x8ReplaceLane: num_lanes = 8; break; - case kExprI8x16ExtractLane: + case kExprI8x16ExtractLaneS: + case kExprI8x16ExtractLaneU: case kExprI8x16ReplaceLane: num_lanes = 16; break; @@ -1252,8 +1254,8 @@ class WasmDecoder : public Decoder { BranchDepthImmediate<validate> imm(decoder, pc); return 1 + imm.length; } - case kExprGetGlobal: - case kExprSetGlobal: { + case kExprGlobalGet: + case kExprGlobalSet: { GlobalIndexImmediate<validate> imm(decoder, pc); return 1 + imm.length; } @@ -1291,9 +1293,9 @@ class WasmDecoder : public Decoder { return 1 + imm.length; } - case kExprSetLocal: - case kExprTeeLocal: - case kExprGetLocal: { + case kExprLocalGet: + case kExprLocalSet: + case kExprLocalTee: { LocalIndexImmediate<validate> imm(decoder, pc); return 1 + imm.length; } @@ -1458,19 +1460,19 @@ class WasmDecoder : public Decoder { return {2, 0}; FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE) case kExprTableGet: - case kExprTeeLocal: + case kExprLocalTee: case kExprMemoryGrow: return {1, 1}; - case kExprSetLocal: - 
case kExprSetGlobal: + case kExprLocalSet: + case kExprGlobalSet: case kExprDrop: case kExprBrIf: case kExprBrTable: case kExprIf: case kExprRethrow: return {1, 0}; - case kExprGetLocal: - case kExprGetGlobal: + case kExprLocalGet: + case kExprGlobalGet: case kExprI32Const: case kExprI64Const: case kExprF32Const: @@ -2125,28 +2127,28 @@ class WasmFullDecoder : public WasmDecoder<validate> { len = 1 + imm.length; break; } - case kExprGetLocal: { + case kExprLocalGet: { LocalIndexImmediate<validate> imm(this, this->pc_); if (!this->Validate(this->pc_, imm)) break; auto* value = Push(imm.type); - CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm); + CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm); len = 1 + imm.length; break; } - case kExprSetLocal: { + case kExprLocalSet: { LocalIndexImmediate<validate> imm(this, this->pc_); if (!this->Validate(this->pc_, imm)) break; auto value = Pop(0, local_type_vec_[imm.index]); - CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm); + CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm); len = 1 + imm.length; break; } - case kExprTeeLocal: { + case kExprLocalTee: { LocalIndexImmediate<validate> imm(this, this->pc_); if (!this->Validate(this->pc_, imm)) break; auto value = Pop(0, local_type_vec_[imm.index]); auto* result = Push(value.type); - CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, imm); + CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm); len = 1 + imm.length; break; } @@ -2155,15 +2157,15 @@ class WasmFullDecoder : public WasmDecoder<validate> { CALL_INTERFACE_IF_REACHABLE(Drop, value); break; } - case kExprGetGlobal: { + case kExprGlobalGet: { GlobalIndexImmediate<validate> imm(this, this->pc_); len = 1 + imm.length; if (!this->Validate(this->pc_, imm)) break; auto* result = Push(imm.type); - CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, imm); + CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm); break; } - case kExprSetGlobal: { + case kExprGlobalSet: { GlobalIndexImmediate<validate> imm(this, this->pc_); len = 1 + imm.length; if (!this->Validate(this->pc_, imm)) break; @@ -2173,7 +2175,7 @@ class WasmFullDecoder : public WasmDecoder<validate> { break; } auto value = Pop(0, imm.type); - CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm); + CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm); break; } case kExprTableGet: { @@ -2447,15 +2449,15 @@ class WasmFullDecoder : public WasmDecoder<validate> { TRACE_PART("[%d]", imm.value); break; } - case kExprGetLocal: - case kExprSetLocal: - case kExprTeeLocal: { + case kExprLocalGet: + case kExprLocalSet: + case kExprLocalTee: { LocalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc); TRACE_PART("[%u]", imm.index); break; } - case kExprGetGlobal: - case kExprSetGlobal: { + case kExprGlobalGet: + case kExprGlobalSet: { GlobalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc); TRACE_PART("[%u]", imm.index); break; @@ -2700,8 +2702,10 @@ class WasmFullDecoder : public WasmDecoder<validate> { break; } case kExprI32x4ExtractLane: - case kExprI16x8ExtractLane: - case kExprI8x16ExtractLane: { + case kExprI16x8ExtractLaneS: + case kExprI16x8ExtractLaneU: + case kExprI8x16ExtractLaneS: + case kExprI8x16ExtractLaneU: { len = SimdExtractLane(opcode, kWasmI32); break; } diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc index 4940134d53c..e89c31d7295 100644 --- a/chromium/v8/src/wasm/function-compiler.cc +++ b/chromium/v8/src/wasm/function-compiler.cc @@ -49,7 +49,7 @@ class WasmInstructionBufferImpl { holder_->old_buffer_ = 
std::move(holder_->buffer_); holder_->buffer_ = OwnedVector<uint8_t>::New(new_size); - return base::make_unique<View>(holder_->buffer_.as_vector(), holder_); + return std::make_unique<View>(holder_->buffer_.as_vector(), holder_); } private: @@ -59,7 +59,7 @@ class WasmInstructionBufferImpl { std::unique_ptr<AssemblerBuffer> CreateView() { DCHECK_NOT_NULL(buffer_); - return base::make_unique<View>(buffer_.as_vector(), this); + return std::make_unique<View>(buffer_.as_vector(), this); } std::unique_ptr<uint8_t[]> ReleaseBuffer() { @@ -170,7 +170,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation( TimedHistogramScope wasm_compile_function_time_scope(timed_histogram); if (FLAG_trace_wasm_compiler) { - PrintF("Compiling wasm function %d with %s\n\n", func_index_, + PrintF("Compiling wasm function %d with %s\n", func_index_, ExecutionTierToString(tier_)); } diff --git a/chromium/v8/src/wasm/function-compiler.h b/chromium/v8/src/wasm/function-compiler.h index 2da028a047e..bdebfebe14d 100644 --- a/chromium/v8/src/wasm/function-compiler.h +++ b/chromium/v8/src/wasm/function-compiler.h @@ -5,6 +5,8 @@ #ifndef V8_WASM_FUNCTION_COMPILER_H_ #define V8_WASM_FUNCTION_COMPILER_H_ +#include <memory> + #include "src/codegen/code-desc.h" #include "src/trap-handler/trap-handler.h" #include "src/wasm/compilation-environment.h" diff --git a/chromium/v8/src/wasm/graph-builder-interface.cc b/chromium/v8/src/wasm/graph-builder-interface.cc index 923e1154ea0..b08aa9215ec 100644 --- a/chromium/v8/src/wasm/graph-builder-interface.cc +++ b/chromium/v8/src/wasm/graph-builder-interface.cc @@ -166,7 +166,8 @@ class WasmGraphBuildingInterface { // Wrap input merge into phis. for (uint32_t i = 0; i < block->start_merge.arity; ++i) { Value& val = block->start_merge[i]; - val.node = builder_->Phi(val.type, 1, &val.node, block->end_env->control); + TFNode* inputs[] = {val.node, block->end_env->control}; + val.node = builder_->Phi(val.type, 1, inputs); } } @@ -212,7 +213,10 @@ class WasmGraphBuildingInterface { if (block->is_onearmed_if()) { // Merge the else branch into the end merge. SetEnv(block->false_env); - MergeValuesInto(decoder, block, &block->end_merge); + DCHECK_EQ(block->start_merge.arity, block->end_merge.arity); + Value* values = + block->start_merge.arity > 0 ? &block->start_merge[0] : nullptr; + MergeValuesInto(decoder, block, &block->end_merge, values); } // Now continue with the merged environment. 
SetEnv(block->end_env); @@ -258,37 +262,38 @@ class WasmGraphBuildingInterface { void Drop(FullDecoder* decoder, const Value& value) {} void DoReturn(FullDecoder* decoder, Vector<Value> values) { - Vector<TFNode*> nodes = GetNodes(values); - BUILD(Return, nodes); + base::SmallVector<TFNode*, 8> nodes(values.size()); + GetNodes(nodes.begin(), values); + BUILD(Return, VectorOf(nodes)); } - void GetLocal(FullDecoder* decoder, Value* result, + void LocalGet(FullDecoder* decoder, Value* result, const LocalIndexImmediate<validate>& imm) { if (!ssa_env_->locals) return; // unreachable result->node = ssa_env_->locals[imm.index]; } - void SetLocal(FullDecoder* decoder, const Value& value, + void LocalSet(FullDecoder* decoder, const Value& value, const LocalIndexImmediate<validate>& imm) { if (!ssa_env_->locals) return; // unreachable ssa_env_->locals[imm.index] = value.node; } - void TeeLocal(FullDecoder* decoder, const Value& value, Value* result, + void LocalTee(FullDecoder* decoder, const Value& value, Value* result, const LocalIndexImmediate<validate>& imm) { result->node = value.node; if (!ssa_env_->locals) return; // unreachable ssa_env_->locals[imm.index] = value.node; } - void GetGlobal(FullDecoder* decoder, Value* result, + void GlobalGet(FullDecoder* decoder, Value* result, const GlobalIndexImmediate<validate>& imm) { - result->node = BUILD(GetGlobal, imm.index); + result->node = BUILD(GlobalGet, imm.index); } - void SetGlobal(FullDecoder* decoder, const Value& value, + void GlobalSet(FullDecoder* decoder, const Value& value, const GlobalIndexImmediate<validate>& imm) { - BUILD(SetGlobal, imm.index, value.node); + BUILD(GlobalSet, imm.index, value.node); } void TableGet(FullDecoder* decoder, const Value& index, Value* result, @@ -310,8 +315,8 @@ class WasmGraphBuildingInterface { TFNode* controls[2]; BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]); TFNode* merge = BUILD(Merge, 2, controls); - TFNode* vals[2] = {tval.node, fval.node}; - TFNode* phi = BUILD(Phi, tval.type, 2, vals, merge); + TFNode* inputs[] = {tval.node, fval.node, merge}; + TFNode* phi = BUILD(Phi, tval.type, 2, inputs); result->node = phi; ssa_env_->control = merge; } @@ -319,10 +324,11 @@ class WasmGraphBuildingInterface { void BrOrRet(FullDecoder* decoder, uint32_t depth) { if (depth == decoder->control_depth() - 1) { uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count()); - Vector<TFNode*> values = - ret_count == 0 ? 
Vector<TFNode*>{} - : GetNodes(decoder->stack_value(ret_count), ret_count); - BUILD(Return, values); + base::SmallVector<TFNode*, 8> values(ret_count); + if (ret_count > 0) { + GetNodes(values.begin(), decoder->stack_value(ret_count), ret_count); + } + BUILD(Return, VectorOf(values)); } else { Br(decoder, decoder->control_at(depth)); } @@ -431,7 +437,8 @@ class WasmGraphBuildingInterface { void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args, Value* result) { - Vector<TFNode*> inputs = GetNodes(args); + base::SmallVector<TFNode*, 8> inputs(args.size()); + GetNodes(inputs.begin(), args); TFNode* node = BUILD(SimdOp, opcode, inputs.begin()); if (result) result->node = node; } @@ -439,7 +446,8 @@ class WasmGraphBuildingInterface { void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode, const SimdLaneImmediate<validate> imm, Vector<Value> inputs, Value* result) { - Vector<TFNode*> nodes = GetNodes(inputs); + base::SmallVector<TFNode*, 8> nodes(inputs.size()); + GetNodes(nodes.begin(), inputs); result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes.begin()); } @@ -486,12 +494,11 @@ class WasmGraphBuildingInterface { // If the tags match we extract the values from the exception object and // push them onto the operand stack using the passed {values} vector. SetEnv(if_match_env); - // TODO(mstarzinger): Can't use BUILD() here, GetExceptionValues() returns - // TFNode** rather than TFNode*. Fix to add landing pads. - Vector<TFNode*> caught_values = - builder_->GetExceptionValues(exception.node, imm.exception); + base::SmallVector<TFNode*, 8> caught_values(values.size()); + Vector<TFNode*> caught_vector = VectorOf(caught_values); + BUILD(GetExceptionValues, exception.node, imm.exception, caught_vector); for (size_t i = 0, e = values.size(); i < e; ++i) { - values[i].node = caught_values[i]; + values[i].node = caught_vector[i]; } BrOrRet(decoder, depth); @@ -519,7 +526,8 @@ class WasmGraphBuildingInterface { void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args, const MemoryAccessImmediate<validate>& imm, Value* result) { - Vector<TFNode*> inputs = GetNodes(args); + base::SmallVector<TFNode*, 8> inputs(args.size()); + GetNodes(inputs.begin(), args); TFNode* node = BUILD(AtomicOp, opcode, inputs.begin(), imm.alignment, imm.offset, decoder->position()); if (result) result->node = node; @@ -591,16 +599,14 @@ class WasmGraphBuildingInterface { ->try_info; } - Vector<TFNode*> GetNodes(Value* values, size_t count) { - Vector<TFNode*> nodes = builder_->Buffer(count); + void GetNodes(TFNode** nodes, Value* values, size_t count) { for (size_t i = 0; i < count; ++i) { nodes[i] = values[i].node; } - return nodes; } - Vector<TFNode*> GetNodes(Vector<Value> values) { - return GetNodes(values.begin(), values.size()); + void GetNodes(TFNode** nodes, Vector<Value> values) { + GetNodes(nodes, values.begin(), values.size()); } void SetEnv(SsaEnv* env) { @@ -656,10 +662,10 @@ class WasmGraphBuildingInterface { SsaEnv* exception_env = Split(decoder, success_env); exception_env->control = if_exception; + exception_env->effect = if_exception; TryInfo* try_info = current_try_info(decoder); Goto(decoder, exception_env, try_info->catch_env); - TFNode* exception = try_info->exception; - if (exception == nullptr) { + if (try_info->exception == nullptr) { DCHECK_EQ(SsaEnv::kReached, try_info->catch_env->state); try_info->exception = if_exception; } else { @@ -694,7 +700,8 @@ class WasmGraphBuildingInterface { } } - void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* 
merge) { + void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge, + Value* values) { DCHECK(merge == &c->start_merge || merge == &c->end_merge); SsaEnv* target = c->end_env; @@ -703,13 +710,8 @@ class WasmGraphBuildingInterface { if (merge->arity == 0) return; - uint32_t avail = - decoder->stack_size() - decoder->control_at(0)->stack_depth; - DCHECK_GE(avail, merge->arity); - uint32_t start = avail >= merge->arity ? 0 : merge->arity - avail; - Value* stack_values = decoder->stack_value(merge->arity); - for (uint32_t i = start; i < merge->arity; ++i) { - Value& val = stack_values[i]; + for (uint32_t i = 0; i < merge->arity; ++i) { + Value& val = values[i]; Value& old = (*merge)[i]; DCHECK_NOT_NULL(val.node); DCHECK(val.type == kWasmBottom || @@ -722,6 +724,17 @@ class WasmGraphBuildingInterface { } } + void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) { +#ifdef DEBUG + uint32_t avail = + decoder->stack_size() - decoder->control_at(0)->stack_depth; + DCHECK_GE(avail, merge->arity); +#endif + Value* stack_values = + merge->arity > 0 ? decoder->stack_value(merge->arity) : nullptr; + MergeValuesInto(decoder, c, merge, stack_values); + } + void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) { DCHECK_NOT_NULL(to); switch (to->state) { @@ -741,17 +754,16 @@ class WasmGraphBuildingInterface { to->control = merge; // Merge effects. if (from->effect != to->effect) { - TFNode* effects[] = {to->effect, from->effect, merge}; - to->effect = builder_->EffectPhi(2, effects, merge); + TFNode* inputs[] = {to->effect, from->effect, merge}; + to->effect = builder_->EffectPhi(2, inputs); } // Merge SSA values. for (int i = decoder->num_locals() - 1; i >= 0; i--) { TFNode* a = to->locals[i]; TFNode* b = from->locals[i]; if (a != b) { - TFNode* vals[] = {a, b}; - to->locals[i] = - builder_->Phi(decoder->GetLocalType(i), 2, vals, merge); + TFNode* inputs[] = {a, b, merge}; + to->locals[i] = builder_->Phi(decoder->GetLocalType(i), 2, inputs); } } // Start a new merge from the instance cache. @@ -787,7 +799,8 @@ class WasmGraphBuildingInterface { env->state = SsaEnv::kMerged; env->control = builder_->Loop(env->control); - env->effect = builder_->EffectPhi(1, &env->effect, env->control); + TFNode* effect_inputs[] = {env->effect, env->control}; + env->effect = builder_->EffectPhi(1, effect_inputs); builder_->TerminateLoop(env->effect, env->control); // The '+ 1' here is to be able to set the instance cache as assigned. BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment( @@ -798,8 +811,8 @@ class WasmGraphBuildingInterface { int instance_cache_index = decoder->total_locals(); for (int i = decoder->num_locals() - 1; i >= 0; i--) { if (!assigned->Contains(i)) continue; - env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, - &env->locals[i], env->control); + TFNode* inputs[] = {env->locals[i], env->control}; + env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, inputs); } // Introduce phis for instance cache pointers if necessary. if (assigned->Contains(instance_cache_index)) { @@ -815,8 +828,8 @@ class WasmGraphBuildingInterface { // Conservatively introduce phis for all local variables. for (int i = decoder->num_locals() - 1; i >= 0; i--) { - env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, - &env->locals[i], env->control); + TFNode* inputs[] = {env->locals[i], env->control}; + env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1, inputs); } // Conservatively introduce phis for instance cache. 
@@ -877,22 +890,22 @@ class WasmGraphBuildingInterface { void DoCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node, FunctionSig* sig, uint32_t sig_index, const Value args[], Value returns[]) { - int param_count = static_cast<int>(sig->parameter_count()); - Vector<TFNode*> arg_nodes = builder_->Buffer(param_count + 1); - TFNode** return_nodes = nullptr; + size_t param_count = sig->parameter_count(); + size_t return_count = sig->return_count(); + base::SmallVector<TFNode*, 16> arg_nodes(param_count + 1); + base::SmallVector<TFNode*, 1> return_nodes(return_count); arg_nodes[0] = index_node; - for (int i = 0; i < param_count; ++i) { + for (size_t i = 0; i < param_count; ++i) { arg_nodes[i + 1] = args[i].node; } if (index_node) { - BUILD(CallIndirect, table_index, sig_index, arg_nodes.begin(), - &return_nodes, decoder->position()); + BUILD(CallIndirect, table_index, sig_index, VectorOf(arg_nodes), + VectorOf(return_nodes), decoder->position()); } else { - BUILD(CallDirect, sig_index, arg_nodes.begin(), &return_nodes, + BUILD(CallDirect, sig_index, VectorOf(arg_nodes), VectorOf(return_nodes), decoder->position()); } - int return_count = static_cast<int>(sig->return_count()); - for (int i = 0; i < return_count; ++i) { + for (size_t i = 0; i < return_count; ++i) { returns[i].node = return_nodes[i]; } // The invoked function could have used grow_memory, so we need to @@ -903,17 +916,17 @@ class WasmGraphBuildingInterface { void DoReturnCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node, FunctionSig* sig, uint32_t sig_index, const Value args[]) { - int arg_count = static_cast<int>(sig->parameter_count()); - Vector<TFNode*> arg_nodes = builder_->Buffer(arg_count + 1); + size_t arg_count = sig->parameter_count(); + base::SmallVector<TFNode*, 16> arg_nodes(arg_count + 1); arg_nodes[0] = index_node; - for (int i = 0; i < arg_count; ++i) { + for (size_t i = 0; i < arg_count; ++i) { arg_nodes[i + 1] = args[i].node; } if (index_node) { - BUILD(ReturnCallIndirect, table_index, sig_index, arg_nodes.begin(), + BUILD(ReturnCallIndirect, table_index, sig_index, VectorOf(arg_nodes), decoder->position()); } else { - BUILD(ReturnCall, sig_index, arg_nodes.begin(), decoder->position()); + BUILD(ReturnCall, sig_index, VectorOf(arg_nodes), decoder->position()); } } }; diff --git a/chromium/v8/src/wasm/jump-table-assembler.cc b/chromium/v8/src/wasm/jump-table-assembler.cc index 7c41c0a209c..adb7e19158a 100644 --- a/chromium/v8/src/wasm/jump-table-assembler.cc +++ b/chromium/v8/src/wasm/jump-table-assembler.cc @@ -21,17 +21,37 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, EmitJumpSlot(lazy_compile_target); // 5 bytes } -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - JumpToInstructionStream(builtin_target); -} - -void JumpTableAssembler::EmitJumpSlot(Address target) { - // On x64, all code is allocated within a single code section, so we can use - // relative jumps. 
- static_assert(kMaxWasmCodeMemory <= size_t{2} * GB, "can use relative jump"); +bool JumpTableAssembler::EmitJumpSlot(Address target) { intptr_t displacement = static_cast<intptr_t>( reinterpret_cast<byte*>(target) - pc_ - kNearJmpInstrSize); - near_jmp(displacement, RelocInfo::NONE); + if (!is_int32(displacement)) return false; + near_jmp(displacement, RelocInfo::NONE); // 5 bytes + return true; +} + +void JumpTableAssembler::EmitFarJumpSlot(Address target) { + Label data; + int start_offset = pc_offset(); + jmp(Operand(&data)); // 6 bytes + Nop(2); // 2 bytes + // The data must be properly aligned, so it can be patched atomically (see + // {PatchFarJumpSlot}). + DCHECK_EQ(start_offset + kSystemPointerSize, pc_offset()); + USE(start_offset); + bind(&data); + dq(target); // 8 bytes +} + +// static +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + // The slot needs to be pointer-size aligned so we can atomically update it. + DCHECK(IsAligned(slot, kSystemPointerSize)); + // Offset of the target is at 8 bytes, see {EmitFarJumpSlot}. + reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize) + ->store(target, std::memory_order_relaxed); + // The update is atomic because the address is properly aligned. + // Because of cache coherence, the data update will eventually be seen by all + // cores. It's ok if they temporarily jump to the old target. } void JumpTableAssembler::NopBytes(int bytes) { @@ -46,14 +66,20 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes } -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - JumpToInstructionStream(builtin_target); +bool JumpTableAssembler::EmitJumpSlot(Address target) { + jmp(target, RelocInfo::NONE); + return true; } -void JumpTableAssembler::EmitJumpSlot(Address target) { +void JumpTableAssembler::EmitFarJumpSlot(Address target) { jmp(target, RelocInfo::NONE); } +// static +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + UNREACHABLE(); +} + void JumpTableAssembler::NopBytes(int bytes) { DCHECK_LE(0, bytes); Nop(bytes); @@ -74,16 +100,26 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, EmitJumpSlot(lazy_compile_target); } -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - JumpToInstructionStream(builtin_target); - CheckConstPool(true, false); // force emit of const pool -} - -void JumpTableAssembler::EmitJumpSlot(Address target) { +bool JumpTableAssembler::EmitJumpSlot(Address target) { // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation // mode used below, we need this to allow concurrent patching of this slot. Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL)); CheckConstPool(true, false); // force emit of const pool + return true; +} + +void JumpTableAssembler::EmitFarJumpSlot(Address target) { + // Load from [pc + kInstrSize] to pc. Note that {pc} points two instructions + // after the currently executing one. 
+ ldr_pcrel(pc, -kInstrSize); // 1 instruction + dd(target); // 4 bytes (== 1 instruction) + STATIC_ASSERT(kInstrSize == kInt32Size); + STATIC_ASSERT(kFarJumpTableSlotSize == 2 * kInstrSize); +} + +// static +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + UNREACHABLE(); } void JumpTableAssembler::NopBytes(int bytes) { @@ -105,19 +141,43 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, if (nop_bytes) nop(); } -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - JumpToInstructionStream(builtin_target); - ForceConstantPoolEmissionWithoutJump(); +bool JumpTableAssembler::EmitJumpSlot(Address target) { + if (!TurboAssembler::IsNearCallOffset( + (reinterpret_cast<byte*>(target) - pc_) / kInstrSize)) { + return false; + } + + Jump(target, RelocInfo::NONE); + return true; } -void JumpTableAssembler::EmitJumpSlot(Address target) { - // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is - // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make - // sure concurrent patching is still supported. - DCHECK(TurboAssembler::IsNearCallOffset( - (reinterpret_cast<byte*>(target) - pc_) / kInstrSize)); +void JumpTableAssembler::EmitFarJumpSlot(Address target) { + // This code uses hard-coded registers and instructions (and avoids + // {UseScratchRegisterScope} or {InstructionAccurateScope}) because this code + // will only be called for the very specific runtime slot table, and we want + // to have maximum control over the generated code. + // Do not reuse this code without validating that the same assumptions hold. + constexpr Register kTmpReg = x16; + DCHECK(TmpList()->IncludesAliasOf(kTmpReg)); + // Load from [pc + 2 * kInstrSize] to {kTmpReg}, then branch there. + ldr_pcrel(kTmpReg, 2); // 1 instruction + br(kTmpReg); // 1 instruction + dq(target); // 8 bytes (== 2 instructions) + STATIC_ASSERT(2 * kInstrSize == kSystemPointerSize); + STATIC_ASSERT(kFarJumpTableSlotSize == 4 * kInstrSize); +} - Jump(target, RelocInfo::NONE); +// static +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + // The slot needs to be pointer-size aligned so we can atomically update it. + DCHECK(IsAligned(slot, kSystemPointerSize)); + // Offset of the target is at 8 bytes, see {EmitFarJumpSlot}. + reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize) + ->store(target, std::memory_order_relaxed); + // The data update is guaranteed to be atomic since it's a properly aligned + // and stores a single machine word. This update will eventually be observed + // by any concurrent [ldr] on the same address because of the data cache + // coherence. It's ok if other cores temporarily jump to the old target. 
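The far-jump-slot patching shown above depends only on the slot's embedded data word being pointer-size aligned; a standalone sketch of that atomic-update idea (names and types are illustrative, not V8's helpers):

#include <atomic>
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

// Patch the embedded target word of a far-jump slot. `slot_data` is the
// address of that word (slot start + pointer size in the layouts above) and
// must be pointer-size aligned, so the store is one atomic machine-word
// write: concurrent executors read either the old or the new target, never a
// torn value.
void PatchFarJumpTarget(Address slot_data, Address new_target) {
  assert(slot_data % sizeof(Address) == 0);
  reinterpret_cast<std::atomic<Address>*>(slot_data)
      ->store(new_target, std::memory_order_relaxed);
}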
} void JumpTableAssembler::NopBytes(int bytes) { @@ -138,13 +198,19 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, b(r1); // 2 bytes } -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - JumpToInstructionStream(builtin_target); -} - -void JumpTableAssembler::EmitJumpSlot(Address target) { +bool JumpTableAssembler::EmitJumpSlot(Address target) { mov(r1, Operand(target)); b(r1); + return true; +} + +void JumpTableAssembler::EmitFarJumpSlot(Address target) { + JumpToInstructionStream(target); +} + +// static +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + UNREACHABLE(); } void JumpTableAssembler::NopBytes(int bytes) { @@ -168,12 +234,18 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, for (int i = 0; i < nop_bytes; i += kInstrSize) nop(); } -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - JumpToInstructionStream(builtin_target); +bool JumpTableAssembler::EmitJumpSlot(Address target) { + Jump(target, RelocInfo::NONE); + return true; } -void JumpTableAssembler::EmitJumpSlot(Address target) { - Jump(target, RelocInfo::NONE); +void JumpTableAssembler::EmitFarJumpSlot(Address target) { + JumpToInstructionStream(target); +} + +// static +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + UNREACHABLE(); } void JumpTableAssembler::NopBytes(int bytes) { @@ -199,14 +271,20 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, for (int i = 0; i < nop_bytes; i += kInstrSize) nop(); } -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - JumpToInstructionStream(builtin_target); -} - -void JumpTableAssembler::EmitJumpSlot(Address target) { +bool JumpTableAssembler::EmitJumpSlot(Address target) { mov(r0, Operand(target)); mtctr(r0); bctr(); + return true; +} + +void JumpTableAssembler::EmitFarJumpSlot(Address target) { + JumpToInstructionStream(target); +} + +// static +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + UNREACHABLE(); } void JumpTableAssembler::NopBytes(int bytes) { @@ -218,21 +296,7 @@ void JumpTableAssembler::NopBytes(int bytes) { } #else -void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, - Address lazy_compile_target) { - UNIMPLEMENTED(); -} - -void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { - UNIMPLEMENTED(); -} - -void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); } - -void JumpTableAssembler::NopBytes(int bytes) { - DCHECK_LE(0, bytes); - UNIMPLEMENTED(); -} +#error Unknown architecture. #endif } // namespace wasm diff --git a/chromium/v8/src/wasm/jump-table-assembler.h b/chromium/v8/src/wasm/jump-table-assembler.h index 8889c18e9c5..2100e44199f 100644 --- a/chromium/v8/src/wasm/jump-table-assembler.h +++ b/chromium/v8/src/wasm/jump-table-assembler.h @@ -6,7 +6,6 @@ #define V8_WASM_JUMP_TABLE_ASSEMBLER_H_ #include "src/codegen/macro-assembler.h" -#include "src/wasm/wasm-code-manager.h" namespace v8 { namespace internal { @@ -19,9 +18,11 @@ namespace wasm { // // Additionally to this main jump table, there exist special jump tables for // other purposes: -// - the runtime stub table contains one entry per wasm runtime stub (see +// - the far stub table contains one entry per wasm runtime stub (see // {WasmCode::RuntimeStubId}, which jumps to the corresponding embedded -// builtin. 
+// builtin, plus (if {FLAG_wasm_far_jump_table} is enabled and not the full +// address space can be reached via the jump table) one entry per wasm +// function. // - the lazy compile table contains one entry per wasm function which jumps to // the common {WasmCompileLazy} builtin and passes the function index that was // invoked. @@ -73,16 +74,28 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { // Determine the size of a jump table containing the given number of slots. static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) { - // TODO(wasm): Once the {RoundUp} utility handles non-powers of two values, - // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize} return ((slot_count + kJumpTableSlotsPerLine - 1) / kJumpTableSlotsPerLine) * kJumpTableLineSize; } - // Translate a stub slot index to an offset into the continuous jump table. - static uint32_t StubSlotIndexToOffset(uint32_t slot_index) { - return slot_index * kJumpTableStubSlotSize; + // Translate a far jump table index to an offset into the table. + static uint32_t FarJumpSlotIndexToOffset(uint32_t slot_index) { + return slot_index * kFarJumpTableSlotSize; + } + + // Translate a far jump table offset to the index into the table. + static uint32_t FarJumpSlotOffsetToIndex(uint32_t offset) { + DCHECK_EQ(0, offset % kFarJumpTableSlotSize); + return offset / kFarJumpTableSlotSize; + } + + // Determine the size of a far jump table containing the given number of + // slots. + static constexpr uint32_t SizeForNumberOfFarJumpSlots( + int num_runtime_slots, int num_function_slots) { + int num_entries = num_runtime_slots + num_function_slots; + return num_entries * kFarJumpTableSlotSize; } // Translate a slot index to an offset into the lazy compile table. @@ -90,11 +103,6 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { return slot_index * kLazyCompileTableSlotSize; } - // Determine the size of a jump table containing only runtime stub slots. - static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) { - return slot_count * kJumpTableStubSlotSize; - } - // Determine the size of a lazy compile table. static constexpr uint32_t SizeForNumberOfLazyFunctions(uint32_t slot_count) { return slot_count * kLazyCompileTableSlotSize; @@ -115,32 +123,41 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { FlushInstructionCache(base, lazy_compile_table_size); } - static void GenerateRuntimeStubTable(Address base, Address* targets, - int num_stubs) { - uint32_t table_size = num_stubs * kJumpTableStubSlotSize; + static void GenerateFarJumpTable(Address base, Address* stub_targets, + int num_runtime_slots, + int num_function_slots) { + uint32_t table_size = + SizeForNumberOfFarJumpSlots(num_runtime_slots, num_function_slots); // Assume enough space, so the Assembler does not try to grow the buffer. JumpTableAssembler jtasm(base, table_size + 256); int offset = 0; - for (int index = 0; index < num_stubs; ++index) { - DCHECK_EQ(offset, StubSlotIndexToOffset(index)); + for (int index = 0; index < num_runtime_slots + num_function_slots; + ++index) { + DCHECK_EQ(offset, FarJumpSlotIndexToOffset(index)); + // Functions slots initially jump to themselves. They are patched before + // being used. + Address target = + index < num_runtime_slots ? 
stub_targets[index] : base + offset; + jtasm.EmitFarJumpSlot(target); + offset += kFarJumpTableSlotSize; DCHECK_EQ(offset, jtasm.pc_offset()); - jtasm.EmitRuntimeStubSlot(targets[index]); - offset += kJumpTableStubSlotSize; - jtasm.NopBytes(offset - jtasm.pc_offset()); } FlushInstructionCache(base, table_size); } - static void PatchJumpTableSlot(Address base, uint32_t slot_index, - Address new_target, - WasmCode::FlushICache flush_i_cache) { - Address slot = base + JumpSlotIndexToOffset(slot_index); - JumpTableAssembler jtasm(slot); - jtasm.EmitJumpSlot(new_target); - jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset()); - if (flush_i_cache) { - FlushInstructionCache(slot, kJumpTableSlotSize); + static void PatchJumpTableSlot(Address jump_table_slot, + Address far_jump_table_slot, Address target) { + // First, try to patch the jump table slot. + JumpTableAssembler jtasm(jump_table_slot); + if (!jtasm.EmitJumpSlot(target)) { + // If that fails, we need to patch the far jump table slot, and then + // update the jump table slot to jump to this far jump table slot. + DCHECK_NE(kNullAddress, far_jump_table_slot); + JumpTableAssembler::PatchFarJumpSlot(far_jump_table_slot, target); + CHECK(jtasm.EmitJumpSlot(far_jump_table_slot)); } + jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset()); + FlushInstructionCache(jump_table_slot, kJumpTableSlotSize); } private: @@ -157,48 +174,45 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { #if V8_TARGET_ARCH_X64 static constexpr int kJumpTableLineSize = 64; static constexpr int kJumpTableSlotSize = 5; + static constexpr int kFarJumpTableSlotSize = 16; static constexpr int kLazyCompileTableSlotSize = 10; - static constexpr int kJumpTableStubSlotSize = 18; #elif V8_TARGET_ARCH_IA32 static constexpr int kJumpTableLineSize = 64; static constexpr int kJumpTableSlotSize = 5; + static constexpr int kFarJumpTableSlotSize = 5; static constexpr int kLazyCompileTableSlotSize = 10; - static constexpr int kJumpTableStubSlotSize = 10; #elif V8_TARGET_ARCH_ARM static constexpr int kJumpTableLineSize = 3 * kInstrSize; static constexpr int kJumpTableSlotSize = 3 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 2 * kInstrSize; static constexpr int kLazyCompileTableSlotSize = 5 * kInstrSize; - static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize; #elif V8_TARGET_ARCH_ARM64 static constexpr int kJumpTableLineSize = 1 * kInstrSize; static constexpr int kJumpTableSlotSize = 1 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize; static constexpr int kLazyCompileTableSlotSize = 3 * kInstrSize; - static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize; #elif V8_TARGET_ARCH_S390X static constexpr int kJumpTableLineSize = 128; static constexpr int kJumpTableSlotSize = 14; + static constexpr int kFarJumpTableSlotSize = 14; static constexpr int kLazyCompileTableSlotSize = 20; - static constexpr int kJumpTableStubSlotSize = 14; #elif V8_TARGET_ARCH_PPC64 static constexpr int kJumpTableLineSize = 64; static constexpr int kJumpTableSlotSize = 7 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 7 * kInstrSize; static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize; - static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize; #elif V8_TARGET_ARCH_MIPS static constexpr int kJumpTableLineSize = 6 * kInstrSize; static constexpr int kJumpTableSlotSize = 4 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize; static constexpr int kLazyCompileTableSlotSize = 6 * kInstrSize; - 
static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize; #elif V8_TARGET_ARCH_MIPS64 static constexpr int kJumpTableLineSize = 8 * kInstrSize; static constexpr int kJumpTableSlotSize = 6 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize; static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; - static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize; #else - static constexpr int kJumpTableLineSize = 1; - static constexpr int kJumpTableSlotSize = 1; - static constexpr int kLazyCompileTableSlotSize = 1; - static constexpr int kJumpTableStubSlotSize = 1; +#error Unknown architecture. #endif static constexpr int kJumpTableSlotsPerLine = @@ -218,9 +232,15 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { void EmitLazyCompileJumpSlot(uint32_t func_index, Address lazy_compile_target); - void EmitRuntimeStubSlot(Address builtin_target); + // Returns {true} if the jump fits in the jump table slot, {false} otherwise. + bool EmitJumpSlot(Address target); + + // Initially emit a far jump slot. + void EmitFarJumpSlot(Address target); - void EmitJumpSlot(Address target); + // Patch an existing far jump slot, and make sure that this updated eventually + // becomes available to all execution units that might execute this code. + static void PatchFarJumpSlot(Address slot, Address target); void NopBytes(int bytes); }; diff --git a/chromium/v8/src/wasm/memory-tracing.cc b/chromium/v8/src/wasm/memory-tracing.cc index b11a557195d..300c7afcf9e 100644 --- a/chromium/v8/src/wasm/memory-tracing.cc +++ b/chromium/v8/src/wasm/memory-tracing.cc @@ -16,7 +16,7 @@ namespace wasm { void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info, int func_index, int position, uint8_t* mem_start) { - EmbeddedVector<char, 64> value; + EmbeddedVector<char, 91> value; auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep); switch (mem_rep) { #define TRACE_TYPE(rep, str, format, ctype1, ctype2) \ @@ -34,6 +34,25 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info, TRACE_TYPE(kFloat32, "f32", "%f / %08x", float, uint32_t) TRACE_TYPE(kFloat64, "f64", "%f / %016" PRIx64, double, uint64_t) #undef TRACE_TYPE + case MachineRepresentation::kSimd128: + SNPrintF(value, "s128:%d %d %d %d / %08x %08x %08x %08x", + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address), + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address + 4), + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address + 8), + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address + 12), + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address), + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address + 4), + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address + 8), + base::ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(mem_start) + info->address + 12)); + break; default: SNPrintF(value, "???"); } diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc index c264bac96e8..9e08f8d1090 100644 --- a/chromium/v8/src/wasm/module-compiler.cc +++ b/chromium/v8/src/wasm/module-compiler.cc @@ -14,7 +14,6 @@ #include "src/base/platform/mutex.h" #include "src/base/platform/semaphore.h" #include "src/base/platform/time.h" -#include 
"src/base/template-utils.h" #include "src/base/utils/random-number-generator.h" #include "src/compiler/wasm-compiler.h" #include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope. @@ -31,7 +30,6 @@ #include "src/wasm/wasm-import-wrapper-cache.h" #include "src/wasm/wasm-js.h" #include "src/wasm/wasm-limits.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-opcodes.h" #include "src/wasm/wasm-result.h" @@ -966,6 +964,10 @@ bool ExecuteJSToWasmWrapperCompilationUnits( return true; } +bool NeedsDeterministicCompile() { + return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1; +} + // Run by the main thread and background tasks to take part in compilation. // Returns whether any units were executed. bool ExecuteCompilationUnits( @@ -993,6 +995,7 @@ bool ExecuteCompilationUnits( // These fields are initialized in a {BackgroundCompileScope} before // starting compilation. double deadline = 0; + const bool deterministic = NeedsDeterministicCompile(); base::Optional<CompilationEnv> env; std::shared_ptr<WireBytesStorage> wire_bytes; std::shared_ptr<const WasmModule> module; @@ -1086,7 +1089,7 @@ bool ExecuteCompilationUnits( } // Get next unit. - if (deadline < platform->MonotonicallyIncreasingTime()) { + if (deterministic || deadline < platform->MonotonicallyIncreasingTime()) { unit = {}; } else { unit = compile_scope.compilation_state()->GetNextCompilationUnit( @@ -1198,10 +1201,6 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) { builder.Commit(); } -bool NeedsDeterministicCompile() { - return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1; -} - bool MayCompriseLazyFunctions(const WasmModule* module, const WasmFeatures& enabled_features, bool lazy_module) { @@ -1372,7 +1371,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule( auto native_module = isolate->wasm_engine()->NewNativeModule( isolate, enabled, std::move(module)); native_module->SetWireBytes(std::move(wire_bytes_copy)); - native_module->SetRuntimeStubs(isolate); CompileNativeModule(isolate, thrower, wasm_module, native_module.get()); if (thrower->error()) return {}; @@ -1467,7 +1465,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor { std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() { DCHECK_NULL(stream_); stream_.reset( - new StreamingDecoder(base::make_unique<AsyncStreamingProcessor>(this))); + new StreamingDecoder(std::make_unique<AsyncStreamingProcessor>(this))); return stream_; } @@ -1503,7 +1501,7 @@ void AsyncCompileJob::CreateNativeModule( // Create the module object and populate with compiled functions and // information needed at instantiation time. - // TODO(clemensh): For the same module (same bytes / same hash), we should + // TODO(clemensb): For the same module (same bytes / same hash), we should // only have one {WasmModuleObject}. Otherwise, we might only set // breakpoints on a (potentially empty) subset of the instances. // Create the module object. 
@@ -1511,7 +1509,6 @@ void AsyncCompileJob::CreateNativeModule( native_module_ = isolate_->wasm_engine()->NewNativeModule( isolate_, enabled_features_, std::move(module)); native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()}); - native_module_->SetRuntimeStubs(isolate_); if (stream_) stream_->NotifyNativeModuleCreated(native_module_); } @@ -1706,7 +1703,7 @@ class AsyncCompileJob::CompileTask : public CancelableTask { void AsyncCompileJob::StartForegroundTask() { DCHECK_NULL(pending_foreground_task_); - auto new_task = base::make_unique<CompileTask>(this, true); + auto new_task = std::make_unique<CompileTask>(this, true); pending_foreground_task_ = new_task.get(); foreground_task_runner_->PostTask(std::move(new_task)); } @@ -1714,7 +1711,7 @@ void AsyncCompileJob::StartForegroundTask() { void AsyncCompileJob::ExecuteForegroundTaskImmediately() { DCHECK_NULL(pending_foreground_task_); - auto new_task = base::make_unique<CompileTask>(this, true); + auto new_task = std::make_unique<CompileTask>(this, true); pending_foreground_task_ = new_task.get(); new_task->Run(); } @@ -1726,7 +1723,7 @@ void AsyncCompileJob::CancelPendingForegroundTask() { } void AsyncCompileJob::StartBackgroundTask() { - auto task = base::make_unique<CompileTask>(this, false); + auto task = std::make_unique<CompileTask>(this, false); // If --wasm-num-compilation-tasks=0 is passed, do only spawn foreground // tasks. This is used to make timing deterministic. @@ -2209,11 +2206,9 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes, } int GetMaxBackgroundTasks() { - if (NeedsDeterministicCompile()) return 1; + if (NeedsDeterministicCompile()) return 0; int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads(); - int num_compile_tasks = - std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads); - return std::max(1, num_compile_tasks); + return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads); } CompilationStateImpl::CompilationStateImpl( @@ -2227,7 +2222,7 @@ CompilationStateImpl::CompilationStateImpl( ? 
CompileMode::kTiering : CompileMode::kRegular), async_counters_(std::move(async_counters)), - max_background_tasks_(GetMaxBackgroundTasks()), + max_background_tasks_(std::max(GetMaxBackgroundTasks(), 1)), compilation_unit_queues_(max_background_tasks_), available_task_ids_(max_background_tasks_) { for (int i = 0; i < max_background_tasks_; ++i) { @@ -2616,7 +2611,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module, auto& function = module->functions[exp.index]; JSToWasmWrapperKey key(function.imported, *function.sig); if (queue.insert(key)) { - auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>( + auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>( isolate, isolate->wasm_engine(), function.sig, function.imported, enabled_features); compilation_units.emplace(key, std::move(unit)); @@ -2627,7 +2622,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module, CancelableTaskManager task_manager; const int max_background_tasks = GetMaxBackgroundTasks(); for (int i = 0; i < max_background_tasks; ++i) { - auto task = base::make_unique<CompileJSToWasmWrapperTask>( + auto task = std::make_unique<CompileJSToWasmWrapperTask>( &task_manager, &queue, &compilation_units); V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task)); } @@ -2698,12 +2693,21 @@ Handle<Script> CreateWasmScript(Isolate* isolate, const int kBufferSize = 32; char buffer[kBufferSize]; + Handle<String> url_prefix = + isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/")); + int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash); DCHECK(name_chars >= 0 && name_chars < kBufferSize); - MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte( - VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars), - AllocationType::kOld); - script->set_name(*name_str.ToHandleChecked()); + Handle<String> name_str = + isolate->factory() + ->NewStringFromOneByte( + VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars), + AllocationType::kOld) + .ToHandleChecked(); + script->set_name(*name_str); + MaybeHandle<String> url_str = + isolate->factory()->NewConsString(url_prefix, name_str); + script->set_source_url(*url_str.ToHandleChecked()); if (source_map_url.size() != 0) { MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8( diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc index 56712977b18..b89d06b881c 100644 --- a/chromium/v8/src/wasm/module-decoder.cc +++ b/chromium/v8/src/wasm/module-decoder.cc @@ -6,7 +6,6 @@ #include "src/base/functional.h" #include "src/base/platform/platform.h" -#include "src/base/template-utils.h" #include "src/flags/flags.h" #include "src/init/v8.h" #include "src/logging/counters.h" @@ -31,6 +30,7 @@ namespace { constexpr char kNameString[] = "name"; constexpr char kSourceMappingURLString[] = "sourceMappingURL"; constexpr char kCompilationHintsString[] = "compilationHints"; +constexpr char kDebugInfoString[] = ".debug_info"; template <size_t N> constexpr size_t num_chars(const char (&)[N]) { @@ -89,6 +89,8 @@ const char* SectionName(SectionCode code) { return kNameString; case kSourceMappingURLSectionCode: return kSourceMappingURLString; + case kDebugInfoSectionCode: + return kDebugInfoString; case kCompilationHintsSectionCode: return kCompilationHintsString; default: @@ -304,7 +306,7 @@ class ModuleDecoderImpl : public Decoder { CHECK_NULL(module_); SetCounters(counters); module_.reset( - new WasmModule(base::make_unique<Zone>(allocator, 
"signatures"))); + new WasmModule(std::make_unique<Zone>(allocator, "signatures"))); module_->initial_pages = 0; module_->maximum_pages = 0; module_->mem_export = false; @@ -399,6 +401,10 @@ class ModuleDecoderImpl : public Decoder { // sourceMappingURL is a custom section and currently can occur anywhere // in the module. In case of multiple sourceMappingURL sections, all // except the first occurrence are ignored. + case kDebugInfoSectionCode: + // .debug_info is a custom section containing core DWARF information + // if produced by compiler. Its presence likely means that Wasm was + // built in a debug mode. case kCompilationHintsSectionCode: // TODO(frgossen): report out of place compilation hints section as a // warning. @@ -453,6 +459,13 @@ class ModuleDecoderImpl : public Decoder { case kSourceMappingURLSectionCode: DecodeSourceMappingURLSection(); break; + case kDebugInfoSectionCode: + // If there is an explicit source map, prefer it over DWARF info. + if (!has_seen_unordered_section(kSourceMappingURLSectionCode)) { + module_->source_map_url.assign("wasm://dwarf"); + } + consume_bytes(static_cast<uint32_t>(end_ - start_), ".debug_info"); + break; case kCompilationHintsSectionCode: if (enabled_features_.compilation_hints) { DecodeCompilationHintsSection(); @@ -798,9 +811,11 @@ class ModuleDecoderImpl : public Decoder { const byte* pos = pc(); bool is_active; + bool functions_as_elements; uint32_t table_index; WasmInitExpr offset; - consume_segment_header("table index", &is_active, &table_index, &offset); + consume_element_segment_header(&is_active, &functions_as_elements, + &table_index, &offset); if (failed()) return; if (is_active) { @@ -815,12 +830,6 @@ class ModuleDecoderImpl : public Decoder { table_index); break; } - } else { - ValueType type = consume_reference_type(); - if (!ValueTypes::IsSubType(kWasmFuncRef, type)) { - error(pc_ - 1, "invalid element segment type"); - break; - } } uint32_t num_elem = @@ -833,8 +842,8 @@ class ModuleDecoderImpl : public Decoder { WasmElemSegment* init = &module_->elem_segments.back(); for (uint32_t j = 0; j < num_elem; j++) { - uint32_t index = is_active ? consume_element_func_index() - : consume_passive_element(); + uint32_t index = functions_as_elements ? 
consume_element_expr() + : consume_element_func_index(); if (failed()) break; init->entries.push_back(index); } @@ -911,8 +920,7 @@ class ModuleDecoderImpl : public Decoder { bool is_active; uint32_t memory_index; WasmInitExpr dest_addr; - consume_segment_header("memory index", &is_active, &memory_index, - &dest_addr); + consume_data_segment_header(&is_active, &memory_index, &dest_addr); if (failed()) break; if (is_active && memory_index != 0) { @@ -1483,7 +1491,7 @@ class ModuleDecoderImpl : public Decoder { WasmInitExpr expr; uint32_t len = 0; switch (opcode) { - case kExprGetGlobal: { + case kExprGlobalGet: { GlobalIndexImmediate<Decoder::kValidate> imm(this, pc() - 1); if (module->globals.size() <= imm.index) { error("global index is out of bounds"); @@ -1544,6 +1552,10 @@ class ModuleDecoderImpl : public Decoder { case kExprRefFunc: { if (enabled_features_.anyref) { FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() - 1); + if (module->functions.size() <= imm.index) { + errorf(pc() - 1, "invalid function index: %u", imm.index); + break; + } expr.kind = WasmInitExpr::kRefFuncConst; expr.val.function_index = imm.index; len = imm.length; @@ -1678,8 +1690,103 @@ class ModuleDecoderImpl : public Decoder { return attribute; } - void consume_segment_header(const char* name, bool* is_active, - uint32_t* index, WasmInitExpr* offset) { + void consume_element_segment_header(bool* is_active, + bool* functions_as_elements, + uint32_t* table_index, + WasmInitExpr* offset) { + const byte* pos = pc(); + uint8_t flag; + if (enabled_features_.bulk_memory || enabled_features_.anyref) { + flag = consume_u8("flag"); + } else { + uint32_t table_index = consume_u32v("table index"); + // The only valid flag value without bulk_memory or anyref is '0'. + if (table_index != 0) { + error( + "Element segments with table indices require " + "--experimental-wasm-bulk-memory or --experimental-wasm-anyref"); + return; + } + flag = 0; + } + + // The mask for the bit in the flag which indicates if the segment is + // active or not. + constexpr uint8_t kIsPassiveMask = 0x01; + // The mask for the bit in the flag which indicates if the segment has an + // explicit table index field. + constexpr uint8_t kHasTableIndexMask = 0x02; + // The mask for the bit in the flag which indicates if the functions of this + // segment are defined as function indices (=0) or elements(=1). + constexpr uint8_t kFunctionsAsElementsMask = 0x04; + constexpr uint8_t kFullMask = + kIsPassiveMask | kHasTableIndexMask | kFunctionsAsElementsMask; + + bool is_passive = flag & kIsPassiveMask; + *is_active = !is_passive; + *functions_as_elements = flag & kFunctionsAsElementsMask; + bool has_table_index = flag & kHasTableIndexMask; + + if (is_passive && !enabled_features_.bulk_memory) { + error("Passive element segments require --experimental-wasm-bulk-memory"); + return; + } + if (*functions_as_elements && !enabled_features_.bulk_memory) { + error( + "Illegal segment flag. Did you forget " + "--experimental-wasm-bulk-memory?"); + return; + } + if (flag != 0 && !enabled_features_.bulk_memory && + !enabled_features_.anyref) { + error( + "Invalid segment flag. Did you forget " + "--experimental-wasm-bulk-memory or --experimental-wasm-anyref?"); + return; + } + if ((flag & kFullMask) != flag || (!(*is_active) && has_table_index)) { + errorf(pos, "illegal flag value %u. 
Must be 0, 1, 2, 4, 5 or 6", flag); + } + + if (has_table_index) { + *table_index = consume_u32v("table index"); + } else { + *table_index = 0; + } + + if (*is_active) { + *offset = consume_init_expr(module_.get(), kWasmI32); + } + + if (*is_active && !has_table_index) { + // Active segments without table indices are a special case for backwards + // compatibility. These cases have an implicit element kind or element + // type, so we are done already with the segment header. + return; + } + + if (*functions_as_elements) { + // We have to check that there is an element type of type FuncRef. All + // other element types are not valid yet. + ValueType type = consume_reference_type(); + if (!ValueTypes::IsSubType(kWasmFuncRef, type)) { + error(pc_ - 1, "invalid element segment type"); + return; + } + } else { + // We have to check that there is an element kind of type Function. All + // other element kinds are not valid yet. + uint8_t val = consume_u8("element kind"); + ImportExportKindCode kind = static_cast<ImportExportKindCode>(val); + if (kind != kExternalFunction) { + errorf(pos, "illegal element kind %x. Must be 0x00", val); + return; + } + } + } + + void consume_data_segment_header(bool* is_active, uint32_t* index, + WasmInitExpr* offset) { const byte* pos = pc(); uint32_t flag = consume_u32v("flag"); @@ -1715,7 +1822,7 @@ class ModuleDecoderImpl : public Decoder { } if (flag == SegmentFlags::kActiveWithIndex) { *is_active = true; - *index = consume_u32v(name); + *index = consume_u32v("memory index"); *offset = consume_init_expr(module_.get(), kWasmI32); } } @@ -1731,7 +1838,7 @@ class ModuleDecoderImpl : public Decoder { return index; } - uint32_t consume_passive_element() { + uint32_t consume_element_expr() { uint32_t index = WasmElemSegment::kNullIndex; uint8_t opcode = consume_u8("element opcode"); if (failed()) return index; @@ -1857,6 +1964,10 @@ SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder* decoder, kCompilationHintsString, num_chars(kCompilationHintsString)) == 0) { return kCompilationHintsSectionCode; + } else if (string.length() == num_chars(kDebugInfoString) && + strncmp(reinterpret_cast<const char*>(section_name_start), + kDebugInfoString, num_chars(kDebugInfoString)) == 0) { + return kDebugInfoSectionCode; } return kUnknownSectionCode; } @@ -1895,7 +2006,7 @@ FunctionResult DecodeWasmFunctionForTesting( ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin); decoder.SetCounters(counters); return decoder.DecodeSingleFunction(zone, wire_bytes, module, - base::make_unique<WasmFunction>()); + std::make_unique<WasmFunction>()); } AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start, diff --git a/chromium/v8/src/wasm/module-decoder.h b/chromium/v8/src/wasm/module-decoder.h index 8e121c9d306..5ee324b109e 100644 --- a/chromium/v8/src/wasm/module-decoder.h +++ b/chromium/v8/src/wasm/module-decoder.h @@ -5,6 +5,8 @@ #ifndef V8_WASM_MODULE_DECODER_H_ #define V8_WASM_MODULE_DECODER_H_ +#include <memory> + #include "src/common/globals.h" #include "src/wasm/function-body-decoder.h" #include "src/wasm/wasm-constants.h" diff --git a/chromium/v8/src/wasm/module-instantiate.cc b/chromium/v8/src/wasm/module-instantiate.cc index 976c3cde001..95d892ab505 100644 --- a/chromium/v8/src/wasm/module-instantiate.cc +++ b/chromium/v8/src/wasm/module-instantiate.cc @@ -93,7 +93,7 @@ class InstanceBuilder { InstanceBuilder(Isolate* isolate, ErrorThrower* thrower, Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi, - 
MaybeHandle<JSArrayBuffer> memory); + MaybeHandle<JSArrayBuffer> memory_buffer); // Build an instance, in all of its glory. MaybeHandle<WasmInstanceObject> Build(); @@ -114,7 +114,8 @@ class InstanceBuilder { ErrorThrower* thrower_; Handle<WasmModuleObject> module_object_; MaybeHandle<JSReceiver> ffi_; - MaybeHandle<JSArrayBuffer> memory_; + MaybeHandle<JSArrayBuffer> memory_buffer_; + Handle<WasmMemoryObject> memory_object_; Handle<JSArrayBuffer> untagged_globals_; Handle<FixedArray> tagged_globals_; std::vector<Handle<WasmExceptionObject>> exception_wrappers_; @@ -165,9 +166,11 @@ class InstanceBuilder { void SanitizeImports(); - // Find the imported memory buffer if there is one. This is used to see if we - // need to recompile with bounds checks before creating the instance. - MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const; + // Find the imported memory if there is one. + bool FindImportedMemory(); + + // Allocate the memory. + bool AllocateMemory(); // Processes a single imported function. bool ProcessImportedFunction(Handle<WasmInstanceObject> instance, @@ -221,9 +224,6 @@ class InstanceBuilder { // Process initialization of globals. void InitGlobals(Handle<WasmInstanceObject> instance); - // Allocate memory for a module instance as a new JSArrayBuffer. - Handle<JSArrayBuffer> AllocateMemory(uint32_t initial_pages, - uint32_t maximum_pages); bool NeedsWrappers() const; @@ -243,8 +243,9 @@ class InstanceBuilder { MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject( Isolate* isolate, ErrorThrower* thrower, Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports, - MaybeHandle<JSArrayBuffer> memory) { - InstanceBuilder builder(isolate, thrower, module_object, imports, memory); + MaybeHandle<JSArrayBuffer> memory_buffer) { + InstanceBuilder builder(isolate, thrower, module_object, imports, + memory_buffer); auto instance = builder.Build(); if (!instance.is_null() && builder.ExecuteStartFunction()) { return instance; @@ -256,14 +257,14 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject( InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower, Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi, - MaybeHandle<JSArrayBuffer> memory) + MaybeHandle<JSArrayBuffer> memory_buffer) : isolate_(isolate), enabled_(module_object->native_module()->enabled_features()), module_(module_object->module()), thrower_(thrower), module_object_(module_object), ffi_(ffi), - memory_(memory) { + memory_buffer_(memory_buffer) { sanitized_imports_.reserve(module_->import_table.size()); } @@ -289,7 +290,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { NativeModule* native_module = module_object_->native_module(); //-------------------------------------------------------------------------- - // Allocate the memory array buffer. + // Set up the memory buffer and memory objects. //-------------------------------------------------------------------------- uint32_t initial_pages = module_->initial_pages; auto initial_pages_counter = SELECT_WASM_COUNTER( @@ -301,29 +302,41 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { isolate_->counters()->wasm_wasm_max_mem_pages_count(); max_pages_counter->AddSample(module_->maximum_pages); } - // Asm.js has memory_ already set at this point, so we don't want to - // overwrite it. - if (memory_.is_null()) { - memory_ = FindImportedMemoryBuffer(); - } - if (!memory_.is_null()) { - // Set externally passed ArrayBuffer non detachable. 
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked(); - memory->set_is_detachable(false); - - DCHECK_IMPLIES(native_module->use_trap_handler(), - is_asmjs_module(module_) || memory->is_wasm_memory() || - memory->backing_store() == nullptr); - } else if (initial_pages > 0 || native_module->use_trap_handler()) { - // We need to unconditionally create a guard region if using trap handlers, - // even when the size is zero to prevent null-dereference issues - // (e.g. https://crbug.com/769637). - // Allocate memory if the initial size is more than 0 pages. - memory_ = AllocateMemory(initial_pages, module_->maximum_pages); - if (memory_.is_null()) { - // failed to allocate memory - DCHECK(isolate_->has_pending_exception() || thrower_->error()); - return {}; + + if (is_asmjs_module(module_)) { + Handle<JSArrayBuffer> buffer; + if (memory_buffer_.ToHandle(&buffer)) { + // asm.js instantiation should have changed the state of the buffer. + CHECK(!buffer->is_detachable()); + CHECK(buffer->is_asmjs_memory()); + } else { + // Use an empty JSArrayBuffer for degenerate asm.js modules. + memory_buffer_ = isolate_->factory()->NewJSArrayBufferAndBackingStore( + 0, InitializedFlag::kUninitialized); + if (!memory_buffer_.ToHandle(&buffer)) { + thrower_->RangeError("Out of memory: asm.js memory"); + return {}; + } + buffer->set_is_asmjs_memory(true); + buffer->set_is_detachable(false); + } + + // The maximum number of pages isn't strictly necessary for memory + // objects used for asm.js, as they are never visible, but we might + // as well make it accurate. + auto maximum_pages = static_cast<uint32_t>( + RoundUp(buffer->byte_length(), wasm::kWasmPageSize) / + wasm::kWasmPageSize); + memory_object_ = + WasmMemoryObject::New(isolate_, memory_buffer_, maximum_pages); + } else { + // Actual wasm module must have either imported or created memory. + CHECK(memory_buffer_.is_null()); + if (!FindImportedMemory()) { + if (module_->has_memory && !AllocateMemory()) { + DCHECK(isolate_->has_pending_exception() || thrower_->error()); + return {}; + } } } @@ -333,33 +346,42 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { TRACE("New module instantiation for %p\n", native_module); Handle<WasmInstanceObject> instance = WasmInstanceObject::New(isolate_, module_object_); - NativeModuleModificationScope native_modification_scope(native_module); + + //-------------------------------------------------------------------------- + // Attach the memory to the instance. + //-------------------------------------------------------------------------- + if (module_->has_memory) { + DCHECK(!memory_object_.is_null()); + if (!instance->has_memory_object()) { + instance->set_memory_object(*memory_object_); + } + // Add the instance object to the list of instances for this memory. + WasmMemoryObject::AddInstance(isolate_, memory_object_, instance); + + // Double-check the {memory} array buffer matches the instance. + Handle<JSArrayBuffer> memory = memory_buffer_.ToHandleChecked(); + CHECK_EQ(instance->memory_size(), memory->byte_length()); + CHECK_EQ(instance->memory_start(), memory->backing_store()); + } //-------------------------------------------------------------------------- // Set up the globals for the new instance. 
//-------------------------------------------------------------------------- uint32_t untagged_globals_buffer_size = module_->untagged_globals_buffer_size; if (untagged_globals_buffer_size > 0) { - void* backing_store = isolate_->array_buffer_allocator()->Allocate( - untagged_globals_buffer_size); - if (backing_store == nullptr) { - thrower_->RangeError("Out of memory: wasm globals"); - return {}; - } - untagged_globals_ = isolate_->factory()->NewJSArrayBuffer( - SharedFlag::kNotShared, AllocationType::kOld); - constexpr bool is_external = false; - constexpr bool is_wasm_memory = false; - JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external, - backing_store, untagged_globals_buffer_size, - SharedFlag::kNotShared, is_wasm_memory); - if (untagged_globals_.is_null()) { + MaybeHandle<JSArrayBuffer> result = + isolate_->factory()->NewJSArrayBufferAndBackingStore( + untagged_globals_buffer_size, InitializedFlag::kZeroInitialized, + AllocationType::kOld); + + if (!result.ToHandle(&untagged_globals_)) { thrower_->RangeError("Out of memory: wasm globals"); return {}; } + + instance->set_untagged_globals_buffer(*untagged_globals_); instance->set_globals_start( reinterpret_cast<byte*>(untagged_globals_->backing_store())); - instance->set_untagged_globals_buffer(*untagged_globals_); } uint32_t tagged_globals_buffer_size = module_->tagged_globals_buffer_size; @@ -425,6 +447,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { instance->set_indirect_function_tables(*tables); } + NativeModuleModificationScope native_modification_scope(native_module); + //-------------------------------------------------------------------------- // Process the imports for the module. //-------------------------------------------------------------------------- @@ -450,30 +474,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { InitializeExceptions(instance); } - //-------------------------------------------------------------------------- - // Create the WebAssembly.Memory object. - //-------------------------------------------------------------------------- - if (module_->has_memory) { - if (!instance->has_memory_object()) { - // No memory object exists. Create one. - Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New( - isolate_, memory_, - module_->maximum_pages != 0 ? module_->maximum_pages : -1); - instance->set_memory_object(*memory_object); - } - - // Add the instance object to the list of instances for this memory. - Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_); - WasmMemoryObject::AddInstance(isolate_, memory_object, instance); - - if (!memory_.is_null()) { - // Double-check the {memory} array buffer matches the instance. - Handle<JSArrayBuffer> memory = memory_.ToHandleChecked(); - CHECK_EQ(instance->memory_size(), memory->byte_length()); - CHECK_EQ(instance->memory_start(), memory->backing_store()); - } - } - // The bulk memory proposal changes the MVP behavior here; the segments are // written as if `memory.init` and `table.init` are executed directly, and // not bounds checked ahead of time. @@ -536,7 +536,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { // Debugging support. //-------------------------------------------------------------------------- // Set all breakpoints that were set on the shared module. 
- WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance); + WasmModuleObject::SetBreakpointsOnNewInstance( + handle(module_object_->script(), isolate_), instance); //-------------------------------------------------------------------------- // Create a wrapper for the start function. @@ -547,7 +548,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { Handle<Code> wrapper_code = JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper( isolate_, function.sig, function.imported); - // TODO(clemensh): Don't generate an exported function for the start + // TODO(clemensb): Don't generate an exported function for the start // function. Use CWasmEntry instead. start_function_ = WasmExportedFunction::New( isolate_, instance, start_index, @@ -807,22 +808,21 @@ void InstanceBuilder::SanitizeImports() { } } -MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const { +bool InstanceBuilder::FindImportedMemory() { DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size()); for (size_t index = 0; index < module_->import_table.size(); index++) { - const WasmImport& import = module_->import_table[index]; + WasmImport import = module_->import_table[index]; if (import.kind == kExternalMemory) { - const auto& value = sanitized_imports_[index].value; - if (!value->IsWasmMemoryObject()) { - return {}; - } - auto memory = Handle<WasmMemoryObject>::cast(value); - Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_); - return buffer; + auto& value = sanitized_imports_[index].value; + if (!value->IsWasmMemoryObject()) return false; + memory_object_ = Handle<WasmMemoryObject>::cast(value); + memory_buffer_ = + Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_); + return true; } } - return {}; + return false; } bool InstanceBuilder::ProcessImportedFunction( @@ -1016,19 +1016,19 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance, Handle<String> module_name, Handle<String> import_name, Handle<Object> value) { - // Validation should have failed if more than one memory object was - // provided. - DCHECK(!instance->has_memory_object()); if (!value->IsWasmMemoryObject()) { ReportLinkError("memory import must be a WebAssembly.Memory object", import_index, module_name, import_name); return false; } - auto memory = Handle<WasmMemoryObject>::cast(value); - instance->set_memory_object(*memory); - Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_); + auto memory_object = Handle<WasmMemoryObject>::cast(value); + + // The imported memory should have been already set up early. + CHECK_EQ(instance->memory_object(), *memory_object); + + Handle<JSArrayBuffer> buffer(memory_object_->array_buffer(), isolate_); // memory_ should have already been assigned in Build(). 
- DCHECK_EQ(*memory_.ToHandleChecked(), *buffer); + DCHECK_EQ(*memory_buffer_.ToHandleChecked(), *buffer); uint32_t imported_cur_pages = static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize); if (imported_cur_pages < module_->initial_pages) { @@ -1037,7 +1037,7 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance, imported_cur_pages); return false; } - int32_t imported_maximum_pages = memory->maximum_pages(); + int32_t imported_maximum_pages = memory_object_->maximum_pages(); if (module_->has_maximum_pages) { if (imported_maximum_pages < 0) { thrower_->LinkError( @@ -1186,13 +1186,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance, return true; } - if (enabled_.bigint && global.type == kWasmI64) { - Handle<BigInt> bigint; - - if (!BigInt::FromObject(isolate_, value).ToHandle(&bigint)) { - return false; - } - WriteGlobalValue(global, bigint->AsInt64()); + if (enabled_.bigint && global.type == kWasmI64 && value->IsBigInt()) { + WriteGlobalValue(global, BigInt::cast(*value).AsInt64()); return true; } @@ -1241,7 +1236,7 @@ void InstanceBuilder::CompileImportWrappers( CancelableTaskManager task_manager; const int max_background_tasks = GetMaxBackgroundTasks(); for (int i = 0; i < max_background_tasks; ++i) { - auto task = base::make_unique<CompileImportWrapperTask>( + auto task = std::make_unique<CompileImportWrapperTask>( &task_manager, isolate_->wasm_engine(), isolate_->counters(), native_module, &import_wrapper_queue, &cache_scope); V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task)); @@ -1411,27 +1406,28 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) { } // Allocate memory for a module instance as a new JSArrayBuffer. -Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages, - uint32_t maximum_pages) { +bool InstanceBuilder::AllocateMemory() { + auto initial_pages = module_->initial_pages; + auto maximum_pages = module_->has_maximum_pages ? module_->maximum_pages + : wasm::max_mem_pages(); if (initial_pages > max_mem_pages()) { thrower_->RangeError("Out of memory: wasm memory too large"); - return Handle<JSArrayBuffer>::null(); - } - const bool is_shared_memory = module_->has_shared_memory && enabled_.threads; - Handle<JSArrayBuffer> mem_buffer; - if (is_shared_memory) { - if (!NewSharedArrayBuffer(isolate_, initial_pages * kWasmPageSize, - maximum_pages * kWasmPageSize) - .ToHandle(&mem_buffer)) { - thrower_->RangeError("Out of memory: wasm shared memory"); - } - } else { - if (!NewArrayBuffer(isolate_, initial_pages * kWasmPageSize) - .ToHandle(&mem_buffer)) { - thrower_->RangeError("Out of memory: wasm memory"); - } + return false; } - return mem_buffer; + auto shared = (module_->has_shared_memory && enabled_.threads) + ? 
SharedFlag::kShared + : SharedFlag::kNotShared; + + MaybeHandle<WasmMemoryObject> result = + WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared); + + if (!result.ToHandle(&memory_object_)) { + thrower_->RangeError("Out of memory: wasm memory"); + return false; + } + memory_buffer_ = + Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_); + return true; } bool InstanceBuilder::NeedsWrappers() const { diff --git a/chromium/v8/src/wasm/streaming-decoder.cc b/chromium/v8/src/wasm/streaming-decoder.cc index 94945ea58a8..37aaf056056 100644 --- a/chromium/v8/src/wasm/streaming-decoder.cc +++ b/chromium/v8/src/wasm/streaming-decoder.cc @@ -4,7 +4,6 @@ #include "src/wasm/streaming-decoder.h" -#include "src/base/template-utils.h" #include "src/handles/handles.h" #include "src/objects/descriptor-array.h" #include "src/objects/dictionary.h" @@ -364,14 +363,14 @@ StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) { TRACE_STREAMING("DecodeModuleHeader\n"); streaming->ProcessModuleHeader(); if (!streaming->ok()) return nullptr; - return base::make_unique<DecodeSectionID>(streaming->module_offset()); + return std::make_unique<DecodeSectionID>(streaming->module_offset()); } std::unique_ptr<StreamingDecoder::DecodingState> StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) { TRACE_STREAMING("DecodeSectionID: %s section\n", SectionName(static_cast<SectionCode>(id_))); - return base::make_unique<DecodeSectionLength>(id_, module_offset_); + return std::make_unique<DecodeSectionLength>(id_, module_offset_); } std::unique_ptr<StreamingDecoder::DecodingState> @@ -391,7 +390,7 @@ StreamingDecoder::DecodeSectionLength::NextWithValue( streaming->ProcessSection(buf); if (!streaming->ok()) return nullptr; // There is no payload, we go to the next section immediately. - return base::make_unique<DecodeSectionID>(streaming->module_offset_); + return std::make_unique<DecodeSectionID>(streaming->module_offset_); } else { if (section_id_ == SectionCode::kCodeSectionCode) { // Explicitly check for multiple code sections as module decoder never @@ -404,9 +403,9 @@ StreamingDecoder::DecodeSectionLength::NextWithValue( streaming->code_section_processed_ = true; // We reached the code section. All functions of the code section are put // into the same SectionBuffer. 
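The streaming decoder touched above is a chain of small state objects, each returning the next state from Next(). A minimal sketch of that unique_ptr-based state-machine pattern (illustrative classes only, not the actual StreamingDecoder states):

#include <cstdio>
#include <memory>

// Each decoding step is one state object; Next() consumes the bytes it is
// responsible for and hands ownership of the follow-up state back to the
// driver loop, which stops when a state returns nullptr (done or error).
struct DecodingState {
  virtual ~DecodingState() = default;
  virtual std::unique_ptr<DecodingState> Next() = 0;
};

struct DecodeSectionPayload final : DecodingState {
  std::unique_ptr<DecodingState> Next() override { return nullptr; }
};

struct DecodeSectionID final : DecodingState {
  std::unique_ptr<DecodingState> Next() override {
    return std::make_unique<DecodeSectionPayload>();
  }
};

int main() {
  std::unique_ptr<DecodingState> state = std::make_unique<DecodeSectionID>();
  while (state) state = state->Next();  // run the state machine to completion
  std::puts("decoding finished");
}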
- return base::make_unique<DecodeNumberOfFunctions>(buf); + return std::make_unique<DecodeNumberOfFunctions>(buf); } - return base::make_unique<DecodeSectionPayload>(buf); + return std::make_unique<DecodeSectionPayload>(buf); } } @@ -415,7 +414,7 @@ StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) { TRACE_STREAMING("DecodeSectionPayload\n"); streaming->ProcessSection(section_buffer_); if (!streaming->ok()) return nullptr; - return base::make_unique<DecodeSectionID>(streaming->module_offset()); + return std::make_unique<DecodeSectionID>(streaming->module_offset()); } std::unique_ptr<StreamingDecoder::DecodingState> @@ -434,14 +433,14 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue( if (payload_buf.size() != bytes_consumed_) { return streaming->Error("not all code section bytes were used"); } - return base::make_unique<DecodeSectionID>(streaming->module_offset()); + return std::make_unique<DecodeSectionID>(streaming->module_offset()); } DCHECK_GE(kMaxInt, value_); streaming->StartCodeSection(static_cast<int>(value_), streaming->section_buffers_.back()); if (!streaming->ok()) return nullptr; - return base::make_unique<DecodeFunctionLength>( + return std::make_unique<DecodeFunctionLength>( section_buffer_, section_buffer_->payload_offset() + bytes_consumed_, value_); } @@ -464,7 +463,7 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue( return streaming->Error("not enough code section bytes"); } - return base::make_unique<DecodeFunctionBody>( + return std::make_unique<DecodeFunctionBody>( section_buffer_, buffer_offset_ + bytes_consumed_, value_, num_remaining_functions_, streaming->module_offset()); } @@ -477,14 +476,14 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) { size_t end_offset = buffer_offset_ + function_body_length_; if (num_remaining_functions_ > 0) { - return base::make_unique<DecodeFunctionLength>(section_buffer_, end_offset, - num_remaining_functions_); + return std::make_unique<DecodeFunctionLength>(section_buffer_, end_offset, + num_remaining_functions_); } // We just read the last function body. Continue with the next section. if (end_offset != section_buffer_->length()) { return streaming->Error("not all code section bytes were used"); } - return base::make_unique<DecodeSectionID>(streaming->module_offset()); + return std::make_unique<DecodeSectionID>(streaming->module_offset()); } StreamingDecoder::StreamingDecoder( diff --git a/chromium/v8/src/wasm/value-type.h b/chromium/v8/src/wasm/value-type.h index bca5c2b941c..49f348b714a 100644 --- a/chromium/v8/src/wasm/value-type.h +++ b/chromium/v8/src/wasm/value-type.h @@ -44,7 +44,7 @@ using FunctionSig = Signature<ValueType>; inline size_t hash_value(ValueType type) { return static_cast<size_t>(type); } -// TODO(clemensh): Compute memtype and size from ValueType once we have c++14 +// TODO(clemensb): Compute memtype and size from ValueType once we have c++14 // constexpr support. 
#define FOREACH_LOAD_TYPE(V) \ V(I32, , Int32, 2) \ diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc index 91cfc01ceae..55695259f01 100644 --- a/chromium/v8/src/wasm/wasm-code-manager.cc +++ b/chromium/v8/src/wasm/wasm-code-manager.cc @@ -6,7 +6,7 @@ #include <iomanip> -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/base/macros.h" #include "src/base/platform/platform.h" #include "src/base/small-vector.h" @@ -192,7 +192,7 @@ void WasmCode::LogCode(Isolate* isolate) const { Local<v8::String> source_map_str = load_wasm_source_map(v8_isolate, source_map_url.c_str()); native_module()->SetWasmSourceMap( - base::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str)); + std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str)); } if (!name_vec.empty()) { @@ -235,7 +235,10 @@ void WasmCode::Validate() const { switch (mode) { case RelocInfo::WASM_CALL: { Address target = it.rinfo()->wasm_call_address(); - DCHECK(native_module_->is_jump_table_slot(target)); + WasmCode* code = native_module_->Lookup(target); + CHECK_NOT_NULL(code); + CHECK_EQ(WasmCode::kJumpTable, code->kind()); + CHECK(code->contains(target)); break; } case RelocInfo::WASM_STUB_CALL: { @@ -244,7 +247,6 @@ void WasmCode::Validate() const { CHECK_NOT_NULL(code); #ifdef V8_EMBEDDED_BUILTINS CHECK_EQ(WasmCode::kJumpTable, code->kind()); - CHECK_EQ(native_module()->runtime_stub_table_, code); CHECK(code->contains(target)); #else CHECK_EQ(WasmCode::kRuntimeStub, code->kind()); @@ -385,8 +387,6 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) { return "wasm-to-capi"; case WasmCode::kWasmToJsWrapper: return "wasm-to-js"; - case WasmCode::kRuntimeStub: - return "runtime-stub"; case WasmCode::kInterpreterEntry: return "interpreter entry"; case WasmCode::kJumpTable: @@ -430,6 +430,16 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) { if (engine) engine->FreeDeadCode(dead_code); } +WasmCodeAllocator::OptionalLock::~OptionalLock() { + if (allocator_) allocator_->mutex_.Unlock(); +} + +void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) { + DCHECK(!is_locked()); + allocator_ = allocator; + allocator->mutex_.Lock(); +} + WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager, VirtualMemory code_space, bool can_request_more, @@ -448,6 +458,11 @@ WasmCodeAllocator::~WasmCodeAllocator() { committed_code_space()); } +void WasmCodeAllocator::Init(NativeModule* native_module) { + DCHECK_EQ(1, owned_code_space_.size()); + native_module->AddCodeSpace(owned_code_space_[0].region(), {}); +} + namespace { // On Windows, we cannot commit a region that straddles different reservations // of virtual memory. Because we bump-allocate, and because, if we need more @@ -487,17 +502,70 @@ base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded( #endif return split_ranges; } + +int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) { + return NativeModule::kNeedsFarJumpsBetweenCodeSpaces && + FLAG_wasm_far_jump_table + ? static_cast<int>(num_declared_functions) + : 0; +} + +// Returns an overapproximation of the code size overhead per new code space +// created by the jump tables. +size_t OverheadPerCodeSpace(uint32_t num_declared_functions) { + // Overhead for the jump table. 
+ size_t overhead = RoundUp<kCodeAlignment>( + JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions)); + +#if defined(V8_OS_WIN64) + // On Win64, we need to reserve some pages at the beginning of an executable + // space. See {AddCodeSpace}. + overhead += Heap::GetCodeRangeReservedAreaSize(); +#endif // V8_OS_WIN64 + + // Overhead for the far jump table. + overhead += + RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots( + WasmCode::kRuntimeStubCount, + NumWasmFunctionsInFarJumpTable(num_declared_functions))); + + return overhead; +} + +size_t ReservationSize(size_t code_size_estimate, int num_declared_functions, + size_t total_reserved) { + size_t overhead = OverheadPerCodeSpace(num_declared_functions); + + // Reserve a power of two at least as big as any of + // a) needed size + overhead (this is the minimum needed) + // b) 2 * overhead (to not waste too much space by overhead) + // c) 1/4 of current total reservation size (to grow exponentially) + size_t reserve_size = base::bits::RoundUpToPowerOfTwo( + std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead, + 2 * overhead), + total_reserved / 4)); + + // Limit by the maximum supported code space size. + return std::min(kMaxWasmCodeSpaceSize, reserve_size); +} + } // namespace Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module, size_t size) { return AllocateForCodeInRegion( - native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()}); + native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()}, + WasmCodeAllocator::OptionalLock{}); } Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion( - NativeModule* native_module, size_t size, base::AddressRegion region) { - base::MutexGuard lock(&mutex_); + NativeModule* native_module, size_t size, base::AddressRegion region, + const WasmCodeAllocator::OptionalLock& optional_lock) { + OptionalLock new_lock; + if (!optional_lock.is_locked()) new_lock.Lock(this); + const auto& locked_lock = + optional_lock.is_locked() ? optional_lock : new_lock; + DCHECK(locked_lock.is_locked()); DCHECK_EQ(code_manager_, native_module->engine()->code_manager()); DCHECK_LT(0, size); v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); @@ -517,12 +585,10 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion( Address hint = owned_code_space_.empty() ? kNullAddress : owned_code_space_.back().end(); - // Reserve at least 20% of the total generated code size so far, and of - // course at least {size}. Round up to the next power of two. 
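// Editorial worked example of the {ReservationSize} policy above (hypothetical
// numbers, kCodeAlignment rounding ignored): assume overhead == 96 KB,
// code_size_estimate == 500 KB, total_reserved == 8 MB. Then
//   a) 500 KB + 96 KB = 596 KB   (needed size plus overhead)
//   b)  2 * 96 KB     = 192 KB   (keeps the overhead below half the space)
//   c)  8 MB / 4      =   2 MB   (exponential growth of the reservations)
// max(a, b, c) == 2 MB is already a power of two and well below
// kMaxWasmCodeSpaceSize, so 2 MB would be reserved. On the very first
// reservation (total_reserved == 0) the same inputs give 596 KB, rounded up
// to 1 MB.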
size_t total_reserved = 0; for (auto& vmem : owned_code_space_) total_reserved += vmem.size(); - size_t reserve_size = - base::bits::RoundUpToPowerOfTwo(std::max(size, total_reserved / 5)); + size_t reserve_size = ReservationSize( + size, native_module->module()->num_declared_functions, total_reserved); VirtualMemory new_mem = code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint)); if (!new_mem.IsReserved()) { @@ -534,7 +600,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion( code_manager_->AssignRange(new_region, native_module); free_code_space_.Merge(new_region); owned_code_space_.emplace_back(std::move(new_mem)); - native_module->AddCodeSpace(new_region); + native_module->AddCodeSpace(new_region, locked_lock); code_space = free_code_space_.Allocate(size); DCHECK(!code_space.is_empty()); @@ -660,10 +726,9 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) { } } -base::AddressRegion WasmCodeAllocator::GetSingleCodeRegion() const { +size_t WasmCodeAllocator::GetNumCodeSpaces() const { base::MutexGuard lock(&mutex_); - DCHECK_EQ(1, owned_code_space_.size()); - return owned_code_space_[0].region(); + return owned_code_space_.size(); } NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled, @@ -689,27 +754,34 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled, CompilationState::New(*shared_this, std::move(async_counters)); DCHECK_NOT_NULL(module_); if (module_->num_declared_functions > 0) { - code_table_.reset(new WasmCode* [module_->num_declared_functions] {}); + code_table_ = + std::make_unique<WasmCode*[]>(module_->num_declared_functions); } - AddCodeSpace(code_allocator_.GetSingleCodeRegion()); + code_allocator_.Init(this); } void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) { WasmCodeRefScope code_ref_scope; - DCHECK_LE(num_functions(), max_functions); - WasmCode** new_table = new WasmCode* [max_functions] {}; + DCHECK_LE(module_->num_declared_functions, max_functions); + auto new_table = std::make_unique<WasmCode*[]>(max_functions); if (module_->num_declared_functions > 0) { - memcpy(new_table, code_table_.get(), - module_->num_declared_functions * sizeof(*new_table)); + memcpy(new_table.get(), code_table_.get(), + module_->num_declared_functions * sizeof(WasmCode*)); } - code_table_.reset(new_table); + code_table_ = std::move(new_table); - CHECK_EQ(1, code_space_data_.size()); + base::AddressRegion single_code_space_region; + { + base::MutexGuard guard(&allocation_mutex_); + CHECK_EQ(1, code_space_data_.size()); + single_code_space_region = code_space_data_[0].region; + } // Re-allocate jump table. 
- code_space_data_[0].jump_table = CreateEmptyJumpTableInRegion( + main_jump_table_ = CreateEmptyJumpTableInRegion( JumpTableAssembler::SizeForNumberOfSlots(max_functions), - code_space_data_[0].region); - main_jump_table_ = code_space_data_[0].jump_table; + single_code_space_region, WasmCodeAllocator::OptionalLock{}); + base::MutexGuard guard(&allocation_mutex_); + code_space_data_[0].jump_table = main_jump_table_; } void NativeModule::LogWasmCodes(Isolate* isolate) { @@ -731,89 +803,6 @@ CompilationEnv NativeModule::CreateCompilationEnv() const { } WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) { - return AddAndPublishAnonymousCode(code, WasmCode::kFunction); -} - -void NativeModule::UseLazyStub(uint32_t func_index) { - DCHECK_LE(module_->num_imported_functions, func_index); - DCHECK_LT(func_index, - module_->num_imported_functions + module_->num_declared_functions); - - if (!lazy_compile_table_) { - uint32_t num_slots = module_->num_declared_functions; - WasmCodeRefScope code_ref_scope; - DCHECK_EQ(1, code_space_data_.size()); - lazy_compile_table_ = CreateEmptyJumpTableInRegion( - JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots), - code_space_data_[0].region); - JumpTableAssembler::GenerateLazyCompileTable( - lazy_compile_table_->instruction_start(), num_slots, - module_->num_imported_functions, - runtime_stub_entry(WasmCode::kWasmCompileLazy)); - } - - // Add jump table entry for jump to the lazy compile stub. - uint32_t slot_index = func_index - module_->num_imported_functions; - DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress); - Address lazy_compile_target = - lazy_compile_table_->instruction_start() + - JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index); - JumpTableAssembler::PatchJumpTableSlot(main_jump_table_->instruction_start(), - slot_index, lazy_compile_target, - WasmCode::kFlushICache); -} - -// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS} -// was removed and embedded builtins are no longer optional. -void NativeModule::SetRuntimeStubs(Isolate* isolate) { - DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]); // Only called once. -#ifdef V8_EMBEDDED_BUILTINS - WasmCodeRefScope code_ref_scope; - DCHECK_EQ(1, code_space_data_.size()); - WasmCode* jump_table = CreateEmptyJumpTableInRegion( - JumpTableAssembler::SizeForNumberOfStubSlots(WasmCode::kRuntimeStubCount), - code_space_data_[0].region); - Address base = jump_table->instruction_start(); - EmbeddedData embedded_data = EmbeddedData::FromBlob(); -#define RUNTIME_STUB(Name) Builtins::k##Name, -#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name) - Builtins::Name wasm_runtime_stubs[WasmCode::kRuntimeStubCount] = { - WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)}; -#undef RUNTIME_STUB -#undef RUNTIME_STUB_TRAP - Address builtin_address[WasmCode::kRuntimeStubCount]; - for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) { - Builtins::Name builtin = wasm_runtime_stubs[i]; - CHECK(embedded_data.ContainsBuiltin(builtin)); - builtin_address[i] = embedded_data.InstructionStartOfBuiltin(builtin); - runtime_stub_entries_[i] = - base + JumpTableAssembler::StubSlotIndexToOffset(i); - } - JumpTableAssembler::GenerateRuntimeStubTable(base, builtin_address, - WasmCode::kRuntimeStubCount); - DCHECK_NULL(runtime_stub_table_); - runtime_stub_table_ = jump_table; -#else // V8_EMBEDDED_BUILTINS - HandleScope scope(isolate); - WasmCodeRefScope code_ref_scope; - USE(runtime_stub_table_); // Actually unused, but avoids ifdef's in header. 
-#define COPY_BUILTIN(Name) \ - runtime_stub_entries_[WasmCode::k##Name] = \ - AddAndPublishAnonymousCode( \ - isolate->builtins()->builtin_handle(Builtins::k##Name), \ - WasmCode::kRuntimeStub, #Name) \ - ->instruction_start(); -#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name) - WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP) -#undef COPY_BUILTIN_TRAP -#undef COPY_BUILTIN -#endif // V8_EMBEDDED_BUILTINS - DCHECK_NE(kNullAddress, runtime_stub_entries_[0]); -} - -WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code, - WasmCode::Kind kind, - const char* name) { // For off-heap builtins, we create a copy of the off-heap instruction stream // instead of the on-heap code object containing the trampoline. Ensure that // we do not apply the on-heap reloc info to the off-heap instructions. @@ -859,8 +848,10 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code, code->InstructionStart(); int mode_mask = RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL); - Address constant_pool_start = - reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset; + auto jump_tables_ref = + FindJumpTablesForCode(reinterpret_cast<Address>(dst_code_bytes.begin())); + Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin()); + Address constant_pool_start = dst_code_addr + constant_pool_offset; RelocIterator orig_it(*code, mode_mask); for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(), constant_pool_start, mode_mask); @@ -869,8 +860,8 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code, if (RelocInfo::IsWasmStubCall(mode)) { uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag(); DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount); - Address entry = runtime_stub_entry( - static_cast<WasmCode::RuntimeStubId>(stub_call_tag)); + Address entry = GetNearRuntimeStubEntry( + static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref); it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH); } else { it.rinfo()->apply(delta); @@ -880,7 +871,6 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code, // Flush the i-cache after relocation. 
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size()); - DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry); std::unique_ptr<WasmCode> new_code{new WasmCode{ this, // native_module kAnonymousFuncIndex, // index @@ -895,24 +885,63 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code, OwnedVector<ProtectedInstructionData>{}, // protected_instructions std::move(reloc_info), // reloc_info std::move(source_pos), // source positions - kind, // kind + WasmCode::kFunction, // kind ExecutionTier::kNone}}; // tier - new_code->MaybePrint(name); + new_code->MaybePrint(nullptr); new_code->Validate(); return PublishCode(std::move(new_code)); } +void NativeModule::UseLazyStub(uint32_t func_index) { + DCHECK_LE(module_->num_imported_functions, func_index); + DCHECK_LT(func_index, + module_->num_imported_functions + module_->num_declared_functions); + + if (!lazy_compile_table_) { + uint32_t num_slots = module_->num_declared_functions; + WasmCodeRefScope code_ref_scope; + base::AddressRegion single_code_space_region; + { + base::MutexGuard guard(&allocation_mutex_); + DCHECK_EQ(1, code_space_data_.size()); + single_code_space_region = code_space_data_[0].region; + } + lazy_compile_table_ = CreateEmptyJumpTableInRegion( + JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots), + single_code_space_region, WasmCodeAllocator::OptionalLock{}); + JumpTableAssembler::GenerateLazyCompileTable( + lazy_compile_table_->instruction_start(), num_slots, + module_->num_imported_functions, + GetNearRuntimeStubEntry( + WasmCode::kWasmCompileLazy, + FindJumpTablesForCode(lazy_compile_table_->instruction_start()))); + } + + // Add jump table entry for jump to the lazy compile stub. + uint32_t slot_index = func_index - module_->num_imported_functions; + DCHECK_NULL(code_table_[slot_index]); + Address lazy_compile_target = + lazy_compile_table_->instruction_start() + + JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index); + base::MutexGuard guard(&allocation_mutex_); + PatchJumpTablesLocked(slot_index, lazy_compile_target); +} + std::unique_ptr<WasmCode> NativeModule::AddCode( uint32_t index, const CodeDesc& desc, uint32_t stack_slots, uint32_t tagged_parameter_slots, OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions, OwnedVector<const byte> source_position_table, WasmCode::Kind kind, ExecutionTier tier) { - return AddCodeWithCodeSpace( - index, desc, stack_slots, tagged_parameter_slots, - std::move(protected_instructions), std::move(source_position_table), kind, - tier, code_allocator_.AllocateForCode(this, desc.instr_size)); + Vector<byte> code_space = + code_allocator_.AllocateForCode(this, desc.instr_size); + auto jump_table_ref = + FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin())); + return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots, + std::move(protected_instructions), + std::move(source_position_table), kind, tier, + code_space, jump_table_ref); } std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace( @@ -920,7 +949,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace( uint32_t tagged_parameter_slots, OwnedVector<ProtectedInstructionData> protected_instructions, OwnedVector<const byte> source_position_table, WasmCode::Kind kind, - ExecutionTier tier, Vector<uint8_t> dst_code_bytes) { + ExecutionTier tier, Vector<uint8_t> dst_code_bytes, + const JumpTablesRef& jump_tables_ref) { OwnedVector<byte> reloc_info; if (desc.reloc_size > 0) { reloc_info = 
OwnedVector<byte>::New(desc.reloc_size); @@ -949,21 +979,21 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace( int mode_mask = RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_CALL) | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL); - Address constant_pool_start = - reinterpret_cast<Address>(dst_code_bytes.begin()) + constant_pool_offset; + Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin()); + Address constant_pool_start = code_start + constant_pool_offset; for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(), constant_pool_start, mode_mask); !it.done(); it.next()) { RelocInfo::Mode mode = it.rinfo()->rmode(); if (RelocInfo::IsWasmCall(mode)) { uint32_t call_tag = it.rinfo()->wasm_call_tag(); - Address target = GetCallTargetForFunction(call_tag); + Address target = GetNearCallTargetForFunction(call_tag, jump_tables_ref); it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH); } else if (RelocInfo::IsWasmStubCall(mode)) { uint32_t stub_call_tag = it.rinfo()->wasm_call_tag(); DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount); - Address entry = runtime_stub_entry( - static_cast<WasmCode::RuntimeStubId>(stub_call_tag)); + Address entry = GetNearRuntimeStubEntry( + static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref); it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH); } else { it.rinfo()->apply(delta); @@ -1036,12 +1066,9 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) { // Populate optimized code to the jump table unless there is an active // redirection to the interpreter that should be preserved. - DCHECK_IMPLIES( - main_jump_table_ == nullptr, - engine_->code_manager()->IsImplicitAllocationsDisabledForTesting()); - bool update_jump_table = update_code_table && - !has_interpreter_redirection(code->index()) && - main_jump_table_; + DCHECK_NOT_NULL(main_jump_table_); + bool update_jump_table = + update_code_table && !has_interpreter_redirection(code->index()); // Ensure that interpreter entries always populate to the jump table. if (code->kind_ == WasmCode::Kind::kInterpreterEntry) { @@ -1050,9 +1077,7 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) { } if (update_jump_table) { - JumpTableAssembler::PatchJumpTableSlot( - main_jump_table_->instruction_start(), slot_idx, - code->instruction_start(), WasmCode::kFlushICache); + PatchJumpTablesLocked(slot_idx, code->instruction_start()); } } WasmCodeRefScope::AddRef(code.get()); @@ -1120,11 +1145,12 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const { } WasmCode* NativeModule::CreateEmptyJumpTableInRegion( - uint32_t jump_table_size, base::AddressRegion region) { + uint32_t jump_table_size, base::AddressRegion region, + const WasmCodeAllocator::OptionalLock& allocator_lock) { // Only call this if we really need a jump table. 
DCHECK_LT(0, jump_table_size); - Vector<uint8_t> code_space = - code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region); + Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion( + this, jump_table_size, region, allocator_lock); DCHECK(!code_space.empty()); ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size()); std::unique_ptr<WasmCode> code{new WasmCode{ @@ -1146,12 +1172,63 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion( return PublishCode(std::move(code)); } -void NativeModule::AddCodeSpace(base::AddressRegion region) { +void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) { + // The caller must hold the {allocation_mutex_}, thus we fail to lock it here. + DCHECK(!allocation_mutex_.TryLock()); + + for (auto& code_space_data : code_space_data_) { + DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table); + if (!code_space_data.jump_table) continue; + PatchJumpTableLocked(code_space_data, slot_index, target); + } +} + +void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data, + uint32_t slot_index, Address target) { + // The caller must hold the {allocation_mutex_}, thus we fail to lock it here. + DCHECK(!allocation_mutex_.TryLock()); + + DCHECK_NOT_NULL(code_space_data.jump_table); + DCHECK_NOT_NULL(code_space_data.far_jump_table); + + DCHECK_LT(slot_index, module_->num_declared_functions); + Address jump_table_slot = + code_space_data.jump_table->instruction_start() + + JumpTableAssembler::JumpSlotIndexToOffset(slot_index); + uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset( + WasmCode::kRuntimeStubCount + slot_index); + // Only pass the far jump table start if the far jump table actually has a + // slot for this function index (i.e. does not only contain runtime stubs). + bool has_far_jump_slot = + far_jump_table_offset < + code_space_data.far_jump_table->instructions().size(); + Address far_jump_table_start = + code_space_data.far_jump_table->instruction_start(); + Address far_jump_table_slot = + has_far_jump_slot ? far_jump_table_start + far_jump_table_offset + : kNullAddress; + JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot, + target); +} + +void NativeModule::AddCodeSpace( + base::AddressRegion region, + const WasmCodeAllocator::OptionalLock& allocator_lock) { +#ifndef V8_EMBEDDED_BUILTINS + // The far jump table contains far jumps to the embedded builtins. This + // requires a build with embedded builtins enabled. + FATAL( + "WebAssembly is not supported in no-embed builds. no-embed builds are " + "deprecated. See\n" + " - https://groups.google.com/d/msg/v8-users/9F53xqBjpkI/9WmKSbcWBAAJ\n" + " - https://crbug.com/v8/8519\n" + " - https://crbug.com/v8/8531\n"); +#endif // V8_EMBEDDED_BUILTINS + // Each code space must be at least twice as large as the overhead per code // space. Otherwise, we are wasting too much memory. 
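// Editorial note: bound (b) in {ReservationSize} above (reserve at least
// 2 * overhead) is what is meant to keep the DCHECK below satisfied; with the
// hypothetical 96 KB overhead from the sketch in {AllocateForCodeInRegion},
// any region smaller than 192 KB would spend more of its pages on per-space
// overhead (jump tables, reserved pages) than on actual wasm code.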
- const bool is_first_code_space = code_space_data_.empty(); - const bool implicit_alloc_disabled = - engine_->code_manager()->IsImplicitAllocationsDisabledForTesting(); + DCHECK_GE(region.size(), + 2 * OverheadPerCodeSpace(module()->num_declared_functions)); #if defined(V8_OS_WIN64) // On some platforms, specifically Win64, we need to reserve some pages at @@ -1160,32 +1237,82 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) { // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204 // for details. if (engine_->code_manager() - ->CanRegisterUnwindInfoForNonABICompliantCodeRange() && - !implicit_alloc_disabled) { + ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) { size_t size = Heap::GetCodeRangeReservedAreaSize(); DCHECK_LT(0, size); - Vector<byte> padding = code_allocator_.AllocateForCode(this, size); - CHECK(region.contains(reinterpret_cast<Address>(padding.begin()), - padding.size())); + Vector<byte> padding = code_allocator_.AllocateForCodeInRegion( + this, size, region, allocator_lock); + CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin()); + win64_unwindinfo::RegisterNonABICompliantCodeRange( + reinterpret_cast<void*>(region.begin()), region.size()); } #endif // V8_OS_WIN64 WasmCodeRefScope code_ref_scope; WasmCode* jump_table = nullptr; + WasmCode* far_jump_table = nullptr; const uint32_t num_wasm_functions = module_->num_declared_functions; const bool has_functions = num_wasm_functions > 0; + const bool is_first_code_space = code_space_data_.empty(); + // TODO(clemensb): Avoid additional jump table if the code space is close + // enough to another existing code space. const bool needs_jump_table = - has_functions && is_first_code_space && !implicit_alloc_disabled; + has_functions && (kNeedsFarJumpsBetweenCodeSpaces || is_first_code_space); if (needs_jump_table) { jump_table = CreateEmptyJumpTableInRegion( - JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region); + JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region, + allocator_lock); CHECK(region.contains(jump_table->instruction_start())); } + // Always allocate a far jump table, because it contains the runtime stubs. 
+ int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions); + far_jump_table = CreateEmptyJumpTableInRegion( + JumpTableAssembler::SizeForNumberOfFarJumpSlots( + WasmCode::kRuntimeStubCount, num_function_slots), + region, allocator_lock); + CHECK(region.contains(far_jump_table->instruction_start())); + EmbeddedData embedded_data = EmbeddedData::FromBlob(); +#define RUNTIME_STUB(Name) Builtins::k##Name, +#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name) + Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = { + WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)}; +#undef RUNTIME_STUB +#undef RUNTIME_STUB_TRAP + Address builtin_addresses[WasmCode::kRuntimeStubCount]; + for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) { + Builtins::Name builtin = stub_names[i]; + CHECK(embedded_data.ContainsBuiltin(builtin)); + builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin); + } + JumpTableAssembler::GenerateFarJumpTable( + far_jump_table->instruction_start(), builtin_addresses, + WasmCode::kRuntimeStubCount, num_function_slots); + if (is_first_code_space) main_jump_table_ = jump_table; - code_space_data_.push_back(CodeSpaceData{region, jump_table}); + base::MutexGuard guard(&allocation_mutex_); + code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table}); + + if (jump_table && !is_first_code_space) { + // Patch the new jump table(s) with existing functions. If this is the first + // code space, there cannot be any functions that have been compiled yet. + const CodeSpaceData& new_code_space_data = code_space_data_.back(); + for (uint32_t slot_index = 0; slot_index < num_wasm_functions; + ++slot_index) { + if (code_table_[slot_index]) { + PatchJumpTableLocked(new_code_space_data, slot_index, + code_table_[slot_index]->instruction_start()); + } else if (lazy_compile_table_) { + Address lazy_compile_target = + lazy_compile_table_->instruction_start() + + JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index); + PatchJumpTableLocked(new_code_space_data, slot_index, + lazy_compile_target); + } + } + } } namespace { @@ -1241,26 +1368,86 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const { return main_jump_table_->instruction_start() + slot_offset; } +NativeModule::JumpTablesRef NativeModule::FindJumpTablesForCode( + Address code_addr) const { + base::MutexGuard guard(&allocation_mutex_); + for (auto& code_space_data : code_space_data_) { + const bool jump_table_reachable = + !kNeedsFarJumpsBetweenCodeSpaces || + code_space_data.region.contains(code_addr); + if (jump_table_reachable && code_space_data.far_jump_table) { + // We might not have a jump table if we have no functions. + return {code_space_data.jump_table + ? 
code_space_data.jump_table->instruction_start() + : kNullAddress, + code_space_data.far_jump_table->instruction_start()}; + } + } + FATAL("code_addr is not part of a code space"); +} + +Address NativeModule::GetNearCallTargetForFunction( + uint32_t func_index, const JumpTablesRef& jump_tables) const { + uint32_t slot_offset = GetJumpTableOffset(func_index); + return jump_tables.jump_table_start + slot_offset; +} + +Address NativeModule::GetNearRuntimeStubEntry( + WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const { + auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index); + return jump_tables.far_jump_table_start + offset; +} + uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot( Address slot_address) const { - DCHECK(is_jump_table_slot(slot_address)); - uint32_t slot_offset = static_cast<uint32_t>( - slot_address - main_jump_table_->instruction_start()); + WasmCodeRefScope code_refs; + WasmCode* code = Lookup(slot_address); + DCHECK_NOT_NULL(code); + DCHECK_EQ(WasmCode::kJumpTable, code->kind()); + uint32_t slot_offset = + static_cast<uint32_t>(slot_address - code->instruction_start()); uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset); DCHECK_LT(slot_idx, module_->num_declared_functions); + DCHECK_EQ(slot_address, + code->instruction_start() + + JumpTableAssembler::JumpSlotIndexToOffset(slot_idx)); return module_->num_imported_functions + slot_idx; } -const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const { -#define RETURN_NAME(Name) \ - if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \ - return #Name; \ +WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const { + base::MutexGuard guard(&allocation_mutex_); + + for (auto& code_space_data : code_space_data_) { + if (code_space_data.far_jump_table->contains(target)) { + uint32_t offset = static_cast<uint32_t>( + target - code_space_data.far_jump_table->instruction_start()); + uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset); + if (index >= WasmCode::kRuntimeStubCount) continue; + if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) { + continue; + } + return static_cast<WasmCode::RuntimeStubId>(index); + } } -#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name) - WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP) -#undef RETURN_NAME_TRAP -#undef RETURN_NAME - return "<unknown>"; + + // Invalid address. 
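// Editorial sketch of the reverse mapping above (slot size hypothetical): if a
// far-jump slot occupies S bytes, a target at
// far_jump_table->instruction_start() + 3 * S resolves to stub id 3. A target
// that lands in one of the trailing function slots (index >=
// kRuntimeStubCount), or that is not exactly at a slot boundary, falls through
// to this point and is reported as kRuntimeStubCount.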
+ return WasmCode::kRuntimeStubCount; +} + +const char* NativeModule::GetRuntimeStubName(Address target) const { + WasmCode::RuntimeStubId stub_id = GetRuntimeStubId(target); + +#define RUNTIME_STUB_NAME(Name) #Name, +#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name, + constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST( + RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"}; +#undef RUNTIME_STUB_NAME +#undef RUNTIME_STUB_NAME_TRAP + STATIC_ASSERT(arraysize(runtime_stub_names) == + WasmCode::kRuntimeStubCount + 1); + + DCHECK_GT(arraysize(runtime_stub_names), stub_id); + return runtime_stub_names[stub_id]; } NativeModule::~NativeModule() { @@ -1275,10 +1462,8 @@ NativeModule::~NativeModule() { import_wrapper_cache_.reset(); } -WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker, - size_t max_committed) - : memory_tracker_(memory_tracker), - max_committed_code_space_(max_committed), +WasmCodeManager::WasmCodeManager(size_t max_committed) + : max_committed_code_space_(max_committed), critical_committed_code_space_(max_committed / 2) { DCHECK_LE(max_committed, kMaxWasmCodeMemory); } @@ -1350,12 +1535,12 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) { DCHECK_GT(size, 0); size_t allocate_page_size = page_allocator->AllocatePageSize(); size = RoundUp(size, allocate_page_size); - if (!memory_tracker_->ReserveAddressSpace(size)) return {}; + if (!BackingStore::ReserveAddressSpace(size)) return {}; if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr(); VirtualMemory mem(page_allocator, size, hint, allocate_page_size); if (!mem.IsReserved()) { - memory_tracker_->ReleaseReservation(size); + BackingStore::ReleaseReservation(size); return {}; } TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(), @@ -1369,13 +1554,6 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) { return mem; } -void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) { - // This has to be set before committing any memory. - DCHECK_EQ(0, total_committed_code_space_.load()); - max_committed_code_space_ = limit; - critical_committed_code_space_.store(limit / 2); -} - // static size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) { constexpr size_t kCodeSizeMultiplier = 4; @@ -1387,8 +1565,6 @@ size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) { for (auto& function : module->functions) { estimate += kCodeOverhead + kCodeSizeMultiplier * function.code.length(); } - estimate += - JumpTableAssembler::SizeForNumberOfSlots(module->num_declared_functions); estimate += kImportSize * module->num_imported_functions; return estimate; @@ -1425,9 +1601,20 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule( committed + (max_committed_code_space_ - committed) / 2); } - // If the code must be contiguous, reserve enough address space up front. + // If we cannot add code space later, reserve enough address space up front. size_t code_vmem_size = - kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate; + can_request_more ? ReservationSize(code_size_estimate, + module->num_declared_functions, 0) + : kMaxWasmCodeSpaceSize; + + // The '--wasm-max-code-space-reservation' testing flag can be used to reduce + // the maximum size of the initial code space reservation (in MB). 
+ if (FLAG_wasm_max_initial_code_space_reservation > 0) { + size_t flag_max_bytes = + static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB; + if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes; + } + // Try up to two times; getting rid of dead JSArrayBuffer allocations might // require two GCs because the first GC maybe incremental and may have // floating garbage. @@ -1456,14 +1643,6 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule( TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start, size); -#if defined(V8_OS_WIN64) - if (CanRegisterUnwindInfoForNonABICompliantCodeRange() && - !implicit_allocations_disabled_for_testing_) { - win64_unwindinfo::RegisterNonABICompliantCodeRange( - reinterpret_cast<void*>(start), size); - } -#endif // V8_OS_WIN64 - base::MutexGuard lock(&native_modules_mutex_); lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get()))); return ret; @@ -1519,6 +1698,9 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode( } Vector<byte> code_space = code_allocator_.AllocateForCode(this, total_code_space); + // Lookup the jump tables to use once, then use for all code objects. + auto jump_tables_ref = + FindJumpTablesForCode(reinterpret_cast<Address>(code_space.begin())); std::vector<std::unique_ptr<WasmCode>> generated_code; generated_code.reserve(results.size()); @@ -1533,7 +1715,7 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode( result.func_index, result.code_desc, result.frame_slot_count, result.tagged_parameter_slots, std::move(result.protected_instructions), std::move(result.source_positions), GetCodeKind(result), - result.result_tier, this_code_space)); + result.result_tier, this_code_space, jump_tables_ref)); } DCHECK_EQ(0, code_space.size()); @@ -1567,6 +1749,10 @@ void NativeModule::FreeCode(Vector<WasmCode* const> codes) { } } +size_t NativeModule::GetNumberOfCodeSpacesForTesting() const { + return code_allocator_.GetNumCodeSpaces(); +} + void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space, size_t committed_size) { base::MutexGuard lock(&native_modules_mutex_); @@ -1576,15 +1762,14 @@ void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space, code_space.address(), code_space.end(), code_space.size()); #if defined(V8_OS_WIN64) - if (CanRegisterUnwindInfoForNonABICompliantCodeRange() && - !implicit_allocations_disabled_for_testing_) { + if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) { win64_unwindinfo::UnregisterNonABICompliantCodeRange( reinterpret_cast<void*>(code_space.address())); } #endif // V8_OS_WIN64 lookup_map_.erase(code_space.address()); - memory_tracker_->ReleaseReservation(code_space.size()); + BackingStore::ReleaseReservation(code_space.size()); code_space.Free(); DCHECK(!code_space.IsReserved()); } @@ -1616,7 +1801,7 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const { } // TODO(v8:7424): Code protection scopes are not yet supported with shared code -// enabled and need to be revisited to work with --wasm-shared-code as well. +// enabled and need to be revisited. 
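The batch path in {AddCompiledCode} above shows the intended use of the new jump-table API: look the tables up once per allocated region, then resolve every near call and runtime-stub entry against that cached result. A minimal sketch of the pattern, with `native_module`, `code_space` and `func_index` as hypothetical stand-ins for the objects set up there:

  // Resolve the jump table and far jump table once for this code region.
  NativeModule::JumpTablesRef tables = native_module->FindJumpTablesForCode(
      reinterpret_cast<Address>(code_space.begin()));
  // Both lookups below only do pointer arithmetic on the cached table starts,
  // instead of scanning the code spaces again for every relocation.
  Address call_target =
      native_module->GetNearCallTargetForFunction(func_index, tables);
  Address lazy_compile_stub = native_module->GetNearRuntimeStubEntry(
      WasmCode::kWasmCompileLazy, tables);

In {AddCode} the same cached value is threaded through {AddCodeWithCodeSpace}, so a whole compilation batch shares a single lookup.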
NativeModuleModificationScope::NativeModuleModificationScope( NativeModule* native_module) : native_module_(native_module) { diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h index c2e5249e5ee..7deea9032a5 100644 --- a/chromium/v8/src/wasm/wasm-code-manager.h +++ b/chromium/v8/src/wasm/wasm-code-manager.h @@ -39,7 +39,6 @@ class NativeModule; class WasmCodeManager; struct WasmCompilationResult; class WasmEngine; -class WasmMemoryTracker; class WasmImportWrapperCache; struct WasmModule; @@ -79,7 +78,6 @@ class V8_EXPORT_PRIVATE WasmCode final { kFunction, kWasmToCapiWrapper, kWasmToJsWrapper, - kRuntimeStub, kInterpreterEntry, kJumpTable }; @@ -282,11 +280,33 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind); // Manages the code reservations and allocations of a single {NativeModule}. class WasmCodeAllocator { public: + // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to + // indicate that the lock on the {WasmCodeAllocator} is already taken. It's + // optional to allow to also call methods without holding the lock. + class OptionalLock { + public: + // External users can only instantiate a non-locked {OptionalLock}. + OptionalLock() = default; + ~OptionalLock(); + bool is_locked() const { return allocator_ != nullptr; } + + private: + friend class WasmCodeAllocator; + // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock} + // is passed. + void Lock(WasmCodeAllocator*); + + WasmCodeAllocator* allocator_ = nullptr; + }; + WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space, bool can_request_more, std::shared_ptr<Counters> async_counters); ~WasmCodeAllocator(); + // Call before use, after the {NativeModule} is set up completely. + void Init(NativeModule*); + size_t committed_code_space() const { return committed_code_space_.load(std::memory_order_acquire); } @@ -303,7 +323,8 @@ class WasmCodeAllocator { // Allocate code space within a specific region. Returns a valid buffer or // fails with OOM (crash). Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size, - base::AddressRegion); + base::AddressRegion, + const WasmCodeAllocator::OptionalLock&); // Sets permissions of all owned code space to executable, or read-write (if // {executable} is false). Returns true on success. @@ -312,9 +333,8 @@ class WasmCodeAllocator { // Free memory pages of all given code objects. Used for wasm code GC. void FreeCode(Vector<WasmCode* const>); - // Returns the region of the single code space managed by this code allocator. - // Will fail if more than one code space has been created. - base::AddressRegion GetSingleCodeRegion() const; + // Retrieve the number of separately reserved code spaces. + size_t GetNumCodeSpaces() const; private: // The engine-wide wasm code manager. @@ -344,6 +364,8 @@ class WasmCodeAllocator { bool is_executable_ = false; + // TODO(clemensb): Remove this field once multiple code spaces are supported + // everywhere. const bool can_request_more_memory_; std::shared_ptr<Counters> async_counters_; @@ -352,9 +374,9 @@ class WasmCodeAllocator { class V8_EXPORT_PRIVATE NativeModule final { public: #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64 - static constexpr bool kCanAllocateMoreMemory = false; + static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true; #else - static constexpr bool kCanAllocateMoreMemory = true; + static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false; #endif // {AddCode} is thread safe w.r.t. 
other calls to {AddCode} or methods adding @@ -394,11 +416,6 @@ class V8_EXPORT_PRIVATE NativeModule final { // table with trampolines accordingly. void UseLazyStub(uint32_t func_index); - // Initializes all runtime stubs by setting up entry addresses in the runtime - // stub table. It must be called exactly once per native module before adding - // other WasmCode so that runtime stub ids can be resolved during relocation. - void SetRuntimeStubs(Isolate* isolate); - // Creates a snapshot of the current state of the code table. This is useful // to get a consistent view of the table (e.g. used by the serializer). std::vector<WasmCode*> SnapshotCodeTable() const; @@ -409,13 +426,6 @@ class V8_EXPORT_PRIVATE NativeModule final { void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map); WasmModuleSourceMap* GetWasmSourceMap() const; - Address runtime_stub_entry(WasmCode::RuntimeStubId index) const { - DCHECK_LT(index, WasmCode::kRuntimeStubCount); - Address entry_address = runtime_stub_entries_[index]; - DCHECK_NE(kNullAddress, entry_address); - return entry_address; - } - Address jump_table_start() const { return main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress; @@ -423,16 +433,33 @@ class V8_EXPORT_PRIVATE NativeModule final { uint32_t GetJumpTableOffset(uint32_t func_index) const; - bool is_jump_table_slot(Address address) const { - return main_jump_table_->contains(address); - } - // Returns the canonical target to call for the given function (the slot in // the first jump table). Address GetCallTargetForFunction(uint32_t func_index) const; - // Reverse lookup from a given call target (i.e. a jump table slot as the - // above {GetCallTargetForFunction} returns) to a function index. + struct JumpTablesRef { + const Address jump_table_start; + const Address far_jump_table_start; + }; + + // Finds the jump tables that should be used for the code at {code_addr}. This + // information is then passed to {GetNearCallTargetForFunction} and + // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information + // up there. + JumpTablesRef FindJumpTablesForCode(Address code_addr) const; + + // Similarly to {GetCallTargetForFunction}, but uses the jump table previously + // looked up via {FindJumpTablesForCode}. + Address GetNearCallTargetForFunction(uint32_t func_index, + const JumpTablesRef&) const; + + // Get a runtime stub entry (which is a far jump table slot) in the jump table + // previously looked up via {FindJumpTablesForCode}. + Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index, + const JumpTablesRef&) const; + + // Reverse lookup from a given call target (which must be a jump table slot) + // to a function index. uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const; bool SetExecutable(bool executable) { @@ -481,7 +508,11 @@ class V8_EXPORT_PRIVATE NativeModule final { const WasmFeatures& enabled_features() const { return enabled_features_; } - const char* GetRuntimeStubName(Address runtime_stub_entry) const; + // Returns the runtime stub id that corresponds to the given address (which + // must be a far jump table slot). Returns {kRuntimeStubCount} on failure. + WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const; + + const char* GetRuntimeStubName(Address runtime_stub_target) const; // Sample the current code size of this modules to the given counters. 
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling }; @@ -501,6 +532,9 @@ class V8_EXPORT_PRIVATE NativeModule final { // its accounting. void FreeCode(Vector<WasmCode* const>); + // Retrieve the number of separately reserved code spaces for this module. + size_t GetNumberOfCodeSpacesForTesting() const; + private: friend class WasmCode; friend class WasmCodeAllocator; @@ -510,6 +544,7 @@ class V8_EXPORT_PRIVATE NativeModule final { struct CodeSpaceData { base::AddressRegion region; WasmCode* jump_table; + WasmCode* far_jump_table; }; // Private constructor, called via {WasmCodeManager::NewNativeModule()}. @@ -525,17 +560,23 @@ class V8_EXPORT_PRIVATE NativeModule final { OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions, OwnedVector<const byte> source_position_table, WasmCode::Kind kind, - ExecutionTier tier, Vector<uint8_t> code_space); + ExecutionTier tier, Vector<uint8_t> code_space, + const JumpTablesRef& jump_tables_ref); - // Add and publish anonymous code. - WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind, - const char* name = nullptr); + WasmCode* CreateEmptyJumpTableInRegion( + uint32_t jump_table_size, base::AddressRegion, + const WasmCodeAllocator::OptionalLock&); - WasmCode* CreateEmptyJumpTableInRegion(uint32_t jump_table_size, - base::AddressRegion); + // Hold the {allocation_mutex_} when calling one of these methods. + // {slot_index} is the index in the declared functions, i.e. function index + // minus the number of imported functions. + void PatchJumpTablesLocked(uint32_t slot_index, Address target); + void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index, + Address target); // Called by the {WasmCodeAllocator} to register a new code space. - void AddCodeSpace(base::AddressRegion); + void AddCodeSpace(base::AddressRegion, + const WasmCodeAllocator::OptionalLock&); // Hold the {allocation_mutex_} when calling this method. bool has_interpreter_redirection(uint32_t func_index) { @@ -580,12 +621,6 @@ class V8_EXPORT_PRIVATE NativeModule final { // {WireBytesStorage}, held by background compile tasks. std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_; - // Contains entry points for runtime stub calls via {WASM_STUB_CALL}. - Address runtime_stub_entries_[WasmCode::kRuntimeStubCount] = {kNullAddress}; - - // Jump table used for runtime stubs (i.e. trampolines to embedded builtins). - WasmCode* runtime_stub_table_ = nullptr; - // Jump table used by external calls (from JS). Wasm calls use one of the jump // tables stored in {code_space_data_}. WasmCode* main_jump_table_ = nullptr; @@ -612,7 +647,11 @@ class V8_EXPORT_PRIVATE NativeModule final { // instruction start address of the value. std::map<Address, std::unique_ptr<WasmCode>> owned_code_; - std::unique_ptr<WasmCode* []> code_table_; + // Table of the latest code object per function, updated on initial + // compilation and tier up. The number of entries is + // {WasmModule::num_declared_functions}, i.e. there are no entries for + // imported functions. + std::unique_ptr<WasmCode*[]> code_table_; // Null if no redirections exist, otherwise a bitset over all functions in // this module marking those functions that have been redirected. 
@@ -634,8 +673,7 @@ class V8_EXPORT_PRIVATE NativeModule final { class V8_EXPORT_PRIVATE WasmCodeManager final { public: - explicit WasmCodeManager(WasmMemoryTracker* memory_tracker, - size_t max_committed); + explicit WasmCodeManager(size_t max_committed); #ifdef DEBUG ~WasmCodeManager() { @@ -654,16 +692,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { return total_committed_code_space_.load(); } - void SetMaxCommittedMemoryForTesting(size_t limit); - - void DisableImplicitAllocationsForTesting() { - implicit_allocations_disabled_for_testing_ = true; - } - - bool IsImplicitAllocationsDisabledForTesting() const { - return implicit_allocations_disabled_for_testing_; - } - static size_t EstimateNativeModuleCodeSize(const WasmModule* module); static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module); @@ -686,11 +714,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { void AssignRange(base::AddressRegion, NativeModule*); - WasmMemoryTracker* const memory_tracker_; - - size_t max_committed_code_space_; - - bool implicit_allocations_disabled_for_testing_ = false; + const size_t max_committed_code_space_; std::atomic<size_t> total_committed_code_space_{0}; // If the committed code space exceeds {critical_committed_code_space_}, then diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h index fbbe19396cb..2b5cb6c9ec7 100644 --- a/chromium/v8/src/wasm/wasm-constants.h +++ b/chromium/v8/src/wasm/wasm-constants.h @@ -81,6 +81,7 @@ enum SectionCode : int8_t { // to be consistent. kNameSectionCode, // Name section (encoded as a string) kSourceMappingURLSectionCode, // Source Map URL section + kDebugInfoSectionCode, // DWARF section .debug_info kCompilationHintsSectionCode, // Compilation hints section // Helper values diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc index 2955bc602f6..ea989c081df 100644 --- a/chromium/v8/src/wasm/wasm-debug.cc +++ b/chromium/v8/src/wasm/wasm-debug.cc @@ -184,7 +184,7 @@ class InterpreterHandle { argument_values.begin()); bool finished = false; while (!finished) { - // TODO(clemensh): Add occasional StackChecks. + // TODO(clemensb): Add occasional StackChecks. WasmInterpreter::State state = ContinueExecution(thread); switch (state) { case WasmInterpreter::State::PAUSED: @@ -277,9 +277,10 @@ class InterpreterHandle { if (isolate_->debug()->break_points_active()) { Handle<WasmModuleObject> module_object( GetInstanceObject()->module_object(), isolate_); + Handle<Script> script(module_object->script(), isolate_); int position = GetTopPosition(module_object); Handle<FixedArray> breakpoints; - if (WasmModuleObject::CheckBreakPoints(isolate_, module_object, position) + if (WasmModuleObject::CheckBreakPoints(isolate_, script, position) .ToHandle(&breakpoints)) { // We hit one or several breakpoints. Clear stepping, notify the // listeners and return. 
@@ -318,7 +319,8 @@ class InterpreterHandle { DCHECK_LT(0, thread->GetFrameCount()); auto frame = thread->GetFrame(thread->GetFrameCount() - 1); - return module_object->GetFunctionOffset(frame->function()->func_index) + + return GetWasmFunctionOffset(module_object->module(), + frame->function()->func_index) + frame->pc(); } @@ -502,9 +504,11 @@ wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo debug_info) { Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) { DCHECK(!instance->has_debug_info()); Factory* factory = instance->GetIsolate()->factory(); + Handle<Cell> stack_cell = factory->NewCell(factory->empty_fixed_array()); Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast( factory->NewStruct(WASM_DEBUG_INFO_TYPE, AllocationType::kOld)); debug_info->set_wasm_instance(*instance); + debug_info->set_interpreter_reference_stack(*stack_cell); instance->set_debug_info(*debug_info); return debug_info; } @@ -524,6 +528,7 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting( return interp_handle->raw()->interpreter(); } +// static void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info, int func_index, int offset) { Isolate* isolate = debug_info->GetIsolate(); @@ -533,6 +538,18 @@ void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info, handle->interpreter()->SetBreakpoint(func, offset, true); } +// static +void WasmDebugInfo::ClearBreakpoint(Handle<WasmDebugInfo> debug_info, + int func_index, int offset) { + Isolate* isolate = debug_info->GetIsolate(); + auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info); + // TODO(leese): If there are no more breakpoints left it would be good to + // undo redirecting to the interpreter. + const wasm::WasmFunction* func = &handle->module()->functions[func_index]; + handle->interpreter()->SetBreakpoint(func, offset, false); +} + +// static void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info, Vector<int> func_indexes) { Isolate* isolate = debug_info->GetIsolate(); @@ -635,8 +652,8 @@ Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info, if (index == -1) { index = static_cast<int32_t>(map->FindOrInsert(*sig)); if (index == entries->length()) { - entries = isolate->factory()->CopyFixedArrayAndGrow( - entries, entries->length(), AllocationType::kOld); + entries = + isolate->factory()->CopyFixedArrayAndGrow(entries, entries->length()); debug_info->set_c_wasm_entries(*entries); } DCHECK(entries->get(index).IsUndefined(isolate)); diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc index 97111f83497..adb566cb418 100644 --- a/chromium/v8/src/wasm/wasm-engine.cc +++ b/chromium/v8/src/wasm/wasm-engine.cc @@ -211,8 +211,7 @@ struct WasmEngine::NativeModuleInfo { int8_t num_code_gcs_triggered = 0; }; -WasmEngine::WasmEngine() - : code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {} +WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {} WasmEngine::~WasmEngine() { // Synchronize on all background compile tasks. @@ -307,7 +306,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile( CreateWasmScript(isolate, bytes, native_module->module()->source_map_url); // Create the module object. - // TODO(clemensh): For the same module (same bytes / same hash), we should + // TODO(clemensb): For the same module (same bytes / same hash), we should // only have one WasmModuleObject. Otherwise, we might only set // breakpoints on a (potentially empty) subset of the instances. 
@@ -337,7 +336,7 @@ void WasmEngine::AsyncInstantiate( ErrorThrower thrower(isolate, "WebAssembly.instantiate()"); // Instantiate a TryCatch so that caught exceptions won't progagate out. // They will still be set as pending exceptions on the isolate. - // TODO(clemensh): Avoid TryCatch, use Execution::TryCall internally to invoke + // TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke // start function and report thrown exception explicitly via out argument. v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate)); catcher.SetVerbose(false); @@ -567,7 +566,7 @@ int GetGCTimeMicros(base::TimeTicks start) { void WasmEngine::AddIsolate(Isolate* isolate) { base::MutexGuard guard(&mutex_); DCHECK_EQ(0, isolates_.count(isolate)); - isolates_.emplace(isolate, base::make_unique<IsolateInfo>(isolate)); + isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate)); // Install sampling GC callback. // TODO(v8:7424): For now we sample module sizes in a GC callback. This will @@ -631,7 +630,7 @@ void WasmEngine::LogCode(WasmCode* code) { IsolateInfo* info = isolates_[isolate].get(); if (info->log_codes == false) continue; if (info->log_codes_task == nullptr) { - auto new_task = base::make_unique<LogCodesTask>( + auto new_task = std::make_unique<LogCodesTask>( &mutex_, &info->log_codes_task, isolate, this); info->log_codes_task = new_task.get(); info->foreground_task_runner->PostTask(std::move(new_task)); @@ -676,7 +675,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule( size_t code_size_estimate = wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get()); return NewNativeModule(isolate, enabled, code_size_estimate, - wasm::NativeModule::kCanAllocateMoreMemory, + !wasm::NativeModule::kNeedsFarJumpsBetweenCodeSpaces || + FLAG_wasm_far_jump_table, std::move(module)); } @@ -688,7 +688,7 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule( can_request_more, std::move(module)); base::MutexGuard lock(&mutex_); auto pair = native_modules_.insert(std::make_pair( - native_module.get(), base::make_unique<NativeModuleInfo>())); + native_module.get(), std::make_unique<NativeModuleInfo>())); DCHECK(pair.second); // inserted new entry. 
pair.first->second.get()->isolates.insert(isolate); isolates_[isolate]->native_modules.insert(native_module.get()); @@ -768,7 +768,7 @@ void WasmEngine::SampleTopTierCodeSizeInAllIsolates( DCHECK_EQ(1, isolates_.count(isolate)); IsolateInfo* info = isolates_[isolate].get(); info->foreground_task_runner->PostTask( - base::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module)); + std::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module)); } } @@ -880,7 +880,7 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) { for (auto* isolate : native_modules_[entry.first]->isolates) { auto& gc_task = current_gc_info_->outstanding_isolates[isolate]; if (!gc_task) { - auto new_task = base::make_unique<WasmGCForegroundTask>(isolate); + auto new_task = std::make_unique<WasmGCForegroundTask>(isolate); gc_task = new_task.get(); DCHECK_EQ(1, isolates_.count(isolate)); isolates_[isolate]->foreground_task_runner->PostTask( diff --git a/chromium/v8/src/wasm/wasm-engine.h b/chromium/v8/src/wasm/wasm-engine.h index 401cf2b8805..424f85fa798 100644 --- a/chromium/v8/src/wasm/wasm-engine.h +++ b/chromium/v8/src/wasm/wasm-engine.h @@ -10,7 +10,6 @@ #include "src/tasks/cancelable-task.h" #include "src/wasm/wasm-code-manager.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-tier.h" #include "src/zone/accounting-allocator.h" @@ -23,6 +22,7 @@ class CompilationStatistics; class HeapNumber; class WasmInstanceObject; class WasmModuleObject; +class JSArrayBuffer; namespace wasm { @@ -120,8 +120,6 @@ class V8_EXPORT_PRIVATE WasmEngine { WasmCodeManager* code_manager() { return &code_manager_; } - WasmMemoryTracker* memory_tracker() { return &memory_tracker_; } - AccountingAllocator* allocator() { return &allocator_; } // Compilation statistics for TurboFan compilations. @@ -156,8 +154,8 @@ class V8_EXPORT_PRIVATE WasmEngine { template <typename T, typename... Args> std::unique_ptr<T> NewBackgroundCompileTask(Args&&... args) { - return base::make_unique<T>(&background_compile_task_manager_, - std::forward<Args>(args)...); + return std::make_unique<T>(&background_compile_task_manager_, + std::forward<Args>(args)...); } // Trigger code logging for this WasmCode in all Isolates which have access to @@ -243,7 +241,6 @@ class V8_EXPORT_PRIVATE WasmEngine { // calling this method. 
void PotentiallyFinishCurrentGC(); - WasmMemoryTracker memory_tracker_; WasmCodeManager code_manager_; AccountingAllocator allocator_; diff --git a/chromium/v8/src/wasm/wasm-external-refs.cc b/chromium/v8/src/wasm/wasm-external-refs.cc index 9ca45183ef6..13c159c0efc 100644 --- a/chromium/v8/src/wasm/wasm-external-refs.cc +++ b/chromium/v8/src/wasm/wasm-external-refs.cc @@ -247,6 +247,10 @@ int32_t int64_mod_wrapper(Address data) { if (divisor == 0) { return 0; } + if (divisor == -1 && dividend == std::numeric_limits<int64_t>::min()) { + WriteUnalignedValue<int64_t>(data, 0); + return 1; + } WriteUnalignedValue<int64_t>(data, dividend % divisor); return 1; } diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h index 36f9ebd8a46..b18fa90acf7 100644 --- a/chromium/v8/src/wasm/wasm-feature-flags.h +++ b/chromium/v8/src/wasm/wasm-feature-flags.h @@ -10,12 +10,12 @@ V(eh, "exception handling opcodes", false) \ V(threads, "thread opcodes", false) \ V(simd, "SIMD opcodes", false) \ - V(bigint, "JS BigInt support", false) \ V(return_call, "return call opcodes", false) \ V(compilation_hints, "compilation hints section", false) #define FOREACH_WASM_STAGING_FEATURE_FLAG(V) \ V(anyref, "anyref opcodes", false) \ + V(bigint, "JS BigInt support", false) \ V(type_reflection, "wasm type reflection in JS", false) #define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) \ diff --git a/chromium/v8/src/wasm/wasm-interpreter.cc b/chromium/v8/src/wasm/wasm-interpreter.cc index 299128860da..7c41f6a8e02 100644 --- a/chromium/v8/src/wasm/wasm-interpreter.cc +++ b/chromium/v8/src/wasm/wasm-interpreter.cc @@ -1128,13 +1128,41 @@ class ThreadImpl { }; public: + // The {ReferenceStackScope} sets up the reference stack in the interpreter. + // The handle to the reference stack has to be re-initialized everytime we + // call into the interpreter because there is no HandleScope that could + // contain that handle. A global handle is not an option because it can lead + // to a memory leak if a reference to the {WasmInstanceObject} is put onto the + // reference stack and thereby transitively keeps the interpreter alive. + class ReferenceStackScope { + public: + explicit ReferenceStackScope(ThreadImpl* impl) : impl_(impl) { + // The reference stack is already initialized, we don't have to do + // anything. + if (!impl_->reference_stack_cell_.is_null()) return; + impl_->reference_stack_cell_ = handle( + impl_->instance_object_->debug_info().interpreter_reference_stack(), + impl_->isolate_); + // We initialized the reference stack, so we also have to reset it later. 
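The wasm-external-refs.cc hunk above guards the one signed remainder case that is undefined behaviour in C++: INT64_MIN % -1 overflows because the intermediate quotient does not fit in int64_t, while wasm's i64.rem_s defines the result to be 0. A reduced sketch of the guarded computation (the real wrapper reads and writes its operands through an unaligned Address and returns 0 to signal a divide-by-zero trap; both details are omitted here):

    #include <cstdint>
    #include <limits>

    // i64.rem_s semantics: division by zero traps (not modelled here), and
    // INT64_MIN % -1 is defined to be 0 instead of overflowing.
    int64_t I64RemS(int64_t dividend, int64_t divisor) {
      if (divisor == -1 && dividend == std::numeric_limits<int64_t>::min()) {
        return 0;
      }
      return dividend % divisor;
    }

    int main() {
      int64_t min = std::numeric_limits<int64_t>::min();
      return I64RemS(min, -1) == 0 ? 0 : 1;
    }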
+ do_reset_stack_ = true; + } + + ~ReferenceStackScope() { + if (do_reset_stack_) { + impl_->reference_stack_cell_ = Handle<Cell>(); + } + } + + private: + ThreadImpl* impl_; + bool do_reset_stack_ = false; + }; + ThreadImpl(Zone* zone, CodeMap* codemap, - Handle<WasmInstanceObject> instance_object, - Handle<Cell> reference_stack_cell) + Handle<WasmInstanceObject> instance_object) : codemap_(codemap), isolate_(instance_object->GetIsolate()), instance_object_(instance_object), - reference_stack_cell_(reference_stack_cell), frames_(zone), activations_(zone) {} @@ -1394,6 +1422,7 @@ class ThreadImpl { }; friend class InterpretedFrameImpl; + friend class ReferenceStackScope; CodeMap* codemap_; Isolate* isolate_; @@ -1663,9 +1692,15 @@ class ThreadImpl { template <typename ctype, typename mtype> bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, - int* const len, MachineRepresentation rep) { - MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), - sizeof(ctype)); + int* const len, MachineRepresentation rep, + int prefix_len = 0) { + // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that + // the memarg is 1 byte from pc. We don't increment pc at the caller, + // because we want to keep pc to the start of the operation to keep trap + // reporting and tracing accurate, otherwise those will report at the middle + // of an opcode. + MemoryAccessImmediate<Decoder::kNoValidate> imm( + decoder, code->at(pc + prefix_len), sizeof(ctype)); uint32_t index = Pop().to<uint32_t>(); Address addr = BoundsCheckMem<mtype>(imm.offset, index); if (!addr) { @@ -1690,9 +1725,15 @@ class ThreadImpl { template <typename ctype, typename mtype> bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, - int* const len, MachineRepresentation rep) { - MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), - sizeof(ctype)); + int* const len, MachineRepresentation rep, + int prefix_len = 0) { + // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that + // the memarg is 1 byte from pc. We don't increment pc at the caller, + // because we want to keep pc to the start of the operation to keep trap + // reporting and tracing accurate, otherwise those will report at the middle + // of an opcode. 
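The {ReferenceStackScope} added above is an RAII guard: the outermost scope on a call path lazily installs the handle to the interpreter's reference stack and remembers that it did so, and only that scope clears the handle again when it is destroyed, so nested scopes are free. A stand-alone analogue of the pattern, using a plain pointer in place of the Handle<Cell> (names hypothetical):

    #include <cassert>

    struct Thread {
      int* reference_stack = nullptr;  // stands in for the Handle<Cell>
      int storage = 0;
    };

    // Only the outermost scope initializes the per-call state, and only that
    // scope resets it when it goes out of scope.
    class ReferenceStackScope {
     public:
      explicit ReferenceStackScope(Thread* thread) : thread_(thread) {
        if (thread_->reference_stack != nullptr) return;  // already set up
        thread_->reference_stack = &thread_->storage;
        do_reset_ = true;
      }
      ~ReferenceStackScope() {
        if (do_reset_) thread_->reference_stack = nullptr;
      }

     private:
      Thread* thread_;
      bool do_reset_ = false;
    };

    int main() {
      Thread thread;
      {
        ReferenceStackScope outer(&thread);
        ReferenceStackScope inner(&thread);  // no-op: already initialized
        assert(thread.reference_stack != nullptr);
      }
      assert(thread.reference_stack == nullptr);  // reset by the outer scope only
      return 0;
    }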
+ MemoryAccessImmediate<Decoder::kNoValidate> imm( + decoder, code->at(pc + prefix_len), sizeof(ctype)); ctype val = Pop().to<ctype>(); uint32_t index = Pop().to<uint32_t>(); @@ -2223,9 +2264,22 @@ class ThreadImpl { EXTRACT_LANE_CASE(F32x4, f32x4) EXTRACT_LANE_CASE(I64x2, i64x2) EXTRACT_LANE_CASE(I32x4, i32x4) - EXTRACT_LANE_CASE(I16x8, i16x8) - EXTRACT_LANE_CASE(I8x16, i8x16) #undef EXTRACT_LANE_CASE +#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, type) \ + case kExpr##format##ExtractLane##sign: { \ + SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \ + *len += 1; \ + WasmValue val = Pop(); \ + Simd128 s = val.to_s128(); \ + auto ss = s.to_##name(); \ + Push(WasmValue(static_cast<type>(ss.val[LANE(imm.lane, ss)]))); \ + return true; \ + } + EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t) + EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t) + EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t) + EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t) +#undef EXTRACT_LANE_EXTEND_CASE #define BINOP_CASE(op, name, stype, count, expr) \ case kExpr##op: { \ WasmValue v2 = Pop(); \ @@ -2317,8 +2371,10 @@ class ThreadImpl { } UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a)) UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a) + UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a)) UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a)) UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a) + UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a)) UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a)) UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a)) UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a)) @@ -2431,10 +2487,12 @@ class ThreadImpl { #undef REPLACE_LANE_CASE case kExprS128LoadMem: return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len, - MachineRepresentation::kSimd128); + MachineRepresentation::kSimd128, + /*prefix_len=*/1); case kExprS128StoreMem: return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len, - MachineRepresentation::kSimd128); + MachineRepresentation::kSimd128, + /*prefix_len=*/1); #define SHIFT_CASE(op, name, stype, count, expr) \ case kExpr##op: { \ uint32_t shift = Pop().to<uint32_t>(); \ @@ -2448,19 +2506,26 @@ class ThreadImpl { Push(WasmValue(Simd128(res))); \ return true; \ } - SHIFT_CASE(I64x2Shl, i64x2, int2, 2, static_cast<uint64_t>(a) << shift) - SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> shift) - SHIFT_CASE(I64x2ShrU, i64x2, int2, 2, static_cast<uint64_t>(a) >> shift) - SHIFT_CASE(I32x4Shl, i32x4, int4, 4, static_cast<uint32_t>(a) << shift) - SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> shift) - SHIFT_CASE(I32x4ShrU, i32x4, int4, 4, static_cast<uint32_t>(a) >> shift) - SHIFT_CASE(I16x8Shl, i16x8, int8, 8, static_cast<uint16_t>(a) << shift) - SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> shift) - SHIFT_CASE(I16x8ShrU, i16x8, int8, 8, static_cast<uint16_t>(a) >> shift) - SHIFT_CASE(I8x16Shl, i8x16, int16, 16, static_cast<uint8_t>(a) << shift) - SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> shift) + SHIFT_CASE(I64x2Shl, i64x2, int2, 2, + static_cast<uint64_t>(a) << (shift % 64)) + SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> (shift % 64)) + SHIFT_CASE(I64x2ShrU, i64x2, int2, 2, + static_cast<uint64_t>(a) >> (shift % 64)) + SHIFT_CASE(I32x4Shl, i32x4, int4, 4, + static_cast<uint32_t>(a) << (shift % 32)) + SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> (shift % 32)) + SHIFT_CASE(I32x4ShrU, i32x4, int4, 4, + static_cast<uint32_t>(a) >> (shift % 32)) + SHIFT_CASE(I16x8Shl, i16x8, int8, 8, + static_cast<uint16_t>(a) << (shift % 16)) + 
SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> (shift % 16)) + SHIFT_CASE(I16x8ShrU, i16x8, int8, 8, + static_cast<uint16_t>(a) >> (shift % 16)) + SHIFT_CASE(I8x16Shl, i8x16, int16, 16, + static_cast<uint8_t>(a) << (shift % 8)) + SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8)) SHIFT_CASE(I8x16ShrU, i8x16, int16, 16, - static_cast<uint8_t>(a) >> shift) + static_cast<uint8_t>(a) >> (shift % 8)) #undef SHIFT_CASE #define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \ expr) \ @@ -2564,6 +2629,18 @@ class ThreadImpl { ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4) ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8) #undef ADD_HORIZ_CASE + case kExprS8x16Swizzle: { + int16 v2 = Pop().to_s128().to_i8x16(); + int16 v1 = Pop().to_s128().to_i8x16(); + int16 res; + for (size_t i = 0; i < kSimd128Size; ++i) { + int lane = v2.val[LANE(i, v1)]; + res.val[LANE(i, v1)] = + lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0; + } + Push(WasmValue(Simd128(res))); + return true; + } case kExprS8x16Shuffle: { Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); @@ -2604,6 +2681,23 @@ class ThreadImpl { REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &) REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &) #undef REDUCTION_CASE +#define QFM_CASE(op, name, stype, count, operation) \ + case kExpr##op: { \ + stype c = Pop().to_s128().to_##name(); \ + stype b = Pop().to_s128().to_##name(); \ + stype a = Pop().to_s128().to_##name(); \ + stype res; \ + for (size_t i = 0; i < count; i++) { \ + res.val[i] = a.val[i] operation(b.val[i] * c.val[i]); \ + } \ + Push(WasmValue(Simd128(res))); \ + return true; \ + } + QFM_CASE(F32x4Qfma, f32x4, float4, 4, +) + QFM_CASE(F32x4Qfms, f32x4, float4, 4, -) + QFM_CASE(F64x2Qfma, f64x2, float2, 2, +) + QFM_CASE(F64x2Qfms, f64x2, float2, 2, -) +#undef QFM_CASE default: return false; } @@ -2658,7 +2752,7 @@ class ThreadImpl { WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)), isolate_); uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception); - Handle<Object> exception_object = + Handle<WasmExceptionPackage> exception_object = WasmExceptionPackage::New(isolate_, exception_tag, encoded_size); Handle<FixedArray> encoded_values = Handle<FixedArray>::cast( WasmExceptionPackage::GetExceptionValues(isolate_, exception_object)); @@ -2727,8 +2821,9 @@ class ThreadImpl { // Determines whether the given exception has a tag matching the expected tag // for the given index within the exception table of the current instance. bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) { - Handle<Object> caught_tag = - WasmExceptionPackage::GetExceptionTag(isolate_, exception_object); + if (!exception_object->IsWasmExceptionPackage(isolate_)) return false; + Handle<Object> caught_tag = WasmExceptionPackage::GetExceptionTag( + isolate_, Handle<WasmExceptionPackage>::cast(exception_object)); Handle<Object> expected_tag = handle(instance_object_->exceptions_table().get(index), isolate_); DCHECK(expected_tag->IsWasmExceptionTag()); @@ -2755,8 +2850,9 @@ class ThreadImpl { // the encoded values match the expected signature of the exception. 
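The SHIFT_CASE changes above make the interpreter reduce the shift amount modulo the lane width (64, 32, 16 or 8 bits), matching the wasm SIMD rule that oversized shift counts wrap around rather than triggering C++ undefined behaviour for shifts of a full width or more. A minimal worked example for one 32-bit lane:

    #include <cstdint>

    // Wasm SIMD shifts use the count modulo the lane width, so shifting a
    // 32-bit lane by 36 behaves like shifting by 4.
    uint32_t I32x4ShlLane(uint32_t lane, uint32_t shift) {
      return lane << (shift % 32);
    }

    int main() {
      return I32x4ShlLane(1u, 36) == 16u ? 0 : 1;  // 36 % 32 == 4, 1 << 4 == 16
    }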
void DoUnpackException(const WasmException* exception, Handle<Object> exception_object) { - Handle<FixedArray> encoded_values = Handle<FixedArray>::cast( - WasmExceptionPackage::GetExceptionValues(isolate_, exception_object)); + Handle<FixedArray> encoded_values = + Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues( + isolate_, Handle<WasmExceptionPackage>::cast(exception_object))); // Decode the exception values from the given exception package and push // them onto the operand stack. This encoding has to be in sync with other // backends so that exceptions can be passed between them. @@ -3054,14 +3150,14 @@ class ThreadImpl { len = 1 + imm.length; break; } - case kExprGetLocal: { + case kExprLocalGet: { LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); HandleScope handle_scope(isolate_); // Avoid leaking handles. Push(GetStackValue(frames_.back().sp + imm.index)); len = 1 + imm.length; break; } - case kExprSetLocal: { + case kExprLocalSet: { LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); HandleScope handle_scope(isolate_); // Avoid leaking handles. WasmValue val = Pop(); @@ -3069,7 +3165,7 @@ class ThreadImpl { len = 1 + imm.length; break; } - case kExprTeeLocal: { + case kExprLocalTee: { LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); HandleScope handle_scope(isolate_); // Avoid leaking handles. WasmValue val = Pop(); @@ -3231,7 +3327,7 @@ class ThreadImpl { } } break; - case kExprGetGlobal: { + case kExprGlobalGet: { GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); HandleScope handle_scope(isolate_); @@ -3239,7 +3335,7 @@ class ThreadImpl { len = 1 + imm.length; break; } - case kExprSetGlobal: { + case kExprGlobalSet: { GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); const WasmGlobal* global = &module()->globals[imm.index]; @@ -3770,7 +3866,8 @@ class ThreadImpl { static WasmCode* GetTargetCode(Isolate* isolate, Address target) { WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager(); NativeModule* native_module = code_manager->LookupNativeModule(target); - if (native_module->is_jump_table_slot(target)) { + WasmCode* code = native_module->Lookup(target); + if (code->kind() == WasmCode::kJumpTable) { uint32_t func_index = native_module->GetFunctionIndexFromJumpTableSlot(target); @@ -3784,7 +3881,6 @@ class ThreadImpl { return native_module->GetCode(func_index); } - WasmCode* code = native_module->Lookup(target); DCHECK_EQ(code->instruction_start(), target); return code; } @@ -3888,12 +3984,14 @@ class InterpretedFrameImpl { } WasmValue GetLocalValue(int index) const { + ThreadImpl::ReferenceStackScope stack_scope(thread_); DCHECK_LE(0, index); DCHECK_GT(GetLocalCount(), index); return thread_->GetStackValue(static_cast<int>(frame()->sp) + index); } WasmValue GetStackValue(int index) const { + ThreadImpl::ReferenceStackScope stack_scope(thread_); DCHECK_LE(0, index); // Index must be within the number of stack values of this frame. DCHECK_GT(GetStackHeight(), index); @@ -3941,21 +4039,33 @@ const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) { // translation unit anyway. 
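The kExprGetLocal → kExprLocalGet family of renames above (together with kExprGlobalGet/kExprGlobalSet) follows the wasm specification's current instruction names; only the C++ identifiers change in this patch. For orientation, the affected instructions and their binary opcodes as defined by the wasm spec (this is a hypothetical enum for illustration, not V8's actual opcode table):

    #include <cstdint>

    // Spec names on the left, wasm binary opcodes on the right; the rename does
    // not touch any of these byte values.
    enum WasmOpcode : uint8_t {
      kExprLocalGet = 0x20,   // was kExprGetLocal
      kExprLocalSet = 0x21,   // was kExprSetLocal
      kExprLocalTee = 0x22,   // was kExprTeeLocal
      kExprGlobalGet = 0x23,  // was kExprGetGlobal
      kExprGlobalSet = 0x24,  // was kExprSetGlobal
    };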
//============================================================================ WasmInterpreter::State WasmInterpreter::Thread::state() { - return ToImpl(this)->state(); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + return impl->state(); } void WasmInterpreter::Thread::InitFrame(const WasmFunction* function, WasmValue* args) { - ToImpl(this)->InitFrame(function, args); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + impl->InitFrame(function, args); } WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) { - return ToImpl(this)->Run(num_steps); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + return impl->Run(num_steps); } void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); } -void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); } +void WasmInterpreter::Thread::Reset() { + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + return impl->Reset(); +} WasmInterpreter::Thread::ExceptionHandlingResult WasmInterpreter::Thread::RaiseException(Isolate* isolate, Handle<Object> exception) { - return ToImpl(this)->RaiseException(isolate, exception); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + return impl->RaiseException(isolate, exception); } pc_t WasmInterpreter::Thread::GetBreakpointPc() { return ToImpl(this)->GetBreakpointPc(); @@ -3969,7 +4079,9 @@ WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) { return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index))); } WasmValue WasmInterpreter::Thread::GetReturnValue(int index) { - return ToImpl(this)->GetReturnValue(index); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + return impl->GetReturnValue(index); } TrapReason WasmInterpreter::Thread::GetTrapReason() { return ToImpl(this)->GetTrapReason(); @@ -3996,41 +4108,38 @@ uint32_t WasmInterpreter::Thread::NumActivations() { return ToImpl(this)->NumActivations(); } uint32_t WasmInterpreter::Thread::StartActivation() { - return ToImpl(this)->StartActivation(); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + return impl->StartActivation(); } void WasmInterpreter::Thread::FinishActivation(uint32_t id) { - ToImpl(this)->FinishActivation(id); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + impl->FinishActivation(id); } uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) { - return ToImpl(this)->ActivationFrameBase(id); + ThreadImpl* impl = ToImpl(this); + ThreadImpl::ReferenceStackScope stack_scope(impl); + return impl->ActivationFrameBase(id); } //============================================================================ // The implementation details of the interpreter. //============================================================================ -class WasmInterpreterInternals : public ZoneObject { +class WasmInterpreterInternals { public: // Create a copy of the module bytes for the interpreter, since the passed // pointer might be invalidated after constructing the interpreter. 
const ZoneVector<uint8_t> module_bytes_; CodeMap codemap_; - ZoneVector<ThreadImpl> threads_; + std::vector<ThreadImpl> threads_; WasmInterpreterInternals(Zone* zone, const WasmModule* module, const ModuleWireBytes& wire_bytes, Handle<WasmInstanceObject> instance_object) : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone), - codemap_(module, module_bytes_.data(), zone), - threads_(zone) { - Isolate* isolate = instance_object->GetIsolate(); - Handle<Cell> reference_stack = isolate->global_handles()->Create( - *isolate->factory()->NewCell(isolate->factory()->empty_fixed_array())); - threads_.emplace_back(zone, &codemap_, instance_object, reference_stack); - } - - ~WasmInterpreterInternals() { - DCHECK_EQ(1, threads_.size()); - GlobalHandles::Destroy(threads_[0].reference_stack_cell().location()); + codemap_(module, module_bytes_.data(), zone) { + threads_.emplace_back(zone, &codemap_, instance_object); } }; @@ -4059,10 +4168,12 @@ WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module, const ModuleWireBytes& wire_bytes, Handle<WasmInstanceObject> instance_object) : zone_(isolate->allocator(), ZONE_NAME), - internals_(new (&zone_) WasmInterpreterInternals( + internals_(new WasmInterpreterInternals( &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {} -WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); } +// The destructor is here so we can forward declare {WasmInterpreterInternals} +// used in the {unique_ptr} in the header. +WasmInterpreter::~WasmInterpreter() {} void WasmInterpreter::Run() { internals_->threads_[0].Run(); } diff --git a/chromium/v8/src/wasm/wasm-interpreter.h b/chromium/v8/src/wasm/wasm-interpreter.h index da0ce01835c..4eb0675aba8 100644 --- a/chromium/v8/src/wasm/wasm-interpreter.h +++ b/chromium/v8/src/wasm/wasm-interpreter.h @@ -5,6 +5,8 @@ #ifndef V8_WASM_WASM_INTERPRETER_H_ #define V8_WASM_WASM_INTERPRETER_H_ +#include <memory> + #include "src/wasm/wasm-opcodes.h" #include "src/wasm/wasm-value.h" #include "src/zone/zone-containers.h" @@ -131,7 +133,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter { // Stack inspection and modification. pc_t GetBreakpointPc(); - // TODO(clemensh): Make this uint32_t. + // TODO(clemensb): Make this uint32_t. int GetFrameCount(); // The InterpretedFrame is only valid as long as the Thread is paused. 
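The wasm-interpreter hunks above switch internals_ from a zone-allocated pointer with a manually invoked destructor to a std::unique_ptr, and the new comment explains why ~WasmInterpreter must stay out of line: the header only forward-declares WasmInterpreterInternals, and std::unique_ptr cannot delete an incomplete type. A minimal self-contained illustration of that idiom (header and implementation shown in one file; all names hypothetical):

    #include <memory>

    // --- what would live in the header ---
    class Internals;  // forward declaration only

    class Interpreter {
     public:
      Interpreter();
      ~Interpreter();  // declared here, defined where Internals is complete
     private:
      std::unique_ptr<Internals> internals_;
    };

    // --- what would live in the .cc file ---
    class Internals {
     public:
      int value = 0;
    };

    Interpreter::Interpreter() : internals_(new Internals()) {}

    // If this destructor were defaulted in the header, unique_ptr would have to
    // instantiate delete for the still-incomplete Internals and fail to compile.
    Interpreter::~Interpreter() {}

    int main() {
      Interpreter interpreter;
      return 0;
    }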
FramePtr GetFrame(int index); @@ -170,9 +172,12 @@ class V8_EXPORT_PRIVATE WasmInterpreter { uint32_t ActivationFrameBase(uint32_t activation_id); }; + MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInterpreter); + WasmInterpreter(Isolate* isolate, const WasmModule* module, const ModuleWireBytes& wire_bytes, Handle<WasmInstanceObject> instance); + ~WasmInterpreter(); //========================================================================== @@ -214,7 +219,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter { private: Zone zone_; - WasmInterpreterInternals* internals_; + std::unique_ptr<WasmInterpreterInternals> internals_; }; } // namespace wasm diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc index f10f5ff2bfe..80d2fcb0590 100644 --- a/chromium/v8/src/wasm/wasm-js.cc +++ b/chromium/v8/src/wasm/wasm-js.cc @@ -26,7 +26,6 @@ #include "src/wasm/streaming-decoder.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-limits.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-serialization.h" @@ -207,20 +206,20 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes( if (source->IsArrayBuffer()) { // A raw array buffer was passed. Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source); - ArrayBuffer::Contents contents = buffer->GetContents(); + auto backing_store = buffer->GetBackingStore(); - start = reinterpret_cast<const uint8_t*>(contents.Data()); - length = contents.ByteLength(); + start = reinterpret_cast<const uint8_t*>(backing_store->Data()); + length = backing_store->ByteLength(); *is_shared = buffer->IsSharedArrayBuffer(); } else if (source->IsTypedArray()) { // A TypedArray was passed. Local<TypedArray> array = Local<TypedArray>::Cast(source); Local<ArrayBuffer> buffer = array->Buffer(); - ArrayBuffer::Contents contents = buffer->GetContents(); + auto backing_store = buffer->GetBackingStore(); - start = - reinterpret_cast<const uint8_t*>(contents.Data()) + array->ByteOffset(); + start = reinterpret_cast<const uint8_t*>(backing_store->Data()) + + array->ByteOffset(); length = array->ByteLength(); *is_shared = buffer->IsSharedArrayBuffer(); } else { @@ -434,8 +433,8 @@ class AsyncInstantiateCompileResultResolver finished_ = true; isolate_->wasm_engine()->AsyncInstantiate( isolate_, - base::make_unique<InstantiateBytesResultResolver>(isolate_, promise_, - result), + std::make_unique<InstantiateBytesResultResolver>(isolate_, promise_, + result), result, maybe_imports_); } @@ -597,7 +596,7 @@ void WebAssemblyCompileStreaming( i::Handle<i::Managed<WasmStreaming>> data = i::Managed<WasmStreaming>::Allocate( i_isolate, 0, - base::make_unique<WasmStreaming::WasmStreamingImpl>( + std::make_unique<WasmStreaming::WasmStreamingImpl>( isolate, kAPIMethodName, resolver)); DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback()); @@ -876,7 +875,7 @@ void WebAssemblyInstantiateStreaming( i::Handle<i::Managed<WasmStreaming>> data = i::Managed<WasmStreaming>::Allocate( i_isolate, 0, - base::make_unique<WasmStreaming::WasmStreamingImpl>( + std::make_unique<WasmStreaming::WasmStreamingImpl>( isolate, kAPIMethodName, compilation_resolver)); DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback()); @@ -1156,7 +1155,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) { return; } - bool is_shared_memory = false; + auto shared = i::SharedFlag::kNotShared; auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); if (enabled_features.threads) { // Shared property of descriptor @@ -1165,10 +1164,11 @@ void 
WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) { descriptor->Get(context, shared_key); v8::Local<v8::Value> value; if (maybe_value.ToLocal(&value)) { - is_shared_memory = value->BooleanValue(isolate); + shared = value->BooleanValue(isolate) ? i::SharedFlag::kShared + : i::SharedFlag::kNotShared; } // Throw TypeError if shared is true, and the descriptor has no "maximum" - if (is_shared_memory && maximum == -1) { + if (shared == i::SharedFlag::kShared && maximum == -1) { thrower.TypeError( "If shared is true, maximum property should be defined."); return; @@ -1177,13 +1177,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) { i::Handle<i::JSObject> memory_obj; if (!i::WasmMemoryObject::New(i_isolate, static_cast<uint32_t>(initial), - static_cast<uint32_t>(maximum), - is_shared_memory) + static_cast<uint32_t>(maximum), shared) .ToHandle(&memory_obj)) { thrower.RangeError("could not allocate memory"); return; } - if (is_shared_memory) { + if (shared == i::SharedFlag::kShared) { i::Handle<i::JSArrayBuffer> buffer( i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(), i_isolate); @@ -2034,8 +2033,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { JSFunction::EnsureHasInitialMap(module_constructor); Handle<JSObject> module_proto( JSObject::cast(module_constructor->instance_prototype()), isolate); - Handle<Map> module_map = - isolate->factory()->NewMap(i::WASM_MODULE_TYPE, WasmModuleObject::kSize); + Handle<Map> module_map = isolate->factory()->NewMap( + i::WASM_MODULE_OBJECT_TYPE, WasmModuleObject::kSize); JSFunction::SetInitialMap(module_constructor, module_map, module_proto); InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports, 1); @@ -2055,7 +2054,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { Handle<JSObject> instance_proto( JSObject::cast(instance_constructor->instance_prototype()), isolate); Handle<Map> instance_map = isolate->factory()->NewMap( - i::WASM_INSTANCE_TYPE, WasmInstanceObject::kSize); + i::WASM_INSTANCE_OBJECT_TYPE, WasmInstanceObject::kSize); JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto); InstallGetter(isolate, instance_proto, "exports", WebAssemblyInstanceGetExports); @@ -2075,8 +2074,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { JSFunction::EnsureHasInitialMap(table_constructor); Handle<JSObject> table_proto( JSObject::cast(table_constructor->instance_prototype()), isolate); - Handle<Map> table_map = - isolate->factory()->NewMap(i::WASM_TABLE_TYPE, WasmTableObject::kSize); + Handle<Map> table_map = isolate->factory()->NewMap(i::WASM_TABLE_OBJECT_TYPE, + WasmTableObject::kSize); JSFunction::SetInitialMap(table_constructor, table_map, table_proto); InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength); InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1); @@ -2096,8 +2095,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { JSFunction::EnsureHasInitialMap(memory_constructor); Handle<JSObject> memory_proto( JSObject::cast(memory_constructor->instance_prototype()), isolate); - Handle<Map> memory_map = - isolate->factory()->NewMap(i::WASM_MEMORY_TYPE, WasmMemoryObject::kSize); + Handle<Map> memory_map = isolate->factory()->NewMap( + i::WASM_MEMORY_OBJECT_TYPE, WasmMemoryObject::kSize); JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto); InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 
1); InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer); @@ -2115,8 +2114,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { JSFunction::EnsureHasInitialMap(global_constructor); Handle<JSObject> global_proto( JSObject::cast(global_constructor->instance_prototype()), isolate); - Handle<Map> global_map = - isolate->factory()->NewMap(i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize); + Handle<Map> global_map = isolate->factory()->NewMap( + i::WASM_GLOBAL_OBJECT_TYPE, WasmGlobalObject::kSize); JSFunction::SetInitialMap(global_constructor, global_map, global_proto); InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0); InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue, @@ -2137,7 +2136,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { Handle<JSObject> exception_proto( JSObject::cast(exception_constructor->instance_prototype()), isolate); Handle<Map> exception_map = isolate->factory()->NewMap( - i::WASM_EXCEPTION_TYPE, WasmExceptionObject::kSize); + i::WASM_EXCEPTION_OBJECT_TYPE, WasmExceptionObject::kSize); JSFunction::SetInitialMap(exception_constructor, exception_map, exception_proto); } diff --git a/chromium/v8/src/wasm/wasm-limits.h b/chromium/v8/src/wasm/wasm-limits.h index c7c95aca26b..6dc652aba2d 100644 --- a/chromium/v8/src/wasm/wasm-limits.h +++ b/chromium/v8/src/wasm/wasm-limits.h @@ -36,6 +36,7 @@ constexpr size_t kV8MaxWasmFunctionLocals = 50000; constexpr size_t kV8MaxWasmFunctionParams = 1000; constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000; constexpr size_t kV8MaxWasmFunctionReturns = 1; +constexpr size_t kV8MaxWasmFunctionBrTableSize = 65520; // Don't use this limit directly, but use the value of FLAG_wasm_max_table_size. constexpr size_t kV8MaxWasmTableSize = 10000000; constexpr size_t kV8MaxWasmTableInitEntries = 10000000; diff --git a/chromium/v8/src/wasm/wasm-memory.cc b/chromium/v8/src/wasm/wasm-memory.cc index f2036495425..bbb0d67f9c3 100644 --- a/chromium/v8/src/wasm/wasm-memory.cc +++ b/chromium/v8/src/wasm/wasm-memory.cc @@ -566,7 +566,7 @@ MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate, WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker(); - // Set by TryAllocateBackingStore or GetEmptyBackingStore + // Set by TryAllocateBackingStore. void* allocation_base = nullptr; size_t allocation_length = 0; diff --git a/chromium/v8/src/wasm/wasm-memory.h b/chromium/v8/src/wasm/wasm-memory.h deleted file mode 100644 index ecb6203ac5a..00000000000 --- a/chromium/v8/src/wasm/wasm-memory.h +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2017 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_WASM_WASM_MEMORY_H_ -#define V8_WASM_WASM_MEMORY_H_ - -#include <atomic> -#include <unordered_map> -#include <unordered_set> - -#include "src/base/platform/mutex.h" -#include "src/flags/flags.h" -#include "src/handles/handles.h" -#include "src/objects/js-array-buffer.h" - -namespace v8 { -namespace internal { -namespace wasm { - -// The {WasmMemoryTracker} tracks reservations and allocations for wasm memory -// and wasm code. There is an upper limit on the total reserved memory which is -// checked by this class. Allocations are stored so we can look them up when an -// array buffer dies and figure out the reservation and allocation bounds for -// that buffer. 
-class WasmMemoryTracker { - public: - WasmMemoryTracker() = default; - V8_EXPORT_PRIVATE ~WasmMemoryTracker(); - - // ReserveAddressSpace attempts to increase the reserved address space counter - // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead - // and reserve {num_bytes} bytes), false otherwise. - bool ReserveAddressSpace(size_t num_bytes); - - void RegisterAllocation(Isolate* isolate, void* allocation_base, - size_t allocation_length, void* buffer_start, - size_t buffer_length); - - struct SharedMemoryObjectState { - Handle<WasmMemoryObject> memory_object; - Isolate* isolate; - - SharedMemoryObjectState() = default; - SharedMemoryObjectState(Handle<WasmMemoryObject> memory_object, - Isolate* isolate) - : memory_object(memory_object), isolate(isolate) {} - }; - - struct AllocationData { - void* allocation_base = nullptr; - size_t allocation_length = 0; - void* buffer_start = nullptr; - size_t buffer_length = 0; - bool is_shared = false; - // Wasm memories are growable by default, this will be false only when - // shared with an asmjs module. - bool is_growable = true; - - // Track Wasm Memory instances across isolates, this is populated on - // PostMessage using persistent handles for memory objects. - std::vector<WasmMemoryTracker::SharedMemoryObjectState> - memory_object_vector; - - private: - AllocationData() = default; - AllocationData(void* allocation_base, size_t allocation_length, - void* buffer_start, size_t buffer_length) - : allocation_base(allocation_base), - allocation_length(allocation_length), - buffer_start(buffer_start), - buffer_length(buffer_length) { - DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base), - reinterpret_cast<uintptr_t>(buffer_start)); - DCHECK_GE( - reinterpret_cast<uintptr_t>(allocation_base) + allocation_length, - reinterpret_cast<uintptr_t>(buffer_start)); - DCHECK_GE( - reinterpret_cast<uintptr_t>(allocation_base) + allocation_length, - reinterpret_cast<uintptr_t>(buffer_start) + buffer_length); - } - - friend WasmMemoryTracker; - }; - - // Allow tests to allocate a backing store the same way as we do it for - // WebAssembly memory. This is used in unit tests for trap handler to - // generate the same signals/exceptions for invalid memory accesses as - // we would get with WebAssembly memory. - V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting( - Heap* heap, size_t size, void** allocation_base, - size_t* allocation_length); - - // Free memory allocated with TryAllocateBackingStoreForTesting. - V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory, - void* buffer_start); - - // Decreases the amount of reserved address space. - void ReleaseReservation(size_t num_bytes); - - V8_EXPORT_PRIVATE bool IsWasmMemory(const void* buffer_start); - - bool IsWasmSharedMemory(const void* buffer_start); - - // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the - // buffer is not tracked. - V8_EXPORT_PRIVATE const AllocationData* FindAllocationData( - const void* buffer_start); - - // Free Memory allocated by the Wasm memory tracker - bool FreeWasmMemory(Isolate* isolate, const void* buffer_start); - - void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer); - - bool IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer); - - // When WebAssembly.Memory is transferred over PostMessage, register the - // allocation as shared and track the memory objects that will need - // updating if memory is resized. 
- void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object, - Isolate* isolate); - - // This method is called when the underlying backing store is grown, but - // instances that share the backing_store have not yet been updated. - void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer, - size_t new_size); - - // Interrupt handler for GROW_SHARED_MEMORY interrupt. Update memory objects - // and instances that share the memory objects after a Grow call. - void UpdateSharedMemoryInstances(Isolate* isolate); - - // Due to timing of when buffers are garbage collected, vs. when isolate - // object handles are destroyed, it is possible to leak global handles. To - // avoid this, cleanup any global handles on isolate destruction if any exist. - void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate); - - // Allocation results are reported to UMA - // - // See wasm_memory_allocation_result in counters.h - enum class AllocationStatus { - kSuccess, // Succeeded on the first try - - kSuccessAfterRetry, // Succeeded after garbage collection - - kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address - // space limit - - kOtherFailure // Failed for an unknown reason - }; - - private: - // Helper methods to free memory only if not shared by other isolates, memory - // objects. - void FreeMemoryIfNotShared_Locked(Isolate* isolate, - const void* backing_store); - bool CanFreeSharedMemory_Locked(const void* backing_store); - void RemoveSharedBufferState_Locked(Isolate* isolate, - const void* backing_store); - - // Registers the allocation as shared, and tracks all the memory objects - // associates with this allocation across isolates. - void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object, - Isolate* isolate); - - // Map the new size after grow to the buffer backing store, so that instances - // and memory objects that share the WebAssembly.Memory across isolates can - // be updated.. - void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer, - size_t new_size); - - // Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory - // objects that share this buffer. - void TriggerSharedGrowInterruptOnAllIsolates_Locked( - Handle<JSArrayBuffer> old_buffer); - - // When isolates hit a stack check, update the memory objects associated with - // that isolate. - void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate, - void* backing_store, - size_t new_size); - - // Check if all the isolates that share a backing_store have hit a stack - // check. If a stack check is hit, and the backing store is pending grow, - // this isolate will have updated memory objects. - bool AreAllIsolatesUpdated_Locked(const void* backing_store); - - // If a grow call is made to a buffer with a pending grow, and all the - // isolates that share this buffer have not hit a StackCheck, clear the set of - // already updated instances so they can be updated with the new size on the - // most recent grow call. - void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store); - - // Helper functions to update memory objects on grow, and maintain state for - // which isolates hit a stack check. - void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate, - void* backing_store, - size_t new_size); - bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate, - const void* backing_store); - - // Destroy global handles to memory objects, and remove backing store from - // isolates_per_buffer on Free. 
- void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked( - Isolate* isolate, const void* backing_store); - void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked( - const void* backing_store); - - void RemoveIsolateFromBackingStore_Locked(Isolate* isolate, - const void* backing_store); - - // Removes an allocation from the tracker. - AllocationData ReleaseAllocation_Locked(Isolate* isolate, - const void* buffer_start); - - // Clients use a two-part process. First they "reserve" the address space, - // which signifies an intent to actually allocate it. This determines whether - // doing the allocation would put us over our limit. Once there is a - // reservation, clients can do the allocation and register the result. - // - // We should always have: - // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit - std::atomic<size_t> reserved_address_space_{0}; - - // Used to protect access to the allocated address space counter and - // allocation map. This is needed because Wasm memories can be freed on - // another thread by the ArrayBufferTracker. - base::Mutex mutex_; - - size_t allocated_address_space_ = 0; - - ////////////////////////////////////////////////////////////////////////////// - // Protected by {mutex_}: - - // Track Wasm memory allocation information. This is keyed by the start of the - // buffer, rather than by the start of the allocation. - std::unordered_map<const void*, AllocationData> allocations_; - - // Maps each buffer to the isolates that share the backing store. - std::unordered_map<const void*, std::unordered_set<Isolate*>> - isolates_per_buffer_; - - // Maps which isolates have had a grow interrupt handled on the buffer. This - // is maintained to ensure that the instances are updated with the right size - // on Grow. - std::unordered_map<const void*, std::unordered_set<Isolate*>> - isolates_updated_on_grow_; - - // Maps backing stores(void*) to the size of the underlying memory in - // (size_t). An entry to this map is made on a grow call to the corresponding - // backing store. On consecutive grow calls to the same backing store, - // the size entry is updated. This entry is made right after the mprotect - // call to change the protections on a backing_store, so the memory objects - // have not been updated yet. The backing store entry in this map is erased - // when all the memory objects, or instances that share this backing store - // have their bounds updated. - std::unordered_map<void*, size_t> grow_update_map_; - - // End of fields protected by {mutex_}. - ////////////////////////////////////////////////////////////////////////////// - - DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker); -}; - -// Attempts to allocate an array buffer with guard regions suitable for trap -// handling. If address space is not available, it will return a buffer with -// mini-guards that will require bounds checks. -V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*, - size_t size); - -// Attempts to allocate a SharedArrayBuffer with guard regions suitable for -// trap handling. If address space is not available, it will try to reserve -// up to the maximum for that memory. If all else fails, it will return a -// buffer with mini-guards of initial size. 
-V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer( - Isolate*, size_t initial_size, size_t max_size); - -Handle<JSArrayBuffer> SetupArrayBuffer( - Isolate*, void* backing_store, size_t size, bool is_external, - SharedFlag shared = SharedFlag::kNotShared); - -V8_EXPORT_PRIVATE void DetachMemoryBuffer(Isolate* isolate, - Handle<JSArrayBuffer> buffer, - bool free_memory); - -} // namespace wasm -} // namespace internal -} // namespace v8 - -#endif // V8_WASM_WASM_MEMORY_H_ diff --git a/chromium/v8/src/wasm/wasm-module-builder.cc b/chromium/v8/src/wasm/wasm-module-builder.cc index d3874e1a344..0bbc104070d 100644 --- a/chromium/v8/src/wasm/wasm-module-builder.cc +++ b/chromium/v8/src/wasm/wasm-module-builder.cc @@ -71,15 +71,15 @@ uint32_t WasmFunctionBuilder::AddLocal(ValueType type) { } void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) { - EmitWithU32V(kExprGetLocal, local_index); + EmitWithU32V(kExprLocalGet, local_index); } void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) { - EmitWithU32V(kExprSetLocal, local_index); + EmitWithU32V(kExprLocalSet, local_index); } void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) { - EmitWithU32V(kExprTeeLocal, local_index); + EmitWithU32V(kExprLocalTee, local_index); } void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) { @@ -505,7 +505,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const { buffer->write_f64(global.init.val.f64_const); break; case WasmInitExpr::kGlobalIndex: - buffer->write_u8(kExprGetGlobal); + buffer->write_u8(kExprGlobalGet); buffer->write_u32v(global.init.val.global_index); break; case WasmInitExpr::kRefNullConst: diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc index 5a10368a8b6..033f12ae241 100644 --- a/chromium/v8/src/wasm/wasm-module.cc +++ b/chromium/v8/src/wasm/wasm-module.cc @@ -22,6 +22,7 @@ #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-result.h" +#include "src/wasm/wasm-text.h" namespace v8 { namespace internal { @@ -58,6 +59,57 @@ int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig, return result; } +// static +int GetWasmFunctionOffset(const WasmModule* module, uint32_t func_index) { + const std::vector<WasmFunction>& functions = module->functions; + if (static_cast<uint32_t>(func_index) >= functions.size()) return -1; + DCHECK_GE(kMaxInt, functions[func_index].code.offset()); + return static_cast<int>(functions[func_index].code.offset()); +} + +// static +int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) { + const std::vector<WasmFunction>& functions = module->functions; + + // Binary search for a function containing the given position. + int left = 0; // inclusive + int right = static_cast<int>(functions.size()); // exclusive + if (right == 0) return false; + while (right - left > 1) { + int mid = left + (right - left) / 2; + if (functions[mid].code.offset() <= byte_offset) { + left = mid; + } else { + right = mid; + } + } + // If the found function does not contains the given position, return -1. 
+ const WasmFunction& func = functions[left]; + if (byte_offset < func.code.offset() || + byte_offset >= func.code.end_offset()) { + return -1; + } + + return left; +} + +// static +v8::debug::WasmDisassembly DisassembleWasmFunction( + const WasmModule* module, const ModuleWireBytes& wire_bytes, + int func_index) { + if (func_index < 0 || + static_cast<uint32_t>(func_index) >= module->functions.size()) + return {}; + + std::ostringstream disassembly_os; + v8::debug::WasmDisassembly::OffsetTable offset_table; + + PrintWasmText(module, wire_bytes, static_cast<uint32_t>(func_index), + disassembly_os, &offset_table); + + return {disassembly_os.str(), std::move(offset_table)}; +} + void WasmModule::AddFunctionNameForTesting(int function_index, WireBytesRef name) { if (!function_names) { @@ -475,21 +527,19 @@ Handle<JSArray> GetCustomSections(Isolate* isolate, // Make a copy of the payload data in the section. size_t size = section.payload.length(); - void* memory = - size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size); - - if (size && !memory) { + MaybeHandle<JSArrayBuffer> result = + isolate->factory()->NewJSArrayBufferAndBackingStore( + size, InitializedFlag::kUninitialized); + Handle<JSArrayBuffer> array_buffer; + if (!result.ToHandle(&array_buffer)) { thrower->RangeError("out of memory allocating custom section data"); return Handle<JSArray>(); } - Handle<JSArrayBuffer> buffer = - isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared); - constexpr bool is_external = false; - JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size); - memcpy(memory, wire_bytes.begin() + section.payload.offset(), + memcpy(array_buffer->backing_store(), + wire_bytes.begin() + section.payload.offset(), section.payload.length()); - matching_sections.push_back(buffer); + matching_sections.push_back(array_buffer); } int num_custom_sections = static_cast<int>(matching_sections.size()); diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h index 69c57725de3..79c3b23a332 100644 --- a/chromium/v8/src/wasm/wasm-module.h +++ b/chromium/v8/src/wasm/wasm-module.h @@ -16,9 +16,13 @@ #include "src/wasm/wasm-opcodes.h" namespace v8 { + +namespace debug { +struct WasmDisassembly; +} + namespace internal { -class WasmDebugInfo; class WasmModuleObject; namespace wasm { @@ -240,6 +244,25 @@ V8_EXPORT_PRIVATE int MaxNumExportWrappers(const WasmModule* module); int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig, bool is_import); +// Return the byte offset of the function identified by the given index. +// The offset will be relative to the start of the module bytes. +// Returns -1 if the function index is invalid. +int GetWasmFunctionOffset(const WasmModule* module, uint32_t func_index); + +// Returns the function containing the given byte offset. +// Returns -1 if the byte offset is not contained in any function of this +// module. +int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset); + +// Compute the disassembly of a wasm function. +// Returns the disassembly string and a list of <byte_offset, line, column> +// entries, mapping wasm byte offsets to line and column in the disassembly. +// The list is guaranteed to be ordered by the byte_offset. +// Returns an empty string and empty vector if the function index is invalid. 
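GetWasmFunctionOffset and GetContainingWasmFunction above are hoisted out of WasmModuleObject into free functions on the WasmModule; the latter is a lower-bound style binary search over the functions' ascending code offsets followed by a containment check. A stand-alone sketch of the same search, assuming nothing beyond the standard library:

    #include <vector>

    struct FuncRange {
      int offset;      // start of the function's code in the module
      int end_offset;  // one past the end
    };

    // Find the index of the function whose [offset, end_offset) range contains
    // byte_offset, or -1 if no function contains it. Offsets are sorted.
    int ContainingFunction(const std::vector<FuncRange>& funcs, int byte_offset) {
      if (funcs.empty()) return -1;  // no functions, nothing can contain it
      int left = 0;                                // inclusive
      int right = static_cast<int>(funcs.size());  // exclusive
      while (right - left > 1) {
        int mid = left + (right - left) / 2;
        if (funcs[mid].offset <= byte_offset) {
          left = mid;
        } else {
          right = mid;
        }
      }
      const FuncRange& f = funcs[left];
      if (byte_offset < f.offset || byte_offset >= f.end_offset) return -1;
      return left;
    }

    int main() {
      std::vector<FuncRange> funcs = {{0, 10}, {10, 25}, {30, 40}};
      return (ContainingFunction(funcs, 12) == 1 &&
              ContainingFunction(funcs, 27) == -1) ? 0 : 1;
    }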
+V8_EXPORT_PRIVATE debug::WasmDisassembly DisassembleWasmFunction( + const WasmModule* module, const ModuleWireBytes& wire_bytes, + int func_index); + // Interface to the storage (wire bytes) of a wasm module. // It is illegal for anyone receiving a ModuleWireBytes to store pointers based // on module_bytes, as this storage is only guaranteed to be alive as long as @@ -290,15 +313,6 @@ struct WasmFunctionName { std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name); -// Get the debug info associated with the given wasm object. -// If no debug info exists yet, it is created automatically. -Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm); - -V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes( - Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower, - ModuleOrigin origin, Handle<Script> asm_js_script, - Vector<const byte> asm_offset_table); - V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context); diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h index 66d3a2716e9..a7f74381ae9 100644 --- a/chromium/v8/src/wasm/wasm-objects-inl.h +++ b/chromium/v8/src/wasm/wasm-objects-inl.h @@ -88,12 +88,8 @@ ACCESSORS(WasmModuleObject, managed_native_module, Managed<wasm::NativeModule>, kNativeModuleOffset) ACCESSORS(WasmModuleObject, export_wrappers, FixedArray, kExportWrappersOffset) ACCESSORS(WasmModuleObject, script, Script, kScriptOffset) -ACCESSORS(WasmModuleObject, weak_instance_list, WeakArrayList, - kWeakInstanceListOffset) OPTIONAL_ACCESSORS(WasmModuleObject, asm_js_offset_table, ByteArray, kAsmJsOffsetTableOffset) -OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray, - kBreakPointInfosOffset) wasm::NativeModule* WasmModuleObject::native_module() const { return managed_native_module().raw(); } @@ -102,13 +98,9 @@ WasmModuleObject::shared_native_module() const { return managed_native_module().get(); } const wasm::WasmModule* WasmModuleObject::module() const { - // TODO(clemensh): Remove this helper (inline in callers). + // TODO(clemensb): Remove this helper (inline in callers). 
return native_module()->module(); } -void WasmModuleObject::reset_breakpoint_infos() { - WRITE_FIELD(*this, kBreakPointInfosOffset, - GetReadOnlyRoots().undefined_value()); -} bool WasmModuleObject::is_asm_js() { bool asm_js = is_asmjs_module(module()); DCHECK_EQ(asm_js, script().IsUserJavaScript()); @@ -309,6 +301,10 @@ ACCESSORS(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>, kSerializedSignatureOffset) ACCESSORS(WasmExceptionObject, exception_tag, HeapObject, kExceptionTagOffset) +// WasmExceptionPackage +OBJECT_CONSTRUCTORS_IMPL(WasmExceptionPackage, JSReceiver) +CAST_ACCESSOR(WasmExceptionPackage) + // WasmExportedFunction WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) { SLOW_DCHECK(IsWasmExportedFunction(*this)); @@ -382,6 +378,8 @@ ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset) // WasmDebugInfo ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset) ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset) +ACCESSORS(WasmDebugInfo, interpreter_reference_stack, Cell, + kInterpreterReferenceStackOffset) OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset) OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray, kCWasmEntriesOffset) diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc index d9417943a84..93ce345a5fb 100644 --- a/chromium/v8/src/wasm/wasm-objects.cc +++ b/chromium/v8/src/wasm/wasm-objects.cc @@ -25,10 +25,8 @@ #include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-limits.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-objects-inl.h" -#include "src/wasm/wasm-text.h" #define TRACE(...) \ do { \ @@ -244,37 +242,40 @@ Handle<WasmModuleObject> WasmModuleObject::New( isolate->factory()->NewJSObject(isolate->wasm_module_constructor())); module_object->set_export_wrappers(*export_wrappers); if (script->type() == Script::TYPE_WASM) { - script->set_wasm_module_object(*module_object); + script->set_wasm_breakpoint_infos( + ReadOnlyRoots(isolate).empty_fixed_array()); + script->set_wasm_managed_native_module(*managed_native_module); + script->set_wasm_weak_instance_list( + ReadOnlyRoots(isolate).empty_weak_array_list()); } module_object->set_script(*script); - module_object->set_weak_instance_list( - ReadOnlyRoots(isolate).empty_weak_array_list()); module_object->set_managed_native_module(*managed_native_module); return module_object; } -bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object, - int* position, +// static +bool WasmModuleObject::SetBreakPoint(Handle<Script> script, int* position, Handle<BreakPoint> break_point) { - Isolate* isolate = module_object->GetIsolate(); + Isolate* isolate = script->GetIsolate(); // Find the function for this breakpoint. - int func_index = module_object->GetContainingFunction(*position); + const WasmModule* module = script->wasm_native_module()->module(); + int func_index = GetContainingWasmFunction(module, *position); if (func_index < 0) return false; - const WasmFunction& func = module_object->module()->functions[func_index]; + const WasmFunction& func = module->functions[func_index]; int offset_in_func = *position - func.code.offset(); // According to the current design, we should only be called with valid // breakable positions. 
- DCHECK(IsBreakablePosition(module_object->native_module(), func_index, + DCHECK(IsBreakablePosition(script->wasm_native_module(), func_index, offset_in_func)); // Insert new break point into break_positions of module object. - WasmModuleObject::AddBreakpoint(module_object, *position, break_point); + WasmModuleObject::AddBreakpointToInfo(script, *position, break_point); - // Iterate over all instances of this module and tell them to set this new - // breakpoint. We do this using the weak list of all instances. - Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(), + // Iterate over all instances and tell them to set this new breakpoint. + // We do this using the weak list of all instances from the script. + Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(), isolate); for (int i = 0; i < weak_instance_list->length(); ++i) { MaybeObject maybe_instance = weak_instance_list->Get(i); @@ -291,6 +292,42 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object, return true; } +// static +bool WasmModuleObject::ClearBreakPoint(Handle<Script> script, int position, + Handle<BreakPoint> break_point) { + Isolate* isolate = script->GetIsolate(); + + // Find the function for this breakpoint. + const WasmModule* module = script->wasm_native_module()->module(); + int func_index = GetContainingWasmFunction(module, position); + if (func_index < 0) return false; + const WasmFunction& func = module->functions[func_index]; + int offset_in_func = position - func.code.offset(); + + if (!WasmModuleObject::RemoveBreakpointFromInfo(script, position, + break_point)) { + return false; + } + + // Iterate over all instances and tell them to remove this breakpoint. + // We do this using the weak list of all instances from the script. 
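Both SetBreakPoint and ClearBreakPoint now walk the script's weak list of instances: entries whose instance has already been collected are skipped, and holding the list does not keep instances alive. A language-level analogue of that pattern using std::weak_ptr rather than V8's weak reference machinery (names hypothetical):

    #include <memory>
    #include <vector>

    struct Instance {
      void SetBreakpoint(int /*position*/) {}
    };

    // Analogue of iterating wasm_weak_instance_list(): lock each weak entry and
    // skip the ones that have already been collected.
    void SetBreakpointOnLiveInstances(
        const std::vector<std::weak_ptr<Instance>>& instances, int position) {
      for (const auto& weak : instances) {
        if (std::shared_ptr<Instance> instance = weak.lock()) {
          instance->SetBreakpoint(position);
        }
      }
    }

    int main() {
      auto live = std::make_shared<Instance>();
      std::vector<std::weak_ptr<Instance>> list{live};
      {
        auto dead = std::make_shared<Instance>();
        list.push_back(dead);
      }  // 'dead' is destroyed here; its weak entry will be skipped
      SetBreakpointOnLiveInstances(list, 42);
      return 0;
    }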
+ Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(), + isolate); + for (int i = 0; i < weak_instance_list->length(); ++i) { + MaybeObject maybe_instance = weak_instance_list->Get(i); + if (maybe_instance->IsWeak()) { + Handle<WasmInstanceObject> instance( + WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()), + isolate); + Handle<WasmDebugInfo> debug_info = + WasmInstanceObject::GetOrCreateDebugInfo(instance); + WasmDebugInfo::ClearBreakpoint(debug_info, func_index, offset_in_func); + } + } + + return true; +} + namespace { int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) { @@ -323,17 +360,17 @@ int FindBreakpointInfoInsertPos(Isolate* isolate, } // namespace -void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object, - int position, - Handle<BreakPoint> break_point) { - Isolate* isolate = module_object->GetIsolate(); +// static +void WasmModuleObject::AddBreakpointToInfo(Handle<Script> script, int position, + Handle<BreakPoint> break_point) { + Isolate* isolate = script->GetIsolate(); Handle<FixedArray> breakpoint_infos; - if (module_object->has_breakpoint_infos()) { - breakpoint_infos = handle(module_object->breakpoint_infos(), isolate); + if (script->has_wasm_breakpoint_infos()) { + breakpoint_infos = handle(script->wasm_breakpoint_infos(), isolate); } else { breakpoint_infos = isolate->factory()->NewFixedArray(4, AllocationType::kOld); - module_object->set_breakpoint_infos(*breakpoint_infos); + script->set_wasm_breakpoint_infos(*breakpoint_infos); } int insert_pos = @@ -357,7 +394,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object, if (need_realloc) { new_breakpoint_infos = isolate->factory()->NewFixedArray( 2 * breakpoint_infos->length(), AllocationType::kOld); - module_object->set_breakpoint_infos(*new_breakpoint_infos); + script->set_wasm_breakpoint_infos(*new_breakpoint_infos); // Copy over the entries [0, insert_pos). for (int i = 0; i < insert_pos; ++i) new_breakpoint_infos->set(i, breakpoint_infos->get(i)); @@ -379,16 +416,45 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object, new_breakpoint_infos->set(insert_pos, *breakpoint_info); } +// static +bool WasmModuleObject::RemoveBreakpointFromInfo( + Handle<Script> script, int position, Handle<BreakPoint> break_point) { + if (!script->has_wasm_breakpoint_infos()) return false; + + Isolate* isolate = script->GetIsolate(); + Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate); + + int pos = FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position); + + // Does a BreakPointInfo object already exist for this position? + if (pos == breakpoint_infos->length()) return false; + + Handle<BreakPointInfo> info(BreakPointInfo::cast(breakpoint_infos->get(pos)), + isolate); + BreakPointInfo::ClearBreakPoint(isolate, info, break_point); + + // Check if there are no more breakpoints at this location. + if (info->GetBreakPointCount(isolate) == 0) { + // Update array by moving breakpoints up one position. + for (int i = pos; i < breakpoint_infos->length() - 1; i++) { + Object entry = breakpoint_infos->get(i + 1); + breakpoint_infos->set(i, entry); + if (entry.IsUndefined(isolate)) break; + } + // Make sure last array element is empty as a result. 
+ breakpoint_infos->set_undefined(breakpoint_infos->length() - 1); + } + return true; +} + void WasmModuleObject::SetBreakpointsOnNewInstance( - Handle<WasmModuleObject> module_object, - Handle<WasmInstanceObject> instance) { - if (!module_object->has_breakpoint_infos()) return; - Isolate* isolate = module_object->GetIsolate(); + Handle<Script> script, Handle<WasmInstanceObject> instance) { + if (!script->has_wasm_breakpoint_infos()) return; + Isolate* isolate = script->GetIsolate(); Handle<WasmDebugInfo> debug_info = WasmInstanceObject::GetOrCreateDebugInfo(instance); - Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(), - isolate); + Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate); // If the array exists, it should not be empty. DCHECK_LT(0, breakpoint_infos->length()); @@ -404,9 +470,10 @@ void WasmModuleObject::SetBreakpointsOnNewInstance( int position = breakpoint_info->source_position(); // Find the function for this breakpoint, and set the breakpoint. - int func_index = module_object->GetContainingFunction(position); + const WasmModule* module = script->wasm_native_module()->module(); + int func_index = GetContainingWasmFunction(module, position); DCHECK_LE(0, func_index); - const WasmFunction& func = module_object->module()->functions[func_index]; + const WasmFunction& func = module->functions[func_index]; int offset_in_func = position - func.code.offset(); WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func); } @@ -497,7 +564,7 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object, if (module->origin == wasm::kWasmOrigin) { // for non-asm.js modules, we just add the function's start offset // to make a module-relative position. - return byte_offset + module_object->GetFunctionOffset(func_index); + return byte_offset + GetWasmFunctionOffset(module, func_index); } // asm.js modules have an additional offset table that must be searched. 
@@ -529,31 +596,15 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object, return offset_table->get_int(kOTESize * left + idx); } -v8::debug::WasmDisassembly WasmModuleObject::DisassembleFunction( - int func_index) { - DisallowHeapAllocation no_gc; - - if (func_index < 0 || - static_cast<uint32_t>(func_index) >= module()->functions.size()) - return {}; - - wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes()); - - std::ostringstream disassembly_os; - v8::debug::WasmDisassembly::OffsetTable offset_table; - - PrintWasmText(module(), wire_bytes, static_cast<uint32_t>(func_index), - disassembly_os, &offset_table); - - return {disassembly_os.str(), std::move(offset_table)}; -} - +// static bool WasmModuleObject::GetPossibleBreakpoints( - const v8::debug::Location& start, const v8::debug::Location& end, + wasm::NativeModule* native_module, const v8::debug::Location& start, + const v8::debug::Location& end, std::vector<v8::debug::BreakLocation>* locations) { DisallowHeapAllocation no_gc; - const std::vector<WasmFunction>& functions = module()->functions; + const std::vector<WasmFunction>& functions = + native_module->module()->functions; if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 || (!end.IsEmpty() && (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0))) @@ -595,7 +646,7 @@ bool WasmModuleObject::GetPossibleBreakpoints( AccountingAllocator alloc; Zone tmp(&alloc, ZONE_NAME); - const byte* module_start = native_module()->wire_bytes().begin(); + const byte* module_start = native_module->wire_bytes().begin(); for (uint32_t func_idx = start_func_index; func_idx <= end_func_index; ++func_idx) { @@ -620,12 +671,12 @@ bool WasmModuleObject::GetPossibleBreakpoints( return true; } +// static MaybeHandle<FixedArray> WasmModuleObject::CheckBreakPoints( - Isolate* isolate, Handle<WasmModuleObject> module_object, int position) { - if (!module_object->has_breakpoint_infos()) return {}; + Isolate* isolate, Handle<Script> script, int position) { + if (!script->has_wasm_breakpoint_infos()) return {}; - Handle<FixedArray> breakpoint_infos(module_object->breakpoint_infos(), - isolate); + Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate); int insert_pos = FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position); if (insert_pos >= breakpoint_infos->length()) return {}; @@ -709,60 +760,6 @@ Vector<const uint8_t> WasmModuleObject::GetRawFunctionName( return Vector<const uint8_t>::cast(name); } -int WasmModuleObject::GetFunctionOffset(uint32_t func_index) { - const std::vector<WasmFunction>& functions = module()->functions; - if (static_cast<uint32_t>(func_index) >= functions.size()) return -1; - DCHECK_GE(kMaxInt, functions[func_index].code.offset()); - return static_cast<int>(functions[func_index].code.offset()); -} - -int WasmModuleObject::GetContainingFunction(uint32_t byte_offset) { - const std::vector<WasmFunction>& functions = module()->functions; - - // Binary search for a function containing the given position. - int left = 0; // inclusive - int right = static_cast<int>(functions.size()); // exclusive - if (right == 0) return false; - while (right - left > 1) { - int mid = left + (right - left) / 2; - if (functions[mid].code.offset() <= byte_offset) { - left = mid; - } else { - right = mid; - } - } - // If the found function does not contains the given position, return -1. 
- const WasmFunction& func = functions[left]; - if (byte_offset < func.code.offset() || - byte_offset >= func.code.end_offset()) { - return -1; - } - - return left; -} - -bool WasmModuleObject::GetPositionInfo(uint32_t position, - Script::PositionInfo* info) { - if (script().source_mapping_url().IsString()) { - if (module()->functions.size() == 0) return false; - info->line = 0; - info->column = position; - info->line_start = module()->functions[0].code.offset(); - info->line_end = module()->functions.back().code.end_offset(); - return true; - } - int func_index = GetContainingFunction(position); - if (func_index < 0) return false; - - const WasmFunction& function = module()->functions[func_index]; - - info->line = func_index; - info->column = position - function.code.offset(); - info->line_start = function.code.offset(); - info->line_end = function.code.end_offset(); - return true; -} - Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, wasm::ValueType type, uint32_t initial, bool has_maximum, @@ -1217,66 +1214,17 @@ void WasmIndirectFunctionTable::Resize(Isolate* isolate, } namespace { -bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer, - size_t new_size) { - if (new_size > old_buffer->allocation_length()) return false; - void* old_mem_start = old_buffer->backing_store(); - size_t old_size = old_buffer->byte_length(); - if (old_size != new_size) { - DCHECK_NOT_NULL(old_mem_start); - DCHECK_GE(new_size, old_size); - // If adjusting permissions fails, propagate error back to return - // failure to grow. - if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start, new_size, - PageAllocator::kReadWrite)) { - return false; - } - reinterpret_cast<v8::Isolate*>(isolate) - ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size); - } - return true; -} -MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate, - Handle<JSArrayBuffer> old_buffer, - size_t new_size) { - CHECK_EQ(0, new_size % wasm::kWasmPageSize); - // Reusing the backing store from externalized buffers causes problems with - // Blink's array buffers. The connection between the two is lost, which can - // lead to Blink not knowing about the other reference to the buffer and - // freeing it too early. - if (old_buffer->is_external() || new_size > old_buffer->allocation_length()) { - // We couldn't reuse the old backing store, so create a new one and copy the - // old contents in. - Handle<JSArrayBuffer> new_buffer; - if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) { - return {}; - } - void* old_mem_start = old_buffer->backing_store(); - size_t old_size = old_buffer->byte_length(); - if (old_size == 0) return new_buffer; - memcpy(new_buffer->backing_store(), old_mem_start, old_size); - DCHECK(old_buffer.is_null() || !old_buffer->is_shared()); - constexpr bool free_memory = true; - i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory); - return new_buffer; - } else { - if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) return {}; - // NOTE: We must allocate a new array buffer here because the spec - // assumes that ArrayBuffers do not change size. - void* backing_store = old_buffer->backing_store(); - bool is_external = old_buffer->is_external(); - // Disconnect buffer early so GC won't free it. 
- i::wasm::DetachMemoryBuffer(isolate, old_buffer, false); - Handle<JSArrayBuffer> new_buffer = - wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external); - return new_buffer; - } -} - -// May GC, because SetSpecializationMemInfoFrom may GC void SetInstanceMemory(Handle<WasmInstanceObject> instance, Handle<JSArrayBuffer> buffer) { + bool is_wasm_module = instance->module()->origin == wasm::kWasmOrigin; + bool use_trap_handler = + instance->module_object().native_module()->use_trap_handler(); + // Wasm modules compiled to use the trap handler don't have bounds checks, + // so they must have a memory that has guard regions. + CHECK_IMPLIES(is_wasm_module && use_trap_handler, + buffer->GetBackingStore()->has_guard_regions()); + instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()), buffer->byte_length()); #if DEBUG @@ -1294,7 +1242,6 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance, } #endif } - } // namespace Handle<WasmMemoryObject> WasmMemoryObject::New( @@ -1302,44 +1249,54 @@ Handle<WasmMemoryObject> WasmMemoryObject::New( uint32_t maximum) { Handle<JSArrayBuffer> buffer; if (!maybe_buffer.ToHandle(&buffer)) { - // If no buffer was provided, create a 0-length one. - buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false); + // If no buffer was provided, create a zero-length one. + auto backing_store = + BackingStore::AllocateWasmMemory(isolate, 0, 0, SharedFlag::kNotShared); + buffer = isolate->factory()->NewJSArrayBuffer(std::move(backing_store)); } - // TODO(kschimpf): Do we need to add an argument that defines the - // style of memory the user prefers (with/without trap handling), so - // that the memory will match the style of the compiled wasm module. - // See issue v8:7143 Handle<JSFunction> memory_ctor( isolate->native_context()->wasm_memory_constructor(), isolate); - auto memory_obj = Handle<WasmMemoryObject>::cast( + auto memory_object = Handle<WasmMemoryObject>::cast( isolate->factory()->NewJSObject(memory_ctor, AllocationType::kOld)); - memory_obj->set_array_buffer(*buffer); - memory_obj->set_maximum_pages(maximum); + memory_object->set_array_buffer(*buffer); + memory_object->set_maximum_pages(maximum); - return memory_obj; + if (buffer->is_shared()) { + auto backing_store = buffer->GetBackingStore(); + backing_store->AttachSharedWasmMemoryObject(isolate, memory_object); + } + + return memory_object; } MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate, uint32_t initial, uint32_t maximum, - bool is_shared_memory) { - Handle<JSArrayBuffer> buffer; - size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) * - static_cast<size_t>(initial); - if (is_shared_memory) { - size_t max_size = static_cast<size_t>(i::wasm::kWasmPageSize) * - static_cast<size_t>(maximum); - if (!i::wasm::NewSharedArrayBuffer(isolate, size, max_size) - .ToHandle(&buffer)) { - return {}; - } - } else { - if (!i::wasm::NewArrayBuffer(isolate, size).ToHandle(&buffer)) { - return {}; - } + SharedFlag shared) { + auto heuristic_maximum = maximum; +#ifdef V8_TARGET_ARCH_32_BIT + // TODO(wasm): use a better heuristic for reserving more than the initial + // number of pages on 32-bit systems. Being too greedy in reserving capacity + // limits the number of memories that can be allocated, causing OOMs in many + // tests. For now, on 32-bit we never reserve more than initial, unless the + // memory is shared. 
+ if (shared == SharedFlag::kNotShared || !FLAG_wasm_grow_shared_memory) { + heuristic_maximum = initial; } +#endif + + auto backing_store = BackingStore::AllocateWasmMemory( + isolate, initial, heuristic_maximum, shared); + + if (!backing_store) return {}; + + Handle<JSArrayBuffer> buffer = + (shared == SharedFlag::kShared) + ? isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store)) + : isolate->factory()->NewJSArrayBuffer(std::move(backing_store)); + return New(isolate, buffer, maximum); } @@ -1383,11 +1340,11 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, uint32_t pages) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory"); Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate); - if (old_buffer->is_shared() && !FLAG_wasm_grow_shared_memory) return -1; - auto* memory_tracker = isolate->wasm_engine()->memory_tracker(); - if (!memory_tracker->IsWasmMemoryGrowable(old_buffer)) return -1; + // Any buffer used as an asmjs memory cannot be detached, and + // therefore this memory cannot be grown. + if (old_buffer->is_asmjs_memory()) return -1; - // Checks for maximum memory size, compute new size. + // Checks for maximum memory size. uint32_t maximum_pages = wasm::max_mem_pages(); if (memory_object->has_maximum_pages()) { maximum_pages = std::min( @@ -1402,47 +1359,54 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, (pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit return -1; } - size_t new_size = - static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize; + std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore(); + if (!backing_store) return -1; + + // Compute new size. + size_t new_pages = old_pages + pages; + size_t new_byte_length = new_pages * wasm::kWasmPageSize; - // Memory is grown, but the memory objects and instances are not yet updated. - // Handle this in the interrupt handler so that it's safe for all the isolates - // that share this buffer to be updated safely. - Handle<JSArrayBuffer> new_buffer; + // Try to handle shared memory first. if (old_buffer->is_shared()) { - // Adjust protections for the buffer. - if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) { - return -1; - } - void* backing_store = old_buffer->backing_store(); - if (memory_tracker->IsWasmSharedMemory(backing_store)) { - // This memory is shared between different isolates. - DCHECK(old_buffer->is_shared()); - // Update pending grow state, and trigger a grow interrupt on all the - // isolates that share this buffer. - memory_tracker->SetPendingUpdateOnGrow(old_buffer, new_size); - // Handle interrupts for this isolate so that the instances with this - // isolate are updated. - isolate->stack_guard()->HandleInterrupts(); - // Failure to allocate, or adjust pemissions already handled here, and - // updates to instances handled in the interrupt handler safe to return. - return static_cast<uint32_t>(old_size / wasm::kWasmPageSize); + if (FLAG_wasm_grow_shared_memory) { + // Shared memories can only be grown in place; no copying. + if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) { + BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store, + new_pages); + // Broadcasting the update should update this memory object too. + CHECK_NE(*old_buffer, memory_object->array_buffer()); + // This is a less than check, as it is not guaranteed that the SAB + // length here will be equal to the stashed length above as calls to + // grow the same memory object can come in from different workers. 
+ // It is also possible that a call to Grow was in progress when + // handling this call. + CHECK_LE(new_byte_length, memory_object->array_buffer().byte_length()); + return static_cast<int32_t>(old_pages); // success + } } - // SharedArrayBuffer, but not shared across isolates. Setup a new buffer - // with updated permissions and update the instances. - new_buffer = - wasm::SetupArrayBuffer(isolate, backing_store, new_size, - old_buffer->is_external(), SharedFlag::kShared); + return -1; + } + + // Try to grow non-shared memory in-place. + if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) { + // Detach old and create a new one with the grown backing store. + old_buffer->Detach(true); + Handle<JSArrayBuffer> new_buffer = + isolate->factory()->NewJSArrayBuffer(std::move(backing_store)); memory_object->update_instances(isolate, new_buffer); - } else { - if (!MemoryGrowBuffer(isolate, old_buffer, new_size) - .ToHandle(&new_buffer)) { - return -1; - } + return static_cast<int32_t>(old_pages); // success } - // Update instances if any. + // Try allocating a new backing store and copying. + std::unique_ptr<BackingStore> new_backing_store = + backing_store->CopyWasmMemory(isolate, new_pages); + if (!new_backing_store) return -1; + + // Detach old and create a new one with the new backing store. + old_buffer->Detach(true); + Handle<JSArrayBuffer> new_buffer = + isolate->factory()->NewJSArrayBuffer(std::move(new_backing_store)); memory_object->update_instances(isolate, new_buffer); - return static_cast<uint32_t>(old_size / wasm::kWasmPageSize); + return static_cast<int32_t>(old_pages); // success } // static @@ -1476,18 +1440,15 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New( global_obj->set_tagged_buffer(*tagged_buffer); } else { DCHECK(maybe_tagged_buffer.is_null()); - Handle<JSArrayBuffer> untagged_buffer; uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type); + + Handle<JSArrayBuffer> untagged_buffer; if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) { - // If no buffer was provided, create one long enough for the given type. - untagged_buffer = isolate->factory()->NewJSArrayBuffer( - SharedFlag::kNotShared, AllocationType::kOld); - - const bool initialize = true; - if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate, - type_size, initialize)) { - return {}; - } + MaybeHandle<JSArrayBuffer> result = + isolate->factory()->NewJSArrayBufferAndBackingStore( + offset + type_size, InitializedFlag::kZeroInitialized); + + if (!result.ToHandle(&untagged_buffer)) return {}; } // Check that the offset is in bounds. @@ -1725,13 +1686,16 @@ Handle<WasmInstanceObject> WasmInstanceObject::New( instance->set_jump_table_start( module_object->native_module()->jump_table_start()); - // Insert the new instance into the modules weak list of instances. + // Insert the new instance into the scripts weak list of instances. This list + // is used for breakpoints affecting all instances belonging to the script. // TODO(mstarzinger): Allow to reuse holes in the {WeakArrayList} below. 
- Handle<WeakArrayList> weak_instance_list(module_object->weak_instance_list(), - isolate); - weak_instance_list = WeakArrayList::AddToEnd( - isolate, weak_instance_list, MaybeObjectHandle::Weak(instance)); - module_object->set_weak_instance_list(*weak_instance_list); + if (module_object->script().type() == Script::TYPE_WASM) { + Handle<WeakArrayList> weak_instance_list( + module_object->script().wasm_weak_instance_list(), isolate); + weak_instance_list = WeakArrayList::AddToEnd( + isolate, weak_instance_list, MaybeObjectHandle::Weak(instance)); + module_object->script().set_wasm_weak_instance_list(*weak_instance_list); + } InitDataSegmentArrays(instance, module_object); InitElemSegmentArrays(instance, module_object); @@ -2040,7 +2004,7 @@ bool WasmCapiFunction::IsSignatureEqual(const wasm::FunctionSig* sig) const { } // static -Handle<JSReceiver> WasmExceptionPackage::New( +Handle<WasmExceptionPackage> WasmExceptionPackage::New( Isolate* isolate, Handle<WasmExceptionTag> exception_tag, int size) { Handle<Object> exception = isolate->factory()->NewWasmRuntimeError( MessageTemplate::kWasmExceptionError); @@ -2055,37 +2019,31 @@ Handle<JSReceiver> WasmExceptionPackage::New( values, StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError)) .is_null()); - return Handle<JSReceiver>::cast(exception); + return Handle<WasmExceptionPackage>::cast(exception); } // static Handle<Object> WasmExceptionPackage::GetExceptionTag( - Isolate* isolate, Handle<Object> exception_object) { - if (exception_object->IsJSReceiver()) { - Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object); - Handle<Object> tag; - if (JSReceiver::GetProperty(isolate, exception, - isolate->factory()->wasm_exception_tag_symbol()) - .ToHandle(&tag)) { - return tag; - } + Isolate* isolate, Handle<WasmExceptionPackage> exception_package) { + Handle<Object> tag; + if (JSReceiver::GetProperty(isolate, exception_package, + isolate->factory()->wasm_exception_tag_symbol()) + .ToHandle(&tag)) { + return tag; } return ReadOnlyRoots(isolate).undefined_value_handle(); } // static Handle<Object> WasmExceptionPackage::GetExceptionValues( - Isolate* isolate, Handle<Object> exception_object) { - if (exception_object->IsJSReceiver()) { - Handle<JSReceiver> exception = Handle<JSReceiver>::cast(exception_object); - Handle<Object> values; - if (JSReceiver::GetProperty( - isolate, exception, - isolate->factory()->wasm_exception_values_symbol()) - .ToHandle(&values)) { - DCHECK(values->IsFixedArray()); - return values; - } + Isolate* isolate, Handle<WasmExceptionPackage> exception_package) { + Handle<Object> values; + if (JSReceiver::GetProperty( + isolate, exception_package, + isolate->factory()->wasm_exception_values_symbol()) + .ToHandle(&values)) { + DCHECK(values->IsFixedArray()); + return values; } return ReadOnlyRoots(isolate).undefined_value_handle(); } diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h index c198a9bc637..23c13c43299 100644 --- a/chromium/v8/src/wasm/wasm-objects.h +++ b/chromium/v8/src/wasm/wasm-objects.h @@ -5,13 +5,13 @@ #ifndef V8_WASM_WASM_OBJECTS_H_ #define V8_WASM_WASM_OBJECTS_H_ +#include <memory> + #include "src/base/bits.h" #include "src/codegen/signature.h" #include "src/debug/debug.h" -#include "src/debug/interface-types.h" #include "src/heap/heap.h" #include "src/objects/objects.h" -#include "src/objects/script.h" #include "src/wasm/value-type.h" // Has to be the last include (doesn't have include guards) @@ -47,6 +47,8 @@ class WasmJSFunction; class 
WasmModuleObject; class WasmIndirectFunctionTable; +enum class SharedFlag : uint8_t; + template <class CppType> class Managed; @@ -124,14 +126,11 @@ class WasmModuleObject : public JSObject { DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>) DECL_ACCESSORS(export_wrappers, FixedArray) DECL_ACCESSORS(script, Script) - DECL_ACCESSORS(weak_instance_list, WeakArrayList) DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray) - DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray) inline wasm::NativeModule* native_module() const; inline const std::shared_ptr<wasm::NativeModule>& shared_native_module() const; inline const wasm::WasmModule* module() const; - inline void reset_breakpoint_infos(); // Dispatched behavior. DECL_PRINTER(WasmModuleObject) @@ -153,23 +152,28 @@ class WasmModuleObject : public JSObject { Handle<Script> script, Handle<FixedArray> export_wrappers, size_t code_size_estimate); + // TODO(mstarzinger): The below breakpoint handling methods taking a {Script} + // instead of a {WasmModuleObject} as first argument should be moved onto a + // separate {WasmScript} class, implementation move to wasm-debug.cc then. + // Set a breakpoint on the given byte position inside the given module. // This will affect all live and future instances of the module. // The passed position might be modified to point to the next breakable // location inside the same function. // If it points outside a function, or behind the last breakable location, // this function returns false and does not set any breakpoint. - V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<WasmModuleObject>, - int* position, + V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<Script>, int* position, Handle<BreakPoint> break_point); + // Remove a previously set breakpoint at the given byte position inside the + // given module. If this breakpoint is not found this function returns false. + V8_EXPORT_PRIVATE static bool ClearBreakPoint(Handle<Script>, int position, + Handle<BreakPoint> break_point); + // Check whether this module was generated from asm.js source. inline bool is_asm_js(); - static void AddBreakpoint(Handle<WasmModuleObject>, int position, - Handle<BreakPoint> break_point); - - static void SetBreakpointsOnNewInstance(Handle<WasmModuleObject>, + static void SetBreakpointsOnNewInstance(Handle<Script>, Handle<WasmInstanceObject>); // Get the module name, if set. Returns an empty handle otherwise. @@ -195,34 +199,12 @@ class WasmModuleObject : public JSObject { // Does not allocate, hence gc-safe. Vector<const uint8_t> GetRawFunctionName(uint32_t func_index); - // Return the byte offset of the function identified by the given index. - // The offset will be relative to the start of the module bytes. - // Returns -1 if the function index is invalid. - int GetFunctionOffset(uint32_t func_index); - - // Returns the function containing the given byte offset. - // Returns -1 if the byte offset is not contained in any function of this - // module. - int GetContainingFunction(uint32_t byte_offset); - - // Translate from byte offset in the module to function number and byte offset - // within that function, encoded as line and column in the position info. - // Returns true if the position is valid inside this module, false otherwise. - bool GetPositionInfo(uint32_t position, Script::PositionInfo* info); - // Get the source position from a given function index and byte offset, // for either asm.js or pure Wasm modules. 
static int GetSourcePosition(Handle<WasmModuleObject>, uint32_t func_index, uint32_t byte_offset, bool is_at_number_conversion); - // Compute the disassembly of a wasm function. - // Returns the disassembly string and a list of <byte_offset, line, column> - // entries, mapping wasm byte offsets to line and column in the disassembly. - // The list is guaranteed to be ordered by the byte_offset. - // Returns an empty string and empty vector if the function index is invalid. - V8_EXPORT_PRIVATE debug::WasmDisassembly DisassembleFunction(int func_index); - // Extract a portion of the wire bytes as UTF-8 string. // Returns a null handle if the respective bytes do not form a valid UTF-8 // string. @@ -233,17 +215,24 @@ class WasmModuleObject : public JSObject { wasm::WireBytesRef ref); // Get a list of all possible breakpoints within a given range of this module. - V8_EXPORT_PRIVATE bool GetPossibleBreakpoints( - const debug::Location& start, const debug::Location& end, - std::vector<debug::BreakLocation>* locations); + V8_EXPORT_PRIVATE static bool GetPossibleBreakpoints( + wasm::NativeModule* native_module, const debug::Location& start, + const debug::Location& end, std::vector<debug::BreakLocation>* locations); // Return an empty handle if no breakpoint is hit at that location, or a // FixedArray with all hit breakpoint objects. - static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*, - Handle<WasmModuleObject>, + static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*, Handle<Script>, int position); OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject); + + private: + // Helper functions that update the breakpoint info list. + static void AddBreakpointToInfo(Handle<Script>, int position, + Handle<BreakPoint> break_point); + + static bool RemoveBreakpointFromInfo(Handle<Script>, int position, + Handle<BreakPoint> break_point); }; // Representation of a WebAssembly.Table JavaScript-level object. @@ -354,9 +343,10 @@ class WasmMemoryObject : public JSObject { V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New( Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum); - V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New( - Isolate* isolate, uint32_t initial, uint32_t maximum, - bool is_shared_memory); + V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(Isolate* isolate, + uint32_t initial, + uint32_t maximum, + SharedFlag shared); void update_instances(Isolate* isolate, Handle<JSArrayBuffer> buffer); @@ -645,20 +635,22 @@ class WasmExceptionObject : public JSObject { // A Wasm exception that has been thrown out of Wasm code. class WasmExceptionPackage : public JSReceiver { public: - // TODO(mstarzinger): Ideally this interface would use {WasmExceptionPackage} - // instead of {JSReceiver} throughout. For now a type-check implies doing a - // property lookup however, which would result in casts being handlified. - static Handle<JSReceiver> New(Isolate* isolate, - Handle<WasmExceptionTag> exception_tag, - int encoded_size); + static Handle<WasmExceptionPackage> New( + Isolate* isolate, Handle<WasmExceptionTag> exception_tag, + int encoded_size); // The below getters return {undefined} in case the given exception package // does not carry the requested values (i.e. is of a different type). 
- static Handle<Object> GetExceptionTag(Isolate*, Handle<Object> exception); - static Handle<Object> GetExceptionValues(Isolate*, Handle<Object> exception); + static Handle<Object> GetExceptionTag( + Isolate* isolate, Handle<WasmExceptionPackage> exception_package); + static Handle<Object> GetExceptionValues( + Isolate* isolate, Handle<WasmExceptionPackage> exception_package); // Determines the size of the array holding all encoded exception values. static uint32_t GetEncodedSize(const wasm::WasmException* exception); + + DECL_CAST(WasmExceptionPackage) + OBJECT_CONSTRUCTORS(WasmExceptionPackage, JSReceiver); }; // A Wasm function that is wrapped and exported to JavaScript. @@ -801,7 +793,7 @@ class WasmExportedFunctionData : public Struct { DECL_PRINTER(WasmExportedFunctionData) DECL_VERIFIER(WasmExportedFunctionData) -// Layout description. + // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS( HeapObject::kHeaderSize, TORQUE_GENERATED_WASM_EXPORTED_FUNCTION_DATA_FIELDS) @@ -828,7 +820,7 @@ class WasmJSFunctionData : public Struct { // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, - TORQUE_GENERATED_WASM_JSFUNCTION_DATA_FIELDS) + TORQUE_GENERATED_WASM_JS_FUNCTION_DATA_FIELDS) OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct); }; @@ -838,6 +830,7 @@ class WasmDebugInfo : public Struct { NEVER_READ_ONLY_SPACE DECL_ACCESSORS(wasm_instance, WasmInstanceObject) DECL_ACCESSORS(interpreter_handle, Object) // Foreign or undefined + DECL_ACCESSORS(interpreter_reference_stack, Cell) DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray) DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray) DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>) @@ -848,7 +841,7 @@ class WasmDebugInfo : public Struct { DECL_PRINTER(WasmDebugInfo) DECL_VERIFIER(WasmDebugInfo) -// Layout description. + // Layout description. DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, TORQUE_GENERATED_WASM_DEBUG_INFO_FIELDS) @@ -867,6 +860,11 @@ class WasmDebugInfo : public Struct { V8_EXPORT_PRIVATE static void SetBreakpoint(Handle<WasmDebugInfo>, int func_index, int offset); + // Clear a previously set breakpoint in the given function at the given byte + // offset within that function. + V8_EXPORT_PRIVATE static void ClearBreakpoint(Handle<WasmDebugInfo>, + int func_index, int offset); + // Make a set of functions always execute in the interpreter without setting // breakpoints. 
V8_EXPORT_PRIVATE static void RedirectToInterpreter(Handle<WasmDebugInfo>, diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc index 879da1445ba..3bd76ae43b8 100644 --- a/chromium/v8/src/wasm/wasm-opcodes.cc +++ b/chromium/v8/src/wasm/wasm-opcodes.cc @@ -147,11 +147,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) { CASE_OP(Drop, "drop") CASE_OP(Select, "select") CASE_OP(SelectWithType, "select") - CASE_OP(GetLocal, "local.get") - CASE_OP(SetLocal, "local.set") - CASE_OP(TeeLocal, "local.tee") - CASE_OP(GetGlobal, "global.get") - CASE_OP(SetGlobal, "global.set") + CASE_OP(LocalGet, "local.get") + CASE_OP(LocalSet, "local.set") + CASE_OP(LocalTee, "local.tee") + CASE_OP(GlobalGet, "global.get") + CASE_OP(GlobalSet, "global.set") CASE_OP(TableGet, "table.get") CASE_OP(TableSet, "table.set") CASE_ALL_OP(Const, "const") @@ -222,6 +222,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) { CASE_SIMD_OP(Splat, "splat") CASE_SIMD_OP(Neg, "neg") CASE_F64x2_OP(Neg, "neg") + CASE_F64x2_OP(Sqrt, "sqrt") + CASE_F32x4_OP(Sqrt, "sqrt") CASE_I64x2_OP(Neg, "neg") CASE_SIMD_OP(Eq, "eq") CASE_F64x2_OP(Eq, "eq") @@ -272,7 +274,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) { CASE_F32x4_OP(ReplaceLane, "replace_lane") CASE_I64x2_OP(ExtractLane, "extract_lane") CASE_I64x2_OP(ReplaceLane, "replace_lane") - CASE_SIMDI_OP(ExtractLane, "extract_lane") + CASE_I32x4_OP(ExtractLane, "extract_lane") + CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane") + CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane") CASE_SIMDI_OP(ReplaceLane, "replace_lane") CASE_SIGN_OP(SIMDI, Min, "min") CASE_SIGN_OP(I64x2, Min, "min") @@ -302,6 +306,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) { CASE_S128_OP(Xor, "xor") CASE_S128_OP(Not, "not") CASE_S128_OP(Select, "select") + CASE_S8x16_OP(Swizzle, "swizzle") CASE_S8x16_OP(Shuffle, "shuffle") CASE_S1x2_OP(AnyTrue, "any_true") CASE_S1x2_OP(AllTrue, "all_true") @@ -311,6 +316,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) { CASE_S1x8_OP(AllTrue, "all_true") CASE_S1x16_OP(AnyTrue, "any_true") CASE_S1x16_OP(AllTrue, "all_true") + CASE_F64x2_OP(Qfma, "qfma") + CASE_F64x2_OP(Qfms, "qfms") + CASE_F32x4_OP(Qfma, "qfma") + CASE_F32x4_OP(Qfms, "qfms") // Atomic operations. CASE_OP(AtomicNotify, "atomic.notify") @@ -489,7 +498,7 @@ constexpr const FunctionSig* kCachedSigs[] = { // gcc 4.7 - 4.9 has a bug which causes the constexpr attribute to get lost when // passing functions (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892). Hence // encapsulate these constexpr functions in functors. -// TODO(clemensh): Remove this once we require gcc >= 5.0. +// TODO(clemensb): Remove this once we require gcc >= 5.0. 
struct GetShortOpcodeSigIndex { constexpr WasmOpcodeSig operator()(byte opcode) const { diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h index 0b19d7452c3..f37f7f05207 100644 --- a/chromium/v8/src/wasm/wasm-opcodes.h +++ b/chromium/v8/src/wasm/wasm-opcodes.h @@ -48,11 +48,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&); V(Drop, 0x1a, _) \ V(Select, 0x1b, _) \ V(SelectWithType, 0x1c, _) \ - V(GetLocal, 0x20, _) \ - V(SetLocal, 0x21, _) \ - V(TeeLocal, 0x22, _) \ - V(GetGlobal, 0x23, _) \ - V(SetGlobal, 0x24, _) \ + V(LocalGet, 0x20, _) \ + V(LocalSet, 0x21, _) \ + V(LocalTee, 0x22, _) \ + V(GlobalGet, 0x23, _) \ + V(GlobalSet, 0x24, _) \ V(TableGet, 0x25, _) \ V(TableSet, 0x26, _) \ V(I32Const, 0x41, _) \ @@ -396,8 +396,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&); V(I64x2MaxU, 0xfd91, s_ss) \ V(F32x4Abs, 0xfd95, s_s) \ V(F32x4Neg, 0xfd96, s_s) \ - V(F32x4RecipApprox, 0xfd98, s_s) \ - V(F32x4RecipSqrtApprox, 0xfd99, s_s) \ + V(F32x4Sqrt, 0xfd97, s_s) \ + V(F32x4Qfma, 0xfd98, s_sss) \ + V(F32x4Qfms, 0xfd99, s_sss) \ V(F32x4Add, 0xfd9a, s_ss) \ V(F32x4Sub, 0xfd9b, s_ss) \ V(F32x4Mul, 0xfd9c, s_ss) \ @@ -406,6 +407,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&); V(F32x4Max, 0xfd9f, s_ss) \ V(F64x2Abs, 0xfda0, s_s) \ V(F64x2Neg, 0xfda1, s_s) \ + V(F64x2Sqrt, 0xfda2, s_s) \ + V(F64x2Qfma, 0xfda3, s_sss) \ + V(F64x2Qfms, 0xfda4, s_sss) \ V(F64x2Add, 0xfda5, s_ss) \ V(F64x2Sub, 0xfda6, s_ss) \ V(F64x2Mul, 0xfda7, s_ss) \ @@ -416,6 +420,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&); V(I32x4UConvertF32x4, 0xfdac, s_s) \ V(F32x4SConvertI32x4, 0xfdaf, s_s) \ V(F32x4UConvertI32x4, 0xfdb0, s_s) \ + V(S8x16Swizzle, 0xfdc0, s_ss) \ V(I8x16SConvertI16x8, 0xfdc6, s_ss) \ V(I8x16UConvertI16x8, 0xfdc7, s_ss) \ V(I16x8SConvertI32x4, 0xfdc8, s_ss) \ @@ -430,11 +435,15 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&); V(I32x4UConvertI16x8High, 0xfdd1, s_s) \ V(I16x8AddHoriz, 0xfdbd, s_ss) \ V(I32x4AddHoriz, 0xfdbe, s_ss) \ - V(F32x4AddHoriz, 0xfdbf, s_ss) + V(F32x4AddHoriz, 0xfdbf, s_ss) \ + V(F32x4RecipApprox, 0xfde0, s_s) \ + V(F32x4RecipSqrtApprox, 0xfde1, s_s) #define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \ - V(I8x16ExtractLane, 0xfd05, _) \ - V(I16x8ExtractLane, 0xfd09, _) \ + V(I8x16ExtractLaneS, 0xfd05, _) \ + V(I8x16ExtractLaneU, 0xfd06, _) \ + V(I16x8ExtractLaneS, 0xfd09, _) \ + V(I16x8ExtractLaneU, 0xfd0a, _) \ V(I32x4ExtractLane, 0xfd0d, _) \ V(I64x2ExtractLane, 0xfd10, _) \ V(F32x4ExtractLane, 0xfd13, _) \ diff --git a/chromium/v8/src/wasm/wasm-serialization.cc b/chromium/v8/src/wasm/wasm-serialization.cc index 81460b9fe29..f1fa76b98a8 100644 --- a/chromium/v8/src/wasm/wasm-serialization.cc +++ b/chromium/v8/src/wasm/wasm-serialization.cc @@ -289,9 +289,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer { Vector<WasmCode* const> code_table_; bool write_called_; - // Reverse lookup tables for embedded addresses. - std::map<Address, uint32_t> wasm_stub_targets_lookup_; - DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer); }; @@ -301,11 +298,6 @@ NativeModuleSerializer::NativeModuleSerializer( DCHECK_NOT_NULL(native_module_); // TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist // the unique ones, i.e. the cache. 
- for (uint32_t i = 0; i < WasmCode::kRuntimeStubCount; ++i) { - Address addr = native_module_->runtime_stub_entry( - static_cast<WasmCode::RuntimeStubId>(i)); - wasm_stub_targets_lookup_.insert(std::make_pair(addr, i)); - } } size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const { @@ -367,7 +359,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) { writer->WriteVector(code->source_positions()); writer->WriteVector(Vector<byte>::cast(code->protected_instructions())); #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \ - V8_TARGET_ARCH_PPC + V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390X // On platforms that don't support misaligned word stores, copy to an aligned // buffer if necessary so we can relocate the serialized code. std::unique_ptr<byte[]> aligned_buffer; @@ -400,10 +392,9 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) { SetWasmCalleeTag(iter.rinfo(), tag); } break; case RelocInfo::WASM_STUB_CALL: { - Address orig_target = orig_iter.rinfo()->wasm_stub_call_address(); - auto stub_iter = wasm_stub_targets_lookup_.find(orig_target); - DCHECK(stub_iter != wasm_stub_targets_lookup_.end()); - uint32_t tag = stub_iter->second; + Address target = orig_iter.rinfo()->wasm_stub_call_address(); + uint32_t tag = native_module_->GetRuntimeStubId(target); + DCHECK_GT(WasmCode::kRuntimeStubCount, tag); SetWasmCalleeTag(iter.rinfo(), tag); } break; case RelocInfo::EXTERNAL_REFERENCE: { @@ -550,6 +541,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) { RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED); + auto jump_tables_ref = + native_module_->FindJumpTablesForCode(code->instruction_start()); for (RelocIterator iter(code->instructions(), code->reloc_info(), code->constant_pool(), mask); !iter.done(); iter.next()) { @@ -557,15 +550,16 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) { switch (mode) { case RelocInfo::WASM_CALL: { uint32_t tag = GetWasmCalleeTag(iter.rinfo()); - Address target = native_module_->GetCallTargetForFunction(tag); + Address target = + native_module_->GetNearCallTargetForFunction(tag, jump_tables_ref); iter.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH); break; } case RelocInfo::WASM_STUB_CALL: { uint32_t tag = GetWasmCalleeTag(iter.rinfo()); DCHECK_LT(tag, WasmCode::kRuntimeStubCount); - Address target = native_module_->runtime_stub_entry( - static_cast<WasmCode::RuntimeStubId>(tag)); + Address target = native_module_->GetNearRuntimeStubEntry( + static_cast<WasmCode::RuntimeStubId>(tag), jump_tables_ref); iter.rinfo()->set_wasm_stub_call_address(target, SKIP_ICACHE_FLUSH); break; } @@ -628,7 +622,6 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule( auto shared_native_module = isolate->wasm_engine()->NewNativeModule( isolate, enabled_features, std::move(decode_result.value())); shared_native_module->SetWireBytes(OwnedVector<uint8_t>::Of(wire_bytes_vec)); - shared_native_module->SetRuntimeStubs(isolate); Handle<FixedArray> export_wrappers; CompileJsToWasmWrappers(isolate, shared_native_module->module(), diff --git a/chromium/v8/src/wasm/wasm-text.cc b/chromium/v8/src/wasm/wasm-text.cc index 44abd714459..fedd37ccd38 100644 --- a/chromium/v8/src/wasm/wasm-text.cc +++ b/chromium/v8/src/wasm/wasm-text.cc @@ -154,9 +154,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& 
wire_bytes, os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index; break; } - case kExprGetLocal: - case kExprSetLocal: - case kExprTeeLocal: { + case kExprLocalGet: + case kExprLocalSet: + case kExprLocalTee: { LocalIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc()); os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index; break; @@ -166,8 +166,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes, os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index; break; } - case kExprGetGlobal: - case kExprSetGlobal: { + case kExprGlobalGet: + case kExprGlobalSet: { GlobalIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc()); os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index; break; @@ -304,8 +304,10 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes, break; } - case kExprI8x16ExtractLane: - case kExprI16x8ExtractLane: + case kExprI8x16ExtractLaneS: + case kExprI8x16ExtractLaneU: + case kExprI16x8ExtractLaneS: + case kExprI16x8ExtractLaneU: case kExprI32x4ExtractLane: case kExprI64x2ExtractLane: case kExprF32x4ExtractLane: diff --git a/chromium/v8/src/zone/OWNERS b/chromium/v8/src/zone/OWNERS index 01c515ab90f..e4e653da5ba 100644 --- a/chromium/v8/src/zone/OWNERS +++ b/chromium/v8/src/zone/OWNERS @@ -1,3 +1,3 @@ -clemensh@chromium.org +clemensb@chromium.org sigurds@chromium.org verwaest@chromium.org |