| author | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2019-08-30 10:22:43 +0200 |
|---|---|---|
| committer | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2019-08-30 12:36:28 +0000 |
| commit | 271a6c3487a14599023a9106329505597638d793 (patch) | |
| tree | e040d58ffc86c1480b79ca8528020ca9ec919bf8 /chromium/v8/src/interpreter | |
| parent | 7b2ffa587235a47d4094787d72f38102089f402a (diff) | |
| download | qtwebengine-chromium-271a6c3487a14599023a9106329505597638d793.tar.gz | |
BASELINE: Update Chromium to 77.0.3865.59
Change-Id: I1e89a5f3b009a9519a6705102ad65c92fe736f21
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/interpreter')
18 files changed, 310 insertions, 115 deletions
diff --git a/chromium/v8/src/interpreter/OWNERS b/chromium/v8/src/interpreter/OWNERS index e985bda1027..254e6e60d1a 100644 --- a/chromium/v8/src/interpreter/OWNERS +++ b/chromium/v8/src/interpreter/OWNERS @@ -1,5 +1,3 @@ -set noparent - bmeurer@chromium.org leszeks@chromium.org mstarzinger@chromium.org diff --git a/chromium/v8/src/interpreter/bytecode-array-accessor.cc b/chromium/v8/src/interpreter/bytecode-array-accessor.cc index d7232fcd4c1..d460c1a45f7 100644 --- a/chromium/v8/src/interpreter/bytecode-array-accessor.cc +++ b/chromium/v8/src/interpreter/bytecode-array-accessor.cc @@ -14,15 +14,61 @@ namespace v8 { namespace internal { namespace interpreter { +namespace { + +class OnHeapBytecodeArray final : public AbstractBytecodeArray { + public: + explicit OnHeapBytecodeArray(Handle<BytecodeArray> bytecode_array) + : array_(bytecode_array) {} + + int length() const override { return array_->length(); } + + int parameter_count() const override { return array_->parameter_count(); } + + uint8_t get(int index) const override { return array_->get(index); } + + void set(int index, uint8_t value) override { + return array_->set(index, value); + } + + Address GetFirstBytecodeAddress() const override { + return array_->GetFirstBytecodeAddress(); + } + + Handle<Object> GetConstantAtIndex(int index, + Isolate* isolate) const override { + return handle(array_->constant_pool().get(index), isolate); + } + + bool IsConstantAtIndexSmi(int index) const override { + return array_->constant_pool().get(index).IsSmi(); + } + + Smi GetConstantAtIndexAsSmi(int index) const override { + return Smi::cast(array_->constant_pool().get(index)); + } + + private: + Handle<BytecodeArray> array_; +}; + +} // namespace + BytecodeArrayAccessor::BytecodeArrayAccessor( - Handle<BytecodeArray> bytecode_array, int initial_offset) - : bytecode_array_(bytecode_array), + std::unique_ptr<AbstractBytecodeArray> bytecode_array, int initial_offset) + : bytecode_array_(std::move(bytecode_array)), bytecode_offset_(initial_offset), operand_scale_(OperandScale::kSingle), prefix_offset_(0) { UpdateOperandScale(); } +BytecodeArrayAccessor::BytecodeArrayAccessor( + Handle<BytecodeArray> bytecode_array, int initial_offset) + : BytecodeArrayAccessor( + base::make_unique<OnHeapBytecodeArray>(bytecode_array), + initial_offset) {} + void BytecodeArrayAccessor::SetOffset(int offset) { bytecode_offset_ = offset; UpdateOperandScale(); @@ -33,12 +79,12 @@ void BytecodeArrayAccessor::ApplyDebugBreak() { // scaling prefix, which we can patch with the matching debug-break // variant. 
interpreter::Bytecode bytecode = - interpreter::Bytecodes::FromByte(bytecode_array_->get(bytecode_offset_)); + interpreter::Bytecodes::FromByte(bytecode_array()->get(bytecode_offset_)); if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return; interpreter::Bytecode debugbreak = interpreter::Bytecodes::GetDebugBreak(bytecode); - bytecode_array_->set(bytecode_offset_, - interpreter::Bytecodes::ToByte(debugbreak)); + bytecode_array()->set(bytecode_offset_, + interpreter::Bytecodes::ToByte(debugbreak)); } void BytecodeArrayAccessor::UpdateOperandScale() { @@ -197,13 +243,22 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand( static_cast<IntrinsicsHelper::IntrinsicId>(raw_id)); } -Object BytecodeArrayAccessor::GetConstantAtIndex(int index) const { - return bytecode_array()->constant_pool().get(index); +Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex( + int index, Isolate* isolate) const { + return bytecode_array()->GetConstantAtIndex(index, isolate); } -Object BytecodeArrayAccessor::GetConstantForIndexOperand( - int operand_index) const { - return GetConstantAtIndex(GetIndexOperand(operand_index)); +bool BytecodeArrayAccessor::IsConstantAtIndexSmi(int index) const { + return bytecode_array()->IsConstantAtIndexSmi(index); +} + +Smi BytecodeArrayAccessor::GetConstantAtIndexAsSmi(int index) const { + return bytecode_array()->GetConstantAtIndexAsSmi(index); +} + +Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand( + int operand_index, Isolate* isolate) const { + return GetConstantAtIndex(GetIndexOperand(operand_index), isolate); } int BytecodeArrayAccessor::GetJumpTargetOffset() const { @@ -215,7 +270,7 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const { } return GetAbsoluteOffset(relative_offset); } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) { - Smi smi = Smi::cast(GetConstantForIndexOperand(0)); + Smi smi = GetConstantAtIndexAsSmi(GetIndexOperand(0)); return GetAbsoluteOffset(smi.value()); } else { UNREACHABLE(); @@ -315,19 +370,16 @@ bool JumpTableTargetOffsets::iterator::operator!=( } void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() { - if (table_offset_ >= table_end_) return; - - Object current = accessor_->GetConstantAtIndex(table_offset_); - while (!current.IsSmi()) { - DCHECK(current.IsTheHole()); + while (table_offset_ < table_end_ && + !accessor_->IsConstantAtIndexSmi(table_offset_)) { ++table_offset_; ++index_; - if (table_offset_ >= table_end_) break; - current = accessor_->GetConstantAtIndex(table_offset_); } + // Make sure we haven't reached the end of the table with a hole in current. 
- if (current.IsSmi()) { - current_ = Smi::cast(current); + if (table_offset_ < table_end_) { + DCHECK(accessor_->IsConstantAtIndexSmi(table_offset_)); + current_ = accessor_->GetConstantAtIndexAsSmi(table_offset_); } } diff --git a/chromium/v8/src/interpreter/bytecode-array-accessor.h b/chromium/v8/src/interpreter/bytecode-array-accessor.h index 91b6886204e..97278af7bd0 100644 --- a/chromium/v8/src/interpreter/bytecode-array-accessor.h +++ b/chromium/v8/src/interpreter/bytecode-array-accessor.h @@ -5,6 +5,7 @@ #ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_ #define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_ +#include "src/base/optional.h" #include "src/common/globals.h" #include "src/handles/handles.h" #include "src/interpreter/bytecode-register.h" @@ -64,8 +65,27 @@ class V8_EXPORT_PRIVATE JumpTableTargetOffsets final { int case_value_base_; }; +class V8_EXPORT_PRIVATE AbstractBytecodeArray { + public: + virtual int length() const = 0; + virtual int parameter_count() const = 0; + virtual uint8_t get(int index) const = 0; + virtual void set(int index, uint8_t value) = 0; + virtual Address GetFirstBytecodeAddress() const = 0; + + virtual Handle<Object> GetConstantAtIndex(int index, + Isolate* isolate) const = 0; + virtual bool IsConstantAtIndexSmi(int index) const = 0; + virtual Smi GetConstantAtIndexAsSmi(int index) const = 0; + + virtual ~AbstractBytecodeArray() = default; +}; + class V8_EXPORT_PRIVATE BytecodeArrayAccessor { public: + BytecodeArrayAccessor(std::unique_ptr<AbstractBytecodeArray> bytecode_array, + int initial_offset); + BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array, int initial_offset); @@ -78,8 +98,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor { int current_offset() const { return bytecode_offset_; } OperandScale current_operand_scale() const { return operand_scale_; } int current_prefix_offset() const { return prefix_offset_; } - const Handle<BytecodeArray>& bytecode_array() const { - return bytecode_array_; + AbstractBytecodeArray* bytecode_array() const { + return bytecode_array_.get(); } uint32_t GetFlagOperand(int operand_index) const; @@ -93,8 +113,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor { Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const; Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const; uint32_t GetNativeContextIndexOperand(int operand_index) const; - Object GetConstantAtIndex(int offset) const; - Object GetConstantForIndexOperand(int operand_index) const; + Handle<Object> GetConstantAtIndex(int offset, Isolate* isolate) const; + bool IsConstantAtIndexSmi(int offset) const; + Smi GetConstantAtIndexAsSmi(int offset) const; + Handle<Object> GetConstantForIndexOperand(int operand_index, + Isolate* isolate) const; // Returns the absolute offset of the branch target at the current bytecode. 
// It is an error to call this method if the bytecode is not for a jump or @@ -122,7 +145,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor { void UpdateOperandScale(); - Handle<BytecodeArray> bytecode_array_; + std::unique_ptr<AbstractBytecodeArray> bytecode_array_; int bytecode_offset_; OperandScale operand_scale_; int prefix_offset_; diff --git a/chromium/v8/src/interpreter/bytecode-array-iterator.cc b/chromium/v8/src/interpreter/bytecode-array-iterator.cc index b5823110070..0fc57f85b8f 100644 --- a/chromium/v8/src/interpreter/bytecode-array-iterator.cc +++ b/chromium/v8/src/interpreter/bytecode-array-iterator.cc @@ -11,6 +11,10 @@ namespace internal { namespace interpreter { BytecodeArrayIterator::BytecodeArrayIterator( + std::unique_ptr<AbstractBytecodeArray> bytecode_array) + : BytecodeArrayAccessor(std::move(bytecode_array), 0) {} + +BytecodeArrayIterator::BytecodeArrayIterator( Handle<BytecodeArray> bytecode_array) : BytecodeArrayAccessor(bytecode_array, 0) {} diff --git a/chromium/v8/src/interpreter/bytecode-array-iterator.h b/chromium/v8/src/interpreter/bytecode-array-iterator.h index 7ec9d1288ce..e6b58deadc4 100644 --- a/chromium/v8/src/interpreter/bytecode-array-iterator.h +++ b/chromium/v8/src/interpreter/bytecode-array-iterator.h @@ -14,7 +14,9 @@ namespace interpreter { class V8_EXPORT_PRIVATE BytecodeArrayIterator final : public BytecodeArrayAccessor { public: - explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array); + explicit BytecodeArrayIterator(std::unique_ptr<AbstractBytecodeArray> array); + + explicit BytecodeArrayIterator(Handle<BytecodeArray> array); void Advance(); bool done() const; diff --git a/chromium/v8/src/interpreter/bytecode-array-random-iterator.cc b/chromium/v8/src/interpreter/bytecode-array-random-iterator.cc index 4ed5ce5e7d5..93622328996 100644 --- a/chromium/v8/src/interpreter/bytecode-array-random-iterator.cc +++ b/chromium/v8/src/interpreter/bytecode-array-random-iterator.cc @@ -11,11 +11,21 @@ namespace internal { namespace interpreter { BytecodeArrayRandomIterator::BytecodeArrayRandomIterator( + std::unique_ptr<AbstractBytecodeArray> bytecode_array, Zone* zone) + : BytecodeArrayAccessor(std::move(bytecode_array), 0), offsets_(zone) { + Initialize(); +} + +BytecodeArrayRandomIterator::BytecodeArrayRandomIterator( Handle<BytecodeArray> bytecode_array, Zone* zone) : BytecodeArrayAccessor(bytecode_array, 0), offsets_(zone) { + Initialize(); +} + +void BytecodeArrayRandomIterator::Initialize() { // Run forwards through the bytecode array to determine the offset of each // bytecode. 
- while (current_offset() < bytecode_array->length()) { + while (current_offset() < bytecode_array()->length()) { offsets_.push_back(current_offset()); SetOffset(current_offset() + current_bytecode_size()); } diff --git a/chromium/v8/src/interpreter/bytecode-array-random-iterator.h b/chromium/v8/src/interpreter/bytecode-array-random-iterator.h index 7d559ea1764..a3b69b70158 100644 --- a/chromium/v8/src/interpreter/bytecode-array-random-iterator.h +++ b/chromium/v8/src/interpreter/bytecode-array-random-iterator.h @@ -16,8 +16,10 @@ namespace interpreter { class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final : public BytecodeArrayAccessor { public: - explicit BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array, - Zone* zone); + BytecodeArrayRandomIterator( + std::unique_ptr<AbstractBytecodeArray> bytecode_array, Zone* zone); + + BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array, Zone* zone); BytecodeArrayRandomIterator& operator++() { ++current_index_; @@ -66,6 +68,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final ZoneVector<int> offsets_; int current_index_; + void Initialize(); void UpdateOffsetFromIndex(); DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator); diff --git a/chromium/v8/src/interpreter/bytecode-array-writer.cc b/chromium/v8/src/interpreter/bytecode-array-writer.cc index 3769eefda1f..3ecc5e1a892 100644 --- a/chromium/v8/src/interpreter/bytecode-array-writer.cc +++ b/chromium/v8/src/interpreter/bytecode-array-writer.cc @@ -334,8 +334,8 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location, // The jump fits within the range of an Imm16 operand, so cancel // the reservation and jump directly. constant_array_builder()->DiscardReservedEntry(OperandSize::kShort); - WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes), - static_cast<uint16_t>(delta)); + base::WriteUnalignedValue<uint16_t>( + reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(delta)); } else { // The jump does not fit within the range of an Imm16 operand, so // commit reservation putting the offset into the constant pool, @@ -344,8 +344,8 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location, OperandSize::kShort, Smi::FromInt(delta)); jump_bytecode = GetJumpWithConstantOperand(jump_bytecode); bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode); - WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes), - static_cast<uint16_t>(entry)); + base::WriteUnalignedValue<uint16_t>( + reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(entry)); } DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder && bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder); @@ -359,8 +359,8 @@ void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location, Bytecodes::FromByte(bytecodes()->at(jump_location)))); constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad); uint8_t operand_bytes[4]; - WriteUnalignedUInt32(reinterpret_cast<Address>(operand_bytes), - static_cast<uint32_t>(delta)); + base::WriteUnalignedValue<uint32_t>(reinterpret_cast<Address>(operand_bytes), + static_cast<uint32_t>(delta)); size_t operand_location = jump_location + 1; DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder && bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder && diff --git a/chromium/v8/src/interpreter/bytecode-decoder.cc b/chromium/v8/src/interpreter/bytecode-decoder.cc index 6f2f9dda0d4..3a297b1ddf3 100644 --- 
a/chromium/v8/src/interpreter/bytecode-decoder.cc +++ b/chromium/v8/src/interpreter/bytecode-decoder.cc @@ -42,9 +42,11 @@ int32_t BytecodeDecoder::DecodeSignedOperand(Address operand_start, case OperandSize::kByte: return *reinterpret_cast<const int8_t*>(operand_start); case OperandSize::kShort: - return static_cast<int16_t>(ReadUnalignedUInt16(operand_start)); + return static_cast<int16_t>( + base::ReadUnalignedValue<uint16_t>(operand_start)); case OperandSize::kQuad: - return static_cast<int32_t>(ReadUnalignedUInt32(operand_start)); + return static_cast<int32_t>( + base::ReadUnalignedValue<uint32_t>(operand_start)); case OperandSize::kNone: UNREACHABLE(); } @@ -60,9 +62,9 @@ uint32_t BytecodeDecoder::DecodeUnsignedOperand(Address operand_start, case OperandSize::kByte: return *reinterpret_cast<const uint8_t*>(operand_start); case OperandSize::kShort: - return ReadUnalignedUInt16(operand_start); + return base::ReadUnalignedValue<uint16_t>(operand_start); case OperandSize::kQuad: - return ReadUnalignedUInt32(operand_start); + return base::ReadUnalignedValue<uint32_t>(operand_start); case OperandSize::kNone: UNREACHABLE(); } diff --git a/chromium/v8/src/interpreter/bytecode-generator.cc b/chromium/v8/src/interpreter/bytecode-generator.cc index 706580ac147..d3b27b4375f 100644 --- a/chromium/v8/src/interpreter/bytecode-generator.cc +++ b/chromium/v8/src/interpreter/bytecode-generator.cc @@ -915,6 +915,45 @@ class BytecodeGenerator::IteratorRecord final { Register next_; }; +namespace { + +// A map from property names to getter/setter pairs allocated in the zone that +// also provides a way of accessing the pairs in the order they were first +// added so that the generated bytecode is always the same. +class AccessorTable + : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors, + bool (*)(void*, void*), + ZoneAllocationPolicy> { + public: + explicit AccessorTable(Zone* zone) + : base::TemplateHashMap<Literal, ObjectLiteral::Accessors, + bool (*)(void*, void*), ZoneAllocationPolicy>( + Literal::Match, ZoneAllocationPolicy(zone)), + zone_(zone) {} + + Iterator lookup(Literal* literal) { + Iterator it = find(literal, true, ZoneAllocationPolicy(zone_)); + if (it->second == nullptr) { + it->second = new (zone_) ObjectLiteral::Accessors(); + ordered_accessors_.push_back({literal, it->second}); + } + return it; + } + + const std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>>& + ordered_accessors() { + return ordered_accessors_; + } + + private: + std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>> + ordered_accessors_; + + Zone* zone_; +}; + +} // namespace + #ifdef DEBUG static bool IsInEagerLiterals( @@ -1354,7 +1393,8 @@ void BytecodeGenerator::VisitModuleNamespaceImports() { RegisterAllocationScope register_scope(this); Register module_request = register_allocator()->NewRegister(); - ModuleDescriptor* descriptor = closure_scope()->AsModuleScope()->module(); + SourceTextModuleDescriptor* descriptor = + closure_scope()->AsModuleScope()->module(); for (auto entry : descriptor->namespace_imports()) { builder() ->LoadLiteral(Smi::FromInt(entry->module_request)) @@ -2201,6 +2241,19 @@ void BytecodeGenerator::VisitInitializeClassMembersStatement( } } +void BytecodeGenerator::BuildThrowPrivateMethodWriteError( + const AstRawString* name) { + RegisterAllocationScope register_scope(this); + RegisterList args = register_allocator()->NewRegisterList(2); + builder() + ->LoadLiteral(Smi::FromEnum(MessageTemplate::kInvalidPrivateMethodWrite)) + .StoreAccumulatorInRegister(args[0]) + 
.LoadLiteral(name) + .StoreAccumulatorInRegister(args[1]) + .CallRuntime(Runtime::kNewTypeError, args) + .Throw(); +} + void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver) { RegisterList brand_args = register_allocator()->NewRegisterList(2); Variable* brand = info()->scope()->outer_scope()->AsClassScope()->brand(); @@ -2366,13 +2419,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { RegisterAllocationScope register_scope(this); Expression* property = expr->properties()->first()->value(); Register from_value = VisitForRegisterValue(property); - - BytecodeLabels clone_object(zone()); - builder()->JumpIfUndefined(clone_object.New()); - builder()->JumpIfNull(clone_object.New()); - builder()->ToObject(from_value); - - clone_object.Bind(builder()); int clone_index = feedback_index(feedback_spec()->AddCloneObjectSlot()); builder()->CloneObject(from_value, flags, clone_index); builder()->StoreAccumulatorInRegister(literal); @@ -2473,14 +2519,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { // Define accessors, using only a single call to the runtime for each pair of // corresponding getters and setters. - for (AccessorTable::Iterator it = accessor_table.begin(); - it != accessor_table.end(); ++it) { + for (auto accessors : accessor_table.ordered_accessors()) { RegisterAllocationScope inner_register_scope(this); RegisterList args = register_allocator()->NewRegisterList(5); builder()->MoveRegister(literal, args[0]); - VisitForRegisterValue(it->first, args[1]); - VisitObjectLiteralAccessor(literal, it->second->getter, args[2]); - VisitObjectLiteralAccessor(literal, it->second->setter, args[3]); + VisitForRegisterValue(accessors.first, args[1]); + VisitObjectLiteralAccessor(literal, accessors.second->getter, args[2]); + VisitObjectLiteralAccessor(literal, accessors.second->setter, args[3]); builder() ->LoadLiteral(Smi::FromInt(NONE)) .StoreAccumulatorInRegister(args[4]) @@ -3156,6 +3201,13 @@ BytecodeGenerator::AssignmentLhsData::NamedSuperProperty( } // static BytecodeGenerator::AssignmentLhsData +BytecodeGenerator::AssignmentLhsData::PrivateMethod(Register object, + const AstRawString* name) { + return AssignmentLhsData(PRIVATE_METHOD, nullptr, RegisterList(), object, + Register(), nullptr, name); +} +// static +BytecodeGenerator::AssignmentLhsData BytecodeGenerator::AssignmentLhsData::KeyedSuperProperty( RegisterList super_property_args) { return AssignmentLhsData(KEYED_SUPER_PROPERTY, nullptr, super_property_args, @@ -3185,6 +3237,13 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs( Register key = VisitForRegisterValue(property->key()); return AssignmentLhsData::KeyedProperty(object, key); } + case PRIVATE_METHOD: { + DCHECK(!property->IsSuperAccess()); + AccumulatorPreservingScope scope(this, accumulator_preserving_mode); + Register object = VisitForRegisterValue(property->obj()); + const AstRawString* name = property->key()->AsVariableProxy()->raw_name(); + return AssignmentLhsData::PrivateMethod(object, name); + } case NAMED_SUPER_PROPERTY: { AccumulatorPreservingScope scope(this, accumulator_preserving_mode); RegisterList super_property_args = @@ -3219,15 +3278,16 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs( // Build the iteration finalizer called in the finally block of an iteration // protocol execution. This closes the iterator if needed, and suppresses any -// exception it throws if necessary. 
+// exception it throws if necessary, including the exception when the return +// method is not callable. // // In pseudo-code, this builds: // // if (!done) { // let method = iterator.return // if (method !== null && method !== undefined) { -// if (typeof(method) !== "function") throw TypeError // try { +// if (typeof(method) !== "function") throw TypeError // let return_val = method.call(iterator) // if (!%IsObject(return_val)) throw TypeError // } catch (e) { @@ -3259,33 +3319,35 @@ void BytecodeGenerator::BuildFinalizeIteration( .JumpIfUndefined(iterator_is_done.New()) .JumpIfNull(iterator_is_done.New()); - // if (typeof(method) !== "function") throw TypeError - BytecodeLabel if_callable; - builder() - ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction) - .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable); - { - // throw %NewTypeError(kReturnMethodNotCallable) - RegisterAllocationScope register_scope(this); - RegisterList new_type_error_args = register_allocator()->NewRegisterList(2); - builder() - ->LoadLiteral(Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable)) - .StoreAccumulatorInRegister(new_type_error_args[0]) - .LoadLiteral(ast_string_constants()->empty_string()) - .StoreAccumulatorInRegister(new_type_error_args[1]) - .CallRuntime(Runtime::kNewTypeError, new_type_error_args) - .Throw(); - } - builder()->Bind(&if_callable); - { RegisterAllocationScope register_scope(this); BuildTryCatch( // try { + // if (typeof(method) !== "function") throw TypeError // let return_val = method.call(iterator) // if (!%IsObject(return_val)) throw TypeError // } [&]() { + BytecodeLabel if_callable; + builder() + ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction) + .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable); + { + // throw %NewTypeError(kReturnMethodNotCallable) + RegisterAllocationScope register_scope(this); + RegisterList new_type_error_args = + register_allocator()->NewRegisterList(2); + builder() + ->LoadLiteral( + Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable)) + .StoreAccumulatorInRegister(new_type_error_args[0]) + .LoadLiteral(ast_string_constants()->empty_string()) + .StoreAccumulatorInRegister(new_type_error_args[1]) + .CallRuntime(Runtime::kNewTypeError, new_type_error_args) + .Throw(); + } + builder()->Bind(&if_callable); + RegisterList args(iterator.object()); builder()->CallProperty( method, args, feedback_index(feedback_spec()->AddCallICSlot())); @@ -3736,6 +3798,10 @@ void BytecodeGenerator::BuildAssignment( lhs_data.super_property_args()); break; } + case PRIVATE_METHOD: { + BuildThrowPrivateMethodWriteError(lhs_data.name()); + break; + } } } @@ -3781,6 +3847,10 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) { lhs_data.super_property_args().Truncate(3)); break; } + case PRIVATE_METHOD: { + BuildThrowPrivateMethodWriteError(lhs_data.name()); + break; + } } BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation(); FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot(); @@ -4238,6 +4308,23 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) { case KEYED_SUPER_PROPERTY: VisitKeyedSuperPropertyLoad(property, Register::invalid_value()); break; + case PRIVATE_METHOD: { + Variable* private_name = property->key()->AsVariableProxy()->var(); + + // Perform the brand check. 
+ DCHECK(private_name->requires_brand_check()); + ClassScope* scope = private_name->scope()->AsClassScope(); + Variable* brand = scope->brand(); + BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided); + builder()->SetExpressionPosition(property); + builder()->LoadKeyedProperty( + obj, feedback_index(feedback_spec()->AddKeyedLoadICSlot())); + + // In the case of private methods, property->key() is the function to be + // loaded (stored in a context slot), so load this directly. + VisitForAccumulatorValue(property->key()); + break; + } } } @@ -4342,7 +4429,8 @@ void BytecodeGenerator::VisitCall(Call* expr) { // the semantics of the underlying call type. switch (call_type) { case Call::NAMED_PROPERTY_CALL: - case Call::KEYED_PROPERTY_CALL: { + case Call::KEYED_PROPERTY_CALL: + case Call::PRIVATE_CALL: { Property* property = callee_expr->AsProperty(); VisitAndPushIntoRegisterList(property->obj(), &args); VisitPropertyLoadForRegister(args.last_register(), property, callee); @@ -4678,6 +4766,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) { // Delete of an object property is allowed both in sloppy // and strict modes. Property* property = expr->AsProperty(); + DCHECK(!property->IsPrivateReference()); Register object = VisitForRegisterValue(property->obj()); VisitForAccumulatorValue(property->key()); builder()->Delete(object, language_mode()); @@ -4785,6 +4874,11 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) { builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, load_super_args); break; } + case PRIVATE_METHOD: { + BuildThrowPrivateMethodWriteError( + property->key()->AsVariableProxy()->raw_name()); + break; + } } // Save result for postfix expressions. @@ -4851,6 +4945,11 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) { .CallRuntime(Runtime::kStoreKeyedToSuper, super_property_args); break; } + case PRIVATE_METHOD: { + BuildThrowPrivateMethodWriteError( + property->key()->AsVariableProxy()->raw_name()); + break; + } } // Restore old value for postfix expressions. 
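The bytecode-generator.cc hunks above replace direct iteration over the zone hash map with a small AccessorTable wrapper whose `ordered_accessors()` remembers the order in which getter/setter pairs were first added, so object-literal bytecode comes out the same on every run instead of depending on hash order. Below is a minimal standalone sketch of that insertion-ordered-map pattern using only the standard library; the class and member names are illustrative stand-ins, not the actual V8 types (which build on `base::TemplateHashMap` and zone allocation).

```cpp
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct Accessors {
  std::string getter;
  std::string setter;
};

// A map that also records first-insertion order, so iteration (and therefore
// anything generated from it) is deterministic rather than hash-order based.
class OrderedAccessorTable {
 public:
  Accessors* lookup(const std::string& name) {
    auto it = map_.find(name);
    if (it == map_.end()) {
      it = map_.emplace(name, Accessors{}).first;
      // unordered_map never relocates its elements, so this pointer is stable.
      ordered_.emplace_back(name, &it->second);
    }
    return &it->second;
  }

  const std::vector<std::pair<std::string, Accessors*>>& ordered_accessors()
      const {
    return ordered_;
  }

 private:
  std::unordered_map<std::string, Accessors> map_;
  std::vector<std::pair<std::string, Accessors*>> ordered_;
};

int main() {
  OrderedAccessorTable table;
  table.lookup("b")->getter = "get b";
  table.lookup("a")->setter = "set a";
  table.lookup("b")->setter = "set b";  // reuses the existing "b" entry
  for (const auto& entry : table.ordered_accessors()) {
    std::cout << entry.first << ": " << entry.second->getter << " / "
              << entry.second->setter << "\n";
  }
  // Prints the "b" pair before "a": insertion order, independent of hashing.
}
```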
diff --git a/chromium/v8/src/interpreter/bytecode-generator.h b/chromium/v8/src/interpreter/bytecode-generator.h index dda8b15c804..b754d2c296c 100644 --- a/chromium/v8/src/interpreter/bytecode-generator.h +++ b/chromium/v8/src/interpreter/bytecode-generator.h @@ -84,6 +84,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { Register object, const AstRawString* name); static AssignmentLhsData KeyedProperty(Register object, Register key); + static AssignmentLhsData PrivateMethod(Register object, + const AstRawString* name); static AssignmentLhsData NamedSuperProperty( RegisterList super_property_args); static AssignmentLhsData KeyedSuperProperty( @@ -99,15 +101,16 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { return object_expr_; } Register object() const { - DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY); + DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY || + assign_type_ == PRIVATE_METHOD); return object_; } Register key() const { - DCHECK_EQ(assign_type_, KEYED_PROPERTY); + DCHECK(assign_type_ == KEYED_PROPERTY); return key_; } const AstRawString* name() const { - DCHECK_EQ(assign_type_, NAMED_PROPERTY); + DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == PRIVATE_METHOD); return name_; } RegisterList super_property_args() const { @@ -135,7 +138,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { // // NON_PROPERTY: expr // NAMED_PROPERTY: object_expr, object, name - // KEYED_PROPERTY: object, key + // KEYED_PROPERTY, PRIVATE_METHOD: object, key // NAMED_SUPER_PROPERTY: super_property_args // KEYED_SUPER_PROPERT: super_property_args Expression* expr_; @@ -238,8 +241,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { // Build jump to targets[value], where // start_index <= value < start_index + size. 
- void BuildIndexedJump(Register value, size_t start_index, size_t size, - ZoneVector<BytecodeLabel>& targets); + void BuildIndexedJump( + Register value, size_t start_index, size_t size, + ZoneVector<BytecodeLabel>& targets); // NOLINT(runtime/references) void BuildNewLocalActivationContext(); void BuildLocalActivationContextInitialization(); @@ -291,6 +295,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> { void VisitArgumentsObject(Variable* variable); void VisitRestArgumentsArray(Variable* rest); void VisitCallSuper(Call* call); + void BuildThrowPrivateMethodWriteError(const AstRawString* name); void BuildPrivateClassMemberNameAssignment(ClassLiteral::Property* property); void BuildClassLiteral(ClassLiteral* expr, Register name); void VisitClassLiteral(ClassLiteral* expr, Register name); diff --git a/chromium/v8/src/interpreter/handler-table-builder.h b/chromium/v8/src/interpreter/handler-table-builder.h index db7ed750ddb..66b8d1f937a 100644 --- a/chromium/v8/src/interpreter/handler-table-builder.h +++ b/chromium/v8/src/interpreter/handler-table-builder.h @@ -5,9 +5,10 @@ #ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_ #define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_ -#include "src/execution/frames.h" +#include "src/codegen/handler-table.h" #include "src/interpreter/bytecode-register.h" #include "src/interpreter/bytecodes.h" +#include "src/objects/fixed-array.h" #include "src/zone/zone-containers.h" namespace v8 { diff --git a/chromium/v8/src/interpreter/interpreter-assembler.cc b/chromium/v8/src/interpreter/interpreter-assembler.cc index 0af58b674fa..7291ea1c35d 100644 --- a/chromium/v8/src/interpreter/interpreter-assembler.cc +++ b/chromium/v8/src/interpreter/interpreter-assembler.cc @@ -1265,7 +1265,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) { // Make sure we include the current bytecode in the budget calculation. TNode<Int32T> budget_after_bytecode = - Signed(Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()))); + Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize())); Label done(this); TVARIABLE(Int32T, new_budget); @@ -1501,9 +1501,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { UpdateInterruptBudget(profiling_weight, true); } -Node* InterpreterAssembler::LoadOSRNestingLevel() { +Node* InterpreterAssembler::LoadOsrNestingLevel() { return LoadObjectField(BytecodeArrayTaggedPointer(), - BytecodeArray::kOSRNestingLevelOffset, + BytecodeArray::kOsrNestingLevelOffset, MachineType::Int8()); } diff --git a/chromium/v8/src/interpreter/interpreter-assembler.h b/chromium/v8/src/interpreter/interpreter-assembler.h index db4523b7440..a135eaacdd1 100644 --- a/chromium/v8/src/interpreter/interpreter-assembler.h +++ b/chromium/v8/src/interpreter/interpreter-assembler.h @@ -237,7 +237,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { void UpdateInterruptBudgetOnReturn(); // Returns the OSR nesting level from the bytecode header. - compiler::Node* LoadOSRNestingLevel(); + compiler::Node* LoadOsrNestingLevel(); // Dispatch to the bytecode. 
compiler::Node* Dispatch(); diff --git a/chromium/v8/src/interpreter/interpreter-generator.cc b/chromium/v8/src/interpreter/interpreter-generator.cc index 852aae4482c..00ce8eaf689 100644 --- a/chromium/v8/src/interpreter/interpreter-generator.cc +++ b/chromium/v8/src/interpreter/interpreter-generator.cc @@ -21,9 +21,9 @@ #include "src/interpreter/interpreter-intrinsics-generator.h" #include "src/objects/cell.h" #include "src/objects/js-generator.h" -#include "src/objects/module.h" #include "src/objects/objects-inl.h" #include "src/objects/oddball.h" +#include "src/objects/source-text-module.h" #include "src/utils/ostreams.h" namespace v8 { @@ -512,17 +512,18 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) { // Load receiver. Node* recv = LoadRegisterAtOperandIndex(0); - // Load the name. - // TODO(jgruber): Not needed for monomorphic smi handler constant/field case. - Node* name = LoadConstantPoolEntryAtOperandIndex(1); - Node* context = GetContext(); + // Load the name and context lazily. + LazyNode<Name> name = [=] { + return CAST(LoadConstantPoolEntryAtOperandIndex(1)); + }; + LazyNode<Context> context = [=] { return CAST(GetContext()); }; Label done(this); Variable var_result(this, MachineRepresentation::kTagged); ExitPoint exit_point(this, &done, &var_result); - AccessorAssembler::LoadICParameters params(context, recv, name, smi_slot, - feedback_vector); + AccessorAssembler::LazyLoadICParameters params(context, recv, name, smi_slot, + feedback_vector); AccessorAssembler accessor_asm(state()); accessor_asm.LoadIC_BytecodeHandler(¶ms, &exit_point); @@ -735,7 +736,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) { BIND(&if_export); { TNode<FixedArray> regular_exports = - CAST(LoadObjectField(module, Module::kRegularExportsOffset)); + CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset)); // The actual array index is (cell_index - 1). Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1)); Node* cell = LoadFixedArrayElement(regular_exports, export_index); @@ -746,7 +747,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) { BIND(&if_import); { TNode<FixedArray> regular_imports = - CAST(LoadObjectField(module, Module::kRegularImportsOffset)); + CAST(LoadObjectField(module, SourceTextModule::kRegularImportsOffset)); // The actual array index is (-cell_index - 1). Node* import_index = IntPtrSub(IntPtrConstant(-1), cell_index); Node* cell = LoadFixedArrayElement(regular_imports, import_index); @@ -777,7 +778,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) { BIND(&if_export); { TNode<FixedArray> regular_exports = - CAST(LoadObjectField(module, Module::kRegularExportsOffset)); + CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset)); // The actual array index is (cell_index - 1). Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1)); Node* cell = LoadFixedArrayElement(regular_exports, export_index); @@ -2336,7 +2337,7 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) { IGNITION_HANDLER(JumpLoop, InterpreterAssembler) { Node* relative_jump = BytecodeOperandUImmWord(0); Node* loop_depth = BytecodeOperandImm(1); - Node* osr_level = LoadOSRNestingLevel(); + Node* osr_level = LoadOsrNestingLevel(); // Check if OSR points at the given {loop_depth} are armed by comparing it to // the current {osr_level} loaded from the header of the BytecodeArray. 
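In the `LdaNamedProperty` hunk above, the handler now passes the IC a `LazyLoadICParameters` whose name and context are `LazyNode` thunks, so the constant-pool name is only decoded when a slow path actually needs it. The sketch below shows the same deferred-evaluation idea in plain C++ with `std::function`; it is an illustration under stated assumptions, not the CodeStubAssembler machinery itself, and the `LazyLoadParameters`/`HandleLoad` names are invented for the example.

```cpp
#include <functional>
#include <iostream>
#include <string>

// Parameters whose expensive fields are produced on demand: the handler only
// pays for the name/context computation if a slow path actually asks for it.
struct LazyLoadParameters {
  std::function<std::string()> name;
  std::function<int()> context;
  int receiver;
  int slot;
};

int HandleLoad(const LazyLoadParameters& params, bool fast_path_hit) {
  if (fast_path_hit) {
    // Monomorphic fast path: neither the name nor the context is touched.
    return params.receiver + params.slot;
  }
  // Slow path: materialize the deferred values now.
  std::cout << "IC miss for property " << params.name() << "\n";
  return params.context();
}

int main() {
  LazyLoadParameters params{
      /*name=*/[] {
        std::cout << "(decoding name from the constant pool)\n";
        return std::string("x");
      },
      /*context=*/[] { return 42; },
      /*receiver=*/7,
      /*slot=*/3};

  std::cout << HandleLoad(params, /*fast_path_hit=*/true) << "\n";   // no decode
  std::cout << HandleLoad(params, /*fast_path_hit=*/false) << "\n";  // decodes name
  return 0;
}
```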
diff --git a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc index 19d17baa523..d5818023402 100644 --- a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc +++ b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc @@ -12,9 +12,9 @@ #include "src/interpreter/interpreter-assembler.h" #include "src/interpreter/interpreter-intrinsics.h" #include "src/objects/js-generator.h" -#include "src/objects/module.h" -#include "src/utils/allocation.h" #include "src/objects/objects-inl.h" +#include "src/objects/source-text-module.h" +#include "src/utils/allocation.h" namespace v8 { namespace internal { @@ -324,7 +324,7 @@ Node* IntrinsicsGenerator::GetImportMetaObject( Node* const module = __ LoadContextElement(module_context, Context::EXTENSION_INDEX); Node* const import_meta = - __ LoadObjectField(module, Module::kImportMetaOffset); + __ LoadObjectField(module, SourceTextModule::kImportMetaOffset); InterpreterAssembler::Variable return_value(assembler_, MachineRepresentation::kTagged); diff --git a/chromium/v8/src/interpreter/interpreter.cc b/chromium/v8/src/interpreter/interpreter.cc index 9e06d95fded..eb91ae06a41 100644 --- a/chromium/v8/src/interpreter/interpreter.cc +++ b/chromium/v8/src/interpreter/interpreter.cc @@ -112,7 +112,7 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) { CHECK(code_entry == kNullAddress || InstructionStream::PcIsOffHeap(isolate_, code_entry)); } -#endif // ENABLE_SLOW_DCHECKS +#endif // DEBUG return; } @@ -230,12 +230,12 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl( return SUCCEEDED; } -UnoptimizedCompilationJob* Interpreter::NewCompilationJob( +std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator, std::vector<FunctionLiteral*>* eager_inner_literals) { - return new InterpreterCompilationJob(parse_info, literal, allocator, - eager_inner_literals); + return base::make_unique<InterpreterCompilationJob>( + parse_info, literal, allocator, eager_inner_literals); } void Interpreter::ForEachBytecode( @@ -290,14 +290,9 @@ bool Interpreter::IsDispatchTableInitialized() const { } const char* Interpreter::LookupNameOfBytecodeHandler(const Code code) { -#ifdef ENABLE_DISASSEMBLER -#define RETURN_NAME(Name, ...) \ - if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code.entry()) { \ - return #Name; \ + if (code.kind() == Code::BYTECODE_HANDLER) { + return Builtins::name(code.builtin_index()); } - BYTECODE_LIST(RETURN_NAME) -#undef RETURN_NAME -#endif // ENABLE_DISASSEMBLER return nullptr; } diff --git a/chromium/v8/src/interpreter/interpreter.h b/chromium/v8/src/interpreter/interpreter.h index 8c6216b6a61..e8c494a6cec 100644 --- a/chromium/v8/src/interpreter/interpreter.h +++ b/chromium/v8/src/interpreter/interpreter.h @@ -43,7 +43,7 @@ class Interpreter { // Creates a compilation job which will generate bytecode for |literal|. // Additionally, if |eager_inner_literals| is not null, adds any eagerly // compilable inner FunctionLiterals to this list. - static UnoptimizedCompilationJob* NewCompilationJob( + static std::unique_ptr<UnoptimizedCompilationJob> NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator, std::vector<FunctionLiteral*>* eager_inner_literals); @@ -60,8 +60,8 @@ class Interpreter { // GC support. 
void IterateDispatchTable(RootVisitor* v); - // Disassembler support (only useful with ENABLE_DISASSEMBLER defined). - const char* LookupNameOfBytecodeHandler(const Code code); + // Disassembler support. + V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code); V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
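Beyond the handlers, the interpreter.cc/interpreter.h hunks change `Interpreter::NewCompilationJob` to return `std::unique_ptr<UnoptimizedCompilationJob>` (built with `base::make_unique`, V8's pre-C++14 helper) instead of a raw owning pointer, and resolve bytecode-handler names through the builtin index rather than an `ENABLE_DISASSEMBLER`-only dispatch-table scan. The fragment below is a hedged sketch of the factory-returning-unique_ptr idiom with invented stand-in names; it uses `std::make_unique`, not V8's `base::make_unique`.

```cpp
#include <iostream>
#include <memory>
#include <string>

// Stand-in for UnoptimizedCompilationJob; names here are illustrative only.
class CompilationJob {
 public:
  explicit CompilationJob(std::string literal) : literal_(std::move(literal)) {}
  void Execute() const { std::cout << "compiling " << literal_ << "\n"; }

 private:
  std::string literal_;
};

// Factory in the style of the new NewCompilationJob signature: the caller
// receives ownership explicitly instead of a raw `new`-ed pointer.
std::unique_ptr<CompilationJob> NewCompilationJob(const std::string& literal) {
  return std::make_unique<CompilationJob>(literal);
}

int main() {
  std::unique_ptr<CompilationJob> job = NewCompilationJob("function f() {}");
  job->Execute();
  // The job is destroyed automatically when `job` goes out of scope; there is
  // no manual delete and no ambiguity about who owns the allocation.
}
```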