Diffstat (limited to 'deps/v8/src/compiler')
77 files changed, 3846 insertions, 8701 deletions
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index b9e9293235..21f453f4d8 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -28,7 +28,7 @@ namespace compiler {
 
 namespace {
 
-bool CanInlinePropertyAccess(Handle<Map> map, AccessMode access_mode) {
+bool CanInlinePropertyAccess(MapRef map, AccessMode access_mode) {
   // We can inline property access to prototypes of all primitives, except
   // the special Oddball ones that have no wrapper counterparts (i.e. Null,
   // Undefined and TheHole).
@@ -37,16 +37,17 @@ bool CanInlinePropertyAccess(Handle<Map> map, AccessMode access_mode) {
   // relationship between the map and the object (and therefore the property
   // dictionary).
   STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
-  if (map->IsBooleanMap()) return true;
-  if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
-  if (map->IsJSObjectMap()) {
-    if (map->is_dictionary_map()) {
+  if (map.object()->IsBooleanMap()) return true;
+  if (map.instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
+  if (map.object()->IsJSObjectMap()) {
+    if (map.is_dictionary_map()) {
       if (!V8_DICT_PROPERTY_CONST_TRACKING_BOOL) return false;
-      return access_mode == AccessMode::kLoad && map->is_prototype_map();
+      return access_mode == AccessMode::kLoad &&
+             map.object()->is_prototype_map();
     }
-    return !map->has_named_interceptor() &&
+    return !map.object()->has_named_interceptor() &&
            // TODO(verwaest): Allowlist contexts to which we have access.
-           !map->is_access_check_needed();
+           !map.is_access_check_needed();
   }
   return false;
 }
@@ -82,8 +83,8 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
 }
 
 ElementAccessInfo::ElementAccessInfo(
-    ZoneVector<Handle<Map>>&& lookup_start_object_maps,
-    ElementsKind elements_kind, Zone* zone)
+    ZoneVector<MapRef>&& lookup_start_object_maps, ElementsKind elements_kind,
+    Zone* zone)
     : elements_kind_(elements_kind),
       lookup_start_object_maps_(lookup_start_object_maps),
       transition_sources_(zone) {
@@ -96,22 +97,25 @@ PropertyAccessInfo PropertyAccessInfo::Invalid(Zone* zone) {
 }
 
 // static
-PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone,
-                                                Handle<Map> receiver_map,
-                                                MaybeHandle<JSObject> holder) {
+PropertyAccessInfo PropertyAccessInfo::NotFound(
+    Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder) {
   return PropertyAccessInfo(zone, kNotFound, holder, {{receiver_map}, zone});
 }
 
 // static
 PropertyAccessInfo PropertyAccessInfo::DataField(
-    Zone* zone, Handle<Map> receiver_map,
+    Zone* zone, MapRef receiver_map,
     ZoneVector<CompilationDependency const*>&& dependencies,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
-    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+    Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+    base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
   DCHECK_IMPLIES(
       field_representation.IsDouble(),
-      HasFieldRepresentationDependenciesOnMap(dependencies, field_owner_map));
+      HasFieldRepresentationDependenciesOnMap(
+          dependencies, transition_map.has_value()
+                            ? transition_map->object()
+                            : holder.has_value() ? holder->map().object()
+                                                 : receiver_map.object()));
   return PropertyAccessInfo(kDataField, holder, transition_map, field_index,
                             field_representation, field_type, field_owner_map,
                             field_map, {{receiver_map}, zone},
@@ -120,11 +124,11 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
 
 // static
 PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
-    Zone* zone, Handle<Map> receiver_map,
+    Zone* zone, MapRef receiver_map,
     ZoneVector<CompilationDependency const*>&& dependencies,
     FieldIndex field_index, Representation field_representation,
-    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
-    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+    Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+    base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
   return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
                             field_index, field_representation, field_type,
                             field_owner_map, field_map, {{receiver_map}, zone},
@@ -133,39 +137,38 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
 
 // static
 PropertyAccessInfo PropertyAccessInfo::FastAccessorConstant(
-    Zone* zone, Handle<Map> receiver_map, Handle<Object> constant,
-    MaybeHandle<JSObject> holder) {
-  return PropertyAccessInfo(zone, kFastAccessorConstant, holder, constant,
-                            MaybeHandle<Name>(), {{receiver_map}, zone});
+    Zone* zone, MapRef receiver_map, base::Optional<ObjectRef> constant,
+    base::Optional<JSObjectRef> holder) {
+  return PropertyAccessInfo(zone, kFastAccessorConstant, holder, constant, {},
+                            {{receiver_map}, zone});
 }
 
 // static
 PropertyAccessInfo PropertyAccessInfo::ModuleExport(Zone* zone,
-                                                    Handle<Map> receiver_map,
-                                                    Handle<Cell> cell) {
-  return PropertyAccessInfo(zone, kModuleExport, MaybeHandle<JSObject>(), cell,
-                            MaybeHandle<Name>{}, {{receiver_map}, zone});
+                                                    MapRef receiver_map,
+                                                    CellRef cell) {
+  return PropertyAccessInfo(zone, kModuleExport, {}, cell, {},
+                            {{receiver_map}, zone});
 }
 
 // static
 PropertyAccessInfo PropertyAccessInfo::StringLength(Zone* zone,
-                                                    Handle<Map> receiver_map) {
-  return PropertyAccessInfo(zone, kStringLength, MaybeHandle<JSObject>(),
-                            {{receiver_map}, zone});
+                                                    MapRef receiver_map) {
+  return PropertyAccessInfo(zone, kStringLength, {}, {{receiver_map}, zone});
 }
 
 // static
 PropertyAccessInfo PropertyAccessInfo::DictionaryProtoDataConstant(
-    Zone* zone, Handle<Map> receiver_map, Handle<JSObject> holder,
-    InternalIndex dictionary_index, Handle<Name> name) {
+    Zone* zone, MapRef receiver_map, JSObjectRef holder,
+    InternalIndex dictionary_index, NameRef name) {
   return PropertyAccessInfo(zone, kDictionaryProtoDataConstant, holder,
                             {{receiver_map}, zone}, dictionary_index, name);
 }
 
 // static
 PropertyAccessInfo PropertyAccessInfo::DictionaryProtoAccessorConstant(
-    Zone* zone, Handle<Map> receiver_map, MaybeHandle<JSObject> holder,
-    Handle<Object> constant, Handle<Name> property_name) {
+    Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
+    ObjectRef constant, NameRef property_name) {
   return PropertyAccessInfo(zone, kDictionaryProtoAccessorConstant, holder,
                             constant, property_name, {{receiver_map}, zone});
 }
@@ -193,8 +196,8 @@ PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
       dictionary_index_(InternalIndex::NotFound()) {}
 
 PropertyAccessInfo::PropertyAccessInfo(
-    Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
-    ZoneVector<Handle<Map>>&& lookup_start_object_maps)
+    Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+    ZoneVector<MapRef>&& lookup_start_object_maps)
     : kind_(kind),
       lookup_start_object_maps_(lookup_start_object_maps),
       holder_(holder),
@@ -204,9 +207,9 @@ PropertyAccessInfo::PropertyAccessInfo(
       dictionary_index_(InternalIndex::NotFound()) {}
 
 PropertyAccessInfo::PropertyAccessInfo(
-    Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
-    Handle<Object> constant, MaybeHandle<Name> property_name,
-    ZoneVector<Handle<Map>>&& lookup_start_object_maps)
+    Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+    base::Optional<ObjectRef> constant, base::Optional<NameRef> name,
+    ZoneVector<MapRef>&& lookup_start_object_maps)
     : kind_(kind),
       lookup_start_object_maps_(lookup_start_object_maps),
       constant_(constant),
@@ -215,15 +218,16 @@ PropertyAccessInfo::PropertyAccessInfo(
       field_representation_(Representation::None()),
       field_type_(Type::Any()),
       dictionary_index_(InternalIndex::NotFound()),
-      name_(property_name) {
-  DCHECK_IMPLIES(kind == kDictionaryProtoAccessorConstant,
-                 !property_name.is_null());
+      name_(name) {
+  DCHECK_IMPLIES(kind == kDictionaryProtoAccessorConstant, name.has_value());
 }
+
 PropertyAccessInfo::PropertyAccessInfo(
-    Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
-    FieldIndex field_index, Representation field_representation,
-    Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
-    ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+    Kind kind, base::Optional<JSObjectRef> holder,
+    base::Optional<MapRef> transition_map, FieldIndex field_index,
+    Representation field_representation, Type field_type,
+    MapRef field_owner_map, base::Optional<MapRef> field_map,
+    ZoneVector<MapRef>&& lookup_start_object_maps,
     ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
     : kind_(kind),
       lookup_start_object_maps_(lookup_start_object_maps),
@@ -236,14 +240,14 @@ PropertyAccessInfo::PropertyAccessInfo(
       field_owner_map_(field_owner_map),
       field_map_(field_map),
       dictionary_index_(InternalIndex::NotFound()) {
-  DCHECK_IMPLIES(!transition_map.is_null(),
-                 field_owner_map.address() == transition_map.address());
+  DCHECK_IMPLIES(transition_map.has_value(),
+                 field_owner_map.equals(transition_map.value()));
 }
 
 PropertyAccessInfo::PropertyAccessInfo(
-    Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
-    ZoneVector<Handle<Map>>&& lookup_start_object_maps,
-    InternalIndex dictionary_index, Handle<Name> name)
+    Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+    ZoneVector<MapRef>&& lookup_start_object_maps,
+    InternalIndex dictionary_index, NameRef name)
     : kind_(kind),
       lookup_start_object_maps_(lookup_start_object_maps),
      holder_(holder),
@@ -262,14 +266,31 @@ MinimorphicLoadPropertyAccessInfo::MinimorphicLoadPropertyAccessInfo(
       field_representation_(field_representation),
       field_type_(field_type) {}
 
+namespace {
+
+template <class RefT>
+bool OptionalRefEquals(base::Optional<RefT> lhs, base::Optional<RefT> rhs) {
+  if (!lhs.has_value()) return !rhs.has_value();
+  if (!rhs.has_value()) return false;
+  return lhs->equals(rhs.value());
+}
+
+template <class T>
+void AppendVector(ZoneVector<T>* dst, const ZoneVector<T>& src) {
+  dst->insert(dst->end(), src.begin(), src.end());
+}
+
+}  // namespace
+
 bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
                                AccessMode access_mode, Zone* zone) {
-  if (this->kind_ != that->kind_) return false;
-  if (this->holder_.address() != that->holder_.address()) return false;
+  if (kind_ != that->kind_) return false;
+  if (!OptionalRefEquals(holder_, that->holder_)) return false;
 
-  switch (this->kind_) {
+  switch (kind_) {
     case kInvalid:
-      return that->kind_ == kInvalid;
+      DCHECK_EQ(that->kind_, kInvalid);
+      return true;
 
     case kDataField:
     case kFastDataConstant: {
@@ -277,90 +298,70 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
       // GetFieldAccessStubKey method here just like the ICs do
      // since that way we only compare the relevant bits of the
      // field indices).
-      if (this->field_index_.GetFieldAccessStubKey() ==
+      if (field_index_.GetFieldAccessStubKey() !=
          that->field_index_.GetFieldAccessStubKey()) {
-        switch (access_mode) {
-          case AccessMode::kHas:
-          case AccessMode::kLoad: {
-            if (!this->field_representation_.Equals(
-                    that->field_representation_)) {
-              if (this->field_representation_.IsDouble() ||
-                  that->field_representation_.IsDouble()) {
-                return false;
-              }
-              this->field_representation_ = Representation::Tagged();
-            }
-            if (this->field_map_.address() != that->field_map_.address()) {
-              this->field_map_ = MaybeHandle<Map>();
-            }
-            break;
-          }
-          case AccessMode::kStore:
-          case AccessMode::kStoreInLiteral: {
-            // For stores, the field map and field representation information
-            // must match exactly, otherwise we cannot merge the stores. We
-            // also need to make sure that in case of transitioning stores,
-            // the transition targets match.
-            if (this->field_map_.address() != that->field_map_.address() ||
-                !this->field_representation_.Equals(
-                    that->field_representation_) ||
-                this->transition_map_.address() !=
-                    that->transition_map_.address()) {
+        return false;
+      }
+
+      switch (access_mode) {
+        case AccessMode::kHas:
+        case AccessMode::kLoad: {
+          if (!field_representation_.Equals(that->field_representation_)) {
+            if (field_representation_.IsDouble() ||
+                that->field_representation_.IsDouble()) {
              return false;
            }
-            break;
+            field_representation_ = Representation::Tagged();
+          }
+          if (!OptionalRefEquals(field_map_, that->field_map_)) {
+            field_map_ = {};
+          }
+          break;
+        }
+        case AccessMode::kStore:
+        case AccessMode::kStoreInLiteral: {
+          // For stores, the field map and field representation information
+          // must match exactly, otherwise we cannot merge the stores. We
+          // also need to make sure that in case of transitioning stores,
+          // the transition targets match.
+          if (!OptionalRefEquals(field_map_, that->field_map_) ||
+              !field_representation_.Equals(that->field_representation_) ||
+              !OptionalRefEquals(transition_map_, that->transition_map_)) {
+            return false;
          }
+          break;
        }
-        this->field_type_ =
-            Type::Union(this->field_type_, that->field_type_, zone);
-        this->lookup_start_object_maps_.insert(
-            this->lookup_start_object_maps_.end(),
-            that->lookup_start_object_maps_.begin(),
-            that->lookup_start_object_maps_.end());
-        this->unrecorded_dependencies_.insert(
-            this->unrecorded_dependencies_.end(),
-            that->unrecorded_dependencies_.begin(),
-            that->unrecorded_dependencies_.end());
-        return true;
      }
-      return false;
+
+      field_type_ = Type::Union(field_type_, that->field_type_, zone);
+      AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
+      AppendVector(&unrecorded_dependencies_, that->unrecorded_dependencies_);
+      return true;
    }
 
    case kDictionaryProtoAccessorConstant:
    case kFastAccessorConstant: {
      // Check if we actually access the same constant.
-      if (this->constant_.address() == that->constant_.address()) {
-        DCHECK(this->unrecorded_dependencies_.empty());
-        DCHECK(that->unrecorded_dependencies_.empty());
-        this->lookup_start_object_maps_.insert(
-            this->lookup_start_object_maps_.end(),
-            that->lookup_start_object_maps_.begin(),
-            that->lookup_start_object_maps_.end());
-        return true;
-      }
-      return false;
+      if (!OptionalRefEquals(constant_, that->constant_)) return false;
+
+      DCHECK(unrecorded_dependencies_.empty());
+      DCHECK(that->unrecorded_dependencies_.empty());
+      AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
+      return true;
    }
 
    case kDictionaryProtoDataConstant: {
      DCHECK_EQ(AccessMode::kLoad, access_mode);
-      if (this->dictionary_index_ == that->dictionary_index_) {
-        this->lookup_start_object_maps_.insert(
-            this->lookup_start_object_maps_.end(),
-            that->lookup_start_object_maps_.begin(),
-            that->lookup_start_object_maps_.end());
-        return true;
-      }
-      return false;
+      if (dictionary_index_ != that->dictionary_index_) return false;
+      AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
+      return true;
    }
 
    case kNotFound:
    case kStringLength: {
-      DCHECK(this->unrecorded_dependencies_.empty());
+      DCHECK(unrecorded_dependencies_.empty());
      DCHECK(that->unrecorded_dependencies_.empty());
-      this->lookup_start_object_maps_.insert(
-          this->lookup_start_object_maps_.end(),
-          that->lookup_start_object_maps_.begin(),
-          that->lookup_start_object_maps_.end());
+      AppendVector(&lookup_start_object_maps_, that->lookup_start_object_maps_);
      return true;
    }
    case kModuleExport:
@@ -369,10 +370,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
 }
 
 ConstFieldInfo PropertyAccessInfo::GetConstFieldInfo() const {
-  if (IsFastDataConstant()) {
-    return ConstFieldInfo(field_owner_map_.ToHandleChecked());
-  }
-  return ConstFieldInfo::None();
+  return IsFastDataConstant() ? ConstFieldInfo(field_owner_map_->object())
+                              : ConstFieldInfo::None();
 }
 
 AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
@@ -384,13 +383,9 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
      zone_(zone) {}
 
 base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
-    Handle<Map> map, AccessMode access_mode) const {
-  // Check if it is safe to inline element access for the {map}.
-  base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
-  if (!map_ref.has_value()) return {};
-  if (!CanInlineElementAccess(*map_ref)) return base::nullopt;
-  ElementsKind const elements_kind = map_ref->elements_kind();
-  return ElementAccessInfo({{map}, zone()}, elements_kind, zone());
+    MapRef map, AccessMode access_mode) const {
+  if (!CanInlineElementAccess(map)) return {};
+  return ElementAccessInfo({{map}, zone()}, map.elements_kind(), zone());
 }
 
 bool AccessInfoFactory::ComputeElementAccessInfos(
@@ -412,13 +407,17 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
 
  for (auto const& group : feedback.transition_groups()) {
    DCHECK(!group.empty());
-    Handle<Map> target = group.front();
+    base::Optional<MapRef> target =
+        MakeRefAssumeMemoryFence(broker(), group.front());
    base::Optional<ElementAccessInfo> access_info =
-        ComputeElementAccessInfo(target, access_mode);
+        ComputeElementAccessInfo(target.value(), access_mode);
    if (!access_info.has_value()) return false;
 
    for (size_t i = 1; i < group.size(); ++i) {
-      access_info->AddTransitionSource(group[i]);
+      base::Optional<MapRef> map_ref =
+          MakeRefAssumeMemoryFence(broker(), group[i]);
+      if (!map_ref.has_value()) continue;
+      access_info->AddTransitionSource(map_ref.value());
    }
    access_infos->push_back(*access_info);
  }
@@ -426,11 +425,11 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
 }
 
 PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
-    Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
+    MapRef receiver_map, MapRef map, base::Optional<JSObjectRef> holder,
    InternalIndex descriptor, AccessMode access_mode) const {
  DCHECK(descriptor.is_found());
-  Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
-      map->instance_descriptors(kAcquireLoad));
+  // TODO(jgruber,v8:7790): Use DescriptorArrayRef instead.
+  Handle<DescriptorArray> descriptors = map.instance_descriptors().object();
  PropertyDetails const details = descriptors->GetDetails(descriptor);
  int index = descriptors->GetFieldIndex(descriptor);
  Representation details_representation = details.representation();
@@ -442,34 +441,31 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
    // here and fall back to use the regular IC logic instead.
    return Invalid();
  }
-  FieldIndex field_index =
-      FieldIndex::ForPropertyIndex(*map, index, details_representation);
+  FieldIndex field_index = FieldIndex::ForPropertyIndex(*map.object(), index,
+                                                        details_representation);
  Type field_type = Type::NonInternal();
-  MaybeHandle<Map> field_map;
-
-  base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
-  if (!map_ref.has_value()) return Invalid();
+  base::Optional<MapRef> field_map;
 
  ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
-  if (!map_ref->TrySerializeOwnDescriptor(descriptor)) {
-    return Invalid();
-  }
+
+  Handle<FieldType> descriptors_field_type =
+      broker()->CanonicalPersistentHandle(
+          descriptors->GetFieldType(descriptor));
+  base::Optional<ObjectRef> descriptors_field_type_ref =
+      TryMakeRef<Object>(broker(), descriptors_field_type);
+  if (!descriptors_field_type_ref.has_value()) return Invalid();
+
  if (details_representation.IsSmi()) {
    field_type = Type::SignedSmall();
    unrecorded_dependencies.push_back(
-        dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
-                                                                  descriptor));
+        dependencies()->FieldRepresentationDependencyOffTheRecord(
+            map, descriptor, details_representation));
  } else if (details_representation.IsDouble()) {
    field_type = type_cache_->kFloat64;
    unrecorded_dependencies.push_back(
-        dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
-                                                                  descriptor));
+        dependencies()->FieldRepresentationDependencyOffTheRecord(
+            map, descriptor, details_representation));
  } else if (details_representation.IsHeapObject()) {
-    // Extract the field type from the property details (make sure its
-    // representation is TaggedPointer to reflect the heap object case).
-    Handle<FieldType> descriptors_field_type =
-        broker()->CanonicalPersistentHandle(
-            descriptors->GetFieldType(descriptor));
    if (descriptors_field_type->IsNone()) {
      // Store is not safe if the field type was cleared.
      if (access_mode == AccessMode::kStore) {
@@ -480,16 +476,15 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
      // about the contents now.
    }
    unrecorded_dependencies.push_back(
-        dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
-                                                                  descriptor));
+        dependencies()->FieldRepresentationDependencyOffTheRecord(
+            map, descriptor, details_representation));
    if (descriptors_field_type->IsClass()) {
      // Remember the field map, and try to infer a useful type.
-      Handle<Map> map = broker()->CanonicalPersistentHandle(
-          descriptors_field_type->AsClass());
-      base::Optional<MapRef> maybe_ref = TryMakeRef(broker(), map);
-      if (!maybe_ref.has_value()) return Invalid();
-      field_type = Type::For(*maybe_ref);
-      field_map = MaybeHandle<Map>(map);
+      base::Optional<MapRef> maybe_field_map =
+          TryMakeRef(broker(), descriptors_field_type->AsClass());
+      if (!maybe_field_map.has_value()) return Invalid();
+      field_type = Type::For(maybe_field_map.value());
+      field_map = maybe_field_map;
    }
  } else {
    CHECK(details_representation.IsTagged());
  }
@@ -497,63 +492,74 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
  // TODO(turbofan): We may want to do this only depending on the use
  // of the access info.
  unrecorded_dependencies.push_back(
-      dependencies()->FieldTypeDependencyOffTheRecord(*map_ref, descriptor));
+      dependencies()->FieldTypeDependencyOffTheRecord(
+          map, descriptor, descriptors_field_type_ref.value()));
 
  PropertyConstness constness;
  if (details.IsReadOnly() && !details.IsConfigurable()) {
    constness = PropertyConstness::kConst;
  } else {
-    constness = dependencies()->DependOnFieldConstness(*map_ref, descriptor);
+    constness = dependencies()->DependOnFieldConstness(map, descriptor);
  }
-  // TODO(v8:11670): Make FindFieldOwner and friends robust wrt concurrency.
-  Handle<Map> field_owner_map = broker()->CanonicalPersistentHandle(
-      map->FindFieldOwner(isolate(), descriptor));
+
+  // Note: FindFieldOwner may be called multiple times throughout one
+  // compilation. This is safe since its result is fixed for a given map and
+  // descriptor.
+  MapRef field_owner_map = map.FindFieldOwner(descriptor);
+
  switch (constness) {
    case PropertyConstness::kMutable:
      return PropertyAccessInfo::DataField(
          zone(), receiver_map, std::move(unrecorded_dependencies),
          field_index, details_representation, field_type, field_owner_map,
          field_map,
-          holder);
+          holder, {});
+
    case PropertyConstness::kConst:
      return PropertyAccessInfo::FastDataConstant(
          zone(), receiver_map, std::move(unrecorded_dependencies),
          field_index, details_representation, field_type, field_owner_map,
          field_map,
-          holder);
+          holder, {});
  }
  UNREACHABLE();
 }
 
 namespace {
+
 using AccessorsObjectGetter = std::function<Handle<Object>()>;
 
 PropertyAccessInfo AccessorAccessInfoHelper(
    Isolate* isolate, Zone* zone, JSHeapBroker* broker,
-    const AccessInfoFactory* ai_factory, Handle<Map> receiver_map,
-    Handle<Name> name, Handle<Map> map, MaybeHandle<JSObject> holder,
-    AccessMode access_mode, AccessorsObjectGetter get_accessors) {
-  if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
-    DCHECK(map->is_prototype_map());
+    const AccessInfoFactory* ai_factory, MapRef receiver_map, NameRef name,
+    MapRef map, base::Optional<JSObjectRef> holder, AccessMode access_mode,
+    AccessorsObjectGetter get_accessors) {
+  if (map.instance_type() == JS_MODULE_NAMESPACE_TYPE) {
+    DCHECK(map.object()->is_prototype_map());
    Handle<PrototypeInfo> proto_info = broker->CanonicalPersistentHandle(
-        PrototypeInfo::cast(map->prototype_info()));
+        PrototypeInfo::cast(map.object()->prototype_info()));
    Handle<JSModuleNamespace> module_namespace =
        broker->CanonicalPersistentHandle(
            JSModuleNamespace::cast(proto_info->module_namespace()));
    Handle<Cell> cell = broker->CanonicalPersistentHandle(
        Cell::cast(module_namespace->module().exports().Lookup(
-            isolate, name, Smi::ToInt(name->GetHash()))));
+            isolate, name.object(), Smi::ToInt(name.object()->GetHash()))));
    if (cell->value().IsTheHole(isolate)) {
      // This module has not been fully initialized yet.
      return PropertyAccessInfo::Invalid(zone);
    }
-    return PropertyAccessInfo::ModuleExport(zone, receiver_map, cell);
+    base::Optional<CellRef> cell_ref = TryMakeRef(broker, cell);
+    if (!cell_ref.has_value()) {
+      return PropertyAccessInfo::Invalid(zone);
+    }
+    return PropertyAccessInfo::ModuleExport(zone, receiver_map,
+                                            cell_ref.value());
  }
  if (access_mode == AccessMode::kHas) {
    // kHas is not supported for dictionary mode objects.
-    DCHECK(!map->is_dictionary_map());
+    DCHECK(!map.is_dictionary_map());
 
    // HasProperty checks don't call getter/setters, existence is sufficient.
-    return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map,
-                                                    Handle<Object>(), holder);
+    return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map, {},
+                                                    holder);
  }
  Handle<Object> maybe_accessors = get_accessors();
  if (!maybe_accessors->IsAccessorPair()) {
@@ -561,61 +567,74 @@ PropertyAccessInfo AccessorAccessInfoHelper(
  }
  Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(maybe_accessors);
  Handle<Object> accessor = broker->CanonicalPersistentHandle(
-      access_mode == AccessMode::kLoad ? accessors->getter()
-                                       : accessors->setter());
+      access_mode == AccessMode::kLoad ? accessors->getter(kAcquireLoad)
+                                       : accessors->setter(kAcquireLoad));
 
-  ObjectData* data = broker->TryGetOrCreateData(accessor);
-  if (data == nullptr) return PropertyAccessInfo::Invalid(zone);
+  base::Optional<ObjectRef> accessor_ref = TryMakeRef(broker, accessor);
+  if (!accessor_ref.has_value()) return PropertyAccessInfo::Invalid(zone);
 
  if (!accessor->IsJSFunction()) {
    CallOptimization optimization(broker->local_isolate_or_isolate(), accessor);
    if (!optimization.is_simple_api_call() ||
        optimization.IsCrossContextLazyAccessorPair(
-            *broker->target_native_context().object(), *map)) {
+            *broker->target_native_context().object(), *map.object())) {
      return PropertyAccessInfo::Invalid(zone);
    }
    CallOptimization::HolderLookup lookup;
-    holder = broker->CanonicalPersistentHandle(
+    Handle<JSObject> holder_handle = broker->CanonicalPersistentHandle(
        optimization.LookupHolderOfExpectedType(
-            broker->local_isolate_or_isolate(), receiver_map, &lookup));
+            broker->local_isolate_or_isolate(), receiver_map.object(),
+            &lookup));
    if (lookup == CallOptimization::kHolderNotFound) {
      return PropertyAccessInfo::Invalid(zone);
    }
    DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
-                   holder.is_null());
-    DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
+                   holder_handle.is_null());
+    DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound,
+                   !holder_handle.is_null());
+
+    if (holder_handle.is_null()) {
+      holder = {};
+    } else {
+      holder = TryMakeRef(broker, holder_handle);
+      if (!holder.has_value()) return PropertyAccessInfo::Invalid(zone);
+    }
  }
  if (access_mode == AccessMode::kLoad) {
-    base::Optional<Name> maybe_cached_property_name =
+    base::Optional<Name> cached_property_name =
        FunctionTemplateInfo::TryGetCachedPropertyName(isolate, *accessor);
-    if (maybe_cached_property_name.has_value()) {
-      Handle<Name> cached_property_name =
-          broker->CanonicalPersistentHandle(maybe_cached_property_name.value());
-      PropertyAccessInfo access_info = ai_factory->ComputePropertyAccessInfo(
-          map, cached_property_name, access_mode);
-      if (!access_info.IsInvalid()) return access_info;
+    if (cached_property_name.has_value()) {
+      base::Optional<NameRef> cached_property_name_ref =
+          TryMakeRef(broker, cached_property_name.value());
+      if (cached_property_name_ref.has_value()) {
+        PropertyAccessInfo access_info = ai_factory->ComputePropertyAccessInfo(
+            map, cached_property_name_ref.value(), access_mode);
+        if (!access_info.IsInvalid()) return access_info;
+      }
    }
  }
-  if (map->is_dictionary_map()) {
+
+  if (map.is_dictionary_map()) {
    return PropertyAccessInfo::DictionaryProtoAccessorConstant(
-        zone, receiver_map, holder, accessor, name);
+        zone, receiver_map, holder, accessor_ref.value(), name);
  } else {
-    return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map,
-                                                    accessor, holder);
+    return PropertyAccessInfo::FastAccessorConstant(
+        zone, receiver_map, accessor_ref.value(), holder);
  }
 }
 
 }  // namespace
 
 PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
-    Handle<Map> receiver_map, Handle<Name> name, Handle<Map> holder_map,
-    MaybeHandle<JSObject> holder, InternalIndex descriptor,
+    MapRef receiver_map, NameRef name, MapRef holder_map,
+    base::Optional<JSObjectRef> holder, InternalIndex descriptor,
    AccessMode access_mode) const {
  DCHECK(descriptor.is_found());
  Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
-      holder_map->instance_descriptors(kRelaxedLoad));
-  SLOW_DCHECK(descriptor == descriptors->Search(*name, *holder_map));
+      holder_map.object()->instance_descriptors(kRelaxedLoad));
+  SLOW_DCHECK(descriptor ==
+              descriptors->Search(*name.object(), *holder_map.object()));
 
  auto get_accessors = [&]() {
    return broker()->CanonicalPersistentHandle(
@@ -627,11 +646,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
 }
 
 PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
-    Handle<Map> receiver_map, Handle<Name> name, Handle<JSObject> holder,
+    MapRef receiver_map, NameRef name, JSObjectRef holder,
    InternalIndex dictionary_index, AccessMode access_mode,
    PropertyDetails details) const {
  CHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
-  DCHECK(holder->map().is_prototype_map());
+  DCHECK(holder.map().object()->is_prototype_map());
  DCHECK_EQ(access_mode, AccessMode::kLoad);
 
  // We can only inline accesses to constant properties.
@@ -645,11 +664,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
  }
 
  auto get_accessors = [&]() {
-    return JSObject::DictionaryPropertyAt(isolate(), holder, dictionary_index);
+    return JSObject::DictionaryPropertyAt(isolate(), holder.object(),
+                                          dictionary_index);
  };
-  Handle<Map> holder_map = broker()->CanonicalPersistentHandle(holder->map());
  return AccessorAccessInfoHelper(isolate(), zone(), broker(), this,
-                                  receiver_map, name, holder_map, holder,
+                                  receiver_map, name, holder.map(), holder,
                                  access_mode, get_accessors);
 }
 
@@ -668,15 +687,15 @@ MinimorphicLoadPropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
 }
 
 bool AccessInfoFactory::TryLoadPropertyDetails(
-    Handle<Map> map, MaybeHandle<JSObject> maybe_holder, Handle<Name> name,
+    MapRef map, base::Optional<JSObjectRef> maybe_holder, NameRef name,
    InternalIndex* index_out, PropertyDetails* details_out) const {
-  if (map->is_dictionary_map()) {
+  if (map.is_dictionary_map()) {
    DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
-    DCHECK(map->is_prototype_map());
+    DCHECK(map.object()->is_prototype_map());
 
    DisallowGarbageCollection no_gc;
 
-    if (maybe_holder.is_null()) {
+    if (!maybe_holder.has_value()) {
      // TODO(v8:11457) In this situation, we have a dictionary mode prototype
      // as a receiver. Consider other means of obtaining the holder in this
      // situation.
@@ -685,24 +704,24 @@ bool AccessInfoFactory::TryLoadPropertyDetails(
      return false;
    }
 
-    Handle<JSObject> holder = maybe_holder.ToHandleChecked();
+    Handle<JSObject> holder = maybe_holder->object();
    if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
      SwissNameDictionary dict = holder->property_dictionary_swiss();
-      *index_out = dict.FindEntry(isolate(), name);
+      *index_out = dict.FindEntry(isolate(), name.object());
      if (index_out->is_found()) {
        *details_out = dict.DetailsAt(*index_out);
      }
    } else {
      NameDictionary dict = holder->property_dictionary();
-      *index_out = dict.FindEntry(isolate(), name);
+      *index_out = dict.FindEntry(isolate(), name.object());
      if (index_out->is_found()) {
        *details_out = dict.DetailsAt(*index_out);
      }
    }
  } else {
-    DescriptorArray descriptors = map->instance_descriptors(kAcquireLoad);
-    *index_out =
-        descriptors.Search(*name, *map, broker()->is_concurrent_inlining());
+    DescriptorArray descriptors = *map.instance_descriptors().object();
+    *index_out = descriptors.Search(*name.object(), *map.object(),
+                                    broker()->is_concurrent_inlining());
    if (index_out->is_found()) {
      *details_out = descriptors.GetDetails(*index_out);
    }
@@ -712,12 +731,17 @@ bool AccessInfoFactory::TryLoadPropertyDetails(
 }
 
 PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
-    Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
-  CHECK(name->IsUniqueName());
+    MapRef map, NameRef name, AccessMode access_mode) const {
+  CHECK(name.IsUniqueName());
+
+  // Dictionary property const tracking is unsupported when concurrent inlining
+  // is enabled.
+  CHECK_IMPLIES(V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
+                !broker()->is_concurrent_inlining());
 
  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker());
 
-  if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
+  if (access_mode == AccessMode::kHas && !map.object()->IsJSReceiverMap()) {
    return Invalid();
  }
@@ -737,8 +761,21 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
  bool fast_mode_prototype_on_chain = false;
 
  // Remember the receiver map. We use {map} as loop variable.
-  Handle<Map> receiver_map = map;
-  MaybeHandle<JSObject> holder;
+  MapRef receiver_map = map;
+  base::Optional<JSObjectRef> holder;
+
+  // Perform the implicit ToObject for primitives here.
+  // Implemented according to ES6 section 7.3.2 GetV (V, P).
+  // Note: Keep sync'd with
+  // CompilationDependencies::DependOnStablePrototypeChains.
+  if (receiver_map.IsPrimitiveMap()) {
+    base::Optional<JSFunctionRef> constructor =
+        broker()->target_native_context().GetConstructorFunction(receiver_map);
+    if (!constructor.has_value()) return Invalid();
+    map = constructor->initial_map(broker()->dependencies());
+    DCHECK(!map.IsPrimitiveMap());
+  }
+
  while (true) {
    PropertyDetails details = PropertyDetails::Empty();
    InternalIndex index = InternalIndex::NotFound();
@@ -749,13 +786,12 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
    if (index.is_found()) {
      if (access_mode == AccessMode::kStore ||
          access_mode == AccessMode::kStoreInLiteral) {
-        DCHECK(!map->is_dictionary_map());
+        DCHECK(!map.is_dictionary_map());
 
        // Don't bother optimizing stores to read-only properties.
-        if (details.IsReadOnly()) {
-          return Invalid();
-        }
-        if (details.kind() == kData && !holder.is_null()) {
+        if (details.IsReadOnly()) return Invalid();
+
+        if (details.kind() == kData && holder.has_value()) {
          // This is a store to a property not found on the receiver but on a
          // prototype. According to ES6 section 9.1.9 [[Set]], we need to
          // create a new data property on the receiver. We can still optimize
@@ -763,7 +799,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
          return LookupTransition(receiver_map, name, holder);
        }
      }
-      if (map->is_dictionary_map()) {
+
+      if (map.is_dictionary_map()) {
        DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
 
        if (fast_mode_prototype_on_chain) {
@@ -776,10 +813,10 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
        }
 
        // TryLoadPropertyDetails only succeeds if we know the holder.
-        return ComputeDictionaryProtoAccessInfo(receiver_map, name,
-                                                holder.ToHandleChecked(), index,
-                                                access_mode, details);
+        return ComputeDictionaryProtoAccessInfo(
+            receiver_map, name, holder.value(), index, access_mode, details);
      }
+
      if (dictionary_prototype_on_chain) {
        // If V8_DICT_PROPERTY_CONST_TRACKING_BOOL was disabled, then a
        // dictionary prototype would have caused a bailout earlier.
@@ -817,12 +854,13 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
    }
 
    // The property wasn't found on {map}. Look on the prototype if appropriate.
+    DCHECK(!index.is_found());
 
    // Don't search on the prototype chain for special indices in case of
    // integer indexed exotic objects (see ES6 section 9.4.5).
-    if (map->IsJSTypedArrayMap() && name->IsString()) {
+    if (map.object()->IsJSTypedArrayMap() && name.IsString()) {
      if (broker()->IsMainThread()) {
-        if (IsSpecialIndex(String::cast(*name))) {
+        if (IsSpecialIndex(String::cast(*name.object()))) {
          return Invalid();
        }
      } else {
@@ -839,72 +877,67 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
    }
 
    // Don't lookup private symbols on the prototype chain.
-    if (name->IsPrivate()) {
+    if (name.object()->IsPrivate()) {
      return Invalid();
    }
 
-    if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !holder.is_null()) {
+    if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && holder.has_value()) {
      // At this point, we are past the first loop iteration.
-      DCHECK(holder.ToHandleChecked()->map().is_prototype_map());
-      DCHECK_NE(holder.ToHandleChecked()->map(), *receiver_map);
+      DCHECK(holder->object()->map().is_prototype_map());
+      DCHECK(!holder->map().equals(receiver_map));
 
      fast_mode_prototype_on_chain =
-          fast_mode_prototype_on_chain || !map->is_dictionary_map();
+          fast_mode_prototype_on_chain || !map.is_dictionary_map();
      dictionary_prototype_on_chain =
-          dictionary_prototype_on_chain || map->is_dictionary_map();
+          dictionary_prototype_on_chain || map.is_dictionary_map();
    }
 
    // Walk up the prototype chain.
-    base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
-    if (!map_ref.has_value()) return Invalid();
-    if (!map_ref->TrySerializePrototype()) return Invalid();
-
-    // Acquire synchronously the map's prototype's map to guarantee that every
-    // time we use it, we use the same Map.
-    Handle<Map> map_prototype_map =
-        broker()->CanonicalPersistentHandle(map->prototype().map(kAcquireLoad));
-    if (!map_prototype_map->IsJSObjectMap()) {
-      // Perform the implicit ToObject for primitives here.
-      // Implemented according to ES6 section 7.3.2 GetV (V, P).
-      Handle<JSFunction> constructor;
-      base::Optional<JSFunction> maybe_constructor =
-          Map::GetConstructorFunction(
-              *map, *broker()->target_native_context().object());
-      if (maybe_constructor.has_value()) {
-        map = broker()->CanonicalPersistentHandle(
-            maybe_constructor->initial_map());
-        map_prototype_map = broker()->CanonicalPersistentHandle(
-            map->prototype().map(kAcquireLoad));
-        DCHECK(map_prototype_map->IsJSObjectMap());
-      } else if (map->prototype().IsNull()) {
-        if (dictionary_prototype_on_chain) {
-          // TODO(v8:11248) See earlier comment about
-          // dictionary_prototype_on_chain. We don't support absent properties
-          // with dictionary mode prototypes on the chain, either. This is again
-          // just due to how we currently deal with dependencies for dictionary
-          // properties during finalization.
-          return Invalid();
-        }
+    if (!broker()->is_concurrent_inlining()) {
+      if (!map.TrySerializePrototype(NotConcurrentInliningTag{broker()})) {
+        return Invalid();
+      }
+    }
 
-        // Store to property not found on the receiver or any prototype, we need
-        // to transition to a new data property.
-        // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
-        if (access_mode == AccessMode::kStore) {
-          return LookupTransition(receiver_map, name, holder);
-        }
-        // The property was not found (access returns undefined or throws
-        // depending on the language mode of the load operation.
-        // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
-        return PropertyAccessInfo::NotFound(zone(), receiver_map, holder);
-      } else {
+    // Load the map's prototype's map to guarantee that every time we use it,
+    // we use the same Map.
+    base::Optional<HeapObjectRef> prototype = map.prototype();
+    if (!prototype.has_value()) return Invalid();
+
+    MapRef map_prototype_map = prototype->map();
+    if (!map_prototype_map.object()->IsJSObjectMap()) {
+      // Don't allow proxies on the prototype chain.
+      if (!prototype->IsNull()) {
+        DCHECK(prototype->object()->IsJSProxy());
+        return Invalid();
+      }
+
+      DCHECK(prototype->IsNull());
+
+      if (dictionary_prototype_on_chain) {
+        // TODO(v8:11248) See earlier comment about
+        // dictionary_prototype_on_chain. We don't support absent properties
+        // with dictionary mode prototypes on the chain, either. This is again
+        // just due to how we currently deal with dependencies for dictionary
+        // properties during finalization.
        return Invalid();
      }
+
+      // Store to property not found on the receiver or any prototype, we need
+      // to transition to a new data property.
+      // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+      if (access_mode == AccessMode::kStore) {
+        return LookupTransition(receiver_map, name, holder);
+      }
+
+      // The property was not found (access returns undefined or throws
+      // depending on the language mode of the load operation.
+      // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
+      return PropertyAccessInfo::NotFound(zone(), receiver_map, holder);
    }
 
-    holder =
-        broker()->CanonicalPersistentHandle(JSObject::cast(map->prototype()));
+    holder = prototype->AsJSObject();
    map = map_prototype_map;
-    CHECK(!map->is_deprecated());
 
    if (!CanInlinePropertyAccess(map, access_mode)) {
      return Invalid();
    }
@@ -912,8 +945,12 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
 
    // Successful lookup on prototype chain needs to guarantee that all the
    // prototypes up to the holder have stable maps, except for dictionary-mode
-    // prototypes.
-    CHECK_IMPLIES(!map->is_dictionary_map(), map->is_stable());
+    // prototypes. We currently do this by taking a
+    // DependOnStablePrototypeChains dependency in the caller.
+    //
+    // TODO(jgruber): This is brittle and easy to miss. Consider a refactor
+    // that moves the responsibility of taking the dependency into
+    // AccessInfoFactory.
  }
  UNREACHABLE();
 }
@@ -932,15 +969,6 @@ PropertyAccessInfo AccessInfoFactory::FinalizePropertyAccessInfosAsOne(
  return Invalid();
 }
 
-void AccessInfoFactory::ComputePropertyAccessInfos(
-    MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
-    ZoneVector<PropertyAccessInfo>* access_infos) const {
-  DCHECK(access_infos->empty());
-  for (Handle<Map> map : maps) {
-    access_infos->push_back(ComputePropertyAccessInfo(map, name, access_mode));
-  }
-}
-
 void PropertyAccessInfo::RecordDependencies(
    CompilationDependencies* dependencies) {
  for (CompilationDependency const* d : unrecorded_dependencies_) {
@@ -1007,7 +1035,7 @@ Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
 
 base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
    ElementAccessFeedback const& feedback) const {
-  if (feedback.transition_groups().empty()) return base::nullopt;
+  if (feedback.transition_groups().empty()) return {};
 
  DCHECK(!feedback.transition_groups().front().empty());
  Handle<Map> first_map = feedback.transition_groups().front().front();
@@ -1016,20 +1044,20 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
  InstanceType instance_type = first_map_ref->instance_type();
  ElementsKind elements_kind = first_map_ref->elements_kind();
 
-  ZoneVector<Handle<Map>> maps(zone());
+  ZoneVector<MapRef> maps(zone());
  for (auto const& group : feedback.transition_groups()) {
    for (Handle<Map> map_handle : group) {
      base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
      if (!map.has_value()) return {};
      if (map->instance_type() != instance_type ||
          !CanInlineElementAccess(*map)) {
-        return base::nullopt;
+        return {};
      }
      if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
               .To(&elements_kind)) {
-        return base::nullopt;
+        return {};
      }
-      maps.push_back(map->object());
+      maps.push_back(map.value());
    }
  }
 
@@ -1037,31 +1065,33 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
 }
 
 PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
-    Handle<Map> map, Handle<Name> name) const {
+    MapRef map, NameRef name) const {
  // Check for String::length field accessor.
-  if (map->IsStringMap()) {
-    if (Name::Equals(isolate(), name, isolate()->factory()->length_string())) {
+  if (map.object()->IsStringMap()) {
+    if (Name::Equals(isolate(), name.object(),
+                     isolate()->factory()->length_string())) {
      return PropertyAccessInfo::StringLength(zone(), map);
    }
    return Invalid();
  }
  // Check for special JSObject field accessors.
  FieldIndex field_index;
-  if (Accessors::IsJSObjectFieldAccessor(isolate(), map, name, &field_index)) {
+  if (Accessors::IsJSObjectFieldAccessor(isolate(), map.object(), name.object(),
+                                         &field_index)) {
    Type field_type = Type::NonInternal();
    Representation field_representation = Representation::Tagged();
-    if (map->IsJSArrayMap()) {
-      DCHECK(
-          Name::Equals(isolate(), isolate()->factory()->length_string(), name));
+    if (map.object()->IsJSArrayMap()) {
+      DCHECK(Name::Equals(isolate(), isolate()->factory()->length_string(),
+                          name.object()));
      // The JSArray::length property is a smi in the range
      // [0, FixedDoubleArray::kMaxLength] in case of fast double
      // elements, a smi in the range [0, FixedArray::kMaxLength]
      // in case of other fast elements, and [0, kMaxUInt32] in
      // case of other arrays.
-      if (IsDoubleElementsKind(map->elements_kind())) {
+      if (IsDoubleElementsKind(map.elements_kind())) {
        field_type = type_cache_->kFixedDoubleArrayLengthType;
        field_representation = Representation::Smi();
-      } else if (IsFastElementsKind(map->elements_kind())) {
+      } else if (IsFastElementsKind(map.elements_kind())) {
        field_type = type_cache_->kFixedArrayLengthType;
        field_representation = Representation::Smi();
      } else {
@@ -1070,97 +1100,96 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
    }
    // Special fields are always mutable.
    return PropertyAccessInfo::DataField(zone(), map, {{}, zone()}, field_index,
-                                         field_representation, field_type, map);
+                                         field_representation, field_type, map,
+                                         {}, {}, {});
  }
  return Invalid();
 }
 
 PropertyAccessInfo AccessInfoFactory::LookupTransition(
-    Handle<Map> map, Handle<Name> name, MaybeHandle<JSObject> holder) const {
+    MapRef map, NameRef name, base::Optional<JSObjectRef> holder) const {
  // Check if the {map} has a data transition with the given {name}.
-  Map transition =
-      TransitionsAccessor(isolate(), map, broker()->is_concurrent_inlining())
-          .SearchTransition(*name, kData, NONE);
-  if (transition.is_null()) {
-    return Invalid();
-  }
-
-  Handle<Map> transition_map = broker()->CanonicalPersistentHandle(transition);
-  InternalIndex const number = transition_map->LastAdded();
-  Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
-      transition_map->instance_descriptors(kAcquireLoad));
+  Map transition = TransitionsAccessor(isolate(), map.object(),
+                                       broker()->is_concurrent_inlining())
+                       .SearchTransition(*name.object(), kData, NONE);
+  if (transition.is_null()) return Invalid();
+
+  base::Optional<MapRef> maybe_transition_map =
+      TryMakeRef(broker(), transition);
+  if (!maybe_transition_map.has_value()) return Invalid();
+  MapRef transition_map = maybe_transition_map.value();
+
+  InternalIndex const number = transition_map.object()->LastAdded();
+  Handle<DescriptorArray> descriptors =
+      transition_map.instance_descriptors().object();
  PropertyDetails const details = descriptors->GetDetails(number);
+
  // Don't bother optimizing stores to read-only properties.
-  if (details.IsReadOnly()) {
-    return Invalid();
-  }
+  if (details.IsReadOnly()) return Invalid();
+
  // TODO(bmeurer): Handle transition to data constant?
-  if (details.location() != kField) {
-    return Invalid();
-  }
+  if (details.location() != kField) return Invalid();
+
  int const index = details.field_index();
  Representation details_representation = details.representation();
-  FieldIndex field_index = FieldIndex::ForPropertyIndex(*transition_map, index,
-                                                        details_representation);
+  FieldIndex field_index = FieldIndex::ForPropertyIndex(
+      *transition_map.object(), index, details_representation);
  Type field_type = Type::NonInternal();
-  MaybeHandle<Map> field_map;
-
-  base::Optional<MapRef> transition_map_ref =
-      TryMakeRef(broker(), transition_map);
-  if (!transition_map_ref.has_value()) return Invalid();
+  base::Optional<MapRef> field_map;
 
  ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
  if (details_representation.IsSmi()) {
    field_type = Type::SignedSmall();
-    if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
-      return Invalid();
-    }
    unrecorded_dependencies.push_back(
        dependencies()->FieldRepresentationDependencyOffTheRecord(
-            *transition_map_ref, number));
+            transition_map, number, details_representation));
  } else if (details_representation.IsDouble()) {
    field_type = type_cache_->kFloat64;
-    if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
-      return Invalid();
-    }
    unrecorded_dependencies.push_back(
        dependencies()->FieldRepresentationDependencyOffTheRecord(
-            *transition_map_ref, number));
+            transition_map, number, details_representation));
  } else if (details_representation.IsHeapObject()) {
    // Extract the field type from the property details (make sure its
    // representation is TaggedPointer to reflect the heap object case).
+    // TODO(jgruber,v8:7790): Use DescriptorArrayRef instead.
    Handle<FieldType> descriptors_field_type =
        broker()->CanonicalPersistentHandle(descriptors->GetFieldType(number));
+    base::Optional<ObjectRef> descriptors_field_type_ref =
+        TryMakeRef<Object>(broker(), descriptors_field_type);
+    if (!descriptors_field_type_ref.has_value()) return Invalid();
+
    if (descriptors_field_type->IsNone()) {
      // Store is not safe if the field type was cleared.
      return Invalid();
    }
-    if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
-      return Invalid();
-    }
    unrecorded_dependencies.push_back(
        dependencies()->FieldRepresentationDependencyOffTheRecord(
-            *transition_map_ref, number));
+            transition_map, number, details_representation));
    if (descriptors_field_type->IsClass()) {
      unrecorded_dependencies.push_back(
-          dependencies()->FieldTypeDependencyOffTheRecord(*transition_map_ref,
-                                                          number));
+          dependencies()->FieldTypeDependencyOffTheRecord(
+              transition_map, number,
+              MakeRef<Object>(broker(), descriptors_field_type)));
      // Remember the field map, and try to infer a useful type.
-      Handle<Map> map = broker()->CanonicalPersistentHandle(
-          descriptors_field_type->AsClass());
-      base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
-      if (!map_ref.has_value()) return Invalid();
-      field_type = Type::For(*map_ref);
-      field_map = map;
+      base::Optional<MapRef> maybe_field_map =
+          TryMakeRef(broker(), descriptors_field_type->AsClass());
+      if (!maybe_field_map.has_value()) return Invalid();
+      field_type = Type::For(maybe_field_map.value());
+      field_map = maybe_field_map;
    }
  }
+
  unrecorded_dependencies.push_back(
-      dependencies()->TransitionDependencyOffTheRecord(*transition_map_ref));
-  transition_map_ref->SerializeBackPointer();  // For BuildPropertyStore.
+      dependencies()->TransitionDependencyOffTheRecord(transition_map));
+  if (!broker()->is_concurrent_inlining()) {
+    transition_map.SerializeBackPointer(
+        NotConcurrentInliningTag{broker()});  // For BuildPropertyStore.
+  }
+
  // Transitioning stores *may* store to const fields. The resulting
  // DataConstant access infos can be distinguished from later, i.e. redundant,
  // stores to the same constant field by the presence of a transition map.
-  switch (dependencies()->DependOnFieldConstness(*transition_map_ref, number)) {
+  switch (dependencies()->DependOnFieldConstness(transition_map, number)) {
    case PropertyConstness::kMutable:
      return PropertyAccessInfo::DataField(
          zone(), map, std::move(unrecorded_dependencies), field_index,
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 93215ea0a0..72757da5b7 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -5,14 +5,8 @@
 #ifndef V8_COMPILER_ACCESS_INFO_H_
 #define V8_COMPILER_ACCESS_INFO_H_
 
-#include <iosfwd>
-
-#include "src/codegen/machine-type.h"
+#include "src/compiler/heap-refs.h"
 #include "src/compiler/types.h"
-#include "src/objects/feedback-vector.h"
-#include "src/objects/field-index.h"
-#include "src/objects/map.h"
-#include "src/objects/objects.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -37,26 +31,26 @@ std::ostream& operator<<(std::ostream&, AccessMode);
 
 // This class encapsulates all information required to access a certain
 // element.
 class ElementAccessInfo final {
  public:
-  ElementAccessInfo(ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+  ElementAccessInfo(ZoneVector<MapRef>&& lookup_start_object_maps,
                    ElementsKind elements_kind, Zone* zone);
 
  ElementsKind elements_kind() const { return elements_kind_; }
-  ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+  ZoneVector<MapRef> const& lookup_start_object_maps() const {
    return lookup_start_object_maps_;
  }
-  ZoneVector<Handle<Map>> const& transition_sources() const {
+  ZoneVector<MapRef> const& transition_sources() const {
    return transition_sources_;
  }
 
-  void AddTransitionSource(Handle<Map> map) {
+  void AddTransitionSource(MapRef map) {
    CHECK_EQ(lookup_start_object_maps_.size(), 1);
    transition_sources_.push_back(map);
  }
 
 private:
  ElementsKind elements_kind_;
-  ZoneVector<Handle<Map>> lookup_start_object_maps_;
-  ZoneVector<Handle<Map>> transition_sources_;
+  ZoneVector<MapRef> lookup_start_object_maps_;
+  ZoneVector<MapRef> transition_sources_;
 };
 
 // This class encapsulates all information required to access a certain
@@ -75,37 +69,35 @@ class PropertyAccessInfo final {
    kStringLength
  };
 
-  static PropertyAccessInfo NotFound(Zone* zone, Handle<Map> receiver_map,
-                                     MaybeHandle<JSObject> holder);
+  static PropertyAccessInfo NotFound(Zone* zone, MapRef receiver_map,
+                                     base::Optional<JSObjectRef> holder);
  static PropertyAccessInfo DataField(
-      Zone* zone, Handle<Map> receiver_map,
+      Zone* zone, MapRef receiver_map,
      ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
      FieldIndex field_index, Representation field_representation,
-      Type field_type, Handle<Map> field_owner_map,
-      MaybeHandle<Map> field_map = MaybeHandle<Map>(),
-      MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
-      MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+      Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+      base::Optional<JSObjectRef> holder,
+      base::Optional<MapRef> transition_map);
  static PropertyAccessInfo FastDataConstant(
-      Zone* zone, Handle<Map> receiver_map,
+      Zone* zone, MapRef receiver_map,
      ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
      FieldIndex field_index, Representation field_representation,
-      Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
-      MaybeHandle<JSObject> holder,
-      MaybeHandle<Map> transition_map = MaybeHandle<Map>());
-  static PropertyAccessInfo FastAccessorConstant(Zone* zone,
-                                                 Handle<Map> receiver_map,
-                                                 Handle<Object> constant,
-                                                 MaybeHandle<JSObject> holder);
-  static PropertyAccessInfo ModuleExport(Zone* zone, Handle<Map> receiver_map,
-                                         Handle<Cell> cell);
-  static PropertyAccessInfo StringLength(Zone* zone, Handle<Map> receiver_map);
+      Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
+      base::Optional<JSObjectRef> holder,
+      base::Optional<MapRef> transition_map);
+  static PropertyAccessInfo FastAccessorConstant(
+      Zone* zone, MapRef receiver_map, base::Optional<ObjectRef> constant,
+      base::Optional<JSObjectRef> holder);
+  static PropertyAccessInfo ModuleExport(Zone* zone, MapRef receiver_map,
+                                         CellRef cell);
+  static PropertyAccessInfo StringLength(Zone* zone, MapRef receiver_map);
  static PropertyAccessInfo Invalid(Zone* zone);
  static PropertyAccessInfo DictionaryProtoDataConstant(
-      Zone* zone, Handle<Map> receiver_map, Handle<JSObject> holder,
-      InternalIndex dict_index, Handle<Name> name);
+      Zone* zone, MapRef receiver_map, JSObjectRef holder,
+      InternalIndex dict_index, NameRef name);
  static PropertyAccessInfo DictionaryProtoAccessorConstant(
-      Zone* zone, Handle<Map> receiver_map, MaybeHandle<JSObject> holder,
-      Handle<Object> constant, Handle<Name> name);
+      Zone* zone, MapRef receiver_map, base::Optional<JSObjectRef> holder,
+      ObjectRef constant, NameRef name);
 
  bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
             Zone* zone) V8_WARN_UNUSED_RESULT;
@@ -128,7 +120,7 @@ class PropertyAccessInfo final {
    return kind() == kDictionaryProtoAccessorConstant;
  }
 
-  bool HasTransitionMap() const { return !transition_map().is_null(); }
+  bool HasTransitionMap() const { return transition_map().has_value(); }
  bool HasDictionaryHolder() const {
    return kind_ == kDictionaryProtoDataConstant ||
           kind_ == kDictionaryProtoAccessorConstant;
  }
@@ -136,17 +128,22 @@ class PropertyAccessInfo final {
 
  ConstFieldInfo GetConstFieldInfo() const;
  Kind kind() const { return kind_; }
-  MaybeHandle<JSObject> holder() const {
+  base::Optional<JSObjectRef> holder() const {
    // TODO(neis): There was a CHECK here that tries to protect against
    // using the access info without recording its dependencies first.
    // Find a more suitable place for it.
    return holder_;
  }
-  MaybeHandle<Map> transition_map() const {
+  base::Optional<MapRef> transition_map() const {
    DCHECK(!HasDictionaryHolder());
    return transition_map_;
  }
-  Handle<Object> constant() const { return constant_; }
+  base::Optional<ObjectRef> constant() const {
+    DCHECK_IMPLIES(constant_.has_value(),
+                   IsModuleExport() || IsFastAccessorConstant() ||
+                       IsDictionaryProtoAccessorConstant());
+    return constant_;
+  }
  FieldIndex field_index() const {
    DCHECK(!HasDictionaryHolder());
    return field_index_;
  }
@@ -160,11 +157,11 @@ class PropertyAccessInfo final {
    DCHECK(!HasDictionaryHolder());
    return field_representation_;
  }
-  MaybeHandle<Map> field_map() const {
+  base::Optional<MapRef> field_map() const {
    DCHECK(!HasDictionaryHolder());
    return field_map_;
  }
-  ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+  ZoneVector<MapRef> const& lookup_start_object_maps() const {
    return lookup_start_object_maps_;
  }
 
@@ -173,46 +170,48 @@ class PropertyAccessInfo final {
    return dictionary_index_;
  }
 
-  Handle<Name> name() const {
+  NameRef name() const {
    DCHECK(HasDictionaryHolder());
-    return name_.ToHandleChecked();
+    return name_.value();
  }
 
 private:
  explicit PropertyAccessInfo(Zone* zone);
-  PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
-                     ZoneVector<Handle<Map>>&& lookup_start_object_maps);
-  PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
-                     Handle<Object> constant, MaybeHandle<Name> name,
-                     ZoneVector<Handle<Map>>&& lookup_start_object_maps);
-  PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
-                     MaybeHandle<Map> transition_map, FieldIndex field_index,
+  PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+                     ZoneVector<MapRef>&& lookup_start_object_maps);
+  PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+                     base::Optional<ObjectRef> constant,
+                     base::Optional<NameRef> name,
+                     ZoneVector<MapRef>&& lookup_start_object_maps);
+  PropertyAccessInfo(Kind kind, base::Optional<JSObjectRef> holder,
+                     base::Optional<MapRef> transition_map,
+                     FieldIndex field_index,
                     Representation field_representation, Type field_type,
-                     Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
-                     ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+                     MapRef field_owner_map, base::Optional<MapRef> field_map,
+                     ZoneVector<MapRef>&& lookup_start_object_maps,
                     ZoneVector<CompilationDependency const*>&& dependencies);
-  PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
-                     ZoneVector<Handle<Map>>&& lookup_start_object_maps,
-                     InternalIndex dictionary_index, Handle<Name> name);
+  PropertyAccessInfo(Zone* zone, Kind kind, base::Optional<JSObjectRef> holder,
+                     ZoneVector<MapRef>&& lookup_start_object_maps,
+                     InternalIndex dictionary_index, NameRef name);
 
  // Members used for fast and dictionary mode holders:
  Kind kind_;
-  ZoneVector<Handle<Map>> lookup_start_object_maps_;
-  Handle<Object> constant_;
-  MaybeHandle<JSObject> holder_;
+  ZoneVector<MapRef> lookup_start_object_maps_;
+  base::Optional<ObjectRef> constant_;
+  base::Optional<JSObjectRef> holder_;
 
  // Members only used for fast mode holders:
  ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
-  MaybeHandle<Map> transition_map_;
+  base::Optional<MapRef> transition_map_;
  FieldIndex field_index_;
  Representation field_representation_;
  Type field_type_;
-  MaybeHandle<Map> field_owner_map_;
-  MaybeHandle<Map> field_map_;
+  base::Optional<MapRef> field_owner_map_;
+  base::Optional<MapRef> field_map_;
 
  // Members only used for dictionary mode holders:
dictionary mode holders: InternalIndex dictionary_index_; - MaybeHandle<Name> name_; + base::Optional<NameRef> name_; }; // This class encapsulates information required to generate load properties @@ -252,28 +251,22 @@ class AccessInfoFactory final { Zone* zone); base::Optional<ElementAccessInfo> ComputeElementAccessInfo( - Handle<Map> map, AccessMode access_mode) const; + MapRef map, AccessMode access_mode) const; bool ComputeElementAccessInfos( ElementAccessFeedback const& feedback, ZoneVector<ElementAccessInfo>* access_infos) const; - PropertyAccessInfo ComputePropertyAccessInfo(Handle<Map> map, - Handle<Name> name, + PropertyAccessInfo ComputePropertyAccessInfo(MapRef map, NameRef name, AccessMode access_mode) const; PropertyAccessInfo ComputeDictionaryProtoAccessInfo( - Handle<Map> receiver_map, Handle<Name> name, Handle<JSObject> holder, + MapRef receiver_map, NameRef name, JSObjectRef holder, InternalIndex dict_index, AccessMode access_mode, PropertyDetails details) const; MinimorphicLoadPropertyAccessInfo ComputePropertyAccessInfo( MinimorphicLoadPropertyAccessFeedback const& feedback) const; - // Convenience wrapper around {ComputePropertyAccessInfo} for multiple maps. - void ComputePropertyAccessInfos( - MapHandles const& maps, Handle<Name> name, AccessMode access_mode, - ZoneVector<PropertyAccessInfo>* access_infos) const; - // Merge as many of the given {infos} as possible and record any dependencies. // Return false iff any of them was invalid, in which case no dependencies are // recorded. @@ -291,18 +284,15 @@ class AccessInfoFactory final { private: base::Optional<ElementAccessInfo> ConsolidateElementLoad( ElementAccessFeedback const& feedback) const; - PropertyAccessInfo LookupSpecialFieldAccessor(Handle<Map> map, - Handle<Name> name) const; - PropertyAccessInfo LookupTransition(Handle<Map> map, Handle<Name> name, - MaybeHandle<JSObject> holder) const; - PropertyAccessInfo ComputeDataFieldAccessInfo(Handle<Map> receiver_map, - Handle<Map> map, - MaybeHandle<JSObject> holder, - InternalIndex descriptor, - AccessMode access_mode) const; + PropertyAccessInfo LookupSpecialFieldAccessor(MapRef map, NameRef name) const; + PropertyAccessInfo LookupTransition(MapRef map, NameRef name, + base::Optional<JSObjectRef> holder) const; + PropertyAccessInfo ComputeDataFieldAccessInfo( + MapRef receiver_map, MapRef map, base::Optional<JSObjectRef> holder, + InternalIndex descriptor, AccessMode access_mode) const; PropertyAccessInfo ComputeAccessorDescriptorAccessInfo( - Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map, - MaybeHandle<JSObject> holder, InternalIndex descriptor, + MapRef receiver_map, NameRef name, MapRef map, + base::Optional<JSObjectRef> holder, InternalIndex descriptor, AccessMode access_mode) const; PropertyAccessInfo Invalid() const { @@ -313,8 +303,9 @@ class AccessInfoFactory final { AccessMode access_mode, ZoneVector<PropertyAccessInfo>* result) const; - bool TryLoadPropertyDetails(Handle<Map> map, MaybeHandle<JSObject> holder, - Handle<Name> name, InternalIndex* index_out, + bool TryLoadPropertyDetails(MapRef map, + base::Optional<JSObjectRef> maybe_holder, + NameRef name, InternalIndex* index_out, PropertyDetails* details_out) const; CompilationDependencies* dependencies() const { return dependencies_; } @@ -327,7 +318,6 @@ class AccessInfoFactory final { TypeCache const* const type_cache_; Zone* const zone_; - // TODO(nicohartmann@): Move to public AccessInfoFactory(const AccessInfoFactory&) = delete; AccessInfoFactory& operator=(const 
AccessInfoFactory&) = delete; }; diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h index f39c9bb0d0..18651e26e1 100644 --- a/deps/v8/src/compiler/allocation-builder-inl.h +++ b/deps/v8/src/compiler/allocation-builder-inl.h @@ -17,6 +17,7 @@ namespace compiler { void AllocationBuilder::Allocate(int size, AllocationType allocation, Type type) { + CHECK_GT(size, 0); DCHECK_LE(size, isolate()->heap()->MaxRegularHeapObjectSize(allocation)); effect_ = graph()->NewNode( common()->BeginRegion(RegionObservability::kNotObservable), effect_); diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc index c1f0c62e25..29c7897ec9 100644 --- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -3814,7 +3814,7 @@ void CodeGenerator::AssembleConstructFrame() { if (required_slots > 0) { DCHECK(frame_access_state()->has_frame()); #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -3824,7 +3824,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. - if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { UseScratchRegisterScope temps(tasm()); Register scratch = temps.Acquire(); __ ldr(scratch, FieldMemOperand( @@ -3837,12 +3837,11 @@ void CodeGenerator::AssembleConstructFrame() { } __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); - // We come from WebAssembly, there are no references for the GC. + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); - if (FLAG_debug_code) { - __ stop(); - } + if (FLAG_debug_code) __ stop(); __ bind(&done); } @@ -3950,15 +3949,18 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { } if (drop_jsargs) { - // We must pop all arguments from the stack (including the receiver). This - // number of arguments is given by max(1 + argc_reg, parameter_slots). - __ add(argc_reg, argc_reg, Operand(1)); // Also pop the receiver. + // We must pop all arguments from the stack (including the receiver). + // The number of arguments without the receiver is + // max(argc_reg, parameter_slots-1), and the receiver is added in + // DropArguments(). 
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit()); if (parameter_slots > 1) { - __ cmp(argc_reg, Operand(parameter_slots)); - __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt); + const int parameter_slots_without_receiver = parameter_slots - 1; + __ cmp(argc_reg, Operand(parameter_slots_without_receiver)); + __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC, lt); } - __ Drop(argc_reg); + __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } else if (additional_pop_count->IsImmediate()) { DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type()); int additional_count = g.ToConstant(additional_pop_count).ToInt32(); diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc index c907e83c3f..c121383426 100644 --- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -3147,7 +3147,7 @@ void CodeGenerator::AssembleConstructFrame() { } #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -3156,7 +3156,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. - if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { UseScratchRegisterScope scope(tasm()); Register scratch = scope.AcquireX(); __ Ldr(scratch, FieldMemOperand( @@ -3178,12 +3178,11 @@ void CodeGenerator::AssembleConstructFrame() { } __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); - // We come from WebAssembly, there are no references for the GC. + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. 
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); - if (FLAG_debug_code) { - __ Brk(0); - } + if (FLAG_debug_code) __ Brk(0); __ Bind(&done); } #endif // V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h index bc5aa579d6..eaa39ccb82 100644 --- a/deps/v8/src/compiler/backend/code-generator-impl.h +++ b/deps/v8/src/compiler/backend/code-generator-impl.h @@ -189,7 +189,8 @@ class DeoptimizationExit : public ZoneObject { public: explicit DeoptimizationExit(SourcePosition pos, BytecodeOffset bailout_id, int translation_id, int pc_offset, - DeoptimizeKind kind, DeoptimizeReason reason) + DeoptimizeKind kind, DeoptimizeReason reason, + NodeId node_id) : deoptimization_id_(kNoDeoptIndex), pos_(pos), bailout_id_(bailout_id), @@ -197,6 +198,7 @@ class DeoptimizationExit : public ZoneObject { pc_offset_(pc_offset), kind_(kind), reason_(reason), + node_id_(node_id), immediate_args_(nullptr), emitted_(false) {} @@ -220,6 +222,7 @@ class DeoptimizationExit : public ZoneObject { int pc_offset() const { return pc_offset_; } DeoptimizeKind kind() const { return kind_; } DeoptimizeReason reason() const { return reason_; } + NodeId node_id() const { return node_id_; } const ZoneVector<ImmediateOperand*>* immediate_args() const { return immediate_args_; } @@ -243,6 +246,7 @@ class DeoptimizationExit : public ZoneObject { const int pc_offset_; const DeoptimizeKind kind_; const DeoptimizeReason reason_; + const NodeId node_id_; ZoneVector<ImmediateOperand*>* immediate_args_; bool emitted_; }; diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index 3b2285a4c5..9e378b8458 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -171,7 +171,7 @@ void CodeGenerator::AssembleDeoptImmediateArgs( switch (constant.type()) { case Constant::kInt32: - tasm()->dp(constant.ToInt32()); + tasm()->dp(constant.ToInt32(), RelocInfo::LITERAL_CONSTANT); break; #ifdef V8_TARGET_ARCH_64_BIT case Constant::kInt64: @@ -181,7 +181,7 @@ void CodeGenerator::AssembleDeoptImmediateArgs( case Constant::kFloat64: { int smi; CHECK(DoubleToSmiInteger(constant.ToFloat64().value(), &smi)); - tasm()->dp(Smi::FromInt(smi).ptr()); + tasm()->dp(Smi::FromInt(smi).ptr(), RelocInfo::LITERAL_CONSTANT); break; } case Constant::kCompressedHeapObject: @@ -221,8 +221,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)]; } if (info()->source_positions()) { - tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(), - deoptimization_id); + tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), + exit->pos(), deoptimization_id); } if (deopt_kind == DeoptimizeKind::kLazy) { @@ -320,8 +320,12 @@ void CodeGenerator::AssembleCode() { offsets_info_.blocks_start = tasm()->pc_offset(); for (const InstructionBlock* block : instructions()->ao_blocks()) { // Align loop headers on vendor recommended boundaries. 
- if (block->ShouldAlign() && !tasm()->jump_optimization_info()) { - tasm()->CodeTargetAlign(); + if (!tasm()->jump_optimization_info()) { + if (block->ShouldAlignLoopHeader()) { + tasm()->LoopHeaderAlign(); + } else if (block->ShouldAlignCodeTarget()) { + tasm()->CodeTargetAlign(); + } } if (info->trace_turbo_json()) { block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset(); @@ -597,9 +601,9 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() { isolate()->counters()->total_compiled_code_size()->Increment( code->raw_body_size()); - LOG_CODE_EVENT(isolate(), - CodeLinePosInfoRecordEvent(code->raw_instruction_start(), - *source_positions)); + LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent( + code->raw_instruction_start(), + *source_positions, JitCodeEvent::JIT_CODE)); return code; } @@ -1055,6 +1059,9 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() { data->SetTranslationIndex( i, Smi::FromInt(deoptimization_exit->translation_id())); data->SetPc(i, Smi::FromInt(deoptimization_exit->pc_offset())); +#ifdef DEBUG + data->SetNodeId(i, Smi::FromInt(deoptimization_exit->node_id())); +#endif // DEBUG } return data; @@ -1242,8 +1249,12 @@ DeoptimizationExit* CodeGenerator::BuildTranslation( DeoptimizationExit* const exit = zone()->New<DeoptimizationExit>( current_source_position_, descriptor->bailout_id(), translation_index, - pc_offset, entry.kind(), entry.reason()); - + pc_offset, entry.kind(), entry.reason(), +#ifdef DEBUG + entry.node_id()); +#else // DEBUG + 0); +#endif // DEBUG if (!Deoptimizer::kSupportsFixedDeoptExitSizes) { exit->set_deoptimization_id(next_deoptimization_id_++); } diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 5541f64897..5db3f20fa4 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -2083,22 +2083,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kIA32I64x2ShrS: { XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); - XMMRegister tmp = i.TempSimd128Register(0); - XMMRegister tmp2 = i.TempSimd128Register(1); - Operand shift = i.InputOperand(1); - - // Take shift value modulo 64. - __ and_(shift, Immediate(63)); - __ Movd(tmp, shift); - - // Set up a mask [0x80000000,0,0x80000000,0]. - __ Pcmpeqb(tmp2, tmp2); - __ Psllq(tmp2, tmp2, byte{63}); - - __ Psrlq(tmp2, tmp2, tmp); - __ Psrlq(dst, src, tmp); - __ Pxor(dst, tmp2); - __ Psubq(dst, tmp2); + if (HasImmediateInput(instr, 1)) { + __ I64x2ShrS(dst, src, i.InputInt6(1), kScratchDoubleReg); + } else { + __ I64x2ShrS(dst, src, i.InputRegister(1), kScratchDoubleReg, + i.TempSimd128Register(0), i.TempRegister(1)); + } break; } case kIA32I64x2Add: { @@ -4537,7 +4527,7 @@ void CodeGenerator::AssembleConstructFrame() { if (required_slots > 0) { DCHECK(frame_access_state()->has_frame()); #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -4547,7 +4537,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. 
Thereby we can avoid the integer overflow // check in the condition code. - if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { Register scratch = esi; __ push(scratch); __ mov(scratch, @@ -4562,6 +4552,8 @@ void CodeGenerator::AssembleConstructFrame() { __ wasm_call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap); @@ -4652,11 +4644,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { } if (drop_jsargs) { - // We must pop all arguments from the stack (including the receiver). This - // number of arguments is given by max(1 + argc_reg, parameter_slots). - int parameter_slots_without_receiver = - parameter_slots - 1; // Exclude the receiver to simplify the - // computation. We'll account for it at the end. + // We must pop all arguments from the stack (including the receiver). + // The number of arguments without the receiver is + // max(argc_reg, parameter_slots-1), and the receiver is added in + // DropArguments(). + int parameter_slots_without_receiver = parameter_slots - 1; Label mismatch_return; Register scratch_reg = edx; DCHECK_NE(argc_reg, scratch_reg); @@ -4666,11 +4658,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ j(greater, &mismatch_return, Label::kNear); __ Ret(parameter_slots * kSystemPointerSize, scratch_reg); __ bind(&mismatch_return); - __ PopReturnAddressTo(scratch_reg); - __ lea(esp, Operand(esp, argc_reg, times_system_pointer_size, - kSystemPointerSize)); // Also pop the receiver. + __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); // We use a return instead of a jump for better return address prediction. - __ PushReturnAddressFrom(scratch_reg); __ Ret(); } else if (additional_pop_count->IsImmediate()) { int additional_count = g.ToConstant(additional_pop_count).ToInt32(); diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc index 5d7c8fbec2..f36fdb2935 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -2417,16 +2417,16 @@ void InstructionSelector::VisitI64x2Neg(Node* node) { void InstructionSelector::VisitI64x2ShrS(Node* node) { IA32OperandGenerator g(this); - InstructionOperand temps[] = {g.TempSimd128Register(), - g.TempSimd128Register()}; - if (IsSupported(AVX)) { - Emit(kIA32I64x2ShrS, g.DefineAsRegister(node), - g.UseUniqueRegister(node->InputAt(0)), g.Use(node->InputAt(1)), - arraysize(temps), temps); + InstructionOperand dst = + IsSupported(AVX) ? 
g.DefineAsRegister(node) : g.DefineSameAsFirst(node); + + if (g.CanBeImmediate(node->InputAt(1))) { + Emit(kIA32I64x2ShrS, dst, g.UseRegister(node->InputAt(0)), + g.UseImmediate(node->InputAt(1))); } else { - Emit(kIA32I64x2ShrS, g.DefineSameAsFirst(node), - g.UseUniqueRegister(node->InputAt(0)), g.Use(node->InputAt(1)), - arraysize(temps), temps); + InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()}; + Emit(kIA32I64x2ShrS, dst, g.UseUniqueRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1)), arraysize(temps), temps); } } @@ -2989,9 +2989,6 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) { } Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps); } -#else -void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); } -#endif // V8_ENABLE_WEBASSEMBLY void InstructionSelector::VisitI8x16Swizzle(Node* node) { InstructionCode op = kIA32I8x16Swizzle; @@ -3012,6 +3009,10 @@ void InstructionSelector::VisitI8x16Swizzle(Node* node) { g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), arraysize(temps), temps); } +#else +void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); } +void InstructionSelector::VisitI8x16Swizzle(Node* node) { UNREACHABLE(); } +#endif // V8_ENABLE_WEBASSEMBLY namespace { void VisitPminOrPmax(InstructionSelector* selector, Node* node, diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index 923562dbd9..f279ea1590 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -873,7 +873,7 @@ Instruction* InstructionSelector::EmitWithContinuation( opcode |= DeoptImmedArgsCountField::encode(immediate_args_count) | DeoptFrameStateOffsetField::encode(static_cast<int>(input_count)); AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(), - cont->reason(), cont->feedback(), + cont->reason(), cont->node_id(), cont->feedback(), FrameState{cont->frame_state()}); } else if (cont->IsSet()) { continuation_outputs_.push_back(g.DefineAsRegister(cont->result())); @@ -906,13 +906,13 @@ Instruction* InstructionSelector::EmitWithContinuation( void InstructionSelector::AppendDeoptimizeArguments( InstructionOperandVector* args, DeoptimizeKind kind, - DeoptimizeReason reason, FeedbackSource const& feedback, + DeoptimizeReason reason, NodeId node_id, FeedbackSource const& feedback, FrameState frame_state) { OperandGenerator g(this); FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state); DCHECK_NE(DeoptimizeKind::kLazy, kind); - int const state_id = - sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback); + int const state_id = sequence()->AddDeoptimizationEntry( + descriptor, kind, reason, node_id, feedback); args->push_back(g.TempImmediate(state_id)); StateObjectDeduplicator deduplicator(instruction_zone()); AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator, @@ -1112,7 +1112,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, int const state_id = sequence()->AddDeoptimizationEntry( buffer->frame_state_descriptor, DeoptimizeKind::kLazy, - DeoptimizeReason::kUnknown, FeedbackSource()); + DeoptimizeReason::kUnknown, call->id(), FeedbackSource()); buffer->instruction_args.push_back(g.TempImmediate(state_id)); StateObjectDeduplicator deduplicator(instruction_zone()); @@ -1362,7 +1362,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) { case 
BasicBlock::kDeoptimize: { DeoptimizeParameters p = DeoptimizeParametersOf(input->op()); FrameState value{input->InputAt(0)}; - VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value); + VisitDeoptimize(p.kind(), p.reason(), input->id(), p.feedback(), value); break; } case BasicBlock::kThrow: @@ -3119,11 +3119,13 @@ void InstructionSelector::VisitDeoptimizeIf(Node* node) { DeoptimizeParameters p = DeoptimizeParametersOf(node->op()); if (NeedsPoisoning(p.is_safety_check())) { FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison( - kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1)); + kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(), + node->InputAt(1)); VisitWordCompareZero(node, node->InputAt(0), &cont); } else { FlagsContinuation cont = FlagsContinuation::ForDeoptimize( - kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1)); + kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(), + node->InputAt(1)); VisitWordCompareZero(node, node->InputAt(0), &cont); } } @@ -3132,11 +3134,13 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) { DeoptimizeParameters p = DeoptimizeParametersOf(node->op()); if (NeedsPoisoning(p.is_safety_check())) { FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison( - kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1)); + kEqual, p.kind(), p.reason(), node->id(), p.feedback(), + node->InputAt(1)); VisitWordCompareZero(node, node->InputAt(0), &cont); } else { FlagsContinuation cont = FlagsContinuation::ForDeoptimize( - kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1)); + kEqual, p.kind(), p.reason(), node->id(), p.feedback(), + node->InputAt(1)); VisitWordCompareZero(node, node->InputAt(0), &cont); } } @@ -3184,12 +3188,12 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) { if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) { FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison( - kEqual, p.kind(), p.reason(), p.feedback(), n.frame_state(), + kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(), dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size())); VisitWordCompareZero(node, n.condition(), &cont); } else { FlagsContinuation cont = FlagsContinuation::ForDeoptimize( - kEqual, p.kind(), p.reason(), p.feedback(), n.frame_state(), + kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(), dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size())); VisitWordCompareZero(node, n.condition(), &cont); } @@ -3214,10 +3218,12 @@ void InstructionSelector::EmitIdentity(Node* node) { void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason, + NodeId node_id, FeedbackSource const& feedback, FrameState frame_state) { InstructionOperandVector args(instruction_zone()); - AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state); + AppendDeoptimizeArguments(&args, kind, reason, node_id, feedback, + frame_state); Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr); } diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index 837a22412c..11a329d1d6 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -64,20 +64,20 @@ class FlagsContinuation final { // Creates a new flags continuation for an eager deoptimization exit. 
static FlagsContinuation ForDeoptimize( FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, Node* frame_state, + NodeId node_id, FeedbackSource const& feedback, Node* frame_state, InstructionOperand* extra_args = nullptr, int extra_args_count = 0) { return FlagsContinuation(kFlags_deoptimize, condition, kind, reason, - feedback, frame_state, extra_args, + node_id, feedback, frame_state, extra_args, extra_args_count); } // Creates a new flags continuation for an eager deoptimization exit. static FlagsContinuation ForDeoptimizeAndPoison( FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, Node* frame_state, + NodeId node_id, FeedbackSource const& feedback, Node* frame_state, InstructionOperand* extra_args = nullptr, int extra_args_count = 0) { return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind, - reason, feedback, frame_state, extra_args, + reason, node_id, feedback, frame_state, extra_args, extra_args_count); } @@ -123,6 +123,10 @@ class FlagsContinuation final { DCHECK(IsDeoptimize()); return reason_; } + NodeId node_id() const { + DCHECK(IsDeoptimize()); + return node_id_; + } FeedbackSource const& feedback() const { DCHECK(IsDeoptimize()); return feedback_; @@ -229,12 +233,14 @@ class FlagsContinuation final { FlagsContinuation(FlagsMode mode, FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, Node* frame_state, - InstructionOperand* extra_args, int extra_args_count) + NodeId node_id, FeedbackSource const& feedback, + Node* frame_state, InstructionOperand* extra_args, + int extra_args_count) : mode_(mode), condition_(condition), kind_(kind), reason_(reason), + node_id_(node_id), feedback_(feedback), frame_state_or_result_(frame_state), extra_args_(extra_args), @@ -274,6 +280,7 @@ class FlagsContinuation final { FlagsCondition condition_; DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize* DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize* + NodeId node_id_; // Only valid if mode_ == kFlags_deoptimize* FeedbackSource feedback_; // Only valid if mode_ == kFlags_deoptimize* Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize* // or mode_ == kFlags_set. 
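The NodeId threaded through FlagsContinuation above is only meaningful in the deoptimize modes, which is why its accessor DCHECKs IsDeoptimize() before returning the field and why the member is annotated as valid only for mode_ == kFlags_deoptimize*. A minimal sketch of that mode-guarded-field idiom, using illustrative stand-in names (Continuation, Mode) and a plain assert in place of DCHECK:

#include <cassert>
#include <cstdint>
#include <cstdio>

using NodeId = uint32_t;
enum class Mode { kSet, kDeoptimize };

class Continuation {
 public:
  // Factory for the one mode in which node_id_ is meaningful.
  static Continuation ForDeoptimize(NodeId node_id) {
    return Continuation(Mode::kDeoptimize, node_id);
  }
  static Continuation ForSet() { return Continuation(Mode::kSet, 0); }

  bool IsDeoptimize() const { return mode_ == Mode::kDeoptimize; }

  NodeId node_id() const {
    assert(IsDeoptimize());  // mirrors DCHECK(IsDeoptimize()) in the diff
    return node_id_;
  }

 private:
  Continuation(Mode mode, NodeId node_id) : mode_(mode), node_id_(node_id) {}
  Mode mode_;
  NodeId node_id_;  // Only valid if mode_ == Mode::kDeoptimize.
};

int main() {
  Continuation cont = Continuation::ForDeoptimize(42);
  if (cont.IsDeoptimize()) {
    std::printf("deopt originated at node #%u\n", cont.node_id());
  }
  return 0;
}

Further down in this diff the same id is kept only under #ifdef DEBUG (see DeoptimizationEntry); the sketch stores it unconditionally for simplicity.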
@@ -524,7 +531,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void AppendDeoptimizeArguments(InstructionOperandVector* args, DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, + NodeId node_id, FeedbackSource const& feedback, FrameState frame_state); void EmitTableSwitch(const SwitchInfo& sw, @@ -660,7 +667,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch); void VisitSwitch(Node* node, const SwitchInfo& sw); void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, FrameState frame_state); + NodeId node_id, FeedbackSource const& feedback, + FrameState frame_state); void VisitSelect(Node* node); void VisitReturn(Node* ret); void VisitThrow(Node* node); diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc index 24c8722b62..63ca78e060 100644 --- a/deps/v8/src/compiler/backend/instruction.cc +++ b/deps/v8/src/compiler/backend/instruction.cc @@ -607,7 +607,8 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number, deferred_(deferred), handler_(handler), switch_target_(false), - alignment_(false), + code_target_alignment_(false), + loop_header_alignment_(false), needs_frame_(false), must_construct_frame_(false), must_deconstruct_frame_(false) {} @@ -802,14 +803,14 @@ void InstructionSequence::ComputeAssemblyOrder() { ao_blocks_->push_back(loop_end); // This block will be the new machine-level loop header, so align // this block instead of the loop header block. - loop_end->set_alignment(true); + loop_end->set_loop_header_alignment(true); header_align = false; } } - block->set_alignment(header_align); + block->set_loop_header_alignment(header_align); } if (block->loop_header().IsValid() && block->IsSwitchTarget()) { - block->set_alignment(true); + block->set_code_target_alignment(true); } block->set_ao_number(RpoNumber::FromInt(ao++)); ao_blocks_->push_back(block); @@ -952,10 +953,10 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep, int InstructionSequence::AddDeoptimizationEntry( FrameStateDescriptor* descriptor, DeoptimizeKind kind, - DeoptimizeReason reason, FeedbackSource const& feedback) { + DeoptimizeReason reason, NodeId node_id, FeedbackSource const& feedback) { int deoptimization_id = static_cast<int>(deoptimization_entries_.size()); deoptimization_entries_.push_back( - DeoptimizationEntry(descriptor, kind, reason, feedback)); + DeoptimizationEntry(descriptor, kind, reason, node_id, feedback)); return deoptimization_id; } diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h index f20955727a..204683c973 100644 --- a/deps/v8/src/compiler/backend/instruction.h +++ b/deps/v8/src/compiler/backend/instruction.h @@ -1449,24 +1449,35 @@ class JSToWasmFrameStateDescriptor : public FrameStateDescriptor { // frame state descriptor that we have to go back to. 
class DeoptimizationEntry final { public: - DeoptimizationEntry() = default; DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind, - DeoptimizeReason reason, FeedbackSource const& feedback) + DeoptimizeReason reason, NodeId node_id, + FeedbackSource const& feedback) : descriptor_(descriptor), kind_(kind), reason_(reason), - feedback_(feedback) {} +#ifdef DEBUG + node_id_(node_id), +#endif // DEBUG + feedback_(feedback) { + USE(node_id); + } FrameStateDescriptor* descriptor() const { return descriptor_; } DeoptimizeKind kind() const { return kind_; } DeoptimizeReason reason() const { return reason_; } +#ifdef DEBUG + NodeId node_id() const { return node_id_; } +#endif // DEBUG FeedbackSource const& feedback() const { return feedback_; } private: - FrameStateDescriptor* descriptor_ = nullptr; - DeoptimizeKind kind_ = DeoptimizeKind::kEager; - DeoptimizeReason reason_ = DeoptimizeReason::kUnknown; - FeedbackSource feedback_ = FeedbackSource(); + FrameStateDescriptor* const descriptor_; + const DeoptimizeKind kind_; + const DeoptimizeReason reason_; +#ifdef DEBUG + const NodeId node_id_; +#endif // DEBUG + const FeedbackSource feedback_; }; using DeoptimizationVector = ZoneVector<DeoptimizationEntry>; @@ -1537,7 +1548,8 @@ class V8_EXPORT_PRIVATE InstructionBlock final } inline bool IsLoopHeader() const { return loop_end_.IsValid(); } inline bool IsSwitchTarget() const { return switch_target_; } - inline bool ShouldAlign() const { return alignment_; } + inline bool ShouldAlignCodeTarget() const { return code_target_alignment_; } + inline bool ShouldAlignLoopHeader() const { return loop_header_alignment_; } using Predecessors = ZoneVector<RpoNumber>; Predecessors& predecessors() { return predecessors_; } @@ -1560,7 +1572,8 @@ class V8_EXPORT_PRIVATE InstructionBlock final void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; } - void set_alignment(bool val) { alignment_ = val; } + void set_code_target_alignment(bool val) { code_target_alignment_ = val; } + void set_loop_header_alignment(bool val) { loop_header_alignment_ = val; } void set_switch_target(bool val) { switch_target_ = val; } @@ -1588,7 +1601,10 @@ class V8_EXPORT_PRIVATE InstructionBlock final const bool deferred_ : 1; // Block contains deferred code. bool handler_ : 1; // Block is a handler entry point. bool switch_target_ : 1; - bool alignment_ : 1; // insert alignment before this block + bool code_target_alignment_ : 1; // insert code target alignment before this + // block + bool loop_header_alignment_ : 1; // insert loop header alignment before this + // block bool needs_frame_ : 1; bool must_construct_frame_ : 1; bool must_deconstruct_frame_ : 1; @@ -1770,7 +1786,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final int AddDeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback); + NodeId node_id, FeedbackSource const& feedback); DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id); int GetDeoptimizationEntryCount() const { return static_cast<int>(deoptimization_entries_.size()); diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc index 96a3b144a0..e91b7e17d2 100644 --- a/deps/v8/src/compiler/backend/jump-threading.cc +++ b/deps/v8/src/compiler/backend/jump-threading.cc @@ -206,7 +206,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone, // Skip empty blocks when the previous block doesn't fall through. 
bool prev_fallthru = true; - for (auto const block : code->instruction_blocks()) { + for (auto const block : code->ao_blocks()) { RpoNumber block_rpo = block->rpo_number(); int block_num = block_rpo.ToInt(); RpoNumber result_rpo = result[block_num]; diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc index f921813766..2b8197e7e6 100644 --- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc +++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc @@ -4139,7 +4139,7 @@ void CodeGenerator::AssembleConstructFrame() { if (required_slots > 0) { DCHECK(frame_access_state()->has_frame()); #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -4149,7 +4149,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. - if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { __ Lw( kScratchReg, FieldMemOperand(kWasmInstanceRegister, @@ -4161,12 +4161,11 @@ void CodeGenerator::AssembleConstructFrame() { } __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); - // We come from WebAssembly, there are no references for the GC. + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); - if (FLAG_debug_code) { - __ stop(); - } + if (FLAG_debug_code) __ stop(); __ bind(&done); } diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index d05df5ceec..6fce103d24 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -4349,7 +4349,7 @@ void CodeGenerator::AssembleConstructFrame() { if (required_slots > 0) { DCHECK(frame_access_state()->has_frame()); #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -4359,7 +4359,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. - if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { __ Ld( kScratchReg, FieldMemOperand(kWasmInstanceRegister, @@ -4371,12 +4371,11 @@ void CodeGenerator::AssembleConstructFrame() { } __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); - // We come from WebAssembly, there are no references for the GC. + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. 
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); - if (FLAG_debug_code) { - __ stop(); - } + if (FLAG_debug_code) __ stop(); __ bind(&done); } diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index 24232aa7fb..cf324353f2 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -347,7 +347,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, i.InputRegister(1), i.OutputRCBit()); \ } else { \ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \ - i.InputInt32(1), i.OutputRCBit()); \ + i.InputImmediate(1), i.OutputRCBit()); \ } \ } while (0) @@ -1227,29 +1227,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kPPC_ShiftLeft32: - ASSEMBLE_BINOP_RC(slw, slwi); + ASSEMBLE_BINOP_RC(ShiftLeftU32, ShiftLeftU32); break; -#if V8_TARGET_ARCH_PPC64 case kPPC_ShiftLeft64: - ASSEMBLE_BINOP_RC(sld, sldi); + ASSEMBLE_BINOP_RC(ShiftLeftU64, ShiftLeftU64); break; -#endif case kPPC_ShiftRight32: - ASSEMBLE_BINOP_RC(srw, srwi); + ASSEMBLE_BINOP_RC(ShiftRightU32, ShiftRightU32); break; -#if V8_TARGET_ARCH_PPC64 case kPPC_ShiftRight64: - ASSEMBLE_BINOP_RC(srd, srdi); + ASSEMBLE_BINOP_RC(ShiftRightU64, ShiftRightU64); break; -#endif case kPPC_ShiftRightAlg32: - ASSEMBLE_BINOP_INT_RC(sraw, srawi); + ASSEMBLE_BINOP_INT_RC(ShiftRightS32, ShiftRightS32); break; -#if V8_TARGET_ARCH_PPC64 case kPPC_ShiftRightAlg64: - ASSEMBLE_BINOP_INT_RC(srad, sradi); + ASSEMBLE_BINOP_INT_RC(ShiftRightS64, ShiftRightS64); break; -#endif #if !V8_TARGET_ARCH_PPC64 case kPPC_AddPair: // i.InputRegister(0) ... left low word. @@ -1493,7 +1487,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode())); break; case kPPC_Mod32: - if (CpuFeatures::IsSupported(MODULO)) { + if (CpuFeatures::IsSupported(PPC_9_PLUS)) { __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); } else { ASSEMBLE_MODULO(divw, mullw); @@ -1501,7 +1495,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; #if V8_TARGET_ARCH_PPC64 case kPPC_Mod64: - if (CpuFeatures::IsSupported(MODULO)) { + if (CpuFeatures::IsSupported(PPC_9_PLUS)) { __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); } else { ASSEMBLE_MODULO(divd, mulld); @@ -1509,7 +1503,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; #endif case kPPC_ModU32: - if (CpuFeatures::IsSupported(MODULO)) { + if (CpuFeatures::IsSupported(PPC_9_PLUS)) { __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); } else { ASSEMBLE_MODULO(divwu, mullw); @@ -1517,7 +1511,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; #if V8_TARGET_ARCH_PPC64 case kPPC_ModU64: - if (CpuFeatures::IsSupported(MODULO)) { + if (CpuFeatures::IsSupported(PPC_9_PLUS)) { __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); } else { ASSEMBLE_MODULO(divdu, mulld); @@ -1830,7 +1824,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( cr, static_cast<CRBit>(VXCVI % CRWIDTH)); __ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7 __ li(kScratchReg, Operand(1)); - __ sldi(kScratchReg, kScratchReg, Operand(31)); // generate INT32_MIN. + __ ShiftLeftU64(kScratchReg, kScratchReg, + Operand(31)); // generate INT32_MIN. 
__ isel(i.OutputRegister(0), kScratchReg, i.OutputRegister(0), crbit); } break; @@ -1873,7 +1868,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( cr, static_cast<CRBit>(VXCVI % CRWIDTH)); __ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7 // Handle conversion failures (such as overflow). - if (CpuFeatures::IsSupported(ISELECT)) { + if (CpuFeatures::IsSupported(PPC_7_PLUS)) { if (check_conversion) { __ li(i.OutputRegister(1), Operand(1)); __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit); @@ -1910,7 +1905,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( int crbit = v8::internal::Assembler::encode_crbit( cr, static_cast<CRBit>(VXCVI % CRWIDTH)); __ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7 - if (CpuFeatures::IsSupported(ISELECT)) { + if (CpuFeatures::IsSupported(PPC_7_PLUS)) { __ li(i.OutputRegister(1), Operand(1)); __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit); } else { @@ -2284,9 +2279,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( constexpr int lane_width_in_bytes = 8; Simd128Register dst = i.OutputSimd128Register(); __ MovDoubleToInt64(r0, i.InputDoubleRegister(2)); - __ mtvsrd(kScratchSimd128Reg, r0); - __ vinsertd(dst, kScratchSimd128Reg, - Operand((1 - i.InputInt8(1)) * lane_width_in_bytes)); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vinsd(dst, r0, Operand((1 - i.InputInt8(1)) * lane_width_in_bytes)); + } else { + __ mtvsrd(kScratchSimd128Reg, r0); + __ vinsertd(dst, kScratchSimd128Reg, + Operand((1 - i.InputInt8(1)) * lane_width_in_bytes)); + } break; } case kPPC_F32x4ReplaceLane: { @@ -2294,27 +2293,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( constexpr int lane_width_in_bytes = 4; Simd128Register dst = i.OutputSimd128Register(); __ MovFloatToInt(r0, i.InputDoubleRegister(2)); - __ mtvsrd(kScratchSimd128Reg, r0); - __ vinsertw(dst, kScratchSimd128Reg, - Operand((3 - i.InputInt8(1)) * lane_width_in_bytes)); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vinsw(dst, r0, Operand((3 - i.InputInt8(1)) * lane_width_in_bytes)); + } else { + __ mtvsrd(kScratchSimd128Reg, r0); + __ vinsertw(dst, kScratchSimd128Reg, + Operand((3 - i.InputInt8(1)) * lane_width_in_bytes)); + } break; } case kPPC_I64x2ReplaceLane: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); constexpr int lane_width_in_bytes = 8; Simd128Register dst = i.OutputSimd128Register(); - __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2)); - __ vinsertd(dst, kScratchSimd128Reg, - Operand((1 - i.InputInt8(1)) * lane_width_in_bytes)); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vinsd(dst, i.InputRegister(2), + Operand((1 - i.InputInt8(1)) * lane_width_in_bytes)); + } else { + __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2)); + __ vinsertd(dst, kScratchSimd128Reg, + Operand((1 - i.InputInt8(1)) * lane_width_in_bytes)); + } break; } case kPPC_I32x4ReplaceLane: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); constexpr int lane_width_in_bytes = 4; Simd128Register dst = i.OutputSimd128Register(); - __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2)); - __ vinsertw(dst, kScratchSimd128Reg, - Operand((3 - i.InputInt8(1)) * lane_width_in_bytes)); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vinsw(dst, i.InputRegister(2), + Operand((3 - i.InputInt8(1)) * lane_width_in_bytes)); + } else { + __ mtvsrd(kScratchSimd128Reg, i.InputRegister(2)); + __ vinsertw(dst, kScratchSimd128Reg, + Operand((3 - 
i.InputInt8(1)) * lane_width_in_bytes)); + } break; } case kPPC_I16x8ReplaceLane: { @@ -2377,26 +2390,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( constexpr int lane_width_in_bytes = 8; Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); - Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)); + Simd128Register tempFPReg0 = i.ToSimd128Register(instr->TempAt(0)); + Register tempReg1 = i.ToRegister(instr->TempAt(2)); + Register scratch_0 = ip; + Register scratch_1 = r0; Simd128Register dst = i.OutputSimd128Register(); - for (int i = 0; i < 2; i++) { - if (i > 0) { - __ vextractd(kScratchSimd128Reg, src0, - Operand(1 * lane_width_in_bytes)); - __ vextractd(tempFPReg1, src1, Operand(1 * lane_width_in_bytes)); - src0 = kScratchSimd128Reg; - src1 = tempFPReg1; - } - __ mfvsrd(r0, src0); - __ mfvsrd(ip, src1); - __ mulld(r0, r0, ip); - if (i <= 0) { - __ mtvsrd(dst, r0); - } else { - __ mtvsrd(kScratchSimd128Reg, r0); - __ vinsertd(dst, kScratchSimd128Reg, - Operand(1 * lane_width_in_bytes)); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vmulld(dst, src0, src1); + } else { + for (int i = 0; i < 2; i++) { + if (i > 0) { + __ vextractd(kScratchSimd128Reg, src0, + Operand(1 * lane_width_in_bytes)); + __ vextractd(tempFPReg0, src1, Operand(1 * lane_width_in_bytes)); + src0 = kScratchSimd128Reg; + src1 = tempFPReg0; + } + __ mfvsrd(scratch_0, src0); + __ mfvsrd(scratch_1, src1); + __ mulld(scratch_0, scratch_0, scratch_1); + scratch_0 = r0; + scratch_1 = tempReg1; } + __ mtvsrdd(dst, ip, r0); } break; } @@ -3256,43 +3272,59 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kPPC_I64x2BitMask: { - __ mov(kScratchReg, - Operand(0x8080808080800040)); // Select 0 for the high bits. - __ mtvsrd(kScratchSimd128Reg, kScratchReg); - __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), - kScratchSimd128Reg); - __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); - __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vextractdm(i.OutputRegister(), i.InputSimd128Register(0)); + } else { + __ mov(kScratchReg, + Operand(0x8080808080800040)); // Select 0 for the high bits. + __ mtvsrd(kScratchSimd128Reg, kScratchReg); + __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), + kScratchSimd128Reg); + __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); + __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + } break; } case kPPC_I32x4BitMask: { - __ mov(kScratchReg, - Operand(0x8080808000204060)); // Select 0 for the high bits. - __ mtvsrd(kScratchSimd128Reg, kScratchReg); - __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), - kScratchSimd128Reg); - __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); - __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vextractwm(i.OutputRegister(), i.InputSimd128Register(0)); + } else { + __ mov(kScratchReg, + Operand(0x8080808000204060)); // Select 0 for the high bits. 
+ __ mtvsrd(kScratchSimd128Reg, kScratchReg); + __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), + kScratchSimd128Reg); + __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); + __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + } break; } case kPPC_I16x8BitMask: { - __ mov(kScratchReg, Operand(0x10203040506070)); - __ mtvsrd(kScratchSimd128Reg, kScratchReg); - __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), - kScratchSimd128Reg); - __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); - __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vextracthm(i.OutputRegister(), i.InputSimd128Register(0)); + } else { + __ mov(kScratchReg, Operand(0x10203040506070)); + __ mtvsrd(kScratchSimd128Reg, kScratchReg); + __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), + kScratchSimd128Reg); + __ vextractub(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); + __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + } break; } case kPPC_I8x16BitMask: { - Register temp = i.ToRegister(instr->TempAt(0)); - __ mov(temp, Operand(0x8101820283038)); - __ mov(ip, Operand(0x4048505860687078)); - __ mtvsrdd(kScratchSimd128Reg, temp, ip); - __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), - kScratchSimd128Reg); - __ vextractuh(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); - __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + __ vextractbm(i.OutputRegister(), i.InputSimd128Register(0)); + } else { + Register temp = i.ToRegister(instr->TempAt(0)); + __ mov(temp, Operand(0x8101820283038)); + __ mov(ip, Operand(0x4048505860687078)); + __ mtvsrdd(kScratchSimd128Reg, temp, ip); + __ vbpermq(kScratchSimd128Reg, i.InputSimd128Register(0), + kScratchSimd128Reg); + __ vextractuh(kScratchSimd128Reg, kScratchSimd128Reg, Operand(6)); + __ mfvsrd(i.OutputRegister(), kScratchSimd128Reg); + } break; } case kPPC_I32x4DotI16x8S: { @@ -3890,7 +3922,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, // Unnecessary for eq/lt & ne/ge since only FU bit will be set. } - if (CpuFeatures::IsSupported(ISELECT)) { + if (CpuFeatures::IsSupported(PPC_7_PLUS)) { switch (cond) { case eq: case lt: @@ -3941,7 +3973,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { __ CmpU64(input, Operand(case_count), r0); __ bge(GetLabel(i.InputRpo(1))); __ mov_label_addr(kScratchReg, table); - __ ShiftLeftImm(r0, input, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r0, input, Operand(kSystemPointerSizeLog2)); __ LoadU64(kScratchReg, MemOperand(kScratchReg, r0)); __ Jump(kScratchReg); } @@ -4058,7 +4090,7 @@ void CodeGenerator::AssembleConstructFrame() { if (required_slots > 0) { #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -4068,7 +4100,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. 
- if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { Register scratch = ip; __ LoadU64( scratch, @@ -4083,12 +4115,11 @@ void CodeGenerator::AssembleConstructFrame() { } __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); - // We come from WebAssembly, there are no references for the GC. + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); - if (FLAG_debug_code) { - __ stop(); - } + if (FLAG_debug_code) __ stop(); __ bind(&done); } @@ -4195,17 +4226,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { // Constant pool is unavailable since the frame has been destructed ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); if (drop_jsargs) { - // We must pop all arguments from the stack (including the receiver). This - // number of arguments is given by max(1 + argc_reg, parameter_slots). - __ addi(argc_reg, argc_reg, Operand(1)); // Also pop the receiver. + // We must pop all arguments from the stack (including the receiver). + // The number of arguments without the receiver is + // max(argc_reg, parameter_slots-1), and the receiver is added in + // DropArguments(). if (parameter_slots > 1) { + const int parameter_slots_without_receiver = parameter_slots - 1; Label skip; - __ CmpS64(argc_reg, Operand(parameter_slots), r0); + __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver), r0); __ bgt(&skip); - __ mov(argc_reg, Operand(parameter_slots)); + __ mov(argc_reg, Operand(parameter_slots_without_receiver)); __ bind(&skip); } - __ Drop(argc_reg); + __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } else if (additional_pop_count->IsImmediate()) { int additional_count = g.ToConstant(additional_pop_count).ToInt32(); __ Drop(parameter_slots + additional_count); diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index 3cbfc35588..c74211aa38 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -2395,14 +2395,14 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, S) SIMD_TYPES(SIMD_VISIT_REPLACE_LANE) #undef SIMD_VISIT_REPLACE_LANE -#define SIMD_VISIT_BINOP(Opcode) \ - void InstructionSelector::Visit##Opcode(Node* node) { \ - PPCOperandGenerator g(this); \ - InstructionOperand temps[] = {g.TempSimd128Register(), \ - g.TempSimd128Register()}; \ - Emit(kPPC_##Opcode, g.DefineAsRegister(node), \ - g.UseUniqueRegister(node->InputAt(0)), \ - g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \ +#define SIMD_VISIT_BINOP(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + PPCOperandGenerator g(this); \ + InstructionOperand temps[] = {g.TempSimd128Register(), \ + g.TempSimd128Register(), g.TempRegister()}; \ + Emit(kPPC_##Opcode, g.DefineAsRegister(node), \ + g.UseUniqueRegister(node->InputAt(0)), \ + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \ } SIMD_BINOP_LIST(SIMD_VISIT_BINOP) #undef SIMD_VISIT_BINOP diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index 060eb31da2..685293169d 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ 
b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -2481,184 +2481,138 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kS390_Word64AtomicCompareExchangeUint64: ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64(); break; - // vector replicate element - case kS390_F64x2Splat: { - __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0), - Condition(3)); - break; - } - case kS390_F32x4Splat: { - __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0), - Condition(2)); - break; - } - case kS390_I64x2Splat: { - Simd128Register dst = i.OutputSimd128Register(); - __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(3)); - __ vrep(dst, dst, Operand(0), Condition(3)); - break; - } - case kS390_I32x4Splat: { - Simd128Register dst = i.OutputSimd128Register(); - __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(2)); - __ vrep(dst, dst, Operand(0), Condition(2)); - break; - } - case kS390_I16x8Splat: { - Simd128Register dst = i.OutputSimd128Register(); - __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(1)); - __ vrep(dst, dst, Operand(0), Condition(1)); - break; - } - case kS390_I8x16Splat: { - Simd128Register dst = i.OutputSimd128Register(); - __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(0)); - __ vrep(dst, dst, Operand(0), Condition(0)); - break; - } - // vector extract element - case kS390_F64x2ExtractLane: { - __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0), - Operand(1 - i.InputInt8(1)), Condition(3)); - break; - } - case kS390_F32x4ExtractLane: { - __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0), - Operand(3 - i.InputInt8(1)), Condition(2)); - break; - } - case kS390_I64x2ExtractLane: { - __ vlgv(i.OutputRegister(), i.InputSimd128Register(0), - MemOperand(r0, 1 - i.InputInt8(1)), Condition(3)); - break; - } - case kS390_I32x4ExtractLane: { - __ vlgv(i.OutputRegister(), i.InputSimd128Register(0), - MemOperand(r0, 3 - i.InputInt8(1)), Condition(2)); - break; - } - case kS390_I16x8ExtractLaneU: { - __ vlgv(i.OutputRegister(), i.InputSimd128Register(0), - MemOperand(r0, 7 - i.InputInt8(1)), Condition(1)); - break; - } - case kS390_I16x8ExtractLaneS: { - __ vlgv(kScratchReg, i.InputSimd128Register(0), - MemOperand(r0, 7 - i.InputInt8(1)), Condition(1)); - __ lghr(i.OutputRegister(), kScratchReg); - break; - } - case kS390_I8x16ExtractLaneU: { - __ vlgv(i.OutputRegister(), i.InputSimd128Register(0), - MemOperand(r0, 15 - i.InputInt8(1)), Condition(0)); - break; - } - case kS390_I8x16ExtractLaneS: { - __ vlgv(kScratchReg, i.InputSimd128Register(0), - MemOperand(r0, 15 - i.InputInt8(1)), Condition(0)); - __ lgbr(i.OutputRegister(), kScratchReg); - break; - } - // vector replace element - case kS390_F64x2ReplaceLane: { - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0)); - __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0), - Condition(3)); - __ vlvg(kScratchDoubleReg, kScratchReg, - MemOperand(r0, 1 - i.InputInt8(1)), Condition(3)); - __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0)); - break; - } - case kS390_F32x4ReplaceLane: { - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0)); - __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0), - Condition(2)); - __ 
vlvg(kScratchDoubleReg, kScratchReg, - MemOperand(r0, 3 - i.InputInt8(1)), Condition(2)); - __ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0)); - break; - } - case kS390_I64x2ReplaceLane: { - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ vlr(dst, src, Condition(0), Condition(0), Condition(0)); - } - __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 1 - i.InputInt8(1)), - Condition(3)); - break; - } - case kS390_I32x4ReplaceLane: { - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ vlr(dst, src, Condition(0), Condition(0), Condition(0)); - } - __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 3 - i.InputInt8(1)), - Condition(2)); - break; - } - case kS390_I16x8ReplaceLane: { - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ vlr(dst, src, Condition(0), Condition(0), Condition(0)); - } - __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 7 - i.InputInt8(1)), - Condition(1)); - break; - } - case kS390_I8x16ReplaceLane: { - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ vlr(dst, src, Condition(0), Condition(0), Condition(0)); - } - __ vlvg(dst, i.InputRegister(2), MemOperand(r0, 15 - i.InputInt8(1)), - Condition(0)); - break; - } - // vector binops - case kS390_F64x2Add: { - __ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_F64x2Sub: { - __ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_F64x2Mul: { - __ vfm(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_F64x2Div: { - __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_F64x2Min: { - __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(1), Condition(0), - Condition(3)); - break; - } - case kS390_F64x2Max: { - __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(1), Condition(0), - Condition(3)); - break; - } + // Simd Support. 
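The rewrite below collapses dozens of near-identical switch cases into X-macro lists: each V(...) entry expands into one case, so supporting a new opcode becomes a one-line list change. A minimal self-contained sketch of the pattern, with illustrative names rather than V8's:

#include <cstdio>

#define BINOP_LIST(V) \
  V(Add)              \
  V(Sub)

enum Op {
#define DECLARE(name) k##name,
  BINOP_LIST(DECLARE)
#undef DECLARE
};

void Emit(Op op) {
  switch (op) {
#define EMIT_CASE(name)              \
  case k##name:                      \
    std::printf("emit %s\n", #name); \
    break;
    BINOP_LIST(EMIT_CASE)
#undef EMIT_CASE
  }
}

int main() { Emit(kAdd); Emit(kSub); }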
+#define SIMD_BINOP_LIST(V) \ + V(F64x2Add) \ + V(F64x2Sub) \ + V(F64x2Mul) \ + V(F64x2Div) \ + V(F64x2Min) \ + V(F64x2Max) \ + V(F64x2Eq) \ + V(F64x2Ne) \ + V(F64x2Lt) \ + V(F64x2Le) \ + V(F32x4Add) \ + V(F32x4Sub) \ + V(F32x4Mul) \ + V(F32x4Div) \ + V(F32x4Min) \ + V(F32x4Max) \ + V(F32x4Eq) \ + V(F32x4Ne) \ + V(F32x4Lt) \ + V(F32x4Le) \ + V(I64x2Add) \ + V(I64x2Sub) \ + V(I64x2Mul) \ + V(I64x2Eq) \ + V(I64x2Ne) \ + V(I64x2GtS) \ + V(I64x2GeS) \ + V(I32x4Add) \ + V(I32x4Sub) \ + V(I32x4Mul) \ + V(I32x4Eq) \ + V(I32x4Ne) \ + V(I32x4GtS) \ + V(I32x4GeS) \ + V(I32x4GtU) \ + V(I32x4GeU) \ + V(I32x4MinS) \ + V(I32x4MinU) \ + V(I32x4MaxS) \ + V(I32x4MaxU) \ + V(I16x8Add) \ + V(I16x8Sub) \ + V(I16x8Mul) \ + V(I16x8Eq) \ + V(I16x8Ne) \ + V(I16x8GtS) \ + V(I16x8GeS) \ + V(I16x8GtU) \ + V(I16x8GeU) \ + V(I16x8MinS) \ + V(I16x8MinU) \ + V(I16x8MaxS) \ + V(I16x8MaxU) \ + V(I8x16Add) \ + V(I8x16Sub) \ + V(I8x16Eq) \ + V(I8x16Ne) \ + V(I8x16GtS) \ + V(I8x16GeS) \ + V(I8x16GtU) \ + V(I8x16GeU) \ + V(I8x16MinS) \ + V(I8x16MinU) \ + V(I8x16MaxS) \ + V(I8x16MaxU) + +#define EMIT_SIMD_BINOP(name) \ + case kS390_##name: { \ + __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \ + i.InputSimd128Register(1)); \ + break; \ + } + SIMD_BINOP_LIST(EMIT_SIMD_BINOP) +#undef EMIT_SIMD_BINOP +#undef SIMD_BINOP_LIST + +#define SIMD_UNOP_LIST(V) \ + V(F64x2Splat, F64x2Splat, Simd128Register, DoubleRegister) \ + V(F32x4Splat, F32x4Splat, Simd128Register, DoubleRegister) \ + V(I64x2Splat, I64x2Splat, Simd128Register, Register) \ + V(I32x4Splat, I32x4Splat, Simd128Register, Register) \ + V(I16x8Splat, I16x8Splat, Simd128Register, Register) \ + V(I8x16Splat, I8x16Splat, Simd128Register, Register) + +#define EMIT_SIMD_UNOP(name, op, dtype, stype) \ + case kS390_##name: { \ + __ op(i.Output##dtype(), i.Input##stype(0)); \ + break; \ + } + SIMD_UNOP_LIST(EMIT_SIMD_UNOP) +#undef EMIT_SIMD_UNOP +#undef SIMD_UNOP_LIST + +#define SIMD_EXTRACT_LANE_LIST(V) \ + V(F64x2ExtractLane, DoubleRegister) \ + V(F32x4ExtractLane, DoubleRegister) \ + V(I64x2ExtractLane, Register) \ + V(I32x4ExtractLane, Register) \ + V(I16x8ExtractLaneU, Register) \ + V(I16x8ExtractLaneS, Register) \ + V(I8x16ExtractLaneU, Register) \ + V(I8x16ExtractLaneS, Register) + +#define EMIT_SIMD_EXTRACT_LANE(name, dtype) \ + case kS390_##name: { \ + __ name(i.Output##dtype(), i.InputSimd128Register(0), i.InputInt8(1)); \ + break; \ + } + SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE) +#undef EMIT_SIMD_EXTRACT_LANE +#undef SIMD_EXTRACT_LANE_LIST + +#define SIMD_REPLACE_LANE_LIST(V) \ + V(F64x2ReplaceLane, DoubleRegister) \ + V(F32x4ReplaceLane, DoubleRegister) \ + V(I64x2ReplaceLane, Register) \ + V(I32x4ReplaceLane, Register) \ + V(I16x8ReplaceLane, Register) \ + V(I8x16ReplaceLane, Register) + +#define EMIT_SIMD_REPLACE_LANE(name, stype) \ + case kS390_##name: { \ + __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \ + i.Input##stype(2), i.InputInt8(1)); \ + break; \ + } + SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE) +#undef EMIT_SIMD_REPLACE_LANE +#undef SIMD_REPLACE_LANE_LIST + // vector binops case kS390_F64x2Qfma: { Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2675,42 +2629,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vfnms(dst, src1, src2, src0, Condition(3), Condition(0)); break; } - case kS390_F32x4Add: { - __ vfa(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - 
break; - } - case kS390_F32x4Sub: { - __ vfs(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_F32x4Mul: { - __ vfm(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_F32x4Div: { - __ vfd(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_F32x4Min: { - __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(1), Condition(0), - Condition(2)); - break; - } - case kS390_F32x4Max: { - __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(1), Condition(0), - Condition(2)); - break; - } case kS390_F32x4Qfma: { Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2727,81 +2645,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vfnms(dst, src1, src2, src0, Condition(2), Condition(0)); break; } - case kS390_I64x2Add: { - __ va(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_I64x2Sub: { - __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_I64x2Mul: { - Simd128Register src0 = i.InputSimd128Register(0); - Simd128Register src1 = i.InputSimd128Register(1); - Register scratch_0 = r0; - Register scratch_1 = r1; - for (int i = 0; i < 2; i++) { - __ vlgv(scratch_0, src0, MemOperand(r0, i), Condition(3)); - __ vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3)); - __ MulS64(scratch_0, scratch_1); - scratch_0 = r1; - scratch_1 = ip; - } - __ vlvgp(i.OutputSimd128Register(), r0, r1); - break; - } - case kS390_I32x4Add: { - __ va(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I32x4Sub: { - __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I32x4Mul: { - __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I16x8Add: { - __ va(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I16x8Sub: { - __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I16x8Mul: { - __ vml(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I8x16Add: { - __ va(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(0)); - break; - } - case kS390_I8x16Sub: { - __ vs(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(0)); - break; - } case kS390_I16x8RoundingAverageU: { __ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1), Condition(0), Condition(0), @@ 
-2814,274 +2657,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Condition(0)); break; } - // vector comparisons - case kS390_F64x2Eq: { - __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_F64x2Ne: { - __ vfce(kScratchDoubleReg, i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(3)); - __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg, - Condition(0), Condition(0), Condition(3)); - break; - } - case kS390_F64x2Le: { - __ vfche(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_F64x2Lt: { - __ vfch(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_I32x4MinS: { - __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I32x4MinU: { - __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I16x8MinS: { - __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I16x8MinU: { - __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I8x16MinS: { - __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(0)); - break; - } - case kS390_I8x16MinU: { - __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(0)); - break; - } - case kS390_I32x4MaxS: { - __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I32x4MaxU: { - __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I16x8MaxS: { - __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I16x8MaxU: { - __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I8x16MaxS: { - __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(0)); - break; - } - case kS390_I8x16MaxU: { - __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(0)); - break; - } - case kS390_F32x4Eq: { - __ vfce(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I64x2Eq: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(3)); - break; - } - case kS390_I32x4Eq: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(2)); - 
break; - } - case kS390_I16x8Eq: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(1)); - break; - } - case kS390_I8x16Eq: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0)); - break; - } - case kS390_F32x4Ne: { - __ vfce(kScratchDoubleReg, i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0), - Condition(2)); - __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg, - Condition(0), Condition(0), Condition(2)); - break; - } - case kS390_I64x2Ne: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(3)); - __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(), - i.OutputSimd128Register(), Condition(0), Condition(0), - Condition(3)); - break; - } - case kS390_I32x4Ne: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(2)); - __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(), - i.OutputSimd128Register(), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I16x8Ne: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(1)); - __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(), - i.OutputSimd128Register(), Condition(0), Condition(0), - Condition(1)); - break; - } - case kS390_I8x16Ne: { - __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0)); - __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(), - i.OutputSimd128Register(), Condition(0), Condition(0), - Condition(0)); - break; - } - case kS390_F32x4Lt: { - __ vfch(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_F32x4Le: { - __ vfche(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(0), - Condition(2)); - break; - } - case kS390_I64x2GtS: { - __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(3)); - break; - } - case kS390_I64x2GeS: { - // Compute !(B > A) which is equal to A >= B. 
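These GeS sequences (removed here in favor of the macro-fied helpers) lean on a lane-wise identity: for a total order such as signed integers, a >= b is exactly !(b > a), so vch with swapped operands builds the greater-than mask and vno of that mask with itself (NOR, i.e. bitwise NOT) flips it. A scalar sanity check of the identity, illustrative only:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t samples[] = {INT64_MIN, -1, 0, 1, INT64_MAX};
  for (int64_t a : samples) {
    for (int64_t b : samples) {
      assert((a >= b) == !(b > a));  // the identity behind I64x2GeS et al.
    }
  }
}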
- __ vch(kScratchDoubleReg, i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(3)); - __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg, - Condition(0), Condition(0), Condition(3)); - break; - } - case kS390_I32x4GtS: { - __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(2)); - break; - } - case kS390_I32x4GeS: { - __ vch(kScratchDoubleReg, i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(2)); - __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg, - Condition(0), Condition(0), Condition(2)); - break; - } - case kS390_I32x4GtU: { - __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(2)); - break; - } - case kS390_I32x4GeU: { - __ vceq(kScratchDoubleReg, i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(2)); - __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(2)); - __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(), - kScratchDoubleReg, Condition(0), Condition(0), Condition(2)); - break; - } - case kS390_I16x8GtS: { - __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(1)); - break; - } - case kS390_I16x8GeS: { - __ vch(kScratchDoubleReg, i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(1)); - __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg, - Condition(0), Condition(0), Condition(1)); - break; - } - case kS390_I16x8GtU: { - __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(1)); - break; - } - case kS390_I16x8GeU: { - __ vceq(kScratchDoubleReg, i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(1)); - __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(1)); - __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(), - kScratchDoubleReg, Condition(0), Condition(0), Condition(1)); - break; - } - case kS390_I8x16GtS: { - __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0)); - break; - } - case kS390_I8x16GeS: { - __ vch(kScratchDoubleReg, i.InputSimd128Register(1), - i.InputSimd128Register(0), Condition(0), Condition(0)); - __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg, - Condition(0), Condition(0), Condition(0)); - break; - } - case kS390_I8x16GtU: { - __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0)); - break; - } - case kS390_I8x16GeU: { - __ vceq(kScratchDoubleReg, i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0)); - __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1), Condition(0), Condition(0)); - __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(), - kScratchDoubleReg, Condition(0), Condition(0), Condition(0)); - break; - } // vector shifts #define VECTOR_SHIFT(op, mode) \ { \ @@ -3851,14 +3426,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kS390_F64x2ConvertLowI32x4S: { __ vupl(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0), Condition(0), Condition(2)); - __ vcdg(i.OutputSimd128Register(), kScratchDoubleReg, 
Condition(5), + __ vcdg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(4), Condition(0), Condition(3)); break; } case kS390_F64x2ConvertLowI32x4U: { __ vupll(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0), Condition(0), Condition(2)); - __ vcdlg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5), + __ vcdlg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(4), Condition(0), Condition(3)); break; } @@ -4214,7 +3789,7 @@ void CodeGenerator::AssembleConstructFrame() { if (required_slots > 0) { #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -4224,7 +3799,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. - if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { Register scratch = r1; __ LoadU64( scratch, @@ -4238,12 +3813,11 @@ void CodeGenerator::AssembleConstructFrame() { } __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); - // We come from WebAssembly, there are no references for the GC. + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); - if (FLAG_debug_code) { - __ stop(); - } + if (FLAG_debug_code) __ stop(); __ bind(&done); } @@ -4346,17 +3920,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { } if (drop_jsargs) { - // We must pop all arguments from the stack (including the receiver). This - // number of arguments is given by max(1 + argc_reg, parameter_slots). - __ AddS64(argc_reg, argc_reg, Operand(1)); // Also pop the receiver. + // We must pop all arguments from the stack (including the receiver). + // The number of arguments without the receiver is + // max(argc_reg, parameter_slots-1), and the receiver is added in + // DropArguments(). 
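The same epilogue pattern recurs in the ppc, s390, and x64 hunks of this diff: callers may push more actual arguments than the function declares, so the code clamps the count before dropping. A scalar sketch of the computation, with illustrative names:

#include <algorithm>

// Slots popped on return: max(argc, parameter_slots - 1) argument slots,
// plus one receiver slot that DropArguments() accounts for internally.
int StackSlotsPopped(int argc, int parameter_slots) {
  const int without_receiver = std::max(argc, parameter_slots - 1);
  return without_receiver + 1;
}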
if (parameter_slots > 1) { + const int parameter_slots_without_receiver = parameter_slots - 1; Label skip; - __ CmpS64(argc_reg, Operand(parameter_slots)); + __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver)); __ bgt(&skip); - __ mov(argc_reg, Operand(parameter_slots)); + __ mov(argc_reg, Operand(parameter_slots_without_receiver)); __ bind(&skip); } - __ Drop(argc_reg); + __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } else if (additional_pop_count->IsImmediate()) { int additional_count = g.ToConstant(additional_pop_count).ToInt32(); __ Drop(parameter_slots + additional_count); diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc index a4a116a3f5..bcf5a8dfff 100644 --- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -2790,7 +2790,7 @@ void InstructionSelector::EmitPrepareResults( void InstructionSelector::VisitLoadLane(Node* node) { // We should never reach here, see http://crrev.com/c/2577820 - UNIMPLEMENTED(); + UNREACHABLE(); } void InstructionSelector::VisitLoadTransform(Node* node) { @@ -2800,7 +2800,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) { void InstructionSelector::VisitStoreLane(Node* node) { // We should never reach here, see http://crrev.com/c/2577820 - UNIMPLEMENTED(); + UNREACHABLE(); } void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index f16c7a6c89..60a40fb489 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -2954,21 +2954,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I64x2ShrS: { // TODO(zhin): there is vpsraq but requires AVX512 - // ShrS on each quadword one at a time XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); - Register tmp = i.ToRegister(instr->TempAt(0)); - // Modulo 64 not required as sarq_cl will mask cl to 6 bits. 
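The removed comment above relies on an x86-64 detail: 64-bit shift instructions use only the low six bits of CL, so an explicit modulo-64 of the shift count is redundant. A scalar illustration (the C++ sketch spells out the mask itself, since shifting by 64 or more is undefined in C++):

#include <cassert>
#include <cstdint>

int64_t ArithShiftRight(int64_t value, uint8_t count) {
  return value >> (count & 63);  // what sarq with a CL count does in hardware
}

int main() {
  assert(ArithShiftRight(-8, 1) == -4);
  assert(ArithShiftRight(-8, 65) == ArithShiftRight(-8, 1));  // 65 & 63 == 1
}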
- - // lower quadword - __ Pextrq(tmp, src, int8_t{0x0}); - __ sarq_cl(tmp); - __ Pinsrq(dst, tmp, uint8_t{0x0}); - - // upper quadword - __ Pextrq(tmp, src, int8_t{0x1}); - __ sarq_cl(tmp); - __ Pinsrq(dst, tmp, uint8_t{0x1}); + if (HasImmediateInput(instr, 1)) { + __ I64x2ShrS(dst, src, i.InputInt6(1), kScratchDoubleReg); + } else { + __ I64x2ShrS(dst, src, i.InputRegister(1), kScratchDoubleReg, + i.TempSimd128Register(0), kScratchRegister); + } break; } case kX64I64x2Add: { @@ -4025,8 +4018,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64S16x8HalfShuffle1: { XMMRegister dst = i.OutputSimd128Register(); - ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, i.InputUint8(1)); - __ Pshufhw(dst, dst, i.InputUint8(2)); + uint8_t mask_lo = i.InputUint8(1); + uint8_t mask_hi = i.InputUint8(2); + if (mask_lo != 0xe4) { + ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, mask_lo); + if (mask_hi != 0xe4) __ Pshufhw(dst, dst, mask_hi); + } else { + DCHECK_NE(mask_hi, 0xe4); + ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, mask_hi); + } break; } case kX64S16x8HalfShuffle2: { @@ -4725,7 +4725,7 @@ void CodeGenerator::AssembleConstructFrame() { if (required_slots > 0) { DCHECK(frame_access_state()->has_frame()); #if V8_ENABLE_WEBASSEMBLY - if (info()->IsWasm() && required_slots > 128) { + if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) { // For WebAssembly functions with big frames we have to do the stack // overflow check before we construct the frame. Otherwise we may not // have enough space on the stack to call the runtime for the stack @@ -4735,7 +4735,7 @@ void CodeGenerator::AssembleConstructFrame() { // If the frame is bigger than the stack, we throw the stack overflow // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. - if (required_slots * kSystemPointerSize < FLAG_stack_size * 1024) { + if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) { __ movq(kScratchRegister, FieldOperand(kWasmInstanceRegister, WasmInstanceObject::kRealStackLimitAddressOffset)); @@ -4748,6 +4748,8 @@ void CodeGenerator::AssembleConstructFrame() { __ near_call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); + // The call does not return, hence we can ignore any references and just + // define an empty safepoint. ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone()); RecordSafepoint(reference_map); __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap); @@ -4870,11 +4872,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { } if (drop_jsargs) { - // We must pop all arguments from the stack (including the receiver). This - // number of arguments is given by max(1 + argc_reg, parameter_slots). - int parameter_slots_without_receiver = - parameter_slots - 1; // Exclude the receiver to simplify the - // computation. We'll account for it at the end. + // We must pop all arguments from the stack (including the receiver). + // The number of arguments without the receiver is + // max(argc_reg, parameter_slots-1), and the receiver is added in + // DropArguments(). 
+ int parameter_slots_without_receiver = parameter_slots - 1; Label mismatch_return; Register scratch_reg = r10; DCHECK_NE(argc_reg, scratch_reg); @@ -4884,11 +4886,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ j(greater, &mismatch_return, Label::kNear); __ Ret(parameter_slots * kSystemPointerSize, scratch_reg); __ bind(&mismatch_return); - __ PopReturnAddressTo(scratch_reg); - __ leaq(rsp, Operand(rsp, argc_reg, times_system_pointer_size, - kSystemPointerSize)); // Also pop the receiver. + __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); // We use a return instead of a jump for better return address prediction. - __ PushReturnAddressFrom(scratch_reg); __ Ret(); } else if (additional_pop_count->IsImmediate()) { Register scratch_reg = r10; diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index f54b3d5792..53ee75064b 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -3298,11 +3298,17 @@ void InstructionSelector::VisitI64x2Neg(Node* node) { void InstructionSelector::VisitI64x2ShrS(Node* node) { X64OperandGenerator g(this); - InstructionOperand temps[] = {g.TempRegister()}; - // Use fixed to rcx, to use sarq_cl in codegen. - Emit(kX64I64x2ShrS, g.DefineSameAsFirst(node), - g.UseUniqueRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), rcx), - arraysize(temps), temps); + InstructionOperand dst = + IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node); + + if (g.CanBeImmediate(node->InputAt(1))) { + Emit(kX64I64x2ShrS, dst, g.UseRegister(node->InputAt(0)), + g.UseImmediate(node->InputAt(1))); + } else { + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kX64I64x2ShrS, dst, g.UseUniqueRegister(node->InputAt(0)), + g.UseRegister(node->InputAt(1)), arraysize(temps), temps); + } } void InstructionSelector::VisitI64x2Mul(Node* node) { diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc index 8059faa176..a864012a7a 100644 --- a/deps/v8/src/compiler/branch-elimination.cc +++ b/deps/v8/src/compiler/branch-elimination.cc @@ -101,8 +101,9 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) { Node* input = inputs[i]; ControlPathConditions from_input = node_conditions_.Get(input); if (!from_input.LookupCondition(branch_condition, &previous_branch, - &condition_value)) + &condition_value)) { return; + } if (phase_ == kEARLY) { phi_inputs.emplace_back(condition_value ? 
jsgraph()->TrueConstant() @@ -128,6 +129,7 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) { Reduction BranchElimination::ReduceBranch(Node* node) { Node* condition = node->InputAt(0); Node* control_input = NodeProperties::GetControlInput(node, 0); + if (!reduced_.Get(control_input)) return NoChange(); ControlPathConditions from_input = node_conditions_.Get(control_input); Node* branch; bool condition_value; @@ -283,7 +285,7 @@ Reduction BranchElimination::ReduceMerge(Node* node) { } Reduction BranchElimination::ReduceStart(Node* node) { - return UpdateConditions(node, {}); + return UpdateConditions(node, ControlPathConditions(zone_)); } Reduction BranchElimination::ReduceOtherControl(Node* node) { @@ -315,7 +317,7 @@ Reduction BranchElimination::UpdateConditions( // The control path for the node is the path obtained by appending the // current_condition to the prev_conditions. Use the original control path as // a hint to avoid allocations. - if (in_new_block || prev_conditions.Size() == 0) { + if (in_new_block || prev_conditions.blocks_.Size() == 0) { prev_conditions.AddConditionInNewBlock(zone_, current_condition, current_branch, is_true_branch); } else { @@ -330,14 +332,17 @@ void BranchElimination::ControlPathConditions::AddCondition( Zone* zone, Node* condition, Node* branch, bool is_true, ControlPathConditions hint) { if (!LookupCondition(condition)) { - FunctionalList<BranchCondition> prev_front = Front(); - if (hint.Size() > 0) { - prev_front.PushFront({condition, branch, is_true}, zone, hint.Front()); + BranchCondition branch_condition(condition, branch, is_true); + FunctionalList<BranchCondition> prev_front = blocks_.Front(); + if (hint.blocks_.Size() > 0) { + prev_front.PushFront(branch_condition, zone, hint.blocks_.Front()); } else { - prev_front.PushFront({condition, branch, is_true}, zone); + prev_front.PushFront(branch_condition, zone); } - DropFront(); - PushFront(prev_front, zone); + blocks_.DropFront(); + blocks_.PushFront(prev_front, zone); + conditions_.Set(condition, branch_condition); + SLOW_DCHECK(BlocksAndConditionsInvariant()); } } @@ -345,35 +350,66 @@ void BranchElimination::ControlPathConditions::AddConditionInNewBlock( Zone* zone, Node* condition, Node* branch, bool is_true) { FunctionalList<BranchCondition> new_block; if (!LookupCondition(condition)) { - new_block.PushFront({condition, branch, is_true}, zone); + BranchCondition branch_condition(condition, branch, is_true); + new_block.PushFront(branch_condition, zone); + conditions_.Set(condition, branch_condition); } - PushFront(new_block, zone); + blocks_.PushFront(new_block, zone); + SLOW_DCHECK(BlocksAndConditionsInvariant()); } bool BranchElimination::ControlPathConditions::LookupCondition( Node* condition) const { - for (auto block : *this) { - for (BranchCondition element : block) { - if (element.condition == condition) return true; - } - } - return false; + return conditions_.Get(condition).IsSet(); } bool BranchElimination::ControlPathConditions::LookupCondition( Node* condition, Node** branch, bool* is_true) const { - for (auto block : *this) { - for (BranchCondition element : block) { - if (element.condition == condition) { - *is_true = element.is_true; - *branch = element.branch; - return true; - } - } + const BranchCondition& element = conditions_.Get(condition); + if (element.IsSet()) { + *is_true = element.is_true; + *branch = element.branch; + return true; } return false; } +void BranchElimination::ControlPathConditions::ResetToCommonAncestor( + ControlPathConditions other) { 
+ while (other.blocks_.Size() > blocks_.Size()) other.blocks_.DropFront(); + while (blocks_.Size() > other.blocks_.Size()) { + for (BranchCondition branch_condition : blocks_.Front()) { + conditions_.Set(branch_condition.condition, {}); + } + blocks_.DropFront(); + } + while (blocks_ != other.blocks_) { + for (BranchCondition branch_condition : blocks_.Front()) { + conditions_.Set(branch_condition.condition, {}); + } + blocks_.DropFront(); + other.blocks_.DropFront(); + } + SLOW_DCHECK(BlocksAndConditionsInvariant()); +} + +#if DEBUG +bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() { + PersistentMap<Node*, BranchCondition> conditions_copy(conditions_); + for (auto block : blocks_) { + for (BranchCondition condition : block) { + // Every element of blocks_ has to be in conditions_. + if (conditions_copy.Get(condition.condition) != condition) return false; + conditions_copy.Set(condition.condition, {}); + } + } + // Every element of {conditions_} has to be in {blocks_}. We removed all + // elements of blocks_ from conditions_copy, so if it is not empty, the + // invariant fails. + return conditions_copy.begin() == conditions_copy.end(); +} +#endif + void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) { // Check if {branch} is dead because we might have a stale side-table entry. if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead && diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h index 6bc45a020d..9078c39038 100644 --- a/deps/v8/src/compiler/branch-elimination.h +++ b/deps/v8/src/compiler/branch-elimination.h @@ -10,6 +10,7 @@ #include "src/compiler/functional-list.h" #include "src/compiler/graph-reducer.h" #include "src/compiler/node-aux-data.h" +#include "src/compiler/persistent-map.h" namespace v8 { namespace internal { @@ -38,6 +39,9 @@ class V8_EXPORT_PRIVATE BranchElimination final // Represents a condition along with its value in the current control path. // Also stores the node that branched on this condition. struct BranchCondition { + BranchCondition() : condition(nullptr), branch(nullptr), is_true(false) {} + BranchCondition(Node* condition, Node* branch, bool is_true) + : condition(condition), branch(branch), is_true(is_true) {} Node* condition; Node* branch; bool is_true; @@ -47,15 +51,17 @@ class V8_EXPORT_PRIVATE BranchElimination final is_true == other.is_true; } bool operator!=(BranchCondition other) const { return !(*this == other); } + + bool IsSet() const { return branch != nullptr; } }; // Class for tracking information about branch conditions. It is represented // as a linked list of condition blocks, each of which corresponds to a block // of code between an IfTrue/IfFalse and a Merge. Each block is in turn // represented as a linked list of {BranchCondition}s. - class ControlPathConditions - : public FunctionalList<FunctionalList<BranchCondition>> { + class ControlPathConditions { public: + explicit ControlPathConditions(Zone* zone) : conditions_(zone) {} // Checks if {condition} is present in this {ControlPathConditions}. bool LookupCondition(Node* condition) const; // Checks if {condition} is present in this {ControlPathConditions} and @@ -68,9 +74,29 @@ class V8_EXPORT_PRIVATE BranchElimination final // Adds a condition in a new block. void AddConditionInNewBlock(Zone* zone, Node* condition, Node* branch, bool is_true); + // Reset this {ControlPathConditions} to the longest prefix that is common + // with {other}.
+ void ResetToCommonAncestor(ControlPathConditions other); + + bool operator==(const ControlPathConditions& other) const { + return blocks_ == other.blocks_; + } + bool operator!=(const ControlPathConditions& other) const { + return blocks_ != other.blocks_; + } + + friend class BranchElimination; private: - using FunctionalList<FunctionalList<BranchCondition>>::PushFront; + FunctionalList<FunctionalList<BranchCondition>> blocks_; + // This is an auxiliary data structure that provides fast lookups in the + // set of conditions. It should hold at any point that the contents of + // {blocks_} and {conditions_} are the same, which is implemented in + // {BlocksAndConditionsInvariant}. + PersistentMap<Node*, BranchCondition> conditions_; +#if DEBUG + bool BlocksAndConditionsInvariant(); +#endif }; Reduction ReduceBranch(Node* node); @@ -101,7 +127,9 @@ class V8_EXPORT_PRIVATE BranchElimination final // Maps each control node to the condition information known about the node. // If the information is nullptr, then we have not calculated the information // yet. - NodeAuxData<ControlPathConditions> node_conditions_; + + NodeAuxData<ControlPathConditions, ZoneConstruct<ControlPathConditions>> + node_conditions_; NodeAuxData<bool> reduced_; Zone* zone_; Node* dead_; diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index c46ec6944b..985a256c57 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -77,28 +77,6 @@ class BytecodeGraphBuilder { Node* GetParameter(int index, const char* debug_name_hint = nullptr); CodeKind code_kind() const { return code_kind_; } - bool native_context_independent() const { // TODO(jgruber,v8:8888): Remove dependent code. - return false; - } - bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; } - bool generate_full_feedback_collection() const { - // NCI code currently collects full feedback. - DCHECK_IMPLIES(native_context_independent(), - CollectFeedbackInGenericLowering()); - return native_context_independent(); - } - - static JSTypeHintLowering::LoweringResult NoChange() { - return JSTypeHintLowering::LoweringResult::NoChange(); - } - bool CanApplyTypeHintLowering(IrOpcode::Value opcode) const { - return !generate_full_feedback_collection() || - !IrOpcode::IsFeedbackCollectingOpcode(opcode); - } - bool CanApplyTypeHintLowering(const Operator* op) const { - return CanApplyTypeHintLowering(static_cast<IrOpcode::Value>(op->opcode())); - } // The node representing the current feedback vector is generated once prior // to visiting bytecodes, and is later passed as input to other nodes that // to feedback_vector() once all uses of the direct heap object reference // have been replaced with a Node* reference. void CreateFeedbackVectorNode(); - Node* BuildLoadFeedbackVector(); Node* feedback_vector_node() const { DCHECK_NOT_NULL(feedback_vector_node_); return feedback_vector_node_; } void CreateFeedbackCellNode(); - Node* BuildLoadFeedbackCell(); Node* feedback_cell_node() const { + DCHECK(CodeKindCanTierUp(code_kind())); DCHECK_NOT_NULL(feedback_cell_node_); return feedback_cell_node_; } // Same as above for the feedback vector node. 
void CreateNativeContextNode(); - Node* BuildLoadNativeContext(); Node* native_context_node() const { DCHECK_NOT_NULL(native_context_node_); return native_context_node_; @@ -135,13 +111,6 @@ class BytecodeGraphBuilder { // Only relevant for specific code kinds (see CodeKindCanTierUp). void MaybeBuildTierUpCheck(); - // Like bytecode, NCI code must collect call feedback to preserve proper - // behavior of inlining heuristics when tiering up to Turbofan in the future. - // The invocation count (how often a particular JSFunction has been called) - // is tracked by the callee. For bytecode, this happens in the - // InterpreterEntryTrampoline, for NCI code it happens here in the prologue. - void MaybeBuildIncrementInvocationCount(); - // Builder for loading a native context field. Node* BuildLoadNativeContextField(int index); @@ -255,8 +224,6 @@ class BytecodeGraphBuilder { // former points at JumpLoop, the latter at the loop header, i.e. the target // of JumpLoop). void PrepareFrameStateForOSREntryStackCheck(Node* node) { - DCHECK_EQ(bytecode_iterator().current_offset(), - bytecode_analysis().osr_entry_point()); DCHECK(OperatorProperties::HasFrameStateInput(node->op())); DCHECK(node->opcode() == IrOpcode::kJSStackCheck); const int offset = bytecode_analysis().osr_bailout_id().ToInt(); @@ -361,7 +328,7 @@ class BytecodeGraphBuilder { // StackChecks. void BuildFunctionEntryStackCheck(); void BuildIterationBodyStackCheck(); - void MaybeBuildOSREntryStackCheck(); + void BuildOSREntryStackCheck(); // Control flow plumbing. void BuildJump(); @@ -511,7 +478,6 @@ class BytecodeGraphBuilder { Environment* environment_; bool const osr_; int currently_peeled_loop_offset_; - bool is_osr_entry_stack_check_pending_; const bool skip_first_stack_and_tierup_check_; @@ -1087,7 +1053,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( shared_info_(shared_info), bytecode_array_(shared_info.GetBytecodeArray()), feedback_cell_(feedback_cell), - feedback_vector_(feedback_cell.value()->AsFeedbackVector()), + feedback_vector_(feedback_cell.value().value()), invocation_frequency_(invocation_frequency), type_hint_lowering_( broker, jsgraph, feedback_vector_, @@ -1107,7 +1073,6 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( environment_(nullptr), osr_(!osr_offset.IsNone()), currently_peeled_loop_offset_(-1), - is_osr_entry_stack_check_pending_(osr_), skip_first_stack_and_tierup_check_( flags & BytecodeGraphBuilderFlag::kSkipFirstStackAndTierupCheck), merge_environments_(local_zone), @@ -1160,70 +1125,24 @@ Node* BytecodeGraphBuilder::GetParameter(int parameter_index, void BytecodeGraphBuilder::CreateFeedbackCellNode() { DCHECK_NULL(feedback_cell_node_); - if (native_context_independent()) { - feedback_cell_node_ = BuildLoadFeedbackCell(); - } else if (is_turboprop()) { - feedback_cell_node_ = jsgraph()->Constant(feedback_cell_); - } -} - -Node* BytecodeGraphBuilder::BuildLoadFeedbackCell() { - DCHECK(native_context_independent()); - DCHECK_NULL(feedback_cell_node_); - return NewNode( - simplified()->LoadField(AccessBuilder::ForJSFunctionFeedbackCell()), - GetFunctionClosure()); + // Only used by tier-up logic; for code that doesn't tier up, we can skip + // this. + if (!CodeKindCanTierUp(code_kind())) return; + feedback_cell_node_ = jsgraph()->Constant(feedback_cell_); } void BytecodeGraphBuilder::CreateFeedbackVectorNode() { DCHECK_NULL(feedback_vector_node_); - feedback_vector_node_ = native_context_independent() - ? 
BuildLoadFeedbackVector() - : jsgraph()->Constant(feedback_vector()); -} - -Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() { - DCHECK(native_context_independent()); - DCHECK_NULL(feedback_vector_node_); - - // The feedback vector must exist and remain live while the generated code - // lives. Specifically that means it must be created when NCI code is - // installed, and must not be flushed. - return NewNode(simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()), - feedback_cell_node()); + feedback_vector_node_ = jsgraph()->Constant(feedback_vector()); } Node* BytecodeGraphBuilder::BuildLoadFeedbackCell(int index) { - if (native_context_independent()) { - // TODO(jgruber,v8:8888): Assumes that the feedback vector has been - // allocated. - Node* closure_feedback_cell_array = - NewNode(simplified()->LoadField( - AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray()), - feedback_vector_node()); - - return NewNode( - simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), - closure_feedback_cell_array); - } else { - return jsgraph()->Constant(feedback_vector().GetClosureFeedbackCell(index)); - } + return jsgraph()->Constant(feedback_vector().GetClosureFeedbackCell(index)); } void BytecodeGraphBuilder::CreateNativeContextNode() { DCHECK_NULL(native_context_node_); - native_context_node_ = native_context_independent() - ? BuildLoadNativeContext() - : jsgraph()->Constant(native_context()); -} - -Node* BytecodeGraphBuilder::BuildLoadNativeContext() { - DCHECK(native_context_independent()); - DCHECK_NULL(native_context_node_); - Node* context_map = NewNode(simplified()->LoadField(AccessBuilder::ForMap()), - environment()->Context()); - return NewNode(simplified()->LoadField(AccessBuilder::ForMapNativeContext()), - context_map); + native_context_node_ = jsgraph()->Constant(native_context()); } void BytecodeGraphBuilder::MaybeBuildTierUpCheck() { @@ -1245,21 +1164,6 @@ void BytecodeGraphBuilder::MaybeBuildTierUpCheck() { new_target, argc, context); } -void BytecodeGraphBuilder::MaybeBuildIncrementInvocationCount() { - if (!generate_full_feedback_collection()) return; - - Node* current_invocation_count = - NewNode(simplified()->LoadField( - AccessBuilder::ForFeedbackVectorInvocationCount()), - feedback_vector_node()); - Node* next_invocation_count = - NewNode(simplified()->NumberAdd(), current_invocation_count, - jsgraph()->SmiConstant(1)); - NewNode(simplified()->StoreField( - AccessBuilder::ForFeedbackVectorInvocationCount()), - feedback_vector_node(), next_invocation_count); -} - Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) { Node* result = NewNode(javascript()->LoadContext(0, index, true)); NodeProperties::ReplaceContextInput(result, native_context_node()); @@ -1293,7 +1197,6 @@ void BytecodeGraphBuilder::CreateGraph() { CreateFeedbackCellNode(); CreateFeedbackVectorNode(); MaybeBuildTierUpCheck(); - MaybeBuildIncrementInvocationCount(); CreateNativeContextNode(); VisitBytecodes(); @@ -1460,6 +1363,7 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset( void BytecodeGraphBuilder::BuildFunctionEntryStackCheck() { if (!skip_first_stack_check()) { + DCHECK(exception_handlers_.empty()); Node* node = NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry)); PrepareFrameStateForFunctionEntryStackCheck(node); @@ -1472,26 +1376,36 @@ void BytecodeGraphBuilder::BuildIterationBodyStackCheck() { environment()->RecordAfterState(node, Environment::kAttachFrameState); } -void BytecodeGraphBuilder::MaybeBuildOSREntryStackCheck() { - if 
(V8_UNLIKELY(is_osr_entry_stack_check_pending_)) { - is_osr_entry_stack_check_pending_ = false; - Node* node = - NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry)); - PrepareFrameStateForOSREntryStackCheck(node); - } +void BytecodeGraphBuilder::BuildOSREntryStackCheck() { + DCHECK(exception_handlers_.empty()); + Node* node = + NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry)); + PrepareFrameStateForOSREntryStackCheck(node); } // We will iterate through the OSR loop, then its parent, and so on // until we have reached the outermost loop containing the OSR loop. We do // not generate nodes for anything before the outermost loop. void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() { + environment()->FillWithOsrValues(); + + // The entry stack check has to happen *before* initialising the OSR prelude; + // it has to happen before setting up exception handlers, so that the + // optimized code can't accidentally catch a failing stack check with an + // OSR-ed loop inside a try-catch, e.g. + // + // try { + // loop { OSR(); } + // } catch { + // // Ignore failed stack check. + // } + BuildOSREntryStackCheck(); + OsrIteratorState iterator_states(this); iterator_states.ProcessOsrPrelude(); int osr_entry = bytecode_analysis().osr_entry_point(); DCHECK_EQ(bytecode_iterator().current_offset(), osr_entry); - environment()->FillWithOsrValues(); - // Suppose we have n nested loops, loop_0 being the outermost one, and // loop_n being the OSR loop. We start iterating the bytecode at the header // of loop_n (the OSR loop), and then we peel the part of the body of @@ -1562,12 +1476,6 @@ void BytecodeGraphBuilder::VisitSingleBytecode() { if (environment() != nullptr) { BuildLoopHeaderEnvironment(current_offset); - // The OSR-entry stack check must be emitted during the first call to - // VisitSingleBytecode in an OSR'd function. We don't know if that call - // will be made from AdvanceToOsrEntryAndPeelLoops or from VisitBytecodes, - // therefore we insert the logic here inside VisitSingleBytecode itself. - MaybeBuildOSREntryStackCheck(); - switch (bytecode_iterator().current_bytecode()) { #define BYTECODE_CASE(name, ...) 
\ case interpreter::Bytecode::k##name: \ @@ -1675,8 +1583,7 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name, TypeofMode typeof_mode) { FeedbackSource feedback = CreateFeedbackSource(feedback_slot_index); DCHECK(IsLoadGlobalICKind(broker()->GetFeedbackSlotKind(feedback))); - const Operator* op = - javascript()->LoadGlobal(name.object(), feedback, typeof_mode); + const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode); DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode())); return NewNode(op, feedback_vector_node()); } @@ -1707,8 +1614,7 @@ void BytecodeGraphBuilder::VisitStaGlobal() { LanguageMode language_mode = GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback)); - const Operator* op = - javascript()->StoreGlobal(language_mode, name.object(), feedback); + const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback); DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode())); Node* node = NewNode(op, value, feedback_vector_node()); environment()->RecordAfterState(node, Environment::kAttachFrameState); @@ -1891,13 +1797,12 @@ base::Optional<ScopeInfoRef> BytecodeGraphBuilder::TryGetScopeInfo() { Node* context = environment()->Context(); switch (context->opcode()) { case IrOpcode::kJSCreateFunctionContext: - return MakeRef( - broker(), - CreateFunctionContextParametersOf(context->op()).scope_info()); + return CreateFunctionContextParametersOf(context->op()) + .scope_info(broker()); case IrOpcode::kJSCreateBlockContext: case IrOpcode::kJSCreateCatchContext: case IrOpcode::kJSCreateWithContext: - return MakeRef(broker(), ScopeInfoOf(context->op())); + return ScopeInfoOf(broker(), context->op()); case IrOpcode::kParameter: { ScopeInfoRef scope_info = shared_info_.scope_info(); if (scope_info.HasOuterScopeInfo()) { @@ -2100,7 +2005,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() { NameRef name = MakeRefForConstantForIndexOperand<Name>(1); FeedbackSource feedback = CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); - const Operator* op = javascript()->LoadNamed(name.object(), feedback); + const Operator* op = javascript()->LoadNamed(name, feedback); JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedLoadNamed(op, feedback.slot); @@ -2126,8 +2031,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() { FeedbackSource feedback = CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); - const Operator* op = - javascript()->LoadNamedFromSuper(name.object(), feedback); + const Operator* op = javascript()->LoadNamedFromSuper(name, feedback); JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedLoadNamed(op, feedback.slot); @@ -2185,12 +2089,12 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) { DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed, broker()->GetFeedbackSlotKind(feedback)); - op = javascript()->StoreNamedOwn(name.object(), feedback); + op = javascript()->StoreNamedOwn(name, feedback); } else { DCHECK_EQ(StoreMode::kNormal, store_mode); LanguageMode language_mode = GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback)); - op = javascript()->StoreNamed(language_mode, name.object(), feedback); + op = javascript()->StoreNamed(language_mode, name, feedback); } JSTypeHintLowering::LoweringResult lowering = @@ -2288,10 +2192,10 @@ void BytecodeGraphBuilder::VisitCreateClosure() { bytecode_iterator().GetFlagOperand(2)) ? 
AllocationType::kOld : AllocationType::kYoung; - Handle<CodeT> compile_lazy = broker()->CanonicalPersistentHandle( - ToCodeT(*BUILTIN_CODE(jsgraph()->isolate(), CompileLazy))); - const Operator* op = javascript()->CreateClosure(shared_info.object(), - compile_lazy, allocation); + CodeTRef compile_lazy = MakeRef( + broker(), ToCodeT(*BUILTIN_CODE(jsgraph()->isolate(), CompileLazy))); + const Operator* op = + javascript()->CreateClosure(shared_info, compile_lazy, allocation); Node* closure = NewNode( op, BuildLoadFeedbackCell(bytecode_iterator().GetIndexOperand(1))); environment()->BindAccumulator(closure); @@ -2299,7 +2203,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() { void BytecodeGraphBuilder::VisitCreateBlockContext() { ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0); - const Operator* op = javascript()->CreateBlockContext(scope_info.object()); + const Operator* op = javascript()->CreateBlockContext(scope_info); Node* context = NewNode(op); environment()->BindAccumulator(context); } @@ -2307,8 +2211,8 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() { void BytecodeGraphBuilder::VisitCreateFunctionContext() { ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); - const Operator* op = javascript()->CreateFunctionContext( - scope_info.object(), slots, FUNCTION_SCOPE); + const Operator* op = + javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE); Node* context = NewNode(op); environment()->BindAccumulator(context); } @@ -2316,8 +2220,8 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() { void BytecodeGraphBuilder::VisitCreateEvalContext() { ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); - const Operator* op = javascript()->CreateFunctionContext(scope_info.object(), - slots, EVAL_SCOPE); + const Operator* op = + javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE); Node* context = NewNode(op); environment()->BindAccumulator(context); } @@ -2327,7 +2231,7 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() { Node* exception = environment()->LookupRegister(reg); ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(1); - const Operator* op = javascript()->CreateCatchContext(scope_info.object()); + const Operator* op = javascript()->CreateCatchContext(scope_info); Node* context = NewNode(op, exception); environment()->BindAccumulator(context); } @@ -2337,7 +2241,7 @@ void BytecodeGraphBuilder::VisitCreateWithContext() { environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(1); - const Operator* op = javascript()->CreateWithContext(scope_info.object()); + const Operator* op = javascript()->CreateWithContext(scope_info); Node* context = NewNode(op, object); environment()->BindAccumulator(context); } @@ -2366,8 +2270,8 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() { FeedbackSource pair = CreateFeedbackSource(slot_id); int literal_flags = bytecode_iterator().GetFlagOperand(2); STATIC_ASSERT(JSCreateLiteralRegExpNode::FeedbackVectorIndex() == 0); - const Operator* op = javascript()->CreateLiteralRegExp( - constant_pattern.object(), pair, literal_flags); + const Operator* op = + javascript()->CreateLiteralRegExp(constant_pattern, pair, literal_flags); DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode())); Node* 
literal = NewNode(op, feedback_vector_node()); environment()->BindAccumulator(literal, Environment::kAttachFrameState); @@ -2389,9 +2293,8 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() { int number_of_elements = array_boilerplate_description.constants_elements_length(); STATIC_ASSERT(JSCreateLiteralArrayNode::FeedbackVectorIndex() == 0); - const Operator* op = - javascript()->CreateLiteralArray(array_boilerplate_description.object(), - pair, literal_flags, number_of_elements); + const Operator* op = javascript()->CreateLiteralArray( + array_boilerplate_description, pair, literal_flags, number_of_elements); DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode())); Node* literal = NewNode(op, feedback_vector_node()); environment()->BindAccumulator(literal, Environment::kAttachFrameState); @@ -2423,7 +2326,7 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() { int number_of_properties = constant_properties.size(); STATIC_ASSERT(JSCreateLiteralObjectNode::FeedbackVectorIndex() == 0); const Operator* op = javascript()->CreateLiteralObject( - constant_properties.object(), pair, literal_flags, number_of_properties); + constant_properties, pair, literal_flags, number_of_properties); DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode())); Node* literal = NewNode(op, feedback_vector_node()); environment()->BindAccumulator(literal, Environment::kAttachFrameState); @@ -2455,8 +2358,8 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() { TemplateObjectDescriptionRef description = MakeRefForConstantForIndexOperand<TemplateObjectDescription>(0); STATIC_ASSERT(JSGetTemplateObjectNode::FeedbackVectorIndex() == 0); - const Operator* op = javascript()->GetTemplateObject( - description.object(), shared_info().object(), source); + const Operator* op = + javascript()->GetTemplateObject(description, shared_info(), source); DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode())); Node* template_object = NewNode(op, feedback_vector_node()); environment()->BindAccumulator(template_object); @@ -4162,7 +4065,6 @@ JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedUnaryOp(const Operator* op, Node* operand, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4176,7 +4078,6 @@ JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4191,7 +4092,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver, Node* cache_array, Node* cache_type, Node* index, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(IrOpcode::kJSForInNext)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4204,7 +4104,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver, JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedForInPrepare(Node* enumerator, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(IrOpcode::kJSForInPrepare)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = 
environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4217,7 +4116,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInPrepare(Node* enumerator, JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(IrOpcode::kJSToNumber)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4229,7 +4127,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value, JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedCall( const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4244,7 +4141,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4259,7 +4155,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op, Node* receiver, FeedbackSlot load_slot, FeedbackSlot call_slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult early_reduction = @@ -4272,7 +4167,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op, JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult early_reduction = @@ -4285,7 +4179,6 @@ JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op, Node* receiver, Node* key, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4299,7 +4192,6 @@ JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op, Node* receiver, Node* value, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4314,7 +4206,6 @@ BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op, Node* receiver, Node* key, Node* value, FeedbackSlot slot) { - if (!CanApplyTypeHintLowering(op)) return NoChange(); Node* effect = environment()->GetEffectDependency(); Node* control = environment()->GetControlDependency(); JSTypeHintLowering::LoweringResult result = @@ -4561,9 +4452,6 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter, ObserveNodeInfo const& observe_node_info) { - DCHECK(broker->IsSerializedForCompilation( - shared_info, 
feedback_cell.value()->AsFeedbackVector())); - DCHECK(feedback_cell.value()->AsFeedbackVector().serialized()); BytecodeGraphBuilder builder( broker, local_zone, broker->target_native_context(), shared_info, feedback_cell, osr_offset, jsgraph, invocation_frequency, diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index 4e0afd8ea5..0e6872aa66 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -125,7 +125,7 @@ class SymbolWrapper; class Undetectable; class UniqueName; class WasmCapiFunctionData; -class WasmExceptionObject; +class WasmTagObject; class WasmExceptionPackage; class WasmExceptionTag; class WasmExportedFunctionData; diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc index 85cd1bf303..dc2db32753 100644 --- a/deps/v8/src/compiler/compilation-dependencies.cc +++ b/deps/v8/src/compiler/compilation-dependencies.cc @@ -21,14 +21,15 @@ namespace compiler { CompilationDependencies::CompilationDependencies(JSHeapBroker* broker, Zone* zone) - : zone_(zone), broker_(broker), dependencies_(zone) {} + : zone_(zone), broker_(broker), dependencies_(zone) { + broker->set_dependencies(this); +} class InitialMapDependency final : public CompilationDependency { public: - InitialMapDependency(const JSFunctionRef& function, const MapRef& initial_map) + InitialMapDependency(JSHeapBroker* broker, const JSFunctionRef& function, + const MapRef& initial_map) : function_(function), initial_map_(initial_map) { - DCHECK(function_.has_initial_map()); - DCHECK(function_.initial_map().equals(initial_map_)); } bool IsValid() const override { @@ -51,19 +52,22 @@ class InitialMapDependency final : public CompilationDependency { class PrototypePropertyDependency final : public CompilationDependency { public: - PrototypePropertyDependency(const JSFunctionRef& function, + PrototypePropertyDependency(JSHeapBroker* broker, + const JSFunctionRef& function, const ObjectRef& prototype) : function_(function), prototype_(prototype) { - DCHECK(function_.has_prototype()); - DCHECK(!function_.PrototypeRequiresRuntimeLookup()); - DCHECK(function_.prototype().equals(prototype_)); + DCHECK(function_.has_instance_prototype(broker->dependencies())); + DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker->dependencies())); + DCHECK(function_.instance_prototype(broker->dependencies()) + .equals(prototype_)); } bool IsValid() const override { Handle<JSFunction> function = function_.object(); - return function->has_prototype_slot() && function->has_prototype() && + return function->has_prototype_slot() && + function->has_instance_prototype() && !function->PrototypeRequiresRuntimeLookup() && - function->prototype() == *prototype_.object(); + function->instance_prototype() == *prototype_.object(); } void PrepareInstall() const override { @@ -75,7 +79,7 @@ class PrototypePropertyDependency final : public CompilationDependency { void Install(Handle<Code> code) const override { SLOW_DCHECK(IsValid()); Handle<JSFunction> function = function_.object(); - DCHECK(function->has_initial_map()); + CHECK(function->has_initial_map()); Handle<Map> initial_map(function->initial_map(), function_.isolate()); DependentCode::InstallDependency(function_.isolate(), code, initial_map, DependentCode::kInitialMapChangedGroup); @@ -338,10 +342,29 @@ class OwnConstantDictionaryPropertyDependency final ObjectRef const value_; }; +class ConsistentJSFunctionViewDependency final : public CompilationDependency { + 
public: + explicit ConsistentJSFunctionViewDependency(const JSFunctionRef& function) + : function_(function) {} + + bool IsValid() const override { + return function_.IsConsistentWithHeapState(); + } + + void Install(Handle<Code> code) const override {} + +#ifdef DEBUG + bool IsConsistentJSFunctionViewDependency() const override { return true; } +#endif + + private: + const JSFunctionRef function_; +}; + class TransitionDependency final : public CompilationDependency { public: explicit TransitionDependency(const MapRef& map) : map_(map) { - DCHECK(!map_.is_deprecated()); + DCHECK(map_.CanBeDeprecated()); } bool IsValid() const override { return !map_.object()->is_deprecated(); } @@ -384,108 +407,107 @@ class PretenureModeDependency final : public CompilationDependency { class FieldRepresentationDependency final : public CompilationDependency { public: - FieldRepresentationDependency(const MapRef& owner, InternalIndex descriptor, + FieldRepresentationDependency(const MapRef& map, InternalIndex descriptor, Representation representation) - : owner_(owner), - descriptor_(descriptor), - representation_(representation) { - } + : map_(map), descriptor_(descriptor), representation_(representation) {} bool IsValid() const override { DisallowGarbageCollection no_heap_allocation; - Handle<Map> owner = owner_.object(); - Isolate* isolate = owner_.isolate(); - - // TODO(v8:11670): Consider turn this back into a CHECK inside the - // constructor, if possible in light of concurrent heap state - // modifications. - if (owner->FindFieldOwner(isolate, descriptor_) != *owner) return false; - - return representation_.Equals(owner->instance_descriptors(isolate) + if (map_.object()->is_deprecated()) return false; + return representation_.Equals(map_.object() + ->instance_descriptors(map_.isolate()) .GetDetails(descriptor_) .representation()); } void Install(Handle<Code> code) const override { SLOW_DCHECK(IsValid()); - DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(), + Isolate* isolate = map_.isolate(); + Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_), + isolate); + CHECK(!owner->is_deprecated()); + CHECK(representation_.Equals(owner->instance_descriptors(isolate) + .GetDetails(descriptor_) + .representation())); + DependentCode::InstallDependency(isolate, code, owner, DependentCode::kFieldRepresentationGroup); } #ifdef DEBUG bool IsFieldRepresentationDependencyOnMap( Handle<Map> const& receiver_map) const override { - return owner_.object().equals(receiver_map); + return map_.object().equals(receiver_map); } #endif private: - MapRef owner_; + MapRef map_; InternalIndex descriptor_; Representation representation_; }; class FieldTypeDependency final : public CompilationDependency { public: - FieldTypeDependency(const MapRef& owner, InternalIndex descriptor, + FieldTypeDependency(const MapRef& map, InternalIndex descriptor, const ObjectRef& type) - : owner_(owner), descriptor_(descriptor), type_(type) {} + : map_(map), descriptor_(descriptor), type_(type) {} bool IsValid() const override { DisallowGarbageCollection no_heap_allocation; - Handle<Map> owner = owner_.object(); - Isolate* isolate = owner_.isolate(); - - // TODO(v8:11670): Consider turn this back into a CHECK inside the - // constructor, if possible in light of concurrent heap state - // modifications. 
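The dependency classes above share one protocol, which the rewritten FieldRepresentationDependency and FieldTypeDependency now follow strictly under concurrent compilation: the constructor only snapshots the expected state, IsValid() re-reads the live heap at commit time, and Install() re-derives and re-checks whatever it needs before registering the code. A minimal standalone sketch of that shape (plain C++ with hypothetical names, not the real V8 classes):

    #include <cassert>

    struct Code {};  // stand-in for a generated-code object

    class Dependency {
     public:
      virtual ~Dependency() = default;
      virtual bool IsValid() const = 0;            // revalidate against live state
      virtual void Install(Code* code) const = 0;  // register for invalidation
    };

    // Assumes: a cell whose value must still equal the snapshot taken when the
    // dependency was recorded. A concurrent mutation in between simply makes
    // IsValid() return false at commit time instead of failing a constructor DCHECK.
    class StableValueDependency final : public Dependency {
     public:
      StableValueDependency(const int* cell, int expected)
          : cell_(cell), expected_(expected) {}
      bool IsValid() const override { return *cell_ == expected_; }
      void Install(Code* code) const override {
        assert(IsValid());  // mirrors the SLOW_DCHECK/CHECKs above
        (void)code;         // ...hook {code} into the cell's dependent-code list...
      }
     private:
      const int* cell_;
      const int expected_;
    };
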
- if (owner->FindFieldOwner(isolate, descriptor_) != *owner) return false; - - Handle<Object> type = type_.object(); - return *type == - owner->instance_descriptors(isolate).GetFieldType(descriptor_); + if (map_.object()->is_deprecated()) return false; + return *type_.object() == map_.object() + ->instance_descriptors(map_.isolate()) + .GetFieldType(descriptor_); } void Install(Handle<Code> code) const override { SLOW_DCHECK(IsValid()); - DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(), + Isolate* isolate = map_.isolate(); + Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_), + isolate); + CHECK(!owner->is_deprecated()); + CHECK_EQ(*type_.object(), + owner->instance_descriptors(isolate).GetFieldType(descriptor_)); + DependentCode::InstallDependency(isolate, code, owner, DependentCode::kFieldTypeGroup); } private: - MapRef owner_; + MapRef map_; InternalIndex descriptor_; ObjectRef type_; }; class FieldConstnessDependency final : public CompilationDependency { public: - FieldConstnessDependency(const MapRef& owner, InternalIndex descriptor) - : owner_(owner), descriptor_(descriptor) {} + FieldConstnessDependency(const MapRef& map, InternalIndex descriptor) + : map_(map), descriptor_(descriptor) {} bool IsValid() const override { DisallowGarbageCollection no_heap_allocation; - Handle<Map> owner = owner_.object(); - Isolate* isolate = owner_.isolate(); - - // TODO(v8:11670): Consider turn this back into a CHECK inside the - // constructor, if possible in light of concurrent heap state - // modifications. - if (owner->FindFieldOwner(isolate, descriptor_) != *owner) return false; - - return PropertyConstness::kConst == owner->instance_descriptors(isolate) - .GetDetails(descriptor_) - .constness(); + if (map_.object()->is_deprecated()) return false; + return PropertyConstness::kConst == + map_.object() + ->instance_descriptors(map_.isolate()) + .GetDetails(descriptor_) + .constness(); } void Install(Handle<Code> code) const override { SLOW_DCHECK(IsValid()); - DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(), + Isolate* isolate = map_.isolate(); + Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_), + isolate); + CHECK(!owner->is_deprecated()); + CHECK_EQ(PropertyConstness::kConst, owner->instance_descriptors(isolate) + .GetDetails(descriptor_) + .constness()); + DependentCode::InstallDependency(isolate, code, owner, DependentCode::kFieldConstGroup); } private: - MapRef owner_; + MapRef map_; InternalIndex descriptor_; }; @@ -523,9 +545,7 @@ class GlobalPropertyDependency final : public CompilationDependency { class ProtectorDependency final : public CompilationDependency { public: - explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) { - DCHECK_EQ(cell_.value().AsSmi(), Protectors::kProtectorValid); - } + explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {} bool IsValid() const override { Handle<PropertyCell> cell = cell_.object(); @@ -638,23 +658,20 @@ void CompilationDependencies::RecordDependency( MapRef CompilationDependencies::DependOnInitialMap( const JSFunctionRef& function) { - DCHECK(!function.IsNeverSerializedHeapObject()); - MapRef map = function.initial_map(); - RecordDependency(zone_->New<InitialMapDependency>(function, map)); + MapRef map = function.initial_map(this); + RecordDependency(zone_->New<InitialMapDependency>(broker_, function, map)); return map; } ObjectRef CompilationDependencies::DependOnPrototypeProperty( const JSFunctionRef& function) { - 
DCHECK(!function.IsNeverSerializedHeapObject()); - ObjectRef prototype = function.prototype(); + ObjectRef prototype = function.instance_prototype(this); RecordDependency( - zone_->New<PrototypePropertyDependency>(function, prototype)); + zone_->New<PrototypePropertyDependency>(broker_, function, prototype)); return prototype; } void CompilationDependencies::DependOnStableMap(const MapRef& map) { - DCHECK(!map.IsNeverSerializedHeapObject()); if (map.CanTransition()) { RecordDependency(zone_->New<StableMapDependency>(map)); } @@ -677,11 +694,7 @@ AllocationType CompilationDependencies::DependOnPretenureMode( PropertyConstness CompilationDependencies::DependOnFieldConstness( const MapRef& map, InternalIndex descriptor) { - DCHECK(!map.IsNeverSerializedHeapObject()); - MapRef owner = map.FindFieldOwner(descriptor); - DCHECK(!owner.IsNeverSerializedHeapObject()); - PropertyConstness constness = - owner.GetPropertyDetails(descriptor).constness(); + PropertyConstness constness = map.GetPropertyDetails(descriptor).constness(); if (constness == PropertyConstness::kMutable) return constness; // If the map can have fast elements transitions, then the field can be only @@ -696,7 +709,7 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness( } DCHECK_EQ(constness, PropertyConstness::kConst); - RecordDependency(zone_->New<FieldConstnessDependency>(owner, descriptor)); + RecordDependency(zone_->New<FieldConstnessDependency>(map, descriptor)); return PropertyConstness::kConst; } @@ -708,7 +721,7 @@ void CompilationDependencies::DependOnGlobalProperty( } bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) { - cell.SerializeAsProtector(); + cell.CacheAsProtector(); if (cell.value().AsSmi() != Protectors::kProtectorValid) return false; RecordDependency(zone_->New<ProtectorDependency>(cell)); return true; @@ -783,12 +796,6 @@ void CompilationDependencies::DependOnOwnConstantDictionaryProperty( } bool CompilationDependencies::Commit(Handle<Code> code) { - // Dependencies are context-dependent. In the future it may be possible to - // restore them in the consumer native context, but for now they are - // disabled. - CHECK_IMPLIES(broker_->is_native_context_independent(), - dependencies_.empty()); - for (auto dep : dependencies_) { if (!dep->IsValid()) { dependencies_.clear(); @@ -812,17 +819,27 @@ bool CompilationDependencies::Commit(Handle<Code> code) { } // It is even possible that a GC during the above installations invalidated - // one of the dependencies. However, this should only affect pretenure mode - // dependencies, which we assert below. It is safe to return successfully in - // these cases, because once the code gets executed it will do a stack check - // that triggers its deoptimization. + // one of the dependencies. However, this should only affect + // + // 1. pretenure mode dependencies, or + // 2. function consistency dependencies, + // + // which we assert below. It is safe to return successfully in these cases, + // because + // + // 1. once the code gets executed it will do a stack check that triggers its + // deoptimization. + // 2. since the function state was deemed consistent above, that means the + // compilation saw a self-consistent state of the jsfunction. 
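Commit() is then a two-phase walk matching the comment above: first validate every recorded dependency against the now-current heap, aborting the whole compilation on the first stale assumption, and only then install. A rough sketch continuing the hypothetical Dependency interface from the earlier snippet (the real code also runs a PrepareInstall() phase, elided here):

    #include <vector>

    bool CommitSketch(std::vector<const Dependency*>& deps, Code* code) {
      for (const Dependency* dep : deps) {
        if (!dep->IsValid()) {
          deps.clear();  // one stale assumption discards the generated code
          return false;
        }
      }
      for (const Dependency* dep : deps) {
        dep->Install(code);  // from here on, heap mutation triggers deopt
      }
      return true;
    }
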
if (FLAG_stress_gc_during_compilation) { broker_->isolate()->heap()->PreciseCollectAllGarbage( Heap::kForcedGC, GarbageCollectionReason::kTesting, kNoGCCallbackFlags); } #ifdef DEBUG for (auto dep : dependencies_) { - CHECK_IMPLIES(!dep->IsValid(), dep->IsPretenureModeDependency()); + CHECK_IMPLIES(!dep->IsValid(), + dep->IsPretenureModeDependency() || + dep->IsConsistentJSFunctionViewDependency()); } #endif @@ -847,29 +864,22 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map, } } // namespace -template <class MapContainer> void CompilationDependencies::DependOnStablePrototypeChains( - MapContainer const& receiver_maps, WhereToStart start, + ZoneVector<MapRef> const& receiver_maps, WhereToStart start, base::Optional<JSObjectRef> last_prototype) { - for (auto map : receiver_maps) { - MapRef receiver_map = MakeRef(broker_, map); - if (start == kStartAtReceiver) DependOnStableMap(receiver_map); + for (MapRef receiver_map : receiver_maps) { if (receiver_map.IsPrimitiveMap()) { // Perform the implicit ToObject for primitives here. // Implemented according to ES6 section 7.3.2 GetV (V, P). + // Note: Keep sync'd with AccessInfoFactory::ComputePropertyAccessInfo. base::Optional<JSFunctionRef> constructor = broker_->target_native_context().GetConstructorFunction(receiver_map); - if (constructor.has_value()) receiver_map = constructor->initial_map(); + receiver_map = constructor.value().initial_map(this); } + if (start == kStartAtReceiver) DependOnStableMap(receiver_map); DependOnStablePrototypeChain(this, receiver_map, last_prototype); } } -template void CompilationDependencies::DependOnStablePrototypeChains( - ZoneVector<Handle<Map>> const& receiver_maps, WhereToStart start, - base::Optional<JSObjectRef> last_prototype); -template void CompilationDependencies::DependOnStablePrototypeChains( - ZoneHandleSet<Map> const& receiver_maps, WhereToStart start, - base::Optional<JSObjectRef> last_prototype); void CompilationDependencies::DependOnElementsKinds( const AllocationSiteRef& site) { @@ -882,6 +892,12 @@ void CompilationDependencies::DependOnElementsKinds( CHECK_EQ(current.nested_site().AsSmi(), 0); } +void CompilationDependencies::DependOnConsistentJSFunctionView( + const JSFunctionRef& function) { + DCHECK(broker_->is_concurrent_inlining()); + RecordDependency(zone_->New<ConsistentJSFunctionViewDependency>(function)); +} + SlackTrackingPrediction::SlackTrackingPrediction(MapRef initial_map, int instance_size) : instance_size_(instance_size), @@ -893,20 +909,19 @@ SlackTrackingPrediction CompilationDependencies::DependOnInitialMapInstanceSizePrediction( const JSFunctionRef& function) { MapRef initial_map = DependOnInitialMap(function); - int instance_size = function.InitialMapInstanceSizeWithMinSlack(); + int instance_size = function.InitialMapInstanceSizeWithMinSlack(this); // Currently, we always install the prediction dependency. If this turns out // to be too expensive, we can only install the dependency if slack // tracking is active. 
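The loop above is easiest to read together with the helper it calls: after the implicit ToObject step replaces a primitive receiver's map with its wrapper constructor's initial map, a stability dependency is taken on every map along the prototype chain until the chain (or {last_prototype}) ends. A simplified standalone model of that walk (hypothetical Map type, not the real MapRef API):

    #include <optional>
    #include <vector>

    struct Map {
      const Map* prototype_map;  // nullptr once the chain reaches null
    };

    // Collects every map that would get a DependOnStableMap() call, from the
    // receiver's prototype up to (and including) {last_prototype_map}.
    void DependOnStableChainSketch(const Map* receiver_map,
                                   std::optional<const Map*> last_prototype_map,
                                   std::vector<const Map*>* recorded) {
      for (const Map* m = receiver_map->prototype_map; m != nullptr;
           m = m->prototype_map) {
        recorded->push_back(m);  // i.e. deps->DependOnStableMap(m)
        if (last_prototype_map.has_value() && m == *last_prototype_map) return;
      }
    }
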
RecordDependency(zone_->New<InitialMapInstanceSizePredictionDependency>( function, instance_size)); - DCHECK_LE(instance_size, function.initial_map().instance_size()); + CHECK_LE(instance_size, function.initial_map(this).instance_size()); return SlackTrackingPrediction(initial_map, instance_size); } CompilationDependency const* CompilationDependencies::TransitionDependencyOffTheRecord( const MapRef& target_map) const { - DCHECK(!target_map.IsNeverSerializedHeapObject()); if (target_map.CanBeDeprecated()) { return zone_->New<TransitionDependency>(target_map); } else { @@ -917,26 +932,16 @@ CompilationDependencies::TransitionDependencyOffTheRecord( CompilationDependency const* CompilationDependencies::FieldRepresentationDependencyOffTheRecord( - const MapRef& map, InternalIndex descriptor) const { - DCHECK(!map.IsNeverSerializedHeapObject()); - MapRef owner = map.FindFieldOwner(descriptor); - DCHECK(!owner.IsNeverSerializedHeapObject()); - PropertyDetails details = owner.GetPropertyDetails(descriptor); - CHECK(details.representation().Equals( - map.GetPropertyDetails(descriptor).representation())); - return zone_->New<FieldRepresentationDependency>(owner, descriptor, - details.representation()); + const MapRef& map, InternalIndex descriptor, + Representation representation) const { + return zone_->New<FieldRepresentationDependency>(map, descriptor, + representation); } CompilationDependency const* CompilationDependencies::FieldTypeDependencyOffTheRecord( - const MapRef& map, InternalIndex descriptor) const { - DCHECK(!map.IsNeverSerializedHeapObject()); - MapRef owner = map.FindFieldOwner(descriptor); - DCHECK(!owner.IsNeverSerializedHeapObject()); - ObjectRef type = owner.GetFieldType(descriptor); - CHECK(type.equals(map.GetFieldType(descriptor))); - return zone_->New<FieldTypeDependency>(owner, descriptor, type); + const MapRef& map, InternalIndex descriptor, const ObjectRef& type) const { + return zone_->New<FieldTypeDependency>(map, descriptor, type); } } // namespace compiler diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h index 0e7b02cbfb..be507c6843 100644 --- a/deps/v8/src/compiler/compilation-dependencies.h +++ b/deps/v8/src/compiler/compilation-dependencies.h @@ -34,10 +34,6 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code); - // TODO(jgruber): Remove this method once GetPropertyAccessInfo no longer - // uses the two-phase approach between serialization and compilation. - void ClearForConcurrentGetPropertyAccessInfo() { dependencies_.clear(); } - // Return the initial map of {function} and record the assumption that it // stays the initial map. MapRef DependOnInitialMap(const JSFunctionRef& function); @@ -116,15 +112,16 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // For each given map, depend on the stability of (the maps of) all prototypes // up to (and including) the {last_prototype}. - template <class MapContainer> void DependOnStablePrototypeChains( - MapContainer const& receiver_maps, WhereToStart start, + ZoneVector<MapRef> const& receiver_maps, WhereToStart start, base::Optional<JSObjectRef> last_prototype = base::Optional<JSObjectRef>()); // Like DependOnElementsKind but also applies to all nested allocation sites. 
void DependOnElementsKinds(const AllocationSiteRef& site); + void DependOnConsistentJSFunctionView(const JSFunctionRef& function); + // Predict the final instance size for {function}'s initial map and record // the assumption that this prediction is correct. In addition, register // the initial map dependency. This method returns the {function}'s the @@ -148,12 +145,14 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // Gather the assumption that the field representation of a field does not // change. The field is identified by the arguments. CompilationDependency const* FieldRepresentationDependencyOffTheRecord( - const MapRef& map, InternalIndex descriptor) const; + const MapRef& map, InternalIndex descriptor, + Representation representation) const; // Gather the assumption that the field type of a field does not change. The // field is identified by the arguments. CompilationDependency const* FieldTypeDependencyOffTheRecord( - const MapRef& map, InternalIndex descriptor) const; + const MapRef& map, InternalIndex descriptor, + const ObjectRef& /* Contains a FieldType underneath. */ type) const; private: Zone* const zone_; diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h index 1cacb4d6df..852c7b7640 100644 --- a/deps/v8/src/compiler/compilation-dependency.h +++ b/deps/v8/src/compiler/compilation-dependency.h @@ -26,6 +26,7 @@ class CompilationDependency : public ZoneObject { Handle<Map> const& receiver_map) const { return false; } + virtual bool IsConsistentJSFunctionViewDependency() const { return false; } #endif }; diff --git a/deps/v8/src/compiler/compiler-source-position-table.h b/deps/v8/src/compiler/compiler-source-position-table.h index 699402c8ef..f66d132df1 100644 --- a/deps/v8/src/compiler/compiler-source-position-table.h +++ b/deps/v8/src/compiler/compiler-source-position-table.h @@ -62,10 +62,14 @@ class V8_EXPORT_PRIVATE SourcePositionTable final private: class Decorator; + static SourcePosition UnknownSourcePosition(Zone* zone) { + return SourcePosition::Unknown(); + } + Graph* const graph_; Decorator* decorator_; SourcePosition current_position_; - NodeAuxData<SourcePosition, SourcePosition::Unknown> table_; + NodeAuxData<SourcePosition, UnknownSourcePosition> table_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc index dadbeb0f7b..b5df8b542b 100644 --- a/deps/v8/src/compiler/csa-load-elimination.cc +++ b/deps/v8/src/compiler/csa-load-elimination.cc @@ -74,99 +74,261 @@ bool Subsumes(MachineRepresentation from, MachineRepresentation to) { return false; } -bool ObjectMayAlias(Node* a, Node* b) { - if (a != b) { - if (NodeProperties::IsFreshObject(b)) std::swap(a, b); - if (NodeProperties::IsFreshObject(a) && - (NodeProperties::IsFreshObject(b) || - b->opcode() == IrOpcode::kParameter || - b->opcode() == IrOpcode::kLoadImmutable || - IrOpcode::IsConstantOpcode(b->opcode()))) { - return false; - } - } - return true; +bool IsConstantObject(Node* object) { + return object->opcode() == IrOpcode::kParameter || + object->opcode() == IrOpcode::kLoadImmutable || + NodeProperties::IsConstant(object); } -bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2, - MachineRepresentation repr2) { - IntPtrMatcher matcher1(offset1); - IntPtrMatcher matcher2(offset2); - // If either of the offsets is variable, accesses may alias - if (!matcher1.HasResolvedValue() || !matcher2.HasResolvedValue()) { - return 
true; - } - // Otherwise, we return whether accesses overlap - intptr_t start1 = matcher1.ResolvedValue(); - intptr_t end1 = start1 + ElementSizeInBytes(repr1); - intptr_t start2 = matcher2.ResolvedValue(); - intptr_t end2 = start2 + ElementSizeInBytes(repr2); - return !(end1 <= start2 || end2 <= start1); +bool IsFreshObject(Node* object) { + DCHECK_IMPLIES(NodeProperties::IsFreshObject(object), + !IsConstantObject(object)); + return NodeProperties::IsFreshObject(object); } } // namespace CsaLoadEliminationHelpers namespace Helpers = CsaLoadEliminationHelpers; -void CsaLoadElimination::AbstractState::Merge(AbstractState const* that, - Zone* zone) { +// static +template <typename OuterKey> +void CsaLoadElimination::AbstractState::IntersectWith( + OuterMap<OuterKey>& to, const OuterMap<OuterKey>& from) { FieldInfo empty_info; - for (std::pair<Field, FieldInfo> entry : field_infos_) { - if (that->field_infos_.Get(entry.first) != entry.second) { - field_infos_.Set(entry.first, empty_info); + for (const std::pair<OuterKey, InnerMap>& to_map : to) { + InnerMap to_map_copy(to_map.second); + OuterKey key = to_map.first; + InnerMap current_map = from.Get(key); + for (std::pair<Node*, FieldInfo> info : to_map.second) { + if (current_map.Get(info.first) != info.second) { + to_map_copy.Set(info.first, empty_info); + } } + to.Set(key, to_map_copy); } } +void CsaLoadElimination::AbstractState::IntersectWith( + AbstractState const* that) { + IntersectWith(fresh_entries_, that->fresh_entries_); + IntersectWith(constant_entries_, that->constant_entries_); + IntersectWith(arbitrary_entries_, that->arbitrary_entries_); + IntersectWith(fresh_unknown_entries_, that->fresh_unknown_entries_); + IntersectWith(constant_unknown_entries_, that->constant_unknown_entries_); + IntersectWith(arbitrary_unknown_entries_, that->arbitrary_unknown_entries_); +} + CsaLoadElimination::AbstractState const* -CsaLoadElimination::AbstractState::KillField(Node* kill_object, - Node* kill_offset, - MachineRepresentation kill_repr, - Zone* zone) const { - FieldInfo empty_info; - AbstractState* that = zone->New<AbstractState>(*this); - for (std::pair<Field, FieldInfo> entry : that->field_infos_) { - Field field = entry.first; - MachineRepresentation field_repr = entry.second.representation; - if (Helpers::OffsetMayAlias(kill_offset, kill_repr, field.second, - field_repr) && - Helpers::ObjectMayAlias(kill_object, field.first)) { - that->field_infos_.Set(field, empty_info); +CsaLoadElimination::AbstractState::KillField(Node* object, Node* offset, + MachineRepresentation repr) const { + AbstractState* result = zone_->New<AbstractState>(*this); + UnknownOffsetInfos empty_unknown(zone_, InnerMap(zone_)); + IntPtrMatcher m(offset); + if (m.HasResolvedValue()) { + uint32_t num_offset = static_cast<uint32_t>(m.ResolvedValue()); + if (Helpers::IsFreshObject(object)) { + // May alias with: + // - The same object/offset + // - Arbitrary objects with the same offset + // - The same object, unknown offset + // - Arbitrary objects with unknown offset + result->KillOffsetInFresh(object, num_offset, repr); + KillOffset(result->arbitrary_entries_, num_offset, repr, zone_); + result->fresh_unknown_entries_.Set(object, InnerMap(zone_)); + result->arbitrary_unknown_entries_ = empty_unknown; + } else if (Helpers::IsConstantObject(object)) { + // May alias with: + // - Constant/arbitrary objects with the same offset + // - Constant/arbitrary objects with unknown offset + KillOffset(result->constant_entries_, num_offset, repr, zone_); +
KillOffset(result->arbitrary_entries_, num_offset, repr, zone_); + result->constant_unknown_entries_ = empty_unknown; + result->arbitrary_unknown_entries_ = empty_unknown; + } else { + // May alias with: + // - Any object with the same or unknown offset + KillOffset(result->fresh_entries_, num_offset, repr, zone_); + KillOffset(result->constant_entries_, num_offset, repr, zone_); + KillOffset(result->arbitrary_entries_, num_offset, repr, zone_); + result->fresh_unknown_entries_ = empty_unknown; + result->constant_unknown_entries_ = empty_unknown; + result->arbitrary_unknown_entries_ = empty_unknown; + } + } else { + ConstantOffsetInfos empty_constant(zone_, InnerMap(zone_)); + if (Helpers::IsFreshObject(object)) { + // May alias with: + // - The same object with any known/unknown offset + // - Arbitrary objects with any known/unknown offset + for (auto map : result->fresh_entries_) { + // TODO(manoskouk): Consider adding a map from fresh objects to offsets + // to implement this efficiently. + InnerMap map_copy(map.second); + map_copy.Set(object, FieldInfo()); + result->fresh_entries_.Set(map.first, map_copy); + } + result->fresh_unknown_entries_.Set(object, InnerMap(zone_)); + result->arbitrary_entries_ = empty_constant; + result->arbitrary_unknown_entries_ = empty_unknown; + } else if (Helpers::IsConstantObject(object)) { + // May alias with: + // - Constant/arbitrary objects with any known/unknown offset + result->constant_entries_ = empty_constant; + result->constant_unknown_entries_ = empty_unknown; + result->arbitrary_entries_ = empty_constant; + result->arbitrary_unknown_entries_ = empty_unknown; + } else { + // May alias with anything. Clear the state. + return zone_->New<AbstractState>(zone_); + } } - return that; + + return result; } CsaLoadElimination::AbstractState const* CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset, - CsaLoadElimination::FieldInfo info, - Zone* zone) const { - AbstractState* that = zone->New<AbstractState>(*this); - that->field_infos_.Set({object, offset}, info); - return that; + Node* value, + MachineRepresentation repr) const { + AbstractState* new_state = zone_->New<AbstractState>(*this); + IntPtrMatcher m(offset); + if (m.HasResolvedValue()) { + uint32_t offset_num = static_cast<uint32_t>(m.ResolvedValue()); + ConstantOffsetInfos& infos = Helpers::IsFreshObject(object) + ? new_state->fresh_entries_ + : Helpers::IsConstantObject(object) + ? new_state->constant_entries_ + : new_state->arbitrary_entries_; + Update(infos, offset_num, object, FieldInfo(value, repr)); + } else { + UnknownOffsetInfos& infos = + Helpers::IsFreshObject(object) + ? new_state->fresh_unknown_entries_ + : Helpers::IsConstantObject(object) + ? new_state->constant_unknown_entries_ + : new_state->arbitrary_unknown_entries_; + Update(infos, object, offset, FieldInfo(value, repr)); + } + return new_state; } CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup( Node* object, Node* offset) const { - if (object->IsDead()) { - return {}; + IntPtrMatcher m(offset); + if (m.HasResolvedValue()) { + uint32_t num_offset = static_cast<uint32_t>(m.ResolvedValue()); + const ConstantOffsetInfos& infos = Helpers::IsFreshObject(object) + ? fresh_entries_ + : Helpers::IsConstantObject(object) + ? constant_entries_ + : arbitrary_entries_; + return infos.Get(num_offset).Get(object); + } else { + const UnknownOffsetInfos& infos = Helpers::IsFreshObject(object) + ? fresh_unknown_entries_ + : Helpers::IsConstantObject(object) + ?
constant_unknown_entries_ + : arbitrary_unknown_entries_; + return infos.Get(object).Get(offset); } - return field_infos_.Get({object, offset}); } -void CsaLoadElimination::AbstractState::Print() const { - for (std::pair<Field, FieldInfo> entry : field_infos_) { - Field field = entry.first; - Node* object = field.first; - Node* offset = field.second; - FieldInfo info = entry.second; - PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(), - object->op()->mnemonic(), info.value->id(), - info.value->op()->mnemonic(), - MachineReprToString(info.representation)); +// static +// Kill all elements in {infos} that overlap with an element with {offset} and +// size {ElementSizeInBytes(repr)}. +void CsaLoadElimination::AbstractState::KillOffset(ConstantOffsetInfos& infos, + uint32_t offset, + MachineRepresentation repr, + Zone* zone) { + // All elements in the range [{offset}, {offset + ElementSizeInBytes(repr)}) + // are in the killed range. We do not need to traverse the inner maps, we can + // just clear them. + for (int i = 0; i < ElementSizeInBytes(repr); i++) { + infos.Set(offset + i, InnerMap(zone)); + } + + // Now we have to remove all elements in earlier offsets that overlap with an + // element in {offset}. + // The earliest offset that may overlap with {offset} is + // {kMaximumReprSizeInBytes - 1} before. + uint32_t initial_offset = offset >= kMaximumReprSizeInBytes - 1 + ? offset - (kMaximumReprSizeInBytes - 1) + : 0; + // For all offsets from {initial_offset} to {offset}, we traverse the + // respective inner map, and reset all elements that are large enough to + // overlap with {offset}. + for (uint32_t i = initial_offset; i < offset; i++) { + InnerMap map_copy(infos.Get(i)); + for (const std::pair<Node*, FieldInfo> info : infos.Get(i)) { + if (info.second.representation != MachineRepresentation::kNone && + ElementSizeInBytes(info.second.representation) > + static_cast<int>(offset - i)) { + map_copy.Set(info.first, {}); + } + } + infos.Set(i, map_copy); + } +} + +void CsaLoadElimination::AbstractState::KillOffsetInFresh( + Node* const object, uint32_t offset, MachineRepresentation repr) { + for (int i = 0; i < ElementSizeInBytes(repr); i++) { + Update(fresh_entries_, offset + i, object, {}); + } + uint32_t initial_offset = offset >= kMaximumReprSizeInBytes - 1 + ? 
offset - (kMaximumReprSizeInBytes - 1) + : 0; + for (uint32_t i = initial_offset; i < offset; i++) { + const FieldInfo& info = fresh_entries_.Get(i).Get(object); + if (info.representation != MachineRepresentation::kNone && + ElementSizeInBytes(info.representation) > + static_cast<int>(offset - i)) { + Update(fresh_entries_, i, object, {}); + } + } +} + +// static +void CsaLoadElimination::AbstractState::Print( + const CsaLoadElimination::AbstractState::ConstantOffsetInfos& infos) { + for (const auto outer_entry : infos) { + for (const auto inner_entry : outer_entry.second) { + Node* object = inner_entry.first; + uint32_t offset = outer_entry.first; + FieldInfo info = inner_entry.second; + PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset, + object->op()->mnemonic(), info.value->id(), + info.value->op()->mnemonic(), + MachineReprToString(info.representation)); + } + } +} + +// static +void CsaLoadElimination::AbstractState::Print( + const CsaLoadElimination::AbstractState::UnknownOffsetInfos& infos) { + for (const auto outer_entry : infos) { + for (const auto inner_entry : outer_entry.second) { + Node* object = outer_entry.first; + Node* offset = inner_entry.first; + FieldInfo info = inner_entry.second; + PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(), + object->op()->mnemonic(), info.value->id(), + info.value->op()->mnemonic(), + MachineReprToString(info.representation)); + } } } +void CsaLoadElimination::AbstractState::Print() const { + Print(fresh_entries_); + Print(constant_entries_); + Print(arbitrary_entries_); + Print(fresh_unknown_entries_); + Print(constant_unknown_entries_); + Print(arbitrary_unknown_entries_); +} + Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node, ObjectAccess const& access) { Node* object = NodeProperties::GetValueInput(node, 0); @@ -189,8 +351,7 @@ Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node, return Replace(replacement); } } - FieldInfo info(node, representation); - state = state->AddField(object, offset, info, zone()); + state = state->AddField(object, offset, node, representation); return UpdateState(node, state); } @@ -204,9 +365,9 @@ Reduction CsaLoadElimination::ReduceStoreToObject(Node* node, AbstractState const* state = node_states_.Get(effect); if (state == nullptr) return NoChange(); - FieldInfo info(value, access.machine_type.representation()); - state = state->KillField(object, offset, info.representation, zone()); - state = state->AddField(object, offset, info, zone()); + MachineRepresentation repr = access.machine_type.representation(); + state = state->KillField(object, offset, repr); + state = state->AddField(object, offset, value, repr); return UpdateState(node, state); } @@ -232,12 +393,14 @@ Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) { if (node_states_.Get(effect) == nullptr) return NoChange(); } - // Make a copy of the first input's state and merge with the state + // Make a copy of the first input's state and intersect it with the state // from other inputs. + // TODO(manoskouk): Consider computing phis for at least a subset of the + // state. 
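The offset arithmetic in KillOffset and KillOffsetInFresh above is a plain interval-overlap test: a store of size S at constant offset O invalidates every entry whose byte range [o, o + s) intersects [O, O + S), and since no representation is wider than kMaximumReprSizeInBytes, only entries from O - (kMaximumReprSizeInBytes - 1) upwards need to be visited. A standalone illustration of the predicate (not the V8 code itself):

    #include <cassert>
    #include <cstdint>

    // True iff the byte ranges [a, a + size_a) and [b, b + size_b) intersect.
    bool Overlaps(uint32_t a, int size_a, uint32_t b, int size_b) {
      return a < b + size_b && b < a + size_a;
    }

    int main() {
      // A Word64 store at offset 8 covers bytes [8, 16).
      assert(Overlaps(8, 8, 6, 4));    // Word32 entry at 6 covers [6, 10): killed
      assert(!Overlaps(8, 8, 7, 1));   // Word8 entry at 7 covers [7, 8): survives
      assert(!Overlaps(8, 8, 16, 4));  // entry at 16 starts past the store
    }
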
AbstractState* state = zone()->New<AbstractState>(*state0); for (int i = 1; i < input_count; ++i) { Node* const input = NodeProperties::GetEffectInput(node, i); - state->Merge(node_states_.Get(input), zone()); + state->IntersectWith(node_states_.Get(input)); } return UpdateState(node, state); } @@ -298,11 +461,10 @@ Reduction CsaLoadElimination::PropagateInputState(Node* node) { CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState( Node* node, AbstractState const* state) const { DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi); - Node* const control = NodeProperties::GetControlInput(node); ZoneQueue<Node*> queue(zone()); ZoneSet<Node*> visited(zone()); visited.insert(node); - for (int i = 1; i < control->InputCount(); ++i) { + for (int i = 1; i < node->InputCount() - 1; ++i) { queue.push(node->InputAt(i)); } while (!queue.empty()) { diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h index bd314cad8e..82ca580329 100644 --- a/deps/v8/src/compiler/csa-load-elimination.h +++ b/deps/v8/src/compiler/csa-load-elimination.h @@ -61,28 +61,74 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final MachineRepresentation representation = MachineRepresentation::kNone; }; + // Design doc: https://bit.ly/36MfD6Y class AbstractState final : public ZoneObject { public: - explicit AbstractState(Zone* zone) : field_infos_(zone) {} + explicit AbstractState(Zone* zone) + : zone_(zone), + fresh_entries_(zone, InnerMap(zone)), + constant_entries_(zone, InnerMap(zone)), + arbitrary_entries_(zone, InnerMap(zone)), + fresh_unknown_entries_(zone, InnerMap(zone)), + constant_unknown_entries_(zone, InnerMap(zone)), + arbitrary_unknown_entries_(zone, InnerMap(zone)) {} bool Equals(AbstractState const* that) const { - return field_infos_ == that->field_infos_; + return fresh_entries_ == that->fresh_entries_ && + constant_entries_ == that->constant_entries_ && + arbitrary_entries_ == that->arbitrary_entries_ && + fresh_unknown_entries_ == that->fresh_unknown_entries_ && + constant_unknown_entries_ == that->constant_unknown_entries_ && + arbitrary_unknown_entries_ == that->arbitrary_unknown_entries_; } - void Merge(AbstractState const* that, Zone* zone); + void IntersectWith(AbstractState const* that); AbstractState const* KillField(Node* object, Node* offset, - MachineRepresentation repr, - Zone* zone) const; - AbstractState const* AddField(Node* object, Node* offset, FieldInfo info, - Zone* zone) const; + MachineRepresentation repr) const; + AbstractState const* AddField(Node* object, Node* offset, Node* value, + MachineRepresentation repr) const; FieldInfo Lookup(Node* object, Node* offset) const; void Print() const; private: - using Field = std::pair<Node*, Node*>; - using FieldInfos = PersistentMap<Field, FieldInfo>; - FieldInfos field_infos_; + Zone* zone_; + using InnerMap = PersistentMap<Node*, FieldInfo>; + template <typename OuterKey> + using OuterMap = PersistentMap<OuterKey, InnerMap>; + + // offset -> object -> info + using ConstantOffsetInfos = OuterMap<uint32_t>; + ConstantOffsetInfos fresh_entries_; + ConstantOffsetInfos constant_entries_; + ConstantOffsetInfos arbitrary_entries_; + + // object -> offset -> info + using UnknownOffsetInfos = OuterMap<Node*>; + UnknownOffsetInfos fresh_unknown_entries_; + UnknownOffsetInfos constant_unknown_entries_; + UnknownOffsetInfos arbitrary_unknown_entries_; + + // Update {map} so that {map.Get(outer_key).Get(inner_key)} returns {info}. 
+ template <typename OuterKey> + static void Update(OuterMap<OuterKey>& map, OuterKey outer_key, + Node* inner_key, FieldInfo info) { + InnerMap map_copy(map.Get(outer_key)); + map_copy.Set(inner_key, info); + map.Set(outer_key, map_copy); + } + + // Kill all elements in {infos} which may alias with offset. + static void KillOffset(ConstantOffsetInfos& infos, uint32_t offset, + MachineRepresentation repr, Zone* zone); + void KillOffsetInFresh(Node* object, uint32_t offset, + MachineRepresentation repr); + + template <typename OuterKey> + static void IntersectWith(OuterMap<OuterKey>& to, + const OuterMap<OuterKey>& from); + static void Print(const ConstantOffsetInfos& infos); + static void Print(const UnknownOffsetInfos& infos); }; Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access); diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index fe2774f55e..d7a0ca62dd 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -197,6 +197,9 @@ class EffectControlLinearizer { void LowerTransitionElementsKind(Node* node); Node* LowerLoadFieldByIndex(Node* node); Node* LowerLoadMessage(Node* node); + Node* AdaptFastCallTypedArrayArgument(Node* node, + ElementsKind expected_elements_kind, + GraphAssemblerLabel<0>* bailout); Node* AdaptFastCallArgument(Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error); @@ -5004,16 +5007,102 @@ MachineType MachineTypeFor(CTypeInfo::Type type) { } } // namespace +Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument( + Node* node, ElementsKind expected_elements_kind, + GraphAssemblerLabel<0>* bailout) { + Node* value_map = __ LoadField(AccessBuilder::ForMap(), node); + Node* value_instance_type = + __ LoadField(AccessBuilder::ForMapInstanceType(), value_map); + Node* value_is_typed_array = __ Word32Equal( + value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE)); + __ GotoIfNot(value_is_typed_array, bailout); + + Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), value_map); + Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask); + Node* andit = __ Word32And(bit_field2, mask); + Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift); + Node* kind = __ Word32Shr(andit, shift); + + Node* value_is_expected_elements_kind = + __ Word32Equal(kind, __ Int32Constant(expected_elements_kind)); + __ GotoIfNot(value_is_expected_elements_kind, bailout); + + Node* buffer = + __ LoadField(AccessBuilder::ForJSArrayBufferViewBuffer(), node); + Node* buffer_bit_field = + __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), buffer); + + // Go to the slow path if the {buffer} was detached. + Node* buffer_is_not_detached = __ Word32Equal( + __ Word32And(buffer_bit_field, + __ Int32Constant(JSArrayBuffer::WasDetachedBit::kMask)), + __ ZeroConstant()); + __ GotoIfNot(buffer_is_not_detached, bailout); + + // Go to the slow path if the {buffer} is shared. + Node* buffer_is_not_shared = __ Word32Equal( + __ Word32And(buffer_bit_field, + __ Int32Constant(JSArrayBuffer::IsSharedBit::kMask)), + __ ZeroConstant()); + __ GotoIfNot(buffer_is_not_shared, bailout); + + // Unpack the store and length, and store them to a struct + // FastApiTypedArray. + Node* external_pointer = + __ LoadField(AccessBuilder::ForJSTypedArrayExternalPointer(), node); + + // Load the base pointer for the buffer. 
This will always be Smi + // zero unless we allow on-heap TypedArrays, which is only the case + // for Chrome. Node and Electron both set this limit to 0. Setting + // the base to Smi zero here allows the BuildTypedArrayDataPointer + // to optimize away the tricky part of the access later. + Node* base_pointer = + __ LoadField(AccessBuilder::ForJSTypedArrayBasePointer(), node); + if (JSTypedArray::kMaxSizeInHeap == 0) { + base_pointer = jsgraph()->ZeroConstant(); + } + Node* data_ptr = BuildTypedArrayDataPointer(base_pointer, external_pointer); + Node* length_in_bytes = + __ LoadField(AccessBuilder::ForJSTypedArrayLength(), node); + + // We hard-code int32_t here, because all specializations of + // FastApiTypedArray have the same size. + constexpr int kAlign = alignof(FastApiTypedArray<int32_t>); + constexpr int kSize = sizeof(FastApiTypedArray<int32_t>); + static_assert(kAlign == alignof(FastApiTypedArray<double>), + "Alignment mismatch between different specializations of " + "FastApiTypedArray"); + static_assert(kSize == sizeof(FastApiTypedArray<double>), + "Size mismatch between different specializations of " + "FastApiTypedArray"); + static_assert( + kSize == sizeof(uintptr_t) + sizeof(size_t), + "The size of " + "FastApiTypedArray isn't equal to the sum of its expected members."); + Node* stack_slot = __ StackSlot(kSize, kAlign); + + __ Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + stack_slot, 0, length_in_bytes); + __ Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + stack_slot, sizeof(size_t), data_ptr); + static_assert(sizeof(uintptr_t) == sizeof(size_t), + "The buffer length can't " + "fit the PointerRepresentation used to store it."); + + return stack_slot; +} + Node* EffectControlLinearizer::AdaptFastCallArgument( Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error) { + int kAlign = alignof(uintptr_t); + int kSize = sizeof(uintptr_t); switch (arg_type.GetSequenceType()) { case CTypeInfo::SequenceType::kScalar: { switch (arg_type.GetType()) { case CTypeInfo::Type::kV8Value: { - int kAlign = alignof(uintptr_t); - int kSize = sizeof(uintptr_t); Node* stack_slot = __ StackSlot(kSize, kAlign); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), kNoWriteBarrier), stack_slot, 0, node); @@ -5035,10 +5124,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument( Node* value_is_smi = ObjectIsSmi(node); __ GotoIf(value_is_smi, if_error); - int kAlign = alignof(uintptr_t); - int kSize = sizeof(uintptr_t); Node* stack_slot = __ StackSlot(kSize, kAlign); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), kNoWriteBarrier), stack_slot, 0, node); @@ -5053,9 +5139,15 @@ Node* EffectControlLinearizer::AdaptFastCallArgument( return stack_slot; } - case CTypeInfo::SequenceType::kIsTypedArray: - // TODO(mslekova): Implement typed arrays. - return node; + case CTypeInfo::SequenceType::kIsTypedArray: { + // Check that the value is a HeapObject. 
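For context on the two __ Store calls and the static_asserts above: the stack slot is laid out so the C++ fast-call target can reinterpret it as a FastApiTypedArray, with the byte length at offset 0 and the data pointer right after it. A layout sketch (illustrative field names; the real type is v8::FastApiTypedArray<T>):

    #include <cstddef>
    #include <cstdint>

    struct FastApiTypedArrayLayout {
      size_t byte_length;  // written first, at offset 0
      void* data;          // written at offset sizeof(size_t)
    };

    static_assert(sizeof(FastApiTypedArrayLayout) ==
                      sizeof(size_t) + sizeof(uintptr_t),
                  "matches the kSize used for the stack slot above");
    static_assert(offsetof(FastApiTypedArrayLayout, data) == sizeof(size_t),
                  "data pointer immediately follows the length");
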
+ Node* value_is_smi = ObjectIsSmi(node); + __ GotoIf(value_is_smi, if_error); + + return AdaptFastCallTypedArrayArgument( + node, fast_api_call::GetTypedArrayElementsKind(arg_type.GetType()), + if_error); + } default: { UNREACHABLE(); } @@ -5069,14 +5161,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument( GraphAssemblerLabel<0>* if_error) { static constexpr int kReceiver = 1; - auto merge = __ MakeLabel(MachineRepresentation::kTagged); - - int kAlign = alignof(uintptr_t); - int kSize = sizeof(uintptr_t); - Node* stack_slot = __ StackSlot(kSize, kAlign); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - stack_slot, 0, node); + auto merge = __ MakeLabel(MachineRepresentation::kTagged, + MachineRepresentation::kTagged); for (size_t func_index = 0; func_index < c_functions.size(); func_index++) { const CFunctionInfo* c_signature = c_functions[func_index].signature; @@ -5101,34 +5187,31 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument( value_instance_type, __ Int32Constant(JS_ARRAY_TYPE)); __ GotoIfNot(value_is_js_array, &next); + int kAlign = alignof(uintptr_t); + int kSize = sizeof(uintptr_t); + Node* stack_slot = __ StackSlot(kSize, kAlign); + + __ Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + stack_slot, 0, node); + Node* target_address = __ ExternalConstant( ExternalReference::Create(c_functions[func_index].address)); - __ Goto(&merge, target_address); + __ Goto(&merge, target_address, stack_slot); break; } case CTypeInfo::SequenceType::kIsTypedArray: { // Check that the value is a TypedArray with a type that matches the // type declared in the c-function. - ElementsKind typed_array_elements_kind = + Node* stack_slot = AdaptFastCallTypedArrayArgument( + node, fast_api_call::GetTypedArrayElementsKind( - overloads_resolution_result.element_type); - - Node* value_map = __ LoadField(AccessBuilder::ForMap(), node); - Node* value_bit_field2 = - __ LoadField(AccessBuilder::ForMapBitField2(), value_map); - Node* value_elements_kind = __ WordShr( - __ WordAnd(value_bit_field2, - __ Int32Constant(Map::Bits2::ElementsKindBits::kMask)), - __ Int32Constant(Map::Bits2::ElementsKindBits::kShift)); - Node* is_same_kind = __ Word32Equal( - value_elements_kind, - __ Int32Constant(GetPackedElementsKind(typed_array_elements_kind))); - __ GotoIfNot(is_same_kind, &next); - + overloads_resolution_result.element_type), + &next); Node* target_address = __ ExternalConstant( ExternalReference::Create(c_functions[func_index].address)); - __ Goto(&merge, target_address); + __ Goto(&merge, target_address, stack_slot); break; } @@ -5142,7 +5225,7 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument( __ Goto(if_error); __ Bind(&merge); - return {merge.PhiAt(0), stack_slot}; + return {merge.PhiAt(0), merge.PhiAt(1)}; } Node* EffectControlLinearizer::WrapFastCall( diff --git a/deps/v8/src/compiler/fast-api-calls.cc b/deps/v8/src/compiler/fast-api-calls.cc index 608fce8606..564da611d5 100644 --- a/deps/v8/src/compiler/fast-api-calls.cc +++ b/deps/v8/src/compiler/fast-api-calls.cc @@ -28,7 +28,6 @@ ElementsKind GetTypedArrayElementsKind(CTypeInfo::Type type) { case CTypeInfo::Type::kV8Value: case CTypeInfo::Type::kApiObject: UNREACHABLE(); - break; } } diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc index e5b4fb35b7..26ae88362d 100644 --- a/deps/v8/src/compiler/graph-assembler.cc +++ b/deps/v8/src/compiler/graph-assembler.cc @@ -574,6 +574,15 @@ TNode<Map> 
GraphAssembler::LoadMap(Node* object) { #endif } +void GraphAssembler::StoreMap(Node* object, TNode<Map> map) { +#ifdef V8_MAP_PACKING + map = PackMapWord(map); +#endif + StoreRepresentation rep(MachineType::TaggedRepresentation(), + kMapWriteBarrier); + Store(rep, object, HeapObject::kMapOffset - kHeapObjectTag, map); +} + Node* JSGraphAssembler::StoreElement(ElementAccess const& access, Node* object, Node* index, Node* value) { return AddNode(graph()->NewNode(simplified()->StoreElement(access), object, diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index 335ec0a314..5efe6dd9c3 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -276,6 +276,7 @@ class V8_EXPORT_PRIVATE GraphAssembler { TNode<Map> UnpackMapWord(Node* map_word); #endif TNode<Map> LoadMap(Node* object); + void StoreMap(Node* object, TNode<Map> map); Node* DebugBreak(); diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc index a518d87ba8..1688a14a04 100644 --- a/deps/v8/src/compiler/heap-refs.cc +++ b/deps/v8/src/compiler/heap-refs.cc @@ -32,10 +32,6 @@ namespace compiler { #define TRACE(broker, x) TRACE_BROKER(broker, x) #define TRACE_MISSING(broker, x) TRACE_BROKER_MISSING(broker, x) -#define FORWARD_DECL(Name, ...) class Name##Data; -HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) -#undef FORWARD_DECL - // There are several kinds of ObjectData values. // // kSmi: The underlying V8 object is a Smi and the data is an instance of the @@ -43,12 +39,10 @@ HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) // object is a Smi, it's safe to access the handle in order to extract the // number value, and AsSmi() does exactly that. // -// kSerializedHeapObject: The underlying V8 object is a HeapObject and the -// data is an instance of the corresponding (most-specific) subclass, e.g. -// JSFunctionData, which provides serialized information about the object. -// -// kBackgroundSerializedHeapObject: Like kSerializedHeapObject, but -// allows serialization from the background thread. +// kBackgroundSerializedHeapObject: The underlying V8 object is a HeapObject +// and the data is an instance of the corresponding (most-specific) subclass, +// e.g. JSFunctionData, which provides serialized information about the +// object. Allows serialization from the background thread. // // kUnserializedHeapObject: The underlying V8 object is a HeapObject and the // data is an instance of the base class (ObjectData), i.e. it basically @@ -66,7 +60,6 @@ HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) // these objects need not be serialized. enum ObjectDataKind { kSmi, - kSerializedHeapObject, kBackgroundSerializedHeapObject, kUnserializedHeapObject, kNeverSerializedHeapObject, @@ -87,6 +80,10 @@ bool IsReadOnlyHeapObjectForCompiler(HeapObject object) { } // namespace +NotConcurrentInliningTag::NotConcurrentInliningTag(JSHeapBroker* broker) { + CHECK(!broker->is_concurrent_inlining()); +} + class ObjectData : public ZoneObject { public: ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object, @@ -125,12 +122,12 @@ class ObjectData : public ZoneObject { HeapObject::cast(*object))); } -#define DECLARE_IS(Name, ...) bool Is##Name() const; +#define DECLARE_IS(Name) bool Is##Name() const; HEAP_BROKER_OBJECT_LIST(DECLARE_IS) #undef DECLARE_IS -#define DECLARE_AS(Name, ...) 
Name##Data* As##Name(); - HEAP_BROKER_OBJECT_LIST(DECLARE_AS) +#define DECLARE_AS(Name) Name##Data* As##Name(); + HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS) #undef DECLARE_AS Handle<Object> object() const { return object_; } @@ -144,9 +141,6 @@ class ObjectData : public ZoneObject { bool IsNull() const { return object_->IsNull(); } #ifdef DEBUG - enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed}; - mutable Usage used_status = Usage::kUnused; - JSHeapBroker* broker() const { return broker_; } #endif // DEBUG @@ -158,36 +152,10 @@ class ObjectData : public ZoneObject { #endif // DEBUG }; -namespace { - -template <class T> -constexpr bool IsSerializedRef() { - return ref_traits<T>::ref_serialization_kind == - RefSerializationKind::kSerialized; -} - -RefSerializationKind RefSerializationKindOf(ObjectData* const data) { - Object o = *data->object(); - if (o.IsSmi()) { - return RefSerializationKind::kNeverSerialized; -#define DEFINE_REF_SERIALIZATION_KIND(Name, Kind) \ - } \ - /* NOLINTNEXTLINE(readability/braces) */ \ - else if (o.Is##Name()) { \ - return ref_traits<Name>::ref_serialization_kind; - HEAP_BROKER_OBJECT_LIST(DEFINE_REF_SERIALIZATION_KIND) -#undef DEFINE_REF_SERIALIZATION_KIND - } - UNREACHABLE(); -} - -} // namespace - class HeapObjectData : public ObjectData { public: HeapObjectData(JSHeapBroker* broker, ObjectData** storage, - Handle<HeapObject> object, - ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject); + Handle<HeapObject> object, ObjectDataKind kind); base::Optional<bool> TryGetBooleanValue(JSHeapBroker* broker) const; ObjectData* map() const { return map_; } @@ -202,10 +170,9 @@ class HeapObjectData : public ObjectData { class PropertyCellData : public HeapObjectData { public: PropertyCellData(JSHeapBroker* broker, ObjectData** storage, - Handle<PropertyCell> object, - ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject); + Handle<PropertyCell> object, ObjectDataKind kind); - bool Serialize(JSHeapBroker* broker); + bool Cache(JSHeapBroker* broker); PropertyDetails property_details() const { CHECK(serialized()); @@ -224,34 +191,6 @@ class PropertyCellData : public HeapObjectData { bool serialized() const { return value_ != nullptr; } }; -// TODO(mslekova): Once we have real-world usage data, we might want to -// reimplement this as sorted vector instead, to reduce the memory overhead. -typedef ZoneMap<ObjectData*, HolderLookupResult> KnownReceiversMap; - -class FunctionTemplateInfoData : public HeapObjectData { - public: - FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage, - Handle<FunctionTemplateInfo> object) - : HeapObjectData(broker, storage, object) { - // FunctionTemplateInfoData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class CallHandlerInfoData : public HeapObjectData { - public: - CallHandlerInfoData(JSHeapBroker* broker, ObjectData** storage, - Handle<CallHandlerInfo> object) - : HeapObjectData(broker, storage, object) { - // CallHandlerInfoData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. 
- UNREACHABLE(); - } -}; - namespace { ZoneVector<Address> GetCFunctions(FixedArray function_overloads, Zone* zone) { @@ -285,7 +224,7 @@ PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage, ObjectDataKind kind) : HeapObjectData(broker, storage, object, kind) {} -bool PropertyCellData::Serialize(JSHeapBroker* broker) { +bool PropertyCellData::Cache(JSHeapBroker* broker) { if (serialized()) return true; TraceScope tracer(broker, this, "PropertyCellData::Serialize"); @@ -352,22 +291,22 @@ class JSReceiverData : public HeapObjectData { class JSObjectData : public JSReceiverData { public: JSObjectData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSObject> object, - ObjectDataKind kind = kSerializedHeapObject); + Handle<JSObject> object, ObjectDataKind kind); // Recursive serialization of all reachable JSObjects. bool SerializeAsBoilerplateRecursive(JSHeapBroker* broker, + NotConcurrentInliningTag, int max_depth = kMaxFastLiteralDepth); ObjectData* GetInobjectField(int property_index) const; // Shallow serialization of {elements}. - void SerializeElements(JSHeapBroker* broker); + void SerializeElements(JSHeapBroker* broker, NotConcurrentInliningTag); bool serialized_elements() const { return serialized_elements_; } ObjectData* elements() const; ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; } - void SerializeObjectCreateMap(JSHeapBroker* broker); + void SerializeObjectCreateMap(JSHeapBroker* broker, NotConcurrentInliningTag); // Can be nullptr. ObjectData* object_create_map(JSHeapBroker* broker) const { @@ -427,7 +366,8 @@ class JSObjectData : public JSReceiverData { ZoneUnorderedMap<int, ObjectData*> own_properties_; }; -void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) { +void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker, + NotConcurrentInliningTag) { if (serialized_object_create_map_) return; serialized_object_create_map_ = true; @@ -540,14 +480,18 @@ base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap( // object, we are guaranteed to see valid heap words even if the data is wrong. base::Optional<ObjectRef> GetOwnDictionaryPropertyFromHeap( JSHeapBroker* broker, Handle<JSObject> receiver, InternalIndex dict_index) { - DisallowGarbageCollection no_gc; - // DictionaryPropertyAt will check that we are within the bounds of the - // object. - base::Optional<Object> maybe_constant = JSObject::DictionaryPropertyAt( - receiver, dict_index, broker->isolate()->heap()); - DCHECK_IMPLIES(broker->IsMainThread(), maybe_constant); - if (!maybe_constant) return {}; - return TryMakeRef(broker, maybe_constant.value()); + Handle<Object> constant; + { + DisallowGarbageCollection no_gc; + // DictionaryPropertyAt will check that we are within the bounds of the + // object. 
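+ // Note that only the canonical persistent handle taken below may escape
+ // this no-GC scope; the raw Object itself must not, since the GC could
+ // move the underlying object once allocation is allowed again.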
+ base::Optional<Object> maybe_constant = JSObject::DictionaryPropertyAt( + receiver, dict_index, broker->isolate()->heap()); + DCHECK_IMPLIES(broker->IsMainThread(), maybe_constant); + if (!maybe_constant) return {}; + constant = broker->CanonicalPersistentHandle(maybe_constant.value()); + } + return TryMakeRef(broker, constant); } } // namespace @@ -622,7 +566,7 @@ class JSTypedArrayData : public JSObjectData { Handle<JSTypedArray> object, ObjectDataKind kind) : JSObjectData(broker, storage, object, kind) {} - void Serialize(JSHeapBroker* broker); + void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag); bool serialized() const { return serialized_; } bool is_on_heap() const { return is_on_heap_; } @@ -639,7 +583,8 @@ class JSTypedArrayData : public JSObjectData { ObjectData* buffer_ = nullptr; }; -void JSTypedArrayData::Serialize(JSHeapBroker* broker) { +void JSTypedArrayData::Serialize(JSHeapBroker* broker, + NotConcurrentInliningTag) { if (serialized_) return; serialized_ = true; @@ -656,35 +601,18 @@ void JSTypedArrayData::Serialize(JSHeapBroker* broker) { } } -class ArrayBoilerplateDescriptionData : public HeapObjectData { - public: - ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage, - Handle<ArrayBoilerplateDescription> object) - : HeapObjectData(broker, storage, object) { - // ArrayBoilerplateDescriptionData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - class JSDataViewData : public JSObjectData { public: JSDataViewData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSDataView> object, - ObjectDataKind kind = kSerializedHeapObject) + Handle<JSDataView> object, ObjectDataKind kind) : JSObjectData(broker, storage, object, kind) { - if (kind == kSerializedHeapObject) { - DCHECK(!broker->is_concurrent_inlining()); + DCHECK_EQ(kind, kBackgroundSerializedHeapObject); + if (!broker->is_concurrent_inlining()) { byte_length_ = object->byte_length(); - } else { - DCHECK_EQ(kind, kBackgroundSerializedHeapObject); - DCHECK(broker->is_concurrent_inlining()); } } size_t byte_length() const { - DCHECK_EQ(kind(), kSerializedHeapObject); return byte_length_; } @@ -695,12 +623,10 @@ class JSDataViewData : public JSObjectData { class JSBoundFunctionData : public JSObjectData { public: JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSBoundFunction> object, - ObjectDataKind kind = kSerializedHeapObject) + Handle<JSBoundFunction> object, ObjectDataKind kind) : JSObjectData(broker, storage, object, kind) {} - // For main-thread serialization only. 
- bool Serialize(JSHeapBroker* broker); + bool Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag); ObjectData* bound_target_function() const { DCHECK(!broker()->is_concurrent_inlining()); @@ -726,197 +652,110 @@ class JSBoundFunctionData : public JSObjectData { class JSFunctionData : public JSObjectData { public: JSFunctionData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSFunction> object); + Handle<JSFunction> object, ObjectDataKind kind) + : JSObjectData(broker, storage, object, kind) { + Cache(broker); + } - bool has_feedback_vector() const { return has_feedback_vector_; } - bool has_initial_map() const { return has_initial_map_; } - bool has_prototype() const { return has_prototype_; } + bool IsConsistentWithHeapState(JSHeapBroker* broker) const; + + bool has_feedback_vector() const { + DCHECK(serialized_); + return has_feedback_vector_; + } + bool has_initial_map() const { + DCHECK(serialized_); + return has_initial_map_; + } + bool has_instance_prototype() const { + DCHECK(serialized_); + return has_instance_prototype_; + } bool PrototypeRequiresRuntimeLookup() const { + DCHECK(serialized_); return PrototypeRequiresRuntimeLookup_; } - void Serialize(JSHeapBroker* broker); - bool serialized() const { return serialized_; } - - void SerializeCodeAndFeedback(JSHeapBroker* broker); - bool serialized_code_and_feedback() const { - return serialized_code_and_feedback_; + ObjectData* context() const { + DCHECK(serialized_); + return context_; + } + ObjectData* native_context() const { + DCHECK(serialized_); + return native_context_; + } + MapData* initial_map() const { + DCHECK(serialized_); + return initial_map_; + } + ObjectData* instance_prototype() const { + DCHECK(serialized_); + return instance_prototype_; + } + ObjectData* shared() const { + DCHECK(serialized_); + return shared_; } - - ObjectData* context() const { return context_; } - ObjectData* native_context() const { return native_context_; } - ObjectData* initial_map() const { return initial_map_; } - ObjectData* prototype() const { return prototype_; } - ObjectData* shared() const { return shared_; } ObjectData* raw_feedback_cell() const { - DCHECK(serialized_code_and_feedback()); + DCHECK(serialized_); return feedback_cell_; } ObjectData* feedback_vector() const { - DCHECK(serialized_code_and_feedback()); + DCHECK(serialized_); return feedback_vector_; } - ObjectData* code() const { - DCHECK(serialized_code_and_feedback()); - DCHECK(!broker()->is_concurrent_inlining()); - return code_; - } int initial_map_instance_size_with_min_slack() const { - CHECK(serialized_); + DCHECK(serialized_); return initial_map_instance_size_with_min_slack_; } - private: - bool has_feedback_vector_; - bool has_initial_map_; - bool has_prototype_; - bool PrototypeRequiresRuntimeLookup_; + // Track serialized fields that are actually used, in order to relax + // ConsistentJSFunctionView dependency validation as much as possible. 
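The used-field tracking described above, declared in the enum that follows, is a
conventional bit-flags scheme: each accessor records which cached fields the
compiler actually consumed, so that final validation can skip everything else.
As a minimal self-contained sketch of the pattern (hypothetical names, not the
V8 API):

  #include <cstdint>

  enum UsedField : uint32_t {
    kFieldA = 1 << 0,
    kFieldB = 1 << 1,
  };

  struct TrackedSnapshot {
    uint32_t used_fields = 0;
    int a = 0, b = 0;

    // Each getter records that its field was consumed.
    int A() { used_fields |= kFieldA; return a; }
    int B() { used_fields |= kFieldB; return b; }

    bool has_used_field(UsedField f) const { return (used_fields & f) != 0; }

    // Later validation only re-checks fields that were actually read.
    bool IsConsistentWith(int live_a, int live_b) const {
      if (has_used_field(kFieldA) && a != live_a) return false;
      if (has_used_field(kFieldB) && b != live_b) return false;
      return true;
    }
  };
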
+ enum UsedField { + kHasFeedbackVector = 1 << 0, + kPrototypeOrInitialMap = 1 << 1, + kHasInitialMap = 1 << 2, + kHasInstancePrototype = 1 << 3, + kPrototypeRequiresRuntimeLookup = 1 << 4, + kInitialMap = 1 << 5, + kInstancePrototype = 1 << 6, + kFeedbackVector = 1 << 7, + kFeedbackCell = 1 << 8, + kInitialMapInstanceSizeWithMinSlack = 1 << 9, + }; - bool serialized_ = false; - bool serialized_code_and_feedback_ = false; - - ObjectData* context_ = nullptr; - ObjectData* native_context_ = nullptr; - ObjectData* initial_map_ = nullptr; - ObjectData* prototype_ = nullptr; - ObjectData* shared_ = nullptr; - ObjectData* feedback_vector_ = nullptr; - ObjectData* feedback_cell_ = nullptr; - ObjectData* code_ = nullptr; - int initial_map_instance_size_with_min_slack_; -}; - -class RegExpBoilerplateDescriptionData : public HeapObjectData { - public: - RegExpBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage, - Handle<RegExpBoilerplateDescription> object) - : HeapObjectData(broker, storage, object) { - // RegExpBoilerplateDescription is NeverEverSerialize. - // TODO(jgruber): Remove this class once all kNeverSerialized types are - // NeverEverSerialize. - UNREACHABLE(); + bool has_any_used_field() const { return used_fields_ != 0; } + bool has_used_field(UsedField used_field) const { + return (used_fields_ & used_field) != 0; } -}; - -class HeapNumberData : public HeapObjectData { - public: - HeapNumberData(JSHeapBroker* broker, ObjectData** storage, - Handle<HeapNumber> object, - ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject) - : HeapObjectData(broker, storage, object, kind), - value_(object->value()), - value_as_bits_(object->value_as_bits(kRelaxedLoad)) {} - - double value() const { return value_; } - uint64_t value_as_bits() const { return value_as_bits_; } + void set_used_field(UsedField used_field) { used_fields_ |= used_field; } private: - double const value_; - uint64_t const value_as_bits_; -}; - -class ContextData : public HeapObjectData { - public: - ContextData(JSHeapBroker* broker, ObjectData** storage, - Handle<Context> object) - : HeapObjectData(broker, storage, object) { - // TODO(v8:7790): Remove this class once all kNeverSerialized types are - // NeverEverSerialize. - UNREACHABLE(); - } -}; - -class NativeContextData : public ContextData { - public: - NativeContextData(JSHeapBroker* broker, ObjectData** storage, - Handle<NativeContext> object) - : ContextData(broker, storage, object) { - // TODO(v8:7790): Remove this class once all kNeverSerialized types are - // NeverEverSerialize. - UNREACHABLE(); - } -}; + void Cache(JSHeapBroker* broker); -class NameData : public HeapObjectData { - public: - NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object) - : HeapObjectData(broker, storage, object) { - // StringData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class StringData : public NameData { - public: - StringData(JSHeapBroker* broker, ObjectData** storage, Handle<String> object) - : NameData(broker, storage, object) { - // StringData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class SymbolData : public NameData { - public: - SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object) - : NameData(broker, storage, object) { - // StringData is NeverEverSerialize. 
- // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class InternalizedStringData : public StringData { - public: - InternalizedStringData(JSHeapBroker* broker, ObjectData** storage, - Handle<InternalizedString> object) - : StringData(broker, storage, object) { - // InternalizedStringData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class AccessorInfoData : public HeapObjectData { - public: - AccessorInfoData(JSHeapBroker* broker, ObjectData** storage, - Handle<AccessorInfo> object) - : HeapObjectData(broker, storage, object) { - // AccessorInfoData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class AllocationSiteData : public HeapObjectData { - public: - AllocationSiteData(JSHeapBroker* broker, ObjectData** storage, - Handle<AllocationSite> object); - void Serialize(JSHeapBroker* broker); +#ifdef DEBUG + bool serialized_ = false; +#endif // DEBUG - bool PointsToLiteral() const { return PointsToLiteral_; } - AllocationType GetAllocationType() const { return GetAllocationType_; } - ObjectData* nested_site() const { return nested_site_; } - ObjectData* boilerplate() const { return boilerplate_; } + using UsedFields = base::Flags<UsedField>; + UsedFields used_fields_; - // These are only valid if PointsToLiteral is false. - ElementsKind GetElementsKind() const { return GetElementsKind_; } - bool CanInlineCall() const { return CanInlineCall_; } + bool has_feedback_vector_ = false; + ObjectData* prototype_or_initial_map_ = nullptr; + bool has_initial_map_ = false; + bool has_instance_prototype_ = false; + bool PrototypeRequiresRuntimeLookup_ = false; - private: - bool const PointsToLiteral_; - AllocationType const GetAllocationType_; - ObjectData* nested_site_ = nullptr; - ObjectData* boilerplate_ = nullptr; - ElementsKind GetElementsKind_ = NO_ELEMENTS; - bool CanInlineCall_ = false; - bool serialized_ = false; + ObjectData* context_ = nullptr; + ObjectData* native_context_ = nullptr; // Derives from context_. + MapData* initial_map_ = nullptr; // Derives from prototype_or_initial_map_. + ObjectData* instance_prototype_ = + nullptr; // Derives from prototype_or_initial_map_. + ObjectData* shared_ = nullptr; + ObjectData* feedback_vector_ = nullptr; // Derives from feedback_cell. + ObjectData* feedback_cell_ = nullptr; + int initial_map_instance_size_with_min_slack_; // Derives from + // prototype_or_initial_map_. }; class BigIntData : public HeapObjectData { @@ -933,18 +772,14 @@ class BigIntData : public HeapObjectData { }; struct PropertyDescriptor { - ObjectData* key = nullptr; - ObjectData* value = nullptr; - PropertyDetails details = PropertyDetails::Empty(); FieldIndex field_index; ObjectData* field_owner = nullptr; - ObjectData* field_type = nullptr; }; class MapData : public HeapObjectData { public: MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object, - ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject); + ObjectDataKind kind); InstanceType instance_type() const { return instance_type_; } int instance_size() const { return instance_size_; } @@ -975,49 +810,38 @@ class MapData : public HeapObjectData { } // Extra information. - - // Serialize a single (or all) own slot(s) of the descriptor array and recurse - // on field owner(s). 
- bool TrySerializeOwnDescriptor(JSHeapBroker* broker, - InternalIndex descriptor_index); - void SerializeOwnDescriptor(JSHeapBroker* broker, - InternalIndex descriptor_index) { - CHECK(TrySerializeOwnDescriptor(broker, descriptor_index)); - } - void SerializeOwnDescriptors(JSHeapBroker* broker); - ObjectData* GetStrongValue(InternalIndex descriptor_index) const; - ObjectData* instance_descriptors() const { return instance_descriptors_; } - - void SerializeRootMap(JSHeapBroker* broker); + void SerializeRootMap(JSHeapBroker* broker, NotConcurrentInliningTag tag); ObjectData* FindRootMap() const; - void SerializeConstructor(JSHeapBroker* broker); + void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag); ObjectData* GetConstructor() const { CHECK(serialized_constructor_); return constructor_; } - void SerializeBackPointer(JSHeapBroker* broker); + void SerializeBackPointer(JSHeapBroker* broker, NotConcurrentInliningTag tag); ObjectData* GetBackPointer() const { CHECK(serialized_backpointer_); return backpointer_; } - bool TrySerializePrototype(JSHeapBroker* broker); - void SerializePrototype(JSHeapBroker* broker) { - CHECK(TrySerializePrototype(broker)); + bool TrySerializePrototype(JSHeapBroker* broker, + NotConcurrentInliningTag tag); + void SerializePrototype(JSHeapBroker* broker, NotConcurrentInliningTag tag) { + CHECK(TrySerializePrototype(broker, tag)); } ObjectData* prototype() const { DCHECK_EQ(serialized_prototype_, prototype_ != nullptr); return prototype_; } - void SerializeForElementStore(JSHeapBroker* broker); + void SerializeForElementStore(JSHeapBroker* broker, + NotConcurrentInliningTag tag); bool has_extra_serialized_data() const { - return serialized_own_descriptors_ || serialized_constructor_ || - serialized_backpointer_ || serialized_prototype_ || - serialized_root_map_ || serialized_for_element_store_; + return serialized_constructor_ || serialized_backpointer_ || + serialized_prototype_ || serialized_root_map_ || + serialized_for_element_store_; } private: @@ -1044,14 +868,10 @@ class MapData : public HeapObjectData { bool supports_fast_array_iteration_; bool supports_fast_array_resize_; - // These extra fields still have to be serialized (e.g prototype_) even with - // concurrent inling, since those classes have fields themselves which are not - // being directly read. This means that, for example, even though we can get - // the prototype itself with direct reads, some of its fields require - // serialization. - bool serialized_own_descriptors_ = false; - ObjectData* instance_descriptors_ = nullptr; - + // These extra fields still have to be serialized (e.g prototype_), since + // those classes have fields themselves which are not being directly read. + // This means that, for example, even though we can get the prototype itself + // with direct reads, some of its fields require serialization. 
 bool serialized_constructor_ = false;
 ObjectData* constructor_ = nullptr;
@@ -1067,33 +887,222 @@ class MapData : public HeapObjectData {
 bool serialized_for_element_store_ = false;
};
-AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<AllocationSite> object)
- : HeapObjectData(broker, storage, object),
- PointsToLiteral_(object->PointsToLiteral()),
- GetAllocationType_(object->GetAllocationType()) {
- DCHECK(!broker->is_concurrent_inlining());
- if (!PointsToLiteral_) {
- GetElementsKind_ = object->GetElementsKind();
- CanInlineCall_ = object->CanInlineCall();
+namespace {
+
+int InstanceSizeWithMinSlack(JSHeapBroker* broker, MapRef map) {
+ // This operation is split into two phases (1. map collection, 2. map
+ // processing). This is to avoid having to take two locks
+ // (full_transition_array_access and map_updater_access) at once and thus
+ // having to deal with related deadlock issues.
+ ZoneVector<Handle<Map>> maps(broker->zone());
+ maps.push_back(map.object());
+
+ {
+ DisallowGarbageCollection no_gc;
+
+ // Has to be an initial map.
+ DCHECK(map.object()->GetBackPointer().IsUndefined(broker->isolate()));
+
+ static constexpr bool kConcurrentAccess = true;
+ TransitionsAccessor(broker->isolate(), *map.object(), &no_gc,
+ kConcurrentAccess)
+ .TraverseTransitionTree([&](Map m) {
+ maps.push_back(broker->CanonicalPersistentHandle(m));
+ });
 }
+
+ // The lock is needed for UnusedPropertyFields and InstanceSizeFromSlack.
+ JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker);
+
+ int slack = std::numeric_limits<int>::max();
+ for (Handle<Map> m : maps) {
+ slack = std::min(slack, m->UnusedPropertyFields());
+ }
+
+ return map.object()->InstanceSizeFromSlack(slack);
}

-void AllocationSiteData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
+} // namespace
+
+// IMPORTANT: Keep this sync'd with JSFunctionData::IsConsistentWithHeapState.
+void JSFunctionData::Cache(JSHeapBroker* broker) {
+ DCHECK(!serialized_);
+
+ TraceScope tracer(broker, this, "JSFunctionData::Cache");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object());
+
+ // This function may run on the background thread and thus must read
+ // individual fields in a thread-safe manner. Consistency between fields is
+ // *not* guaranteed here; instead we verify it in `IsConsistentWithHeapState`,
+ // called during job finalization. Relaxed loads are thus okay: we're
+ // guaranteed to see an initialized JSFunction object, and after
+ // initialization fields remain in a valid state.
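The scheme described in the comment above (snapshot individual fields now,
verify cross-field consistency at job finalization) reduces to the following
self-contained sketch, with std::atomic standing in for V8's relaxed
tagged-field loads (illustrative names only, not the V8 API):

  #include <atomic>

  struct LiveObject {
    std::atomic<int> context{0};
    std::atomic<int> prototype{0};
  };

  struct Snapshot {
    int context;
    int prototype;

    // Fields are read one at a time; consistency *between* them is not
    // guaranteed at snapshot time.
    static Snapshot Take(const LiveObject& o) {
      return {o.context.load(std::memory_order_relaxed),
              o.prototype.load(std::memory_order_relaxed)};
    }

    // Re-run later, when the object is known to be stable (e.g. on the main
    // thread during finalization); a mismatch discards the compilation.
    bool IsConsistentWith(const LiveObject& o) const {
      return context == o.context.load(std::memory_order_relaxed) &&
             prototype == o.prototype.load(std::memory_order_relaxed);
    }
  };
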
+ + Context context = function->context(kRelaxedLoad); + context_ = broker->GetOrCreateData(context, kAssumeMemoryFence); + CHECK(context_->IsContext()); + + native_context_ = broker->GetOrCreateData(context.map().native_context(), + kAssumeMemoryFence); + CHECK(native_context_->IsNativeContext()); + + SharedFunctionInfo shared = function->shared(kRelaxedLoad); + shared_ = broker->GetOrCreateData(shared, kAssumeMemoryFence); + + if (function->has_prototype_slot()) { + prototype_or_initial_map_ = broker->GetOrCreateData( + function->prototype_or_initial_map(kAcquireLoad), kAssumeMemoryFence); + + has_initial_map_ = prototype_or_initial_map_->IsMap(); + if (has_initial_map_) { + initial_map_ = prototype_or_initial_map_->AsMap(); + + MapRef initial_map_ref = TryMakeRef<Map>(broker, initial_map_).value(); + if (initial_map_ref.IsInobjectSlackTrackingInProgress()) { + initial_map_instance_size_with_min_slack_ = + InstanceSizeWithMinSlack(broker, initial_map_ref); + } else { + initial_map_instance_size_with_min_slack_ = + initial_map_ref.instance_size(); + } + CHECK_GT(initial_map_instance_size_with_min_slack_, 0); + + if (!initial_map_->should_access_heap() && + !broker->is_concurrent_inlining()) { + // TODO(neis): This is currently only needed for native_context's + // object_function, as used by GetObjectCreateMap. If no further use + // sites show up, we should move this into NativeContextData::Serialize. + initial_map_->SerializePrototype(broker, + NotConcurrentInliningTag{broker}); + initial_map_->SerializeConstructor(broker, + NotConcurrentInliningTag{broker}); + } + } + + if (has_initial_map_) { + has_instance_prototype_ = true; + instance_prototype_ = broker->GetOrCreateData( + Handle<Map>::cast(initial_map_->object())->prototype(), + kAssumeMemoryFence); + } else if (prototype_or_initial_map_->IsHeapObject() && + !Handle<HeapObject>::cast(prototype_or_initial_map_->object()) + ->IsTheHole()) { + has_instance_prototype_ = true; + instance_prototype_ = prototype_or_initial_map_; + } + } + + PrototypeRequiresRuntimeLookup_ = function->PrototypeRequiresRuntimeLookup(); + + FeedbackCell feedback_cell = function->raw_feedback_cell(kAcquireLoad); + feedback_cell_ = broker->GetOrCreateData(feedback_cell, kAssumeMemoryFence); + + ObjectData* maybe_feedback_vector = broker->GetOrCreateData( + feedback_cell.value(kAcquireLoad), kAssumeMemoryFence); + if (shared.is_compiled() && maybe_feedback_vector->IsFeedbackVector()) { + has_feedback_vector_ = true; + feedback_vector_ = maybe_feedback_vector; + } + +#ifdef DEBUG serialized_ = true; +#endif // DEBUG +} + +// IMPORTANT: Keep this sync'd with JSFunctionData::Cache. 
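+// Each field cached by Cache() is either CHECK'd unconditionally below or,
+// if its UsedField bit was never set during compilation, skipped entirely,
+// so cached-but-unused state cannot invalidate an otherwise sound result.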
+bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const { + DCHECK(serialized_); + + Handle<JSFunction> f = Handle<JSFunction>::cast(object()); + + CHECK_EQ(*context_->object(), f->context()); + CHECK_EQ(*native_context_->object(), f->native_context()); + CHECK_EQ(*shared_->object(), f->shared()); + + if (f->has_prototype_slot()) { + if (has_used_field(kPrototypeOrInitialMap) && + *prototype_or_initial_map_->object() != + f->prototype_or_initial_map(kAcquireLoad)) { + TRACE_BROKER_MISSING(broker, "JSFunction::prototype_or_initial_map"); + return false; + } + if (has_used_field(kHasInitialMap) && + has_initial_map_ != f->has_initial_map()) { + TRACE_BROKER_MISSING(broker, "JSFunction::has_initial_map"); + return false; + } + if (has_used_field(kHasInstancePrototype) && + has_instance_prototype_ != f->has_instance_prototype()) { + TRACE_BROKER_MISSING(broker, "JSFunction::has_instance_prototype"); + return false; + } + } else { + DCHECK(!has_initial_map_); + DCHECK(!has_instance_prototype_); + } + + if (has_initial_map()) { + if (has_used_field(kInitialMap) && + *initial_map_->object() != f->initial_map()) { + TRACE_BROKER_MISSING(broker, "JSFunction::initial_map"); + return false; + } + if (has_used_field(kInitialMapInstanceSizeWithMinSlack) && + initial_map_instance_size_with_min_slack_ != + f->ComputeInstanceSizeWithMinSlack(f->GetIsolate())) { + TRACE_BROKER_MISSING(broker, + "JSFunction::ComputeInstanceSizeWithMinSlack"); + return false; + } + } else { + DCHECK_NULL(initial_map_); + } + + if (has_instance_prototype_) { + if (has_used_field(kInstancePrototype) && + *instance_prototype_->object() != f->instance_prototype()) { + TRACE_BROKER_MISSING(broker, "JSFunction::instance_prototype"); + return false; + } + } else { + DCHECK_NULL(instance_prototype_); + } + + if (has_used_field(kPrototypeRequiresRuntimeLookup) && + PrototypeRequiresRuntimeLookup_ != f->PrototypeRequiresRuntimeLookup()) { + TRACE_BROKER_MISSING(broker, "JSFunction::PrototypeRequiresRuntimeLookup"); + return false; + } + + if (has_used_field(kFeedbackCell) && + *feedback_cell_->object() != f->raw_feedback_cell()) { + TRACE_BROKER_MISSING(broker, "JSFunction::raw_feedback_cell"); + return false; + } - TraceScope tracer(broker, this, "AllocationSiteData::Serialize"); - Handle<AllocationSite> site = Handle<AllocationSite>::cast(object()); + if (has_used_field(kHasFeedbackVector) && + has_feedback_vector_ != f->has_feedback_vector()) { + TRACE_BROKER_MISSING(broker, "JSFunction::has_feedback_vector"); + return false; + } - if (PointsToLiteral_) { - DCHECK_NULL(boilerplate_); - boilerplate_ = broker->GetOrCreateData(site->boilerplate(kAcquireLoad)); + if (has_feedback_vector_) { + if (has_used_field(kFeedbackVector) && + *feedback_vector_->object() != f->feedback_vector()) { + TRACE_BROKER_MISSING(broker, "JSFunction::feedback_vector"); + return false; + } + } else { + DCHECK_NULL(feedback_vector_); } - DCHECK_NULL(nested_site_); - nested_site_ = broker->GetOrCreateData(site->nested_site()); + return true; +} + +bool JSFunctionRef::IsConsistentWithHeapState() const { + DCHECK(broker()->is_concurrent_inlining()); + DCHECK(broker()->IsMainThread()); + return data()->AsJSFunction()->IsConsistentWithHeapState(broker()); } HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage, @@ -1101,8 +1110,6 @@ HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage, : ObjectData(broker, storage, object, kind), map_(broker->GetOrCreateData(object->map(kAcquireLoad), 
kAssumeMemoryFence)) { - CHECK_IMPLIES(kind == kSerializedHeapObject, - broker->mode() == JSHeapBroker::kSerializing); CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized, kind == kBackgroundSerializedHeapObject); } @@ -1234,249 +1241,12 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object, } } -JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSFunction> object) - : JSObjectData(broker, storage, object), - has_feedback_vector_(object->has_feedback_vector()), - has_initial_map_(object->has_prototype_slot() && - object->has_initial_map()), - has_prototype_(object->has_prototype_slot() && object->has_prototype()), - PrototypeRequiresRuntimeLookup_( - object->PrototypeRequiresRuntimeLookup()) {} - -void JSFunctionData::Serialize(JSHeapBroker* broker) { - if (serialized_) return; - serialized_ = true; - - TraceScope tracer(broker, this, "JSFunctionData::Serialize"); - Handle<JSFunction> function = Handle<JSFunction>::cast(object()); - - DCHECK_NULL(context_); - DCHECK_NULL(native_context_); - DCHECK_NULL(initial_map_); - DCHECK_NULL(prototype_); - DCHECK_NULL(shared_); - - context_ = broker->GetOrCreateData(function->context()); - native_context_ = broker->GetOrCreateData(function->native_context()); - shared_ = broker->GetOrCreateData(function->shared()); - - initial_map_ = has_initial_map() - ? broker->GetOrCreateData(function->initial_map()) - : nullptr; - prototype_ = has_prototype() ? broker->GetOrCreateData(function->prototype()) - : nullptr; - - if (initial_map_ != nullptr) { - initial_map_instance_size_with_min_slack_ = - function->ComputeInstanceSizeWithMinSlack(broker->isolate()); - } - if (initial_map_ != nullptr && !initial_map_->should_access_heap()) { - initial_map_->AsMap()->SerializeConstructor(broker); - // TODO(neis): This is currently only needed for native_context's - // object_function, as used by GetObjectCreateMap. If no further use sites - // show up, we should move this into NativeContextData::Serialize. - initial_map_->AsMap()->SerializePrototype(broker); - } -} - -void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) { - DCHECK(serialized_); - if (serialized_code_and_feedback_) return; - serialized_code_and_feedback_ = true; - - TraceScope tracer(broker, this, "JSFunctionData::SerializeCodeAndFeedback"); - Handle<JSFunction> function = Handle<JSFunction>::cast(object()); - - DCHECK_NULL(feedback_cell_); - DCHECK_NULL(feedback_vector_); - DCHECK_NULL(code_); - if (!broker->is_concurrent_inlining()) { - // This is conditionalized because Code objects are never serialized now. - // We only need to represent the code object in serialized data when - // we're unable to perform direct heap accesses. - code_ = broker->GetOrCreateData(function->code(kAcquireLoad)); - } - feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell()); - feedback_vector_ = has_feedback_vector() - ? 
broker->GetOrCreateData(function->feedback_vector()) - : nullptr; -} - -class DescriptorArrayData : public HeapObjectData { - public: - DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage, - Handle<DescriptorArray> object) - : HeapObjectData(broker, storage, object), contents_(broker->zone()) { - DCHECK(!broker->is_concurrent_inlining()); - } - - ObjectData* FindFieldOwner(InternalIndex descriptor_index) const { - return contents_.at(descriptor_index.as_int()).field_owner; - } - - PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const { - return contents_.at(descriptor_index.as_int()).details; - } - - ObjectData* GetPropertyKey(InternalIndex descriptor_index) const { - return contents_.at(descriptor_index.as_int()).key; - } - - FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const { - return contents_.at(descriptor_index.as_int()).field_index; - } - - ObjectData* GetFieldType(InternalIndex descriptor_index) const { - return contents_.at(descriptor_index.as_int()).field_type; - } - - ObjectData* GetStrongValue(InternalIndex descriptor_index) const { - return contents_.at(descriptor_index.as_int()).value; - } - - bool serialized_descriptor(InternalIndex descriptor_index) const { - return contents_.find(descriptor_index.as_int()) != contents_.end(); - } - - void SerializeDescriptor(JSHeapBroker* broker, Handle<Map> map, - InternalIndex descriptor_index); - - private: - ZoneMap<int, PropertyDescriptor> contents_; -}; - -void DescriptorArrayData::SerializeDescriptor(JSHeapBroker* broker, - Handle<Map> map, - InternalIndex descriptor_index) { - CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors()); - if (contents_.find(descriptor_index.as_int()) != contents_.end()) return; - - Isolate* const isolate = broker->isolate(); - auto descriptors = Handle<DescriptorArray>::cast(object()); - CHECK_EQ(*descriptors, map->instance_descriptors(isolate)); - - PropertyDescriptor d; - d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index)); - MaybeObject value = descriptors->GetValue(descriptor_index); - HeapObject obj; - if (value.GetHeapObjectIfStrong(&obj)) { - d.value = broker->GetOrCreateData(obj); - } - d.details = descriptors->GetDetails(descriptor_index); - if (d.details.location() == kField) { - d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index); - d.field_owner = - broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index)); - d.field_type = - broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index)); - } - contents_[descriptor_index.as_int()] = d; - - if (d.details.location() == kField && !d.field_owner->should_access_heap()) { - // Recurse on the owner map. - d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index); - } - - TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into " - << this << " (" << contents_.size() - << " total)"); -} - -class FeedbackCellData : public HeapObjectData { - public: - FeedbackCellData(JSHeapBroker* broker, ObjectData** storage, - Handle<FeedbackCell> object); - - ObjectData* value() const { return value_; } - - private: - ObjectData* const value_; -}; - -FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage, - Handle<FeedbackCell> object) - : HeapObjectData(broker, storage, object), - value_(object->value().IsFeedbackVector() - ? 
broker->GetOrCreateData(object->value()) - : nullptr) { - DCHECK(!broker->is_concurrent_inlining()); -} - -class FeedbackVectorData : public HeapObjectData { - public: - FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage, - Handle<FeedbackVector> object); - - double invocation_count() const { return invocation_count_; } - - ObjectData* shared_function_info() { - CHECK(serialized_); - return shared_function_info_; - } - - void Serialize(JSHeapBroker* broker); - bool serialized() const { return serialized_; } - ObjectData* GetClosureFeedbackCell(JSHeapBroker* broker, int index) const; - - private: - double const invocation_count_; - - bool serialized_ = false; - ObjectData* shared_function_info_; - ZoneVector<ObjectData*> closure_feedback_cell_array_; -}; - -FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker, - ObjectData** storage, - Handle<FeedbackVector> object) - : HeapObjectData(broker, storage, object), - invocation_count_(object->invocation_count()), - closure_feedback_cell_array_(broker->zone()) { - DCHECK(!broker->is_concurrent_inlining()); -} - -ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker, - int index) const { - CHECK_GE(index, 0); - - size_t cell_array_size = closure_feedback_cell_array_.size(); - if (!serialized_) { - DCHECK_EQ(cell_array_size, 0); - TRACE_BROKER_MISSING(broker, - " closure feedback cell array for vector " << this); - return nullptr; - } - CHECK_LT(index, cell_array_size); - return closure_feedback_cell_array_[index]; -} - -void FeedbackVectorData::Serialize(JSHeapBroker* broker) { - if (serialized_) return; - serialized_ = true; - - TraceScope tracer(broker, this, "FeedbackVectorData::Serialize"); - Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object()); - Handle<SharedFunctionInfo> sfi(vector->shared_function_info(), - broker->isolate()); - shared_function_info_ = broker->GetOrCreateData(sfi); - DCHECK(closure_feedback_cell_array_.empty()); - int length = vector->closure_feedback_cell_array().length(); - closure_feedback_cell_array_.reserve(length); - for (int i = 0; i < length; ++i) { - Handle<FeedbackCell> cell = vector->GetClosureFeedbackCell(i); - ObjectData* cell_data = broker->GetOrCreateData(cell); - closure_feedback_cell_array_.push_back(cell_data); - } - TRACE(broker, "Copied " << length << " feedback cells"); -} - class FixedArrayBaseData : public HeapObjectData { public: FixedArrayBaseData(JSHeapBroker* broker, ObjectData** storage, Handle<FixedArrayBase> object, ObjectDataKind kind) : HeapObjectData(broker, storage, object, kind), - length_(object->length()) {} + length_(object->length(kAcquireLoad)) {} int length() const { return length_; } @@ -1491,20 +1261,6 @@ class FixedArrayData : public FixedArrayBaseData { : FixedArrayBaseData(broker, storage, object, kind) {} }; -class ObjectBoilerplateDescriptionData : public FixedArrayData { - public: - ObjectBoilerplateDescriptionData( - JSHeapBroker* broker, ObjectData** storage, - Handle<ObjectBoilerplateDescription> object, - ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject) - : FixedArrayData(broker, storage, object, kind) { - // ObjectBoilerplateDescriptionData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - // Only used in JSNativeContextSpecialization. 
class ScriptContextTableData : public FixedArrayData { public: @@ -1513,7 +1269,8 @@ class ScriptContextTableData : public FixedArrayData { : FixedArrayData(broker, storage, object, kind) {} }; -bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) { +bool JSBoundFunctionData::Serialize(JSHeapBroker* broker, + NotConcurrentInliningTag tag) { DCHECK(!broker->is_concurrent_inlining()); if (serialized_) return true; @@ -1532,9 +1289,7 @@ bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) { if (!bound_target_function_->should_access_heap()) { if (bound_target_function_->IsJSBoundFunction()) { serialized_nested = - bound_target_function_->AsJSBoundFunction()->Serialize(broker); - } else if (bound_target_function_->IsJSFunction()) { - bound_target_function_->AsJSFunction()->Serialize(broker); + bound_target_function_->AsJSBoundFunction()->Serialize(broker, tag); } } if (!serialized_nested) { @@ -1563,39 +1318,14 @@ JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage, own_constant_elements_(broker->zone()), own_properties_(broker->zone()) {} -class FixedDoubleArrayData : public FixedArrayBaseData { - public: - FixedDoubleArrayData( - JSHeapBroker* broker, ObjectData** storage, - Handle<FixedDoubleArray> object, - ObjectDataKind kind = ObjectDataKind::kNeverSerializedHeapObject) - : FixedArrayBaseData(broker, storage, object, kind) { - DCHECK(!broker->is_concurrent_inlining()); - } -}; - -class BytecodeArrayData : public FixedArrayBaseData { - public: - BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage, - Handle<BytecodeArray> object) - : FixedArrayBaseData(broker, storage, object, - ObjectDataKind::kNeverSerializedHeapObject) { - // BytecodeArrayData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - class JSArrayData : public JSObjectData { public: JSArrayData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSArray> object, - ObjectDataKind kind = kSerializedHeapObject) + Handle<JSArray> object, ObjectDataKind kind) : JSObjectData(broker, storage, object, kind), own_elements_(broker->zone()) {} - void Serialize(JSHeapBroker* broker); + void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag); ObjectData* length() const { CHECK(serialized_); return length_; @@ -1616,9 +1346,8 @@ class JSArrayData : public JSObjectData { ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_; }; -void JSArrayData::Serialize(JSHeapBroker* broker) { - CHECK(!broker->is_concurrent_inlining()); - +void JSArrayData::Serialize(JSHeapBroker* broker, + NotConcurrentInliningTag tag) { if (serialized_) return; serialized_ = true; @@ -1647,56 +1376,10 @@ ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index, return result; } -class ScopeInfoData : public HeapObjectData { - public: - ScopeInfoData(JSHeapBroker* broker, ObjectData** storage, - Handle<ScopeInfo> object) - : HeapObjectData(broker, storage, object) { - // TODO(v8:7790): Remove this class once all kNeverSerialized types are - // NeverEverSerialize. - UNREACHABLE(); - } -}; - -class SharedFunctionInfoData : public HeapObjectData { - public: - SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage, - Handle<SharedFunctionInfo> object) - : HeapObjectData(broker, storage, object) { - // TODO(v8:7790): Remove this class once all kNeverSerialized types are - // NeverEverSerialize. 
- UNREACHABLE(); - } -}; - -class SourceTextModuleData : public HeapObjectData { - public: - SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage, - Handle<SourceTextModule> object) - : HeapObjectData(broker, storage, object) { - // SourceTextModuleData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class CellData : public HeapObjectData { - public: - CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object) - : HeapObjectData(broker, storage, object) { - // CellData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - class JSGlobalObjectData : public JSObjectData { public: JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSGlobalObject> object, - ObjectDataKind kind = kSerializedHeapObject) + Handle<JSGlobalObject> object, ObjectDataKind kind) : JSObjectData(broker, storage, object, kind), properties_(broker->zone()) { if (!broker->is_concurrent_inlining()) { @@ -1705,7 +1388,6 @@ class JSGlobalObjectData : public JSObjectData { } bool IsDetached() const { - DCHECK_EQ(kind(), kSerializedHeapObject); return is_detached_; } @@ -1715,7 +1397,6 @@ class JSGlobalObjectData : public JSObjectData { private: // Only valid if not concurrent inlining. - bool is_detached_ = false; // Properties that either @@ -1728,8 +1409,7 @@ class JSGlobalObjectData : public JSObjectData { class JSGlobalProxyData : public JSObjectData { public: JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage, - Handle<JSGlobalProxy> object, - ObjectDataKind kind = kSerializedHeapObject) + Handle<JSGlobalProxy> object, ObjectDataKind kind) : JSObjectData(broker, storage, object, kind) {} }; @@ -1750,8 +1430,6 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker, ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker, ObjectData* name, SerializationPolicy policy) { - DCHECK_EQ(kind(), kSerializedHeapObject); - CHECK_NOT_NULL(name); for (auto const& p : properties_) { if (p.first == name) return p.second; @@ -1768,52 +1446,14 @@ ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker, if (cell.has_value()) { result = cell->data(); if (!result->should_access_heap()) { - result->AsPropertyCell()->Serialize(broker); + result->AsPropertyCell()->Cache(broker); } } properties_.push_back({name, result}); return result; } -class TemplateObjectDescriptionData : public HeapObjectData { - public: - TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage, - Handle<TemplateObjectDescription> object) - : HeapObjectData(broker, storage, object) { - // TemplateObjectDescriptionData is NeverEverSerialize. - // TODO(solanes, v8:7790): Remove this class once all kNeverSerialized types - // are NeverEverSerialize. - UNREACHABLE(); - } -}; - -class CodeData : public HeapObjectData { - public: - CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object) - : HeapObjectData(broker, storage, object), - inlined_bytecode_size_(object->inlined_bytecode_size() > 0 && - !object->marked_for_deoptimization() - ? 
object->inlined_bytecode_size() - : 0) { - DCHECK(!broker->is_concurrent_inlining()); - } - - unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; } - - private: - unsigned const inlined_bytecode_size_; -}; - -class CodeDataContainerData : public HeapObjectData { - public: - CodeDataContainerData(JSHeapBroker* broker, ObjectData** storage, - Handle<CodeDataContainer> object) - : HeapObjectData(broker, storage, object) { - DCHECK(!broker->is_concurrent_inlining()); - } -}; - -#define DEFINE_IS(Name, ...) \ +#define DEFINE_IS(Name) \ bool ObjectData::Is##Name() const { \ if (should_access_heap()) { \ return object()->Is##Name(); \ @@ -1826,14 +1466,13 @@ class CodeDataContainerData : public HeapObjectData { HEAP_BROKER_OBJECT_LIST(DEFINE_IS) #undef DEFINE_IS -#define DEFINE_AS(Name, Kind) \ +#define DEFINE_AS(Name) \ Name##Data* ObjectData::As##Name() { \ CHECK(Is##Name()); \ - CHECK(kind_ == kSerializedHeapObject || \ - kind_ == kBackgroundSerializedHeapObject); \ + CHECK(kind_ == kBackgroundSerializedHeapObject); \ return static_cast<Name##Data*>(this); \ } -HEAP_BROKER_OBJECT_LIST(DEFINE_AS) +HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS) #undef DEFINE_AS ObjectData* JSObjectData::GetInobjectField(int property_index) const { @@ -1850,7 +1489,8 @@ ObjectData* JSObjectData::elements() const { return elements_; } -void JSObjectData::SerializeElements(JSHeapBroker* broker) { +void JSObjectData::SerializeElements(JSHeapBroker* broker, + NotConcurrentInliningTag) { if (serialized_elements_) return; serialized_elements_ = true; @@ -1863,7 +1503,8 @@ void JSObjectData::SerializeElements(JSHeapBroker* broker) { DCHECK(elements_->IsFixedArrayBase()); } -void MapData::SerializeConstructor(JSHeapBroker* broker) { +void MapData::SerializeConstructor(JSHeapBroker* broker, + NotConcurrentInliningTag tag) { if (serialized_constructor_) return; serialized_constructor_ = true; @@ -1874,7 +1515,8 @@ void MapData::SerializeConstructor(JSHeapBroker* broker) { constructor_ = broker->GetOrCreateData(map->GetConstructor()); } -void MapData::SerializeBackPointer(JSHeapBroker* broker) { +void MapData::SerializeBackPointer(JSHeapBroker* broker, + NotConcurrentInliningTag tag) { if (serialized_backpointer_) return; serialized_backpointer_ = true; @@ -1885,7 +1527,8 @@ void MapData::SerializeBackPointer(JSHeapBroker* broker) { backpointer_ = broker->GetOrCreateData(map->GetBackPointer()); } -bool MapData::TrySerializePrototype(JSHeapBroker* broker) { +bool MapData::TrySerializePrototype(JSHeapBroker* broker, + NotConcurrentInliningTag tag) { if (serialized_prototype_) return true; TraceScope tracer(broker, this, "MapData::SerializePrototype"); @@ -1897,56 +1540,8 @@ bool MapData::TrySerializePrototype(JSHeapBroker* broker) { return true; } -void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) { - if (serialized_own_descriptors_) return; - serialized_own_descriptors_ = true; - - TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptors"); - Handle<Map> map = Handle<Map>::cast(object()); - - for (InternalIndex i : map->IterateOwnDescriptors()) { - SerializeOwnDescriptor(broker, i); - } -} - -bool MapData::TrySerializeOwnDescriptor(JSHeapBroker* broker, - InternalIndex descriptor_index) { - TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor"); - Handle<Map> map = Handle<Map>::cast(object()); - Isolate* isolate = broker->isolate(); - - if (instance_descriptors_ == nullptr) { - instance_descriptors_ = - 
broker->TryGetOrCreateData(map->instance_descriptors(kAcquireLoad)); - if (instance_descriptors_ == nullptr) return false; - } - - if (instance_descriptors()->should_access_heap()) { - // When accessing the fields concurrently, we still have to recurse on the - // owner map if it is different than the current map. This is because - // {instance_descriptors_} gets set on SerializeOwnDescriptor and otherwise - // we risk the field owner having a null {instance_descriptors_}. - Handle<DescriptorArray> descriptors = broker->CanonicalPersistentHandle( - map->instance_descriptors(kAcquireLoad)); - if (descriptors->GetDetails(descriptor_index).location() == kField) { - Handle<Map> owner = broker->CanonicalPersistentHandle( - map->FindFieldOwner(isolate, descriptor_index)); - if (!owner.equals(map)) { - ObjectData* data = broker->TryGetOrCreateData(owner); - if (data == nullptr) return false; - data->AsMap()->SerializeOwnDescriptor(broker, descriptor_index); - } - } - } else { - DescriptorArrayData* descriptors = - instance_descriptors()->AsDescriptorArray(); - descriptors->SerializeDescriptor(broker, map, descriptor_index); - } - - return true; -} - -void MapData::SerializeRootMap(JSHeapBroker* broker) { +void MapData::SerializeRootMap(JSHeapBroker* broker, + NotConcurrentInliningTag tag) { if (serialized_root_map_) return; serialized_root_map_ = true; @@ -1959,6 +1554,7 @@ void MapData::SerializeRootMap(JSHeapBroker* broker) { ObjectData* MapData::FindRootMap() const { return root_map_; } bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker, + NotConcurrentInliningTag tag, int max_depth) { if (serialized_as_boilerplate_) return true; // If serialization succeeds, we set this to true at the end. @@ -1996,10 +1592,6 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker, return false; } - if (!map()->should_access_heap()) { - map()->AsMap()->SerializeOwnDescriptors(broker); - } - // Check the in-object properties. 
inobject_fields_.clear(); Handle<DescriptorArray> descriptors( @@ -2019,7 +1611,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker, inobject_fields_.push_back(value_data); if (value_data->IsJSObject() && !value_data->should_access_heap()) { if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive( - broker, max_depth - 1)) + broker, tag, max_depth - 1)) return false; } } @@ -2039,7 +1631,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker, ObjectData* value_data = broker->GetOrCreateData(value); if (!value_data->should_access_heap()) { if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive( - broker, max_depth - 1)) { + broker, tag, max_depth - 1)) { return false; } } @@ -2052,30 +1644,15 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker, } if (IsJSArray() && !broker->is_concurrent_inlining()) { - AsJSArray()->Serialize(broker); + AsJSArray()->Serialize(broker, NotConcurrentInliningTag{broker}); } serialized_as_boilerplate_ = true; return true; } -#ifdef DEBUG -bool ObjectRef::IsNeverSerializedHeapObject() const { - return data_->kind() == ObjectDataKind::kNeverSerializedHeapObject; -} -#endif // DEBUG - bool ObjectRef::equals(const ObjectRef& other) const { -#ifdef DEBUG - if (broker()->mode() == JSHeapBroker::kSerialized && - data_->used_status == ObjectData::Usage::kUnused) { - data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; - } -#endif // DEBUG - // TODO(jgruber): Consider going back to reference-equality on data_ once - // ObjectData objects are guaranteed to be canonicalized (see also: - // ClearReconstructibleData). - return data_->object().is_identical_to(other.data_->object()); + return data_ == other.data_; } Isolate* ObjectRef::isolate() const { return broker()->isolate(); } @@ -2088,97 +1665,18 @@ ContextRef ContextRef::previous(size_t* depth) const { current = Context::cast(current.unchecked_previous()); (*depth)--; } - return MakeRef(broker(), current); + // The `previous` field is immutable after initialization and the + // context itself is read through an atomic load. + return MakeRefAssumeMemoryFence(broker(), current); } base::Optional<ObjectRef> ContextRef::get(int index) const { CHECK_LE(0, index); - if (index >= object()->length()) return {}; + // Length is immutable after initialization. 
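+ // A relaxed load therefore suffices: there is no later write to the
+ // length field that this read could race with.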
+ if (index >= object()->length(kRelaxedLoad)) return {}; return TryMakeRef(broker(), object()->get(index)); } -#ifdef DEBUG -void JSHeapBroker::PrintRefsAnalysis() const { - // Usage counts - size_t used_total = 0, unused_total = 0, identity_used_total = 0; - for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr; - ref = refs_->Next(ref)) { - switch (ref->value->used_status) { - case ObjectData::Usage::kUnused: - ++unused_total; - break; - case ObjectData::Usage::kOnlyIdentityUsed: - ++identity_used_total; - break; - case ObjectData::Usage::kDataUsed: - ++used_total; - break; - } - } - - // Ref types analysis - TRACE_BROKER_MEMORY( - this, "Refs: " << refs_->occupancy() << "; data used: " << used_total - << "; only identity used: " << identity_used_total - << "; unused: " << unused_total); - size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0; - size_t used[LAST_TYPE + 1] = {0}; - size_t unused[LAST_TYPE + 1] = {0}; - size_t identity_used[LAST_TYPE + 1] = {0}; - for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr; - ref = refs_->Next(ref)) { - if (ref->value->is_smi()) { - switch (ref->value->used_status) { - case ObjectData::Usage::kUnused: - ++unused_smis; - break; - case ObjectData::Usage::kOnlyIdentityUsed: - ++identity_used_smis; - break; - case ObjectData::Usage::kDataUsed: - ++used_smis; - break; - } - } else { - InstanceType instance_type; - if (ref->value->should_access_heap()) { - instance_type = Handle<HeapObject>::cast(ref->value->object()) - ->map() - .instance_type(); - } else { - instance_type = ref->value->AsHeapObject()->GetMapInstanceType(); - } - CHECK_LE(FIRST_TYPE, instance_type); - CHECK_LE(instance_type, LAST_TYPE); - switch (ref->value->used_status) { - case ObjectData::Usage::kUnused: - ++unused[instance_type]; - break; - case ObjectData::Usage::kOnlyIdentityUsed: - ++identity_used[instance_type]; - break; - case ObjectData::Usage::kDataUsed: - ++used[instance_type]; - break; - } - } - } - - TRACE_BROKER_MEMORY( - this, "Smis: " << used_smis + identity_used_smis + unused_smis - << "; data used: " << used_smis << "; only identity used: " - << identity_used_smis << "; unused: " << unused_smis); - for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; ++i) { - size_t total = used[i] + identity_used[i] + unused[i]; - if (total == 0) continue; - TRACE_BROKER_MEMORY( - this, InstanceType(i) << ": " << total << "; data used: " << used[i] - << "; only identity used: " << identity_used[i] - << "; unused: " << unused[i]); - } -} -#endif // DEBUG - void JSHeapBroker::InitializeAndStartSerializing() { TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing"); @@ -2194,175 +1692,67 @@ void JSHeapBroker::InitializeAndStartSerializing() { CollectArrayAndObjectPrototypes(); SetTargetNativeContextRef(target_native_context().object()); - target_native_context().Serialize(); - - Factory* const f = isolate()->factory(); if (!is_concurrent_inlining()) { + target_native_context().Serialize(NotConcurrentInliningTag{this}); + + Factory* const f = isolate()->factory(); ObjectData* data; data = GetOrCreateData(f->array_buffer_detaching_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->array_constructor_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->array_iterator_protector()); 
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->array_species_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->no_elements_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->promise_hook_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->promise_species_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->promise_then_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } data = GetOrCreateData(f->string_length_protector()); - if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this); + if (!data->should_access_heap()) { + data->AsPropertyCell()->Cache(this); + } + GetOrCreateData(f->many_closures_cell()); + GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, + ArgvMode::kStack, true)); + TRACE(this, "Finished serializing standard objects"); } - GetOrCreateData(f->many_closures_cell()); - GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, - ArgvMode::kStack, true)); - - TRACE(this, "Finished serializing standard objects"); } namespace { -template <RefSerializationKind Kind, class DataT, class ObjectT> -struct CreateDataFunctor { - bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object, - RefsMap::Entry** entry_out, ObjectData** object_data_out) { - USE(broker, refs, object, entry_out, object_data_out); - UNREACHABLE(); - } -}; - -template <class DataT, class ObjectT> -struct CreateDataFunctor<RefSerializationKind::kSerialized, DataT, ObjectT> { - bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object, - RefsMap::Entry** entry_out, ObjectData** object_data_out) { - if (broker->mode() == JSHeapBroker::kSerializing) { - RefsMap::Entry* entry = refs->LookupOrInsert(object.address()); - *object_data_out = broker->zone()->New<DataT>( - broker, &entry->value, Handle<ObjectT>::cast(object)); - *entry_out = entry; - return true; - } - return false; - } -}; - -template <class DataT, class ObjectT> -struct CreateDataFunctor<RefSerializationKind::kBackgroundSerialized, DataT, - ObjectT> { - bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object, - RefsMap::Entry** entry_out, ObjectData** object_data_out) { - if (broker->is_concurrent_inlining()) { - RefsMap::Entry* entry = refs->LookupOrInsert(object.address()); - *object_data_out = broker->zone()->New<DataT>( - broker, &entry->value, Handle<ObjectT>::cast(object), - kBackgroundSerializedHeapObject); - *entry_out = entry; - return true; - } else if (broker->mode() == JSHeapBroker::kSerializing) { - RefsMap::Entry* entry = refs->LookupOrInsert(object.address()); - *object_data_out = broker->zone()->New<DataT>( - broker, &entry->value, Handle<ObjectT>::cast(object), - ObjectDataKind::kSerializedHeapObject); - *entry_out 
= entry; - return true; - } - return false; +constexpr ObjectDataKind ObjectDataKindFor(RefSerializationKind kind) { + switch (kind) { + case RefSerializationKind::kBackgroundSerialized: + return kBackgroundSerializedHeapObject; + case RefSerializationKind::kNeverSerialized: + return kNeverSerializedHeapObject; } -}; - -template <class T> -bool NeverEverSerialize() { - return false; } -// This list is to help with the transition of kNeverSerialize types (which are -// currently still serialized if concurrent inlining is disabled) to actually -// be never serialized. It should be removed once all types have been migrated -// here. -#define NEVER_EVER_SERIALIZE(Type) \ - template <> \ - bool NeverEverSerialize<Type>() { \ - return true; \ - } - -NEVER_EVER_SERIALIZE(AccessorInfo) -NEVER_EVER_SERIALIZE(ArrayBoilerplateDescription) -NEVER_EVER_SERIALIZE(BytecodeArray) -NEVER_EVER_SERIALIZE(Cell) -NEVER_EVER_SERIALIZE(CallHandlerInfo) -NEVER_EVER_SERIALIZE(Context) -NEVER_EVER_SERIALIZE(FunctionTemplateInfo) -NEVER_EVER_SERIALIZE(InternalizedString) -NEVER_EVER_SERIALIZE(Name) -NEVER_EVER_SERIALIZE(NativeContext) -NEVER_EVER_SERIALIZE(ObjectBoilerplateDescription) -NEVER_EVER_SERIALIZE(RegExpBoilerplateDescription) -NEVER_EVER_SERIALIZE(SharedFunctionInfo) -NEVER_EVER_SERIALIZE(ScopeInfo) -NEVER_EVER_SERIALIZE(SourceTextModule) -NEVER_EVER_SERIALIZE(String) -NEVER_EVER_SERIALIZE(Symbol) -NEVER_EVER_SERIALIZE(TemplateObjectDescription) - -#undef NEVER_EVER_SERIALIZE - -template <class DataT, class ObjectT> -struct CreateDataFunctor<RefSerializationKind::kNeverSerialized, DataT, - ObjectT> { - bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object, - RefsMap::Entry** entry_out, ObjectData** object_data_out) { - // TODO(solanes, v8:10866): Remove the `(mode() == kSerializing)` case - // below when all classes skip serialization. Same for similar spots if we - // end up keeping them. - if (broker->is_concurrent_inlining() || NeverEverSerialize<ObjectT>()) { - RefsMap::Entry* entry = refs->LookupOrInsert(object.address()); - *object_data_out = broker->zone()->New<ObjectData>( - broker, &entry->value, object, kNeverSerializedHeapObject); - *entry_out = entry; - return true; - } else if (broker->mode() == JSHeapBroker::kSerializing) { - RefsMap::Entry* entry = refs->LookupOrInsert(object.address()); - *object_data_out = broker->zone()->New<DataT>( - broker, &entry->value, Handle<ObjectT>::cast(object)); - *entry_out = entry; - return true; - } - return false; - } -}; - } // namespace -void JSHeapBroker::ClearReconstructibleData() { - RefsMap::Entry* p = refs_->Start(); - while (p != nullptr) { - Address key = p->key; - ObjectData* value = p->value; - p = refs_->Next(p); - const auto kind = RefSerializationKindOf(value); - if (kind == RefSerializationKind::kNeverSerialized || - kind == RefSerializationKind::kBackgroundSerialized) { - if (value->IsMap() && - value->kind() == ObjectDataKind::kBackgroundSerializedHeapObject && - value->AsMap()->has_extra_serialized_data()) { - continue; - } - if (value->IsJSObject() && - value->kind() == ObjectDataKind::kBackgroundSerializedHeapObject && - value->AsJSObject()->has_extra_serialized_data()) { - continue; - } - // Can be reconstructed from the background thread. 
- CHECK_NOT_NULL(refs_->Remove(key)); - } - } -} - ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object, GetOrCreateDataFlags flags) { RefsMap::Entry* entry = refs_->Lookup(object.address()); @@ -2405,14 +1795,13 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object, kUnserializedReadOnlyHeapObject); } -#define CREATE_DATA(Name, Kind) \ - if (object->Is##Name()) { \ - CreateDataFunctor<Kind, Name##Data, Name> f; \ - if (!f(this, refs_, object, &entry, &object_data)) { \ - CHECK_WITH_MSG(!crash_on_error, #Name "Ref construction failed"); \ - return nullptr; \ - } \ - /* NOLINTNEXTLINE(readability/braces) */ \ +#define CREATE_DATA(Name) \ + if (object->Is##Name()) { \ + RefsMap::Entry* entry = refs_->LookupOrInsert(object.address()); \ + object_data = zone()->New<ref_traits<Name>::data_type>( \ + this, &entry->value, Handle<Name>::cast(object), \ + ObjectDataKindFor(ref_traits<Name>::ref_serialization_kind)); \ + /* NOLINTNEXTLINE(readability/braces) */ \ } else HEAP_BROKER_OBJECT_LIST(CREATE_DATA) #undef CREATE_DATA @@ -2425,7 +1814,7 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object, return object_data; } -#define DEFINE_IS_AND_AS(Name, ...) \ +#define DEFINE_IS_AND_AS(Name) \ bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \ Name##Ref ObjectRef::As##Name() const { \ DCHECK(Is##Name()); \ @@ -2450,26 +1839,39 @@ INSTANCE_TYPE_CHECKERS(DEF_TESTER) #undef DEF_TESTER base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const { - // TODO(jgruber): Consider supporting transitions other than for JSArray - // initial maps (e.g. by walking transitions concurrently and finding an - // existing map that fits). - const ElementsKind current_kind = elements_kind(); if (kind == current_kind) return *this; + base::Optional<Map> maybe_result = Map::TryAsElementsKind( + broker()->isolate(), object(), kind, ConcurrencyMode::kConcurrent); + +#ifdef DEBUG + // If starting from an initial JSArray map, TryAsElementsKind must succeed + // and return the expected transitioned JSArray map. NativeContextRef native_context = broker()->target_native_context(); - if (!equals(native_context.GetInitialJSArrayMap(current_kind))) return {}; + if (equals(native_context.GetInitialJSArrayMap(current_kind))) { + CHECK_EQ(Map::TryAsElementsKind(broker()->isolate(), object(), kind, + ConcurrencyMode::kConcurrent) + .value(), + *native_context.GetInitialJSArrayMap(kind).object()); + } +#endif // DEBUG - return native_context.GetInitialJSArrayMap(kind); + if (!maybe_result.has_value()) { + TRACE_BROKER_MISSING(broker(), "MapRef::AsElementsKind " << *this); + return {}; + } + return MakeRefAssumeMemoryFence(broker(), maybe_result.value()); } -void MapRef::SerializeForElementStore() { +void MapRef::SerializeForElementStore(NotConcurrentInliningTag tag) { if (data()->should_access_heap()) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsMap()->SerializeForElementStore(broker()); + data()->AsMap()->SerializeForElementStore(broker(), tag); } -void MapData::SerializeForElementStore(JSHeapBroker* broker) { +void MapData::SerializeForElementStore(JSHeapBroker* broker, + NotConcurrentInliningTag tag) { if (serialized_for_element_store_) return; serialized_for_element_store_ = true; @@ -2479,7 +1881,7 @@ void MapData::SerializeForElementStore(JSHeapBroker* broker) { // method should go away anyway once the compiler is fully concurrent. 
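In the rewritten CREATE_DATA above, both the data class and the ObjectDataKind fall out of ref_traits<Name> at compile time, which is what lets the three CreateDataFunctor specializations collapse into one macro. The following compilable miniature shows the dispatch shape; the two-entry list, the stand-in types, and CreateData returning a kind rather than allocating ObjectData are simplifications, not the V8 API.

#include <cstdlib>
#include <iostream>
#include <string>

enum class Kind { kBackgroundSerialized, kNeverSerialized };

struct Map {};
struct ScopeInfo {};

// One ref_traits specialization per entry in the object list.
template <class T>
struct ref_traits;
template <>
struct ref_traits<Map> {
  static constexpr Kind kind = Kind::kBackgroundSerialized;
};
template <>
struct ref_traits<ScopeInfo> {
  static constexpr Kind kind = Kind::kNeverSerialized;
};

struct Object {
  std::string type;
};
bool IsMap(const Object& o) { return o.type == "Map"; }
bool IsScopeInfo(const Object& o) { return o.type == "ScopeInfo"; }

#define OBJECT_LIST(V) V(Map) V(ScopeInfo)

Kind CreateData(const Object& object) {
  // Expands into an if / else-if chain; each branch reads the traits
  // instead of dispatching through a per-kind functor.
#define CREATE_DATA(Name)          \
  if (Is##Name(object)) {          \
    return ref_traits<Name>::kind; \
  } else /* chain into the next expansion */
  OBJECT_LIST(CREATE_DATA)
#undef CREATE_DATA
  {
    std::abort();  // no entry matched; analogous to UNREACHABLE()
  }
}

int main() {
  std::cout << (CreateData({"Map"}) == Kind::kBackgroundSerialized)
            << (CreateData({"ScopeInfo"}) == Kind::kNeverSerialized) << "\n";
}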
MapRef map(broker, this); do { - map.SerializePrototype(); + map.SerializePrototype(tag); map = map.prototype().value().map(); } while (map.IsJSObjectMap() && map.is_stable() && IsFastElementsKind(map.elements_kind())); @@ -2514,10 +1916,29 @@ bool MapRef::supports_fast_array_resize() const { return data()->AsMap()->supports_fast_array_resize(); } -int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const { +namespace { + +void RecordConsistentJSFunctionViewDependencyIfNeeded( + const JSHeapBroker* broker, const JSFunctionRef& ref, JSFunctionData* data, + JSFunctionData::UsedField used_field) { + if (!broker->is_concurrent_inlining()) return; + if (!data->has_any_used_field()) { + // Deduplicate dependencies. + broker->dependencies()->DependOnConsistentJSFunctionView(ref); + } + data->set_used_field(used_field); +} + +} // namespace + +int JSFunctionRef::InitialMapInstanceSizeWithMinSlack( + CompilationDependencies* dependencies) const { if (data_->should_access_heap()) { return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate()); } + RecordConsistentJSFunctionViewDependencyIfNeeded( + broker(), *this, data()->AsJSFunction(), + JSFunctionData::kInitialMapInstanceSizeWithMinSlack); return data()->AsJSFunction()->initial_map_instance_size_with_min_slack(); } @@ -2549,19 +1970,12 @@ OddballType MapRef::oddball_type() const { } FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const { - if (data_->should_access_heap()) { - // These should all be available because we request the cell for each - // CreateClosure bytecode. - return MakeRef(broker(), object()->closure_feedback_cell(index)); - } - - return FeedbackCellRef( - broker(), - data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index)); + return MakeRefAssumeMemoryFence(broker(), + object()->closure_feedback_cell(index)); } base::Optional<ObjectRef> JSObjectRef::raw_properties_or_hash() const { - if (data_->should_access_heap()) { + if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { return TryMakeRef(broker(), object()->raw_properties_or_hash()); } return ObjectRef(broker(), data()->AsJSObject()->raw_properties_or_hash()); @@ -2571,53 +1985,57 @@ base::Optional<ObjectRef> JSObjectRef::RawInobjectPropertyAt( FieldIndex index) const { CHECK(index.is_inobject()); if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { - DisallowGarbageCollection no_gc; - Map current_map = object()->map(kAcquireLoad); - - // If the map changed in some prior GC epoch, our {index} could be - // outside the valid bounds of the cached map. - if (*map().object() != current_map) { - TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this); - return {}; - } + Handle<Object> value; + { + DisallowGarbageCollection no_gc; + Map current_map = object()->map(kAcquireLoad); + + // If the map changed in some prior GC epoch, our {index} could be + // outside the valid bounds of the cached map. 
+ if (*map().object() != current_map) { + TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this); + return {}; + } - base::Optional<Object> value = - object()->RawInobjectPropertyAt(current_map, index); - if (!value.has_value()) { - TRACE_BROKER_MISSING(broker(), - "Unable to safely read property in " << *this); - return {}; + base::Optional<Object> maybe_value = + object()->RawInobjectPropertyAt(current_map, index); + if (!maybe_value.has_value()) { + TRACE_BROKER_MISSING(broker(), + "Unable to safely read property in " << *this); + return {}; + } + value = broker()->CanonicalPersistentHandle(maybe_value.value()); } - return TryMakeRef(broker(), value.value()); + return TryMakeRef(broker(), value); } JSObjectData* object_data = data()->AsJSObject(); return ObjectRef(broker(), object_data->GetInobjectField(index.property_index())); } -void JSObjectRef::SerializeAsBoilerplateRecursive() { +void JSObjectRef::SerializeAsBoilerplateRecursive( + NotConcurrentInliningTag tag) { if (data_->should_access_heap()) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsJSObject()->SerializeAsBoilerplateRecursive(broker()); + data()->AsJSObject()->SerializeAsBoilerplateRecursive(broker(), tag); } -void AllocationSiteRef::SerializeRecursive() { - if (!data_->should_access_heap()) { - data()->AsAllocationSite()->Serialize(broker()); - } - +void AllocationSiteRef::SerializeRecursive(NotConcurrentInliningTag tag) { + DCHECK(data_->should_access_heap()); + if (broker()->mode() == JSHeapBroker::kDisabled) return; + DCHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); if (boilerplate().has_value()) { - boilerplate()->SerializeAsBoilerplateRecursive(); + boilerplate()->SerializeAsBoilerplateRecursive(tag); } if (nested_site().IsAllocationSite()) { - nested_site().AsAllocationSite().SerializeRecursive(); + nested_site().AsAllocationSite().SerializeRecursive(tag); } } -void JSObjectRef::SerializeElements() { +void JSObjectRef::SerializeElements(NotConcurrentInliningTag tag) { if (data_->should_access_heap()) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsJSObject()->SerializeElements(broker()); + data()->AsJSObject()->SerializeElements(broker(), tag); } bool JSObjectRef::IsElementsTenured(const FixedArrayBaseRef& elements) { @@ -2629,14 +2047,7 @@ bool JSObjectRef::IsElementsTenured(const FixedArrayBaseRef& elements) { FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const { CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors()); - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { - FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index); - DCHECK(result.is_inobject()); - return result; - } - DescriptorArrayData* descriptors = - data()->AsMap()->instance_descriptors()->AsDescriptorArray(); - FieldIndex result = descriptors->GetFieldIndexFor(descriptor_index); + FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index); DCHECK(result.is_inobject()); return result; } @@ -2671,17 +2082,12 @@ bool MapRef::IsPrimitiveMap() const { MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const { CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors()); - CHECK(!is_deprecated()); - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { - // TODO(solanes, v8:7790): Consider caching the result of the field owner on - // the descriptor array. It would be useful for same map as well as any - // other map sharing that descriptor array. 
- return MapRef(broker(), broker()->GetOrCreateData(object()->FindFieldOwner( - broker()->isolate(), descriptor_index))); - } - DescriptorArrayData* descriptors = - data()->AsMap()->instance_descriptors()->AsDescriptorArray(); - return MapRef(broker(), descriptors->FindFieldOwner(descriptor_index)); + // TODO(solanes, v8:7790): Consider caching the result of the field owner on + // the descriptor array. It would be useful for same map as well as any + // other map sharing that descriptor array. + return MakeRefAssumeMemoryFence( + broker(), + object()->FindFieldOwner(broker()->isolate(), descriptor_index)); } ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const { @@ -2691,11 +2097,24 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const { base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined( uint32_t index, SerializationPolicy policy) const { - // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for - // concurrent inlining when we have the infrastructure to safely do so. - if (broker()->is_concurrent_inlining()) return base::nullopt; - CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject); - return GetOwnElementFromHeap(broker(), object(), index, true); + if (broker()->is_concurrent_inlining()) { + String maybe_char; + auto result = ConcurrentLookupIterator::TryGetOwnChar( + &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(), + index); + + if (result == ConcurrentLookupIterator::kGaveUp) { + TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on " + << *this << " at index " << index); + return {}; + } + + DCHECK_EQ(result, ConcurrentLookupIterator::kPresent); + return TryMakeRef(broker(), maybe_char); + } + + CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject); + return GetOwnElementFromHeap(broker(), object(), index, true); } bool StringRef::SupportedStringKind() const { @@ -2749,7 +2168,18 @@ int ArrayBoilerplateDescriptionRef::constants_elements_length() const { ObjectRef FixedArrayRef::get(int i) const { return TryGet(i).value(); } base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const { - return TryMakeRef(broker(), object()->get(i, kRelaxedLoad)); + Handle<Object> value; + { + DisallowGarbageCollection no_gc; + CHECK_GE(i, 0); + value = broker()->CanonicalPersistentHandle(object()->get(i, kAcquireLoad)); + if (i >= object()->length(kAcquireLoad)) { + // Right-trimming happened. + CHECK_LT(i, length()); + return {}; + } + } + return TryMakeRef(broker(), value); } Float64 FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(int i) const { @@ -2805,7 +2235,7 @@ int BytecodeArrayRef::handler_table_size() const { } // Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for -// kSerialized only for methods that we identified to be safe. +// kBackgroundSerialized only for methods that we identified to be safe. #define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \ return MakeRef(broker(), result::cast(object()->name())); \ @@ -2816,9 +2246,9 @@ int BytecodeArrayRef::handler_table_size() const { } // Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if -// broker()->is_concurrent_inlining() is true (even for kSerialized). This is -// because we identified the method to be safe to use direct heap access, but -// the holder##Data class still needs to be serialized. +// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized). 
+// This is because we identified the method to be safe to use direct heap +// access, but the holder##Data class still needs to be serialized. #define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \ result##Ref holder##Ref::name() const { \ IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \ @@ -2835,19 +2265,17 @@ int BytecodeArrayRef::handler_table_size() const { return BitField::decode(ObjectRef::data()->As##holder()->field()); \ } -#define HEAP_ACCESSOR(holder, result, name) \ - result##Ref holder##Ref::name() const { \ - return MakeRef(broker(), result::cast(object()->name())); \ - } - #define HEAP_ACCESSOR_C(holder, result, name) \ result holder##Ref::name() const { return object()->name(); } -BIMODAL_ACCESSOR(AllocationSite, Object, nested_site) -BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall) -BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral) -BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind) -BIMODAL_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType) +ObjectRef AllocationSiteRef::nested_site() const { + return MakeRefAssumeMemoryFence(broker(), object()->nested_site()); +} + +HEAP_ACCESSOR_C(AllocationSite, bool, CanInlineCall) +HEAP_ACCESSOR_C(AllocationSite, bool, PointsToLiteral) +HEAP_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind) +HEAP_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType) BIMODAL_ACCESSOR_C(BigInt, uint64_t, AsUint64) @@ -2862,18 +2290,12 @@ BytecodeArrayRef::incoming_new_target_or_generator_register() const { return object()->incoming_new_target_or_generator_register(); } -BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count) - BIMODAL_ACCESSOR(HeapObject, Map, map) -BIMODAL_ACCESSOR_C(HeapNumber, double, value) +HEAP_ACCESSOR_C(HeapNumber, double, value) uint64_t HeapNumberRef::value_as_bits() const { - if (data_->should_access_heap()) { - return object()->value_as_bits(kRelaxedLoad); - } - - return ObjectRef::data()->AsHeapNumber()->value_as_bits(); + return object()->value_as_bits(kRelaxedLoad); } base::Optional<JSReceiverRef> JSBoundFunctionRef::bound_target_function() @@ -2906,18 +2328,6 @@ FixedArrayRef JSBoundFunctionRef::bound_arguments() const { // Immutable after initialization. 
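Accessors such as nested_site() and value_as_bits() above now read the heap directly. The pattern leans on the invariant behind MakeRefAssumeMemoryFence: the main thread finishes initializing an object before publishing its pointer with release semantics, so an acquire load on the compiler thread makes the subsequent plain field reads safe. A standard-C++ sketch of that release/acquire pairing follows; it models the idea only and is not the V8 implementation.

#include <atomic>
#include <cassert>
#include <thread>

struct HeapNumber {
  double value;
};

std::atomic<HeapNumber*> slot{nullptr};

void MainThread() {
  // Initialize the object completely, then publish it with release order.
  HeapNumber* n = new HeapNumber{3.5};
  slot.store(n, std::memory_order_release);
}

void CompilerThread() {
  // The acquire load pairs with the release store, so once the pointer is
  // visible, so is everything written before it was published.
  HeapNumber* n = nullptr;
  while ((n = slot.load(std::memory_order_acquire)) == nullptr) {
  }
  assert(n->value == 3.5);
  delete n;
}

int main() {
  std::thread a(MainThread);
  std::thread b(CompilerThread);
  a.join();
  b.join();
}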
BIMODAL_ACCESSOR_WITH_FLAG_C(JSDataView, size_t, byte_length) -BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector) -BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map) -BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype) -BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup) -BIMODAL_ACCESSOR(JSFunction, Context, context) -BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context) -BIMODAL_ACCESSOR(JSFunction, Map, initial_map) -BIMODAL_ACCESSOR(JSFunction, Object, prototype) -BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared) -BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell) -BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector) - BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind, Map::Bits2::ElementsKindBits) BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map, @@ -3002,38 +2412,47 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType( DCHECK(has_call_code()); - DisallowGarbageCollection no_gc; - HeapObject signature = object()->signature(); - if (signature.IsUndefined()) { - return HolderLookupResult(CallOptimization::kHolderIsReceiver); - } - auto expected_receiver_type = FunctionTemplateInfo::cast(signature); - if (expected_receiver_type.IsTemplateFor(*receiver_map.object())) { - return HolderLookupResult(CallOptimization::kHolderIsReceiver); + Handle<FunctionTemplateInfo> expected_receiver_type; + { + DisallowGarbageCollection no_gc; + HeapObject signature = object()->signature(); + if (signature.IsUndefined()) { + return HolderLookupResult(CallOptimization::kHolderIsReceiver); + } + expected_receiver_type = broker()->CanonicalPersistentHandle( + FunctionTemplateInfo::cast(signature)); + if (expected_receiver_type->IsTemplateFor(*receiver_map.object())) { + return HolderLookupResult(CallOptimization::kHolderIsReceiver); + } + + if (!receiver_map.IsJSGlobalProxyMap()) return not_found; } - if (!receiver_map.IsJSGlobalProxyMap()) return not_found; if (policy == SerializationPolicy::kSerializeIfNeeded) { - receiver_map.SerializePrototype(); + receiver_map.SerializePrototype(NotConcurrentInliningTag{broker()}); } base::Optional<HeapObjectRef> prototype = receiver_map.prototype(); if (!prototype.has_value()) return not_found; if (prototype->IsNull()) return not_found; - JSObject raw_prototype = JSObject::cast(*prototype->object()); - if (!expected_receiver_type.IsTemplateFor(raw_prototype.map())) { + if (!expected_receiver_type->IsTemplateFor(prototype->object()->map())) { return not_found; } return HolderLookupResult(CallOptimization::kHolderFound, prototype->AsJSObject()); } -HEAP_ACCESSOR(CallHandlerInfo, Object, data) +ObjectRef CallHandlerInfoRef::data() const { + return MakeRefAssumeMemoryFence(broker(), object()->data()); +} HEAP_ACCESSOR_C(ScopeInfo, int, ContextLength) HEAP_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot) HEAP_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo) -HEAP_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo) + +ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const { + return MakeRefAssumeMemoryFence(broker(), object()->OuterScopeInfo()); +} HEAP_ACCESSOR_C(SharedFunctionInfo, Builtin, builtin_id) @@ -3062,14 +2481,11 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability() } base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const { - if (data_->should_access_heap()) { - // Note that we use the synchronized accessor. 
- Object value = object()->value(kAcquireLoad); - if (!value.IsFeedbackVector()) return base::nullopt; - return TryMakeRef(broker(), FeedbackVector::cast(value)); - } - ObjectData* vector = ObjectRef::data()->AsFeedbackCell()->value(); - return FeedbackVectorRef(broker(), vector->AsFeedbackVector()); + DisallowGarbageCollection no_gc; + DCHECK(data_->should_access_heap()); + Object value = object()->value(kAcquireLoad); + if (!value.IsFeedbackVector()) return base::nullopt; + return MakeRefAssumeMemoryFence(broker(), FeedbackVector::cast(value)); } base::Optional<ObjectRef> MapRef::GetStrongValue( @@ -3079,13 +2495,9 @@ base::Optional<ObjectRef> MapRef::GetStrongValue( } DescriptorArrayRef MapRef::instance_descriptors() const { - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { - return MakeRefAssumeMemoryFence( - broker(), - object()->instance_descriptors(broker()->isolate(), kAcquireLoad)); - } - - return DescriptorArrayRef(broker(), data()->AsMap()->instance_descriptors()); + return MakeRefAssumeMemoryFence( + broker(), + object()->instance_descriptors(broker()->isolate(), kAcquireLoad)); } base::Optional<HeapObjectRef> MapRef::prototype() const { @@ -3101,10 +2513,10 @@ base::Optional<HeapObjectRef> MapRef::prototype() const { return HeapObjectRef(broker(), prototype_data); } -void MapRef::SerializeRootMap() { - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) return; +void MapRef::SerializeRootMap(NotConcurrentInliningTag tag) { + if (data_->should_access_heap()) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsMap()->SerializeRootMap(broker()); + data()->AsMap()->SerializeRootMap(broker(), tag); } // TODO(solanes, v8:7790): Remove base::Optional from the return type when @@ -3230,7 +2642,7 @@ ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const { bool StringRef::IsSeqString() const { return object()->IsSeqString(); } -void NativeContextRef::Serialize() { +void NativeContextRef::Serialize(NotConcurrentInliningTag tag) { // TODO(jgruber): Disable visitation if should_access_heap() once all // NativeContext element refs can be created on background threads. 
Until // then, we *must* iterate them and create refs at serialization-time (even @@ -3241,10 +2653,7 @@ void NativeContextRef::Serialize() { ObjectData* member_data = broker()->GetOrCreateData(object()->name()); \ if (member_data->IsMap() && !InstanceTypeChecker::IsContext( \ member_data->AsMap()->instance_type())) { \ - member_data->AsMap()->SerializeConstructor(broker()); \ - } \ - if (member_data->IsJSFunction()) { \ - member_data->AsJSFunction()->Serialize(broker()); \ + member_data->AsMap()->SerializeConstructor(broker(), tag); \ } \ } BROKER_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER) @@ -3254,7 +2663,7 @@ void NativeContextRef::Serialize() { i <= Context::LAST_FUNCTION_MAP_INDEX; i++) { MapData* member_data = broker()->GetOrCreateData(object()->get(i))->AsMap(); if (!InstanceTypeChecker::IsContext(member_data->instance_type())) { - member_data->SerializeConstructor(broker()); + member_data->SerializeConstructor(broker(), tag); } } } @@ -3599,12 +3008,8 @@ HeapObjectType HeapObjectRef::GetHeapObjectType() const { base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const { if (!PointsToLiteral()) return {}; - if (data_->should_access_heap()) { - return TryMakeRef(broker(), object()->boilerplate(kAcquireLoad)); - } - ObjectData* boilerplate = data()->AsAllocationSite()->boilerplate(); - if (boilerplate == nullptr) return {}; - return JSObjectRef(broker(), boilerplate); + DCHECK(data_->should_access_heap()); + return TryMakeRef(broker(), object()->boilerplate(kAcquireLoad)); } base::Optional<FixedArrayBaseRef> JSObjectRef::elements( @@ -3627,81 +3032,45 @@ int FixedArrayBaseRef::length() const { PropertyDetails DescriptorArrayRef::GetPropertyDetails( InternalIndex descriptor_index) const { - if (data_->should_access_heap()) { - return object()->GetDetails(descriptor_index); - } - return data()->AsDescriptorArray()->GetPropertyDetails(descriptor_index); + return object()->GetDetails(descriptor_index); } NameRef DescriptorArrayRef::GetPropertyKey( InternalIndex descriptor_index) const { - if (data_->should_access_heap()) { - NameRef result = MakeRef(broker(), object()->GetKey(descriptor_index)); - CHECK(result.IsUniqueName()); - return result; - } - return NameRef(broker(), - data()->AsDescriptorArray()->GetPropertyKey(descriptor_index)); + NameRef result = MakeRef(broker(), object()->GetKey(descriptor_index)); + CHECK(result.IsUniqueName()); + return result; } ObjectRef DescriptorArrayRef::GetFieldType( InternalIndex descriptor_index) const { - if (data_->should_access_heap()) { - return MakeRef<Object>(broker(), object()->GetFieldType(descriptor_index)); - } - return ObjectRef(broker(), - data()->AsDescriptorArray()->GetFieldType(descriptor_index)); + return MakeRef(broker(), + Object::cast(object()->GetFieldType(descriptor_index))); } base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue( InternalIndex descriptor_index) const { - if (data_->should_access_heap()) { - HeapObject heap_object; - if (!object() - ->GetValue(descriptor_index) - .GetHeapObjectIfStrong(&heap_object)) { - return {}; - } - // Since the descriptors in the descriptor array can be changed in-place - // via DescriptorArray::Replace, we might get a value that we haven't seen - // before. 
- return TryMakeRef(broker(), heap_object); + HeapObject heap_object; + if (!object() + ->GetValue(descriptor_index) + .GetHeapObjectIfStrong(&heap_object)) { + return {}; } - ObjectData* value = - data()->AsDescriptorArray()->GetStrongValue(descriptor_index); - if (!value) return base::nullopt; - return ObjectRef(broker(), value); + // Since the descriptors in the descriptor array can be changed in-place + // via DescriptorArray::Replace, we might get a value that we haven't seen + // before. + return TryMakeRef(broker(), heap_object); } base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info() const { - if (value()) { - FeedbackVectorRef vector = *value(); - if (vector.serialized()) { - return vector.shared_function_info(); - } - } - return base::nullopt; -} - -void FeedbackVectorRef::Serialize() { - if (data_->should_access_heap()) return; - CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsFeedbackVector()->Serialize(broker()); -} - -bool FeedbackVectorRef::serialized() const { - if (data_->should_access_heap()) return true; - return data()->AsFeedbackVector()->serialized(); + base::Optional<FeedbackVectorRef> feedback_vector = value(); + if (!feedback_vector.has_value()) return {}; + return feedback_vector->shared_function_info(); } SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const { - if (data_->should_access_heap()) { - return MakeRef(broker(), object()->shared_function_info()); - } - - return SharedFunctionInfoRef( - broker(), data()->AsFeedbackVector()->shared_function_info()); + return MakeRef(broker(), object()->shared_function_info()); } bool NameRef::IsUniqueName() const { @@ -3709,7 +3078,7 @@ bool NameRef::IsUniqueName() const { return IsInternalizedString() || IsSymbol(); } -void RegExpBoilerplateDescriptionRef::Serialize() { +void RegExpBoilerplateDescriptionRef::Serialize(NotConcurrentInliningTag) { // TODO(jgruber,v8:7790): Remove once member types are also never serialized. // Until then, we have to call these functions once on the main thread to // trigger serialization. @@ -3717,26 +3086,16 @@ void RegExpBoilerplateDescriptionRef::Serialize() { } Handle<Object> ObjectRef::object() const { -#ifdef DEBUG - if (broker()->mode() == JSHeapBroker::kSerialized && - data_->used_status == ObjectData::Usage::kUnused) { - data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; - } -#endif // DEBUG return data_->object(); } #ifdef DEBUG -#define DEF_OBJECT_GETTER(T, ...) \ +#define DEF_OBJECT_GETTER(T) \ Handle<T> T##Ref::object() const { \ - if (broker()->mode() == JSHeapBroker::kSerialized && \ - data_->used_status == ObjectData::Usage::kUnused) { \ - data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; \ - } \ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \ } #else -#define DEF_OBJECT_GETTER(T, ...) 
\ +#define DEF_OBJECT_GETTER(T) \ Handle<T> T##Ref::object() const { \ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \ } @@ -3750,66 +3109,98 @@ JSHeapBroker* ObjectRef::broker() const { return broker_; } ObjectData* ObjectRef::data() const { switch (broker()->mode()) { case JSHeapBroker::kDisabled: - CHECK_NE(data_->kind(), kSerializedHeapObject); return data_; case JSHeapBroker::kSerializing: CHECK_NE(data_->kind(), kUnserializedHeapObject); return data_; case JSHeapBroker::kSerialized: -#ifdef DEBUG - data_->used_status = ObjectData::Usage::kDataUsed; -#endif // DEBUG + case JSHeapBroker::kRetired: CHECK_NE(data_->kind(), kUnserializedHeapObject); return data_; - case JSHeapBroker::kRetired: - UNREACHABLE(); } } -Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker, - const char* function, int line) { - TRACE_MISSING(broker, "data in function " << function << " at line " << line); - return AdvancedReducer::NoChange(); +template <class T> +typename TinyRef<T>::RefType TinyRef<T>::AsRef(JSHeapBroker* broker) const { + if (data_->kind() == kUnserializedHeapObject && + broker->mode() != JSHeapBroker::kDisabled) { + // Gotta reconstruct to avoid returning a stale unserialized ref. + return MakeRefAssumeMemoryFence<T>(broker, + Handle<T>::cast(data_->object())); + } + return TryMakeRef<T>(broker, data_).value(); } -void JSFunctionRef::Serialize() { - if (data_->should_access_heap()) return; - CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsJSFunction()->Serialize(broker()); +template <class T> +Handle<T> TinyRef<T>::object() const { + return Handle<T>::cast(data_->object()); } -void JSFunctionRef::SerializeCodeAndFeedback() { - if (data_->should_access_heap()) return; - CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsJSFunction()->SerializeCodeAndFeedback(broker()); +#define V(Name) \ + template class TinyRef<Name>; \ + /* TinyRef should contain only one pointer. 
*/ \ + STATIC_ASSERT(sizeof(TinyRef<Name>) == kSystemPointerSize); +HEAP_BROKER_OBJECT_LIST(V) +#undef V + +Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker, + const char* function, int line) { + TRACE_MISSING(broker, "data in function " << function << " at line " << line); + return AdvancedReducer::NoChange(); } -bool JSBoundFunctionRef::Serialize() { - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { +bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) { + if (data_->should_access_heap()) { return true; } CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - return data()->AsJSBoundFunction()->Serialize(broker()); -} + return data()->AsJSBoundFunction()->Serialize(broker(), tag); +} + +#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \ + Result##Ref JSFunctionRef::Name(CompilationDependencies* dependencies) \ + const { \ + IF_ACCESS_FROM_HEAP(Result, Name); \ + RecordConsistentJSFunctionViewDependencyIfNeeded( \ + broker(), *this, data()->AsJSFunction(), UsedField); \ + return Result##Ref(broker(), data()->AsJSFunction()->Name()); \ + } + +#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(Result, Name, UsedField) \ + Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \ + IF_ACCESS_FROM_HEAP_C(Name); \ + RecordConsistentJSFunctionViewDependencyIfNeeded( \ + broker(), *this, data()->AsJSFunction(), UsedField); \ + return data()->AsJSFunction()->Name(); \ + } + +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_feedback_vector, + JSFunctionData::kHasFeedbackVector) +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_initial_map, + JSFunctionData::kHasInitialMap) +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_instance_prototype, + JSFunctionData::kHasInstancePrototype) +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C( + bool, PrototypeRequiresRuntimeLookup, + JSFunctionData::kPrototypeRequiresRuntimeLookup) +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Map, initial_map, + JSFunctionData::kInitialMap) +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Object, instance_prototype, + JSFunctionData::kInstancePrototype) +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackCell, raw_feedback_cell, + JSFunctionData::kFeedbackCell) +JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackVector, feedback_vector, + JSFunctionData::kFeedbackVector) -bool JSFunctionRef::serialized() const { - if (data_->should_access_heap()) return true; - if (data_->AsJSFunction()->serialized()) return true; - TRACE_BROKER_MISSING(broker(), "data for JSFunction " << this); - return false; -} +BIMODAL_ACCESSOR(JSFunction, Context, context) +BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context) +BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared) -bool JSFunctionRef::serialized_code_and_feedback() const { - if (data_->should_access_heap()) return true; - return data()->AsJSFunction()->serialized_code_and_feedback(); -} +#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP +#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C CodeRef JSFunctionRef::code() const { - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { - return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad)); - } - - return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code()); + return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad)); } base::Optional<FunctionTemplateInfoRef> @@ -3824,16 +3215,12 @@ int SharedFunctionInfoRef::context_header_size() const { } ScopeInfoRef SharedFunctionInfoRef::scope_info() const { - return MakeRef(broker(), 
object()->scope_info()); + return MakeRefAssumeMemoryFence(broker(), object()->scope_info(kAcquireLoad)); } -void JSObjectRef::SerializeObjectCreateMap() { - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { - return; - } - CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info, - broker()->mode() == JSHeapBroker::kSerializing); - data()->AsJSObject()->SerializeObjectCreateMap(broker()); +void JSObjectRef::SerializeObjectCreateMap(NotConcurrentInliningTag tag) { + if (data_->should_access_heap()) return; + data()->AsJSObject()->SerializeObjectCreateMap(broker(), tag); } base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const { @@ -3864,55 +3251,30 @@ base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const { return MapRef(broker(), map_data->AsMap()); } -bool MapRef::TrySerializeOwnDescriptor(InternalIndex descriptor_index) { - CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors()); - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { - return true; - } - CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info, - broker()->mode() == JSHeapBroker::kSerializing); - return data()->AsMap()->TrySerializeOwnDescriptor(broker(), descriptor_index); -} - -void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) { - CHECK(TrySerializeOwnDescriptor(descriptor_index)); +void MapRef::SerializeBackPointer(NotConcurrentInliningTag tag) { + if (data_->should_access_heap()) return; + CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + data()->AsMap()->SerializeBackPointer(broker(), tag); } -bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const { - CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors()); +bool MapRef::TrySerializePrototype(NotConcurrentInliningTag tag) { if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { return true; } - ObjectData* maybe_desc_array_data = data()->AsMap()->instance_descriptors(); - if (!maybe_desc_array_data) return false; - if (maybe_desc_array_data->should_access_heap()) return true; - DescriptorArrayData* desc_array_data = - maybe_desc_array_data->AsDescriptorArray(); - return desc_array_data->serialized_descriptor(descriptor_index); -} - -void MapRef::SerializeBackPointer() { - if (data_->should_access_heap() || broker()->is_concurrent_inlining()) return; - CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info, - broker()->mode() == JSHeapBroker::kSerializing); - data()->AsMap()->SerializeBackPointer(broker()); + CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + return data()->AsMap()->TrySerializePrototype(broker(), tag); } -bool MapRef::TrySerializePrototype() { - if (data_->should_access_heap()) return true; - CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info, - broker()->mode() == JSHeapBroker::kSerializing); - return data()->AsMap()->TrySerializePrototype(broker()); +void MapRef::SerializePrototype(NotConcurrentInliningTag tag) { + CHECK(TrySerializePrototype(tag)); } -void MapRef::SerializePrototype() { CHECK(TrySerializePrototype()); } - -void JSTypedArrayRef::Serialize() { +void JSTypedArrayRef::Serialize(NotConcurrentInliningTag tag) { if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { // Nothing to do. 
} else { CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsJSTypedArray()->Serialize(broker()); + data()->AsJSTypedArray()->Serialize(broker(), tag); } } @@ -3924,14 +3286,14 @@ bool JSTypedArrayRef::serialized() const { return false; } -bool PropertyCellRef::Serialize() const { +bool PropertyCellRef::Cache() const { if (data_->should_access_heap()) return true; CHECK(broker()->mode() == JSHeapBroker::kSerializing || broker()->mode() == JSHeapBroker::kSerialized); - return data()->AsPropertyCell()->Serialize(broker()); + return data()->AsPropertyCell()->Cache(broker()); } -void FunctionTemplateInfoRef::SerializeCallCode() { +void FunctionTemplateInfoRef::SerializeCallCode(NotConcurrentInliningTag tag) { CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); // CallHandlerInfo::data may still hold a serialized heap object, so we // have to make the broker aware of it. @@ -3973,17 +3335,13 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) { } unsigned CodeRef::GetInlinedBytecodeSize() const { - if (data_->should_access_heap()) { - unsigned value = object()->inlined_bytecode_size(); - if (value > 0) { - // Don't report inlined bytecode size if the code object was already - // deoptimized. - value = object()->marked_for_deoptimization() ? 0 : value; - } - return value; + unsigned value = object()->inlined_bytecode_size(); + if (value > 0) { + // Don't report inlined bytecode size if the code object was already + // deoptimized. + value = object()->marked_for_deoptimization() ? 0 : value; } - - return ObjectRef::data()->AsCode()->inlined_bytecode_size(); + return value; } #undef BIMODAL_ACCESSOR @@ -3992,7 +3350,6 @@ unsigned CodeRef::GetInlinedBytecodeSize() const { #undef BIMODAL_ACCESSOR_WITH_FLAG #undef BIMODAL_ACCESSOR_WITH_FLAG_B #undef BIMODAL_ACCESSOR_WITH_FLAG_C -#undef HEAP_ACCESSOR #undef HEAP_ACCESSOR_C #undef IF_ACCESS_FROM_HEAP #undef IF_ACCESS_FROM_HEAP_C diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h index b72fac53f5..d580671f6d 100644 --- a/deps/v8/src/compiler/heap-refs.h +++ b/deps/v8/src/compiler/heap-refs.h @@ -57,6 +57,13 @@ inline bool IsAnyStore(AccessMode mode) { enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded }; +// Clarifies in function signatures that a method may only be called when +// concurrent inlining is disabled. +class NotConcurrentInliningTag final { + public: + explicit NotConcurrentInliningTag(JSHeapBroker* broker); +}; + enum class OddballType : uint8_t { kNone, // Not an Oddball. kBoolean, // True or False. @@ -68,70 +75,73 @@ enum class OddballType : uint8_t { }; enum class RefSerializationKind { - // Will skip serialization when --concurrent-inlining is on. Otherwise, they - // might get serialized. (The cake is a lie.) + // Skips serialization. kNeverSerialized, // Can be serialized on demand from the background thread. kBackgroundSerialized, - kSerialized, }; // This list is sorted such that subtypes appear before their supertypes. // DO NOT VIOLATE THIS PROPERTY! 
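NotConcurrentInliningTag, declared above, is a tag parameter: a value meant to be obtainable only while concurrent inlining is off, so every Serialize* signature taking one documents its calling constraint in the type system. A simplified stand-in follows; the assert in the constructor is an assumption about what the real constructor checks, since the patch shows only the declaration.

#include <cassert>

struct Broker {
  bool concurrent_inlining = false;
};

// Tag type: obtaining one checks the precondition once, at the call site,
// and the parameter makes the constraint visible in every signature.
class NotConcurrentInliningTag {
 public:
  explicit NotConcurrentInliningTag(Broker* broker) {
    assert(!broker->concurrent_inlining);  // assumed check, see above
  }
};

// Callers cannot reach this function without first producing a tag.
void SerializePrototype(NotConcurrentInliningTag) {}

int main() {
  Broker broker;
  SerializePrototype(NotConcurrentInliningTag{&broker});
}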
-#define HEAP_BROKER_OBJECT_LIST(V) \ - /* Subtypes of JSObject */ \ - V(JSArray, RefSerializationKind::kBackgroundSerialized) \ - V(JSBoundFunction, RefSerializationKind::kBackgroundSerialized) \ - V(JSDataView, RefSerializationKind::kBackgroundSerialized) \ - V(JSFunction, RefSerializationKind::kSerialized) \ - V(JSGlobalObject, RefSerializationKind::kBackgroundSerialized) \ - V(JSGlobalProxy, RefSerializationKind::kBackgroundSerialized) \ - V(JSTypedArray, RefSerializationKind::kBackgroundSerialized) \ - /* Subtypes of Context */ \ - V(NativeContext, RefSerializationKind::kNeverSerialized) \ - /* Subtypes of FixedArray */ \ - V(ObjectBoilerplateDescription, RefSerializationKind::kNeverSerialized) \ - V(ScriptContextTable, RefSerializationKind::kBackgroundSerialized) \ - /* Subtypes of String */ \ - V(InternalizedString, RefSerializationKind::kNeverSerialized) \ - /* Subtypes of FixedArrayBase */ \ - V(BytecodeArray, RefSerializationKind::kNeverSerialized) \ - V(FixedArray, RefSerializationKind::kBackgroundSerialized) \ - V(FixedDoubleArray, RefSerializationKind::kNeverSerialized) \ - /* Subtypes of Name */ \ - V(String, RefSerializationKind::kNeverSerialized) \ - V(Symbol, RefSerializationKind::kNeverSerialized) \ - /* Subtypes of JSReceiver */ \ - V(JSObject, RefSerializationKind::kBackgroundSerialized) \ - /* Subtypes of HeapObject */ \ - V(AccessorInfo, RefSerializationKind::kNeverSerialized) \ - V(AllocationSite, RefSerializationKind::kNeverSerialized) \ - V(ArrayBoilerplateDescription, RefSerializationKind::kNeverSerialized) \ - V(BigInt, RefSerializationKind::kBackgroundSerialized) \ - V(CallHandlerInfo, RefSerializationKind::kNeverSerialized) \ - V(Cell, RefSerializationKind::kNeverSerialized) \ - V(Code, RefSerializationKind::kNeverSerialized) \ - V(CodeDataContainer, RefSerializationKind::kNeverSerialized) \ - V(Context, RefSerializationKind::kNeverSerialized) \ - V(DescriptorArray, RefSerializationKind::kNeverSerialized) \ - V(FeedbackCell, RefSerializationKind::kNeverSerialized) \ - V(FeedbackVector, RefSerializationKind::kNeverSerialized) \ - V(FixedArrayBase, RefSerializationKind::kBackgroundSerialized) \ - V(FunctionTemplateInfo, RefSerializationKind::kNeverSerialized) \ - V(HeapNumber, RefSerializationKind::kNeverSerialized) \ - V(JSReceiver, RefSerializationKind::kBackgroundSerialized) \ - V(Map, RefSerializationKind::kBackgroundSerialized) \ - V(Name, RefSerializationKind::kNeverSerialized) \ - V(PropertyCell, RefSerializationKind::kBackgroundSerialized) \ - V(RegExpBoilerplateDescription, RefSerializationKind::kNeverSerialized) \ - V(ScopeInfo, RefSerializationKind::kNeverSerialized) \ - V(SharedFunctionInfo, RefSerializationKind::kNeverSerialized) \ - V(SourceTextModule, RefSerializationKind::kNeverSerialized) \ - V(TemplateObjectDescription, RefSerializationKind::kNeverSerialized) \ - /* Subtypes of Object */ \ - V(HeapObject, RefSerializationKind::kBackgroundSerialized) - -#define FORWARD_DECL(Name, ...) 
class Name##Ref; +#define HEAP_BROKER_OBJECT_LIST_BASE(BACKGROUND_SERIALIZED, NEVER_SERIALIZED) \ + /* Subtypes of JSObject */ \ + BACKGROUND_SERIALIZED(JSArray) \ + BACKGROUND_SERIALIZED(JSBoundFunction) \ + BACKGROUND_SERIALIZED(JSDataView) \ + BACKGROUND_SERIALIZED(JSFunction) \ + BACKGROUND_SERIALIZED(JSGlobalObject) \ + BACKGROUND_SERIALIZED(JSGlobalProxy) \ + BACKGROUND_SERIALIZED(JSTypedArray) \ + /* Subtypes of Context */ \ + NEVER_SERIALIZED(NativeContext) \ + /* Subtypes of FixedArray */ \ + NEVER_SERIALIZED(ObjectBoilerplateDescription) \ + BACKGROUND_SERIALIZED(ScriptContextTable) \ + /* Subtypes of String */ \ + NEVER_SERIALIZED(InternalizedString) \ + /* Subtypes of FixedArrayBase */ \ + NEVER_SERIALIZED(BytecodeArray) \ + BACKGROUND_SERIALIZED(FixedArray) \ + NEVER_SERIALIZED(FixedDoubleArray) \ + /* Subtypes of Name */ \ + NEVER_SERIALIZED(String) \ + NEVER_SERIALIZED(Symbol) \ + /* Subtypes of JSReceiver */ \ + BACKGROUND_SERIALIZED(JSObject) \ + /* Subtypes of HeapObject */ \ + NEVER_SERIALIZED(AccessorInfo) \ + NEVER_SERIALIZED(AllocationSite) \ + NEVER_SERIALIZED(ArrayBoilerplateDescription) \ + BACKGROUND_SERIALIZED(BigInt) \ + NEVER_SERIALIZED(CallHandlerInfo) \ + NEVER_SERIALIZED(Cell) \ + NEVER_SERIALIZED(Code) \ + NEVER_SERIALIZED(CodeDataContainer) \ + NEVER_SERIALIZED(Context) \ + NEVER_SERIALIZED(DescriptorArray) \ + NEVER_SERIALIZED(FeedbackCell) \ + NEVER_SERIALIZED(FeedbackVector) \ + BACKGROUND_SERIALIZED(FixedArrayBase) \ + NEVER_SERIALIZED(FunctionTemplateInfo) \ + NEVER_SERIALIZED(HeapNumber) \ + BACKGROUND_SERIALIZED(JSReceiver) \ + BACKGROUND_SERIALIZED(Map) \ + NEVER_SERIALIZED(Name) \ + BACKGROUND_SERIALIZED(PropertyCell) \ + NEVER_SERIALIZED(RegExpBoilerplateDescription) \ + NEVER_SERIALIZED(ScopeInfo) \ + NEVER_SERIALIZED(SharedFunctionInfo) \ + NEVER_SERIALIZED(SourceTextModule) \ + NEVER_SERIALIZED(TemplateObjectDescription) \ + /* Subtypes of Object */ \ + BACKGROUND_SERIALIZED(HeapObject) + +#define HEAP_BROKER_OBJECT_LIST(V) HEAP_BROKER_OBJECT_LIST_BASE(V, V) +#define IGNORE_CASE(...) 
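HEAP_BROKER_OBJECT_LIST_BASE is a higher-order X-macro: it takes one callback per serialization category, so the full list (pass the same macro twice) and the background-serialized-only list (pass IGNORE_CASE, defined just above, for the never-serialized slot, as the next definition does) expand from a single source of truth. A compilable toy version with invented three-entry contents:

#include <iostream>

// Toy three-entry version of the base list; the real list has ~40 entries.
#define LIST_BASE(BACKGROUND_SERIALIZED, NEVER_SERIALIZED) \
  BACKGROUND_SERIALIZED(Map)                               \
  NEVER_SERIALIZED(ScopeInfo)                              \
  BACKGROUND_SERIALIZED(PropertyCell)

#define LIST_ALL(V) LIST_BASE(V, V)
#define IGNORE_CASE(...)
#define LIST_BACKGROUND_ONLY(V) LIST_BASE(V, IGNORE_CASE)

#define PRINT(Name) std::cout << "  " #Name "\n";

int main() {
  std::cout << "all:\n";
  LIST_ALL(PRINT)
  std::cout << "background-serialized only:\n";
  LIST_BACKGROUND_ONLY(PRINT)
}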
+#define HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \ + HEAP_BROKER_OBJECT_LIST_BASE(V, IGNORE_CASE) + +#define FORWARD_DECL(Name) class Name##Ref; HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) #undef FORWARD_DECL @@ -140,14 +150,32 @@ class ObjectRef; template <class T> struct ref_traits; -#define REF_TRAITS(Name, Kind) \ - template <> \ - struct ref_traits<Name> { \ - using ref_type = Name##Ref; \ - static constexpr RefSerializationKind ref_serialization_kind = Kind; \ +#define FORWARD_DECL(Name) class Name##Data; +HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL) +#undef FORWARD_DECL + +#define BACKGROUND_SERIALIZED_REF_TRAITS(Name) \ + template <> \ + struct ref_traits<Name> { \ + using ref_type = Name##Ref; \ + using data_type = Name##Data; \ + static constexpr RefSerializationKind ref_serialization_kind = \ + RefSerializationKind::kBackgroundSerialized; \ + }; + +#define NEVER_SERIALIZED_REF_TRAITS(Name) \ + template <> \ + struct ref_traits<Name> { \ + using ref_type = Name##Ref; \ + using data_type = ObjectData; \ + static constexpr RefSerializationKind ref_serialization_kind = \ + RefSerializationKind::kNeverSerialized; \ }; -HEAP_BROKER_OBJECT_LIST(REF_TRAITS) -#undef REF_TYPE + +HEAP_BROKER_OBJECT_LIST_BASE(BACKGROUND_SERIALIZED_REF_TRAITS, + NEVER_SERIALIZED_REF_TRAITS) +#undef NEVER_SERIALIZED_REF_TRAITS +#undef BACKGROUND_SERIALIZED_REF_TRAITS template <> struct ref_traits<Object> { @@ -159,6 +187,39 @@ struct ref_traits<Object> { RefSerializationKind::kNeverSerialized; }; +// A ref without the broker_ field, used when storage size is important. +template <class T> +class TinyRef { + private: + using RefType = typename ref_traits<T>::ref_type; + + public: + explicit TinyRef(const RefType& ref) : TinyRef(ref.data_) {} + RefType AsRef(JSHeapBroker* broker) const; + static base::Optional<RefType> AsOptionalRef(JSHeapBroker* broker, + base::Optional<TinyRef<T>> ref) { + if (!ref.has_value()) return {}; + return ref->AsRef(broker); + } + Handle<T> object() const; + + private: + explicit TinyRef(ObjectData* data) : data_(data) { DCHECK_NOT_NULL(data); } + ObjectData* const data_; +}; + +#define V(Name) using Name##TinyRef = TinyRef<Name>; +HEAP_BROKER_OBJECT_LIST(V) +#undef V + +#ifdef V8_EXTERNAL_CODE_SPACE +using CodeTRef = CodeDataContainerRef; +using CodeTTinyRef = CodeDataContainerTinyRef; +#else +using CodeTRef = CodeRef; +using CodeTTinyRef = CodeTinyRef; +#endif + class V8_EXPORT_PRIVATE ObjectRef { public: ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true) @@ -173,11 +234,11 @@ class V8_EXPORT_PRIVATE ObjectRef { bool IsSmi() const; int AsSmi() const; -#define HEAP_IS_METHOD_DECL(Name, ...) bool Is##Name() const; +#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const; HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL) #undef HEAP_IS_METHOD_DECL -#define HEAP_AS_METHOD_DECL(Name, ...) 
Name##Ref As##Name() const; +#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const; HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL) #undef HEAP_AS_METHOD_DECL @@ -203,10 +264,6 @@ class V8_EXPORT_PRIVATE ObjectRef { } }; -#ifdef DEBUG - bool IsNeverSerializedHeapObject() const; -#endif // DEBUG - protected: JSHeapBroker* broker() const; ObjectData* data() const; @@ -220,12 +277,18 @@ class V8_EXPORT_PRIVATE ObjectRef { friend class JSHeapBroker; friend class JSObjectData; friend class StringData; + template <class T> + friend class TinyRef; friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); JSHeapBroker* broker_; }; +template <class T> +using ZoneRefUnorderedSet = + ZoneUnorderedSet<T, ObjectRef::Hash, ObjectRef::Equal>; + // Temporary class that carries information from a Map. We'd like to remove // this class and use MapRef instead, but we can't as long as we support the // kDisabled broker mode. That's because obtaining the MapRef via @@ -293,13 +356,12 @@ class PropertyCellRef : public HeapObjectRef { Handle<PropertyCell> object() const; - // Can be called from a background thread. - V8_WARN_UNUSED_RESULT bool Serialize() const; - void SerializeAsProtector() const { - bool serialized = Serialize(); + V8_WARN_UNUSED_RESULT bool Cache() const; + void CacheAsProtector() const { + bool cached = Cache(); // A protector always holds a Smi value and its cell type never changes, so - // Serialize can't fail. - CHECK(serialized); + // Cache can't fail. + CHECK(cached); } PropertyDetails property_details() const; @@ -365,13 +427,13 @@ class JSObjectRef : public JSReceiverRef { // relaxed read. This is to ease the transition to unserialized (or // background-serialized) elements. base::Optional<FixedArrayBaseRef> elements(RelaxedLoadTag) const; - void SerializeElements(); + void SerializeElements(NotConcurrentInliningTag tag); bool IsElementsTenured(const FixedArrayBaseRef& elements); - void SerializeObjectCreateMap(); + void SerializeObjectCreateMap(NotConcurrentInliningTag tag); base::Optional<MapRef> GetObjectCreateMap() const; - void SerializeAsBoilerplateRecursive(); + void SerializeAsBoilerplateRecursive(NotConcurrentInliningTag tag); }; class JSDataViewRef : public JSObjectRef { @@ -389,7 +451,7 @@ class JSBoundFunctionRef : public JSObjectRef { Handle<JSBoundFunction> object() const; - bool Serialize(); + bool Serialize(NotConcurrentInliningTag tag); // TODO(neis): Make return types non-optional once JSFunction is no longer // fg-serialized. @@ -404,27 +466,29 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { Handle<JSFunction> object() const; - bool has_feedback_vector() const; - bool has_initial_map() const; - bool has_prototype() const; - bool PrototypeRequiresRuntimeLookup() const; + // Returns true, iff the serialized JSFunctionData contents are consistent + // with the state of the underlying JSFunction object. Must be called from + // the main thread. + bool IsConsistentWithHeapState() const; - void Serialize(); - bool serialized() const; - - // The following are available only after calling Serialize(). 
-  ObjectRef prototype() const;
-  MapRef initial_map() const;
   ContextRef context() const;
   NativeContextRef native_context() const;
   SharedFunctionInfoRef shared() const;
-  int InitialMapInstanceSizeWithMinSlack() const;
 
-  void SerializeCodeAndFeedback();
-  bool serialized_code_and_feedback() const;
+  bool has_feedback_vector(CompilationDependencies* dependencies) const;
+  bool has_initial_map(CompilationDependencies* dependencies) const;
+  bool PrototypeRequiresRuntimeLookup(
+      CompilationDependencies* dependencies) const;
+  bool has_instance_prototype(CompilationDependencies* dependencies) const;
+  ObjectRef instance_prototype(CompilationDependencies* dependencies) const;
+  MapRef initial_map(CompilationDependencies* dependencies) const;
+  int InitialMapInstanceSizeWithMinSlack(
+      CompilationDependencies* dependencies) const;
+  FeedbackVectorRef feedback_vector(
+      CompilationDependencies* dependencies) const;
+  FeedbackCellRef raw_feedback_cell(
+      CompilationDependencies* dependencies) const;
 
-  FeedbackVectorRef feedback_vector() const;
-  FeedbackCellRef raw_feedback_cell() const;
   CodeRef code() const;
 };
 
@@ -434,7 +498,7 @@ class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
 
   Handle<RegExpBoilerplateDescription> object() const;
 
-  void Serialize();
+  void Serialize(NotConcurrentInliningTag tag);
 
   FixedArrayRef data() const;
   StringRef source() const;
@@ -489,6 +553,7 @@ class ContextRef : public HeapObjectRef {
   V(JSFunction, symbol_function)       \
   V(JSGlobalObject, global_object)     \
   V(JSGlobalProxy, global_proxy_object) \
+  V(JSObject, initial_array_prototype) \
   V(JSObject, promise_prototype)       \
   V(Map, async_function_object_map)    \
   V(Map, block_context_map)            \
@@ -524,7 +589,7 @@ class NativeContextRef : public ContextRef {
 
   Handle<NativeContext> object() const;
 
-  void Serialize();
+  void Serialize(NotConcurrentInliningTag tag);
 
 #define DECL_ACCESSOR(type, name) type##Ref name() const;
   BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
@@ -580,10 +645,7 @@ class FeedbackVectorRef : public HeapObjectRef {
   Handle<FeedbackVector> object() const;
 
   SharedFunctionInfoRef shared_function_info() const;
-  double invocation_count() const;
 
-  void Serialize();
-  bool serialized() const;
   FeedbackCellRef GetClosureFeedbackCell(int index) const;
 };
 
@@ -614,7 +676,7 @@ class AllocationSiteRef : public HeapObjectRef {
   AllocationType GetAllocationType() const;
   ObjectRef nested_site() const;
 
-  void SerializeRecursive();
+  void SerializeRecursive(NotConcurrentInliningTag tag);
   base::Optional<JSObjectRef> boilerplate() const;
   ElementsKind GetElementsKind() const;
@@ -675,24 +737,21 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
   INSTANCE_TYPE_CHECKERS(DEF_TESTER)
 #undef DEF_TESTER
 
-  void SerializeBackPointer();
+  void SerializeBackPointer(NotConcurrentInliningTag tag);
   HeapObjectRef GetBackPointer() const;
 
-  void SerializePrototype();
+  void SerializePrototype(NotConcurrentInliningTag tag);
   // TODO(neis): We should be able to remove TrySerializePrototype once
   // concurrent-inlining is always on. Then we can also change the return type
   // of prototype() back to HeapObjectRef.
-  bool TrySerializePrototype();
+  bool TrySerializePrototype(NotConcurrentInliningTag tag);
   base::Optional<HeapObjectRef> prototype() const;
 
-  void SerializeForElementStore();
+  void SerializeForElementStore(NotConcurrentInliningTag tag);
   bool HasOnlyStablePrototypesWithFastElements(
       ZoneVector<MapRef>* prototype_maps);
 
   // Concerning the underlying instance_descriptors:
-  bool TrySerializeOwnDescriptor(InternalIndex descriptor_index);
-  void SerializeOwnDescriptor(InternalIndex descriptor_index);
-  bool serialized_own_descriptor(InternalIndex descriptor_index) const;
   MapRef FindFieldOwner(InternalIndex descriptor_index) const;
   PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
   NameRef GetPropertyKey(InternalIndex descriptor_index) const;
@@ -703,11 +762,9 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
 
   DescriptorArrayRef instance_descriptors() const;
 
-  void SerializeRootMap();
+  void SerializeRootMap(NotConcurrentInliningTag tag);
   base::Optional<MapRef> FindRootMap() const;
 
-  // Available after calling JSFunctionRef::Serialize on a function that has
-  // this map as initial map.
   ObjectRef GetConstructor() const;
 };
 
@@ -731,7 +788,7 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
   // The following returns true if the CallHandlerInfo is present.
   bool has_call_code() const;
 
-  void SerializeCallCode();
+  void SerializeCallCode(NotConcurrentInliningTag tag);
   base::Optional<CallHandlerInfoRef> call_code() const;
   ZoneVector<Address> c_functions() const;
   ZoneVector<const CFunctionInfo*> c_signatures() const;
@@ -858,9 +915,7 @@ class ScopeInfoRef : public HeapObjectRef {
   bool HasOuterScopeInfo() const;
   bool HasContextExtensionSlot() const;
 
-  // Only serialized via SerializeScopeInfoChain.
   ScopeInfoRef OuterScopeInfo() const;
-  void SerializeScopeInfoChain();
 };
 
 #define BROKER_SFI_FIELDS(V) \
@@ -948,7 +1003,7 @@ class JSTypedArrayRef : public JSObjectRef {
   size_t length() const;
   void* data_ptr() const;
 
-  void Serialize();
+  void Serialize(NotConcurrentInliningTag tag);
   bool serialized() const;
 
   HeapObjectRef buffer() const;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 6341b99b98..3dcdc6a33e 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -59,6 +59,7 @@ class JSCallReducerAssembler : public JSGraphAssembler {
             reducer->ZoneForGraphAssembler(),
             [reducer](Node* n) { reducer->RevisitForGraphAssembler(n); },
             nullptr, kMarkLoopExits),
+        dependencies_(reducer->dependencies()),
         node_(node),
         outermost_catch_scope_(
             CatchScope::Outermost(reducer->ZoneForGraphAssembler())),
@@ -656,9 +657,11 @@ class JSCallReducerAssembler : public JSGraphAssembler {
 
   JSOperatorBuilder* javascript() const { return jsgraph()->javascript(); }
 
+  CompilationDependencies* dependencies() const { return dependencies_; }
+
  private:
+  CompilationDependencies* const dependencies_;
   Node* const node_;
-
   CatchScope outermost_catch_scope_;
   Node* outermost_handler_;
   CatchScope* catch_scope_;
@@ -831,7 +834,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
                                       int slot_count) {
     return AddNode<Context>(graph()->NewNode(
         javascript()->CreateFunctionContext(
-            native_context.scope_info().object(),
+            native_context.scope_info(),
            slot_count - Context::MIN_CONTEXT_SLOTS, FUNCTION_SCOPE),
        outer_context, effect(), control()));
   }
@@ -848,11 +851,10 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
         isolate()->factory()->many_closures_cell();
     Callable const callable =
         Builtins::CallableFor(isolate(), shared.builtin_id());
-    Handle<CodeT> code =
-        broker_->CanonicalPersistentHandle(ToCodeT(*callable.code()));
+    CodeTRef code = MakeRef(broker_, ToCodeT(*callable.code()));
     return AddNode<JSFunction>(graph()->NewNode(
-        javascript()->CreateClosure(shared.object(), code),
-        HeapConstant(feedback_cell), context, effect(), control()));
+        javascript()->CreateClosure(shared, code), HeapConstant(feedback_cell),
+        context, effect(), control()));
   }
 
   void CallPromiseExecutor(TNode<Object> executor, TNode<JSFunction> resolve,
@@ -1117,9 +1119,9 @@ TNode<Object> JSCallReducerAssembler::CopyNode() {
 
 TNode<JSArray> JSCallReducerAssembler::CreateArrayNoThrow(
     TNode<Object> ctor, TNode<Number> size, FrameState frame_state) {
-  return AddNode<JSArray>(graph()->NewNode(
-      javascript()->CreateArray(1, MaybeHandle<AllocationSite>()), ctor, ctor,
-      size, ContextInput(), frame_state, effect(), control()));
+  return AddNode<JSArray>(
+      graph()->NewNode(javascript()->CreateArray(1, base::nullopt), ctor, ctor,
+                       size, ContextInput(), frame_state, effect(), control()));
 }
 TNode<JSArray> JSCallReducerAssembler::AllocateEmptyJSArray(
     ElementsKind kind, const NativeContextRef& native_context) {
@@ -2418,8 +2420,8 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
   node->RemoveInput(n.FeedbackVectorIndex());
   NodeProperties::ReplaceValueInput(node, target, 0);
   NodeProperties::ReplaceValueInput(node, target, 1);
-  NodeProperties::ChangeOp(
-      node, javascript()->CreateArray(arity, MaybeHandle<AllocationSite>()));
+  NodeProperties::ChangeOp(node,
+                           javascript()->CreateArray(arity, base::nullopt));
   return Changed(node);
 }
 
@@ -2611,17 +2613,16 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
   // definitely a constructor or not a constructor.
   MapInference inference(broker(), receiver, effect);
   if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
 
-  MapRef first_receiver_map = MakeRef(broker(), receiver_maps[0]);
+  MapRef first_receiver_map = receiver_maps[0];
   bool const is_constructor = first_receiver_map.is_constructor();
 
   base::Optional<HeapObjectRef> const prototype =
       first_receiver_map.prototype();
   if (!prototype.has_value()) return inference.NoChange();
 
-  for (Handle<Map> const map : receiver_maps) {
-    MapRef receiver_map = MakeRef(broker(), map);
+  for (const MapRef& receiver_map : receiver_maps) {
     base::Optional<HeapObjectRef> map_prototype = receiver_map.prototype();
     if (!map_prototype.has_value()) return inference.NoChange();
 
@@ -2653,12 +2654,6 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
         JSFunctionOrBoundFunction::kLengthDescriptorIndex);
     const InternalIndex kNameIndex(
         JSFunctionOrBoundFunction::kNameDescriptorIndex);
-    if (!receiver_map.serialized_own_descriptor(kLengthIndex) ||
-        !receiver_map.serialized_own_descriptor(kNameIndex)) {
-      TRACE_BROKER_MISSING(broker(),
-                           "serialized descriptors on map " << receiver_map);
-      return inference.NoChange();
-    }
     ReadOnlyRoots roots(isolate());
     StringRef length_string = MakeRef(broker(), roots.length_string_handle());
     StringRef name_string = MakeRef(broker(), roots.name_string_handle());
@@ -2719,7 +2714,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
   DCHECK_EQ(cursor, input_count);
   Node* value = effect =
       graph()->NewNode(javascript()->CreateBoundFunction(
-                           arity_with_bound_this - kBoundThis, map.object()),
+                           arity_with_bound_this - kBoundThis, map),
                        input_count, inputs);
   ReplaceWithValue(node, value, effect, control);
   return Replace(value);
@@ -2739,7 +2734,6 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
   HeapObjectMatcher m(target);
   if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
     JSFunctionRef function = m.Ref(broker()).AsJSFunction();
-    if (!function.serialized()) return NoChange();
     context = jsgraph()->Constant(function.context());
   } else {
     context = effect = graph()->NewNode(
@@ -2801,20 +2795,20 @@ Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
 }
 
 Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
-  Node* effect = NodeProperties::GetEffectInput(node);
+  Effect effect{NodeProperties::GetEffectInput(node)};
 
   // Try to determine the {object} map.
   MapInference inference(broker(), object, effect);
   if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& object_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& object_maps = inference.GetMaps();
 
-  MapRef candidate_map = MakeRef(broker(), object_maps[0]);
+  MapRef candidate_map = object_maps[0];
   base::Optional<HeapObjectRef> candidate_prototype =
       candidate_map.prototype();
   if (!candidate_prototype.has_value()) return inference.NoChange();
 
   // Check if we can constant-fold the {candidate_prototype}.
   for (size_t i = 0; i < object_maps.size(); ++i) {
-    MapRef object_map = MakeRef(broker(), object_maps[i]);
+    MapRef object_map = object_maps[i];
     base::Optional<HeapObjectRef> map_prototype = object_map.prototype();
     if (!map_prototype.has_value()) return inference.NoChange();
     if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
@@ -3188,13 +3182,13 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
 }
 
 namespace {
+
 bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
-                                    MapHandles const& receiver_maps,
+                                    ZoneVector<MapRef> const& receiver_maps,
                                     ElementsKind* kind_return) {
   DCHECK_NE(0, receiver_maps.size());
-  *kind_return = MakeRef(broker, receiver_maps[0]).elements_kind();
-  for (auto receiver_map : receiver_maps) {
-    MapRef map = MakeRef(broker, receiver_map);
+  *kind_return = receiver_maps[0].elements_kind();
+  for (const MapRef& map : receiver_maps) {
     if (!map.supports_fast_array_iteration() ||
         !UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
       return false;
@@ -3204,12 +3198,11 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
 }
 
 bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
-                                   MapHandles const& receiver_maps,
+                                   ZoneVector<MapRef> const& receiver_maps,
                                    std::vector<ElementsKind>* kinds,
                                    bool builtin_is_push = false) {
   DCHECK_NE(0, receiver_maps.size());
-  for (auto receiver_map : receiver_maps) {
-    MapRef map = MakeRef(broker, receiver_map);
+  for (const MapRef& map : receiver_maps) {
     if (!map.supports_fast_array_resize()) return false;
     // TODO(turbofan): We should also handle fast holey double elements once
     // we got the hole NaN mess sorted out in TurboFan/V8.
@@ -3249,7 +3242,7 @@ class IteratingArrayBuiltinHelper {
 
     // Try to determine the {receiver} map.
     if (!inference_.HaveMaps()) return;
-    MapHandles const& receiver_maps = inference_.GetMaps();
+    ZoneVector<MapRef> const& receiver_maps = inference_.GetMaps();
 
     if (!CanInlineArrayIteratingBuiltin(broker, receiver_maps,
                                         &elements_kind_)) {
@@ -3610,6 +3603,7 @@ FastApiCallFunctionVector CanOptimizeFastCall(
     optimize_to_fast_call =
         optimize_to_fast_call && !Has64BitIntegerParamsInSignature(c_signature);
 #endif
+
     if (optimize_to_fast_call) {
       result.push_back({functions[i], c_signature});
     }
@@ -3671,8 +3665,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
   // Try to infer the {receiver} maps from the graph.
   MapInference inference(broker(), receiver, effect);
   if (inference.HaveMaps()) {
-    MapHandles const& receiver_maps = inference.GetMaps();
-    MapRef first_receiver_map = MakeRef(broker(), receiver_maps[0]);
+    ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
+    MapRef first_receiver_map = receiver_maps[0];
 
     // See if we can constant-fold the compatible receiver checks.
     HolderLookupResult api_holder =
@@ -3705,7 +3699,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
             function_template_info.accept_any_receiver());
 
     for (size_t i = 1; i < receiver_maps.size(); ++i) {
-      MapRef receiver_map = MakeRef(broker(), receiver_maps[i]);
+      MapRef receiver_map = receiver_maps[i];
       HolderLookupResult holder_i =
           function_template_info.LookupHolderOfExpectedType(receiver_map);
 
@@ -4061,10 +4055,6 @@ JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpreadOfCreateArguments(
     }
   }
 
-  // TODO(jgruber,v8:8888): Attempt to remove this restriction. The reason it
-  // currently exists is because we cannot create code dependencies in NCI code.
-  if (broker()->is_native_context_independent()) return NoChange();
-
   // For call/construct with spread, we need to also install a code
   // dependency on the array iterator lookup protector cell to ensure
   // that no one messed with the %ArrayIteratorPrototype%.next method.
@@ -4202,9 +4192,10 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
     if (feedback.IsInsufficient()) return NoChange();
     AllocationSiteRef site = feedback.AsLiteral().value();
 
-    base::Optional<JSArrayRef> boilerplate_array =
-        site.boilerplate()->AsJSArray();
-    int const array_length = boilerplate_array->GetBoilerplateLength().AsSmi();
+    if (!site.boilerplate().has_value()) return NoChange();
+
+    JSArrayRef boilerplate_array = site.boilerplate()->AsJSArray();
+    int const array_length = boilerplate_array.GetBoilerplateLength().AsSmi();
 
     // We'll replace the arguments_list input with {array_length} element loads.
     new_argument_count = argument_count - 1 + array_length;
@@ -4217,7 +4208,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
     }
 
     // Determine the array's map.
-    MapRef array_map = boilerplate_array->map();
+    MapRef array_map = boilerplate_array.map();
     if (!array_map.supports_fast_array_iteration()) {
       return NoChange();
     }
@@ -4277,16 +4268,15 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
   }
 
   NodeProperties::ChangeOp(
-      node, javascript()->Call(
-                JSCallNode::ArityForArgc(new_argument_count), frequency,
-                feedback_source, ConvertReceiverMode::kNullOrUndefined,
-                speculation_mode, CallFeedbackRelation::kUnrelated));
+      node,
+      javascript()->Call(JSCallNode::ArityForArgc(new_argument_count),
+                         frequency, feedback_source, ConvertReceiverMode::kAny,
+                         speculation_mode, CallFeedbackRelation::kUnrelated));
   NodeProperties::ReplaceEffectInput(node, effect);
   return Changed(node).FollowedBy(ReduceJSCall(node));
 }
 
 bool JSCallReducer::IsBuiltinOrApiFunction(JSFunctionRef function) const {
-  if (!function.serialized()) return false;
   // TODO(neis): Add a way to check if function template info isn't serialized
   // and add a warning in such cases. Currently we can't tell if function
   // template info doesn't exist or wasn't serialized.
@@ -4310,7 +4300,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
     ObjectRef target_ref = m.Ref(broker());
     if (target_ref.IsJSFunction()) {
       JSFunctionRef function = target_ref.AsJSFunction();
-      if (!function.serialized()) return NoChange();
 
       // Don't inline cross native context.
       if (!function.native_context().equals(native_context())) {
@@ -4380,7 +4369,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
   // Same if the {target} is the result of a CheckClosure operation.
   if (target->opcode() == IrOpcode::kJSCreateClosure) {
     CreateClosureParameters const& p = JSCreateClosureNode{target}.Parameters();
-    return ReduceJSCall(node, MakeRef(broker(), p.shared_info()));
+    return ReduceJSCall(node, p.shared_info(broker()));
   } else if (target->opcode() == IrOpcode::kCheckClosure) {
     FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
     if (cell.shared_function_info().has_value()) {
@@ -4471,13 +4460,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
   if (feedback_cell.value().has_value()) {
     // Check that {target} is a closure with given {feedback_cell},
     // which uniquely identifies a given function inside a native context.
-    FeedbackVectorRef feedback_vector = *feedback_cell.value();
-    if (!feedback_vector.serialized()) {
-      TRACE_BROKER_MISSING(
-          broker(), "feedback vector, not serialized: " << feedback_vector);
-      return NoChange();
-    }
-
     Node* target_closure = effect =
         graph()->NewNode(simplified()->CheckClosure(feedback_cell.object()),
                          target, effect, control);
@@ -4969,8 +4951,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
       node->ReplaceInput(n.NewTargetIndex(), array_function);
       node->RemoveInput(n.FeedbackVectorIndex());
       NodeProperties::ChangeOp(
-          node, javascript()->CreateArray(
-                    arity, feedback_target->AsAllocationSite().object()));
+          node, javascript()->CreateArray(arity,
+                                          feedback_target->AsAllocationSite()));
       return Changed(node);
     } else if (feedback_target.has_value() &&
                !HeapObjectMatcher(new_target).HasResolvedValue() &&
@@ -5012,13 +4994,13 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
 
     if (target_ref.IsJSFunction()) {
       JSFunctionRef function = target_ref.AsJSFunction();
-      if (!function.serialized()) return NoChange();
 
       // Do not reduce constructors with break points.
       // If this state changes during background compilation, the compilation
       // job will be aborted from the main thread (see
       // Debug::PrepareFunctionForDebugExecution()).
-      if (function.shared().HasBreakInfo()) return NoChange();
+      SharedFunctionInfoRef sfi = function.shared();
+      if (sfi.HasBreakInfo()) return NoChange();
 
       // Don't inline cross native context.
      if (!function.native_context().equals(native_context())) {
@@ -5026,9 +5008,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
       }
 
       // Check for known builtin functions.
-      Builtin builtin = function.shared().HasBuiltinId()
-                            ? function.shared().builtin_id()
-                            : Builtin::kNoBuiltinId;
+      Builtin builtin =
+          sfi.HasBuiltinId() ? sfi.builtin_id() : Builtin::kNoBuiltinId;
       switch (builtin) {
         case Builtin::kArrayConstructor: {
           // TODO(bmeurer): Deal with Array subclasses here.
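
Note on the js-call-reducer.cc hunks above: most of them apply the same mechanical migration. MapInference::GetMaps() now returns broker-backed MapRefs rather than raw handles, so the per-use MakeRef wrapping disappears. A condensed before/after sketch of the caller-side pattern (not a complete function; names as in the diff):

    // Before: handles come out of the inference and get re-wrapped at every use.
    MapHandles const& maps = inference.GetMaps();
    for (Handle<Map> handle : maps) {
      MapRef map = MakeRef(broker(), handle);
      if (!map.supports_fast_array_iteration()) return inference.NoChange();
    }

    // After: the inference hands out MapRefs, which callers iterate directly.
    ZoneVector<MapRef> const& maps = inference.GetMaps();
    for (MapRef const& map : maps) {
      if (!map.supports_fast_array_iteration()) return inference.NoChange();
    }

The deleted serialized-state checks in the surrounding hunks (function.serialized(), serialized_own_descriptor(), feedback_vector.serialized()) point in the same direction: refs are meant to be usable without a prior explicit serialization step.
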
@@ -5037,7 +5018,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
           node->ReplaceInput(n.NewTargetIndex(), new_target);
           node->RemoveInput(n.FeedbackVectorIndex());
           NodeProperties::ChangeOp(
-              node, javascript()->CreateArray(arity, Handle<AllocationSite>()));
+              node, javascript()->CreateArray(arity, base::nullopt));
           return Changed(node);
         }
         case Builtin::kObjectConstructor: {
@@ -5464,7 +5445,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
 
   MapInference inference(broker(), receiver, effect);
   if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
 
   std::vector<ElementsKind> kinds;
   if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds, true)) {
@@ -5601,7 +5582,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
 
   MapInference inference(broker(), receiver, effect);
   if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
 
   std::vector<ElementsKind> kinds;
   if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
@@ -5748,7 +5729,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
 
   MapInference inference(broker(), receiver, effect);
   if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
 
   std::vector<ElementsKind> kinds;
   if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
@@ -5985,14 +5966,13 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
 
   MapInference inference(broker(), receiver, effect);
   if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
 
   // Check that the maps are of JSArray (and more).
   // TODO(turbofan): Consider adding special case for the common pattern
   // `slice.call(arguments)`, for example jQuery makes heavy use of that.
   bool can_be_holey = false;
-  for (Handle<Map> map : receiver_maps) {
-    MapRef receiver_map = MakeRef(broker(), map);
+  for (const MapRef& receiver_map : receiver_maps) {
     if (!receiver_map.supports_fast_array_iteration()) {
       return inference.NoChange();
     }
@@ -6136,23 +6116,21 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
   IterationKind const iteration_kind =
       CreateArrayIteratorParametersOf(iterator->op()).kind();
   Node* iterated_object = NodeProperties::GetValueInput(iterator, 0);
-  Node* iterator_effect = NodeProperties::GetEffectInput(iterator);
+  Effect iterator_effect{NodeProperties::GetEffectInput(iterator)};
 
   MapInference inference(broker(), iterated_object, iterator_effect);
   if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& iterated_object_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& iterated_object_maps = inference.GetMaps();
 
   // Check that various {iterated_object_maps} have compatible elements kinds.
-  ElementsKind elements_kind =
-      MakeRef(broker(), iterated_object_maps[0]).elements_kind();
+  ElementsKind elements_kind = iterated_object_maps[0].elements_kind();
 
   if (IsTypedArrayElementsKind(elements_kind)) {
     // TurboFan doesn't support loading from BigInt typed arrays yet.
     if (elements_kind == BIGUINT64_ELEMENTS ||
         elements_kind == BIGINT64_ELEMENTS) {
       return inference.NoChange();
     }
-    for (Handle<Map> map : iterated_object_maps) {
-      MapRef iterated_object_map = MakeRef(broker(), map);
+    for (const MapRef& iterated_object_map : iterated_object_maps) {
       if (iterated_object_map.elements_kind() != elements_kind) {
         return inference.NoChange();
       }
@@ -6416,16 +6394,7 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
   Effect effect = n.effect();
   Control control = n.control();
 
-  if (n.ArgumentCount() < 1) {
-    effect = graph()->NewNode(simplified()->CheckString(p.feedback()), receiver,
-                              effect, control);
-
-    Node* value = jsgraph()->FalseConstant();
-    ReplaceWithValue(node, value, effect, control);
-    return Replace(value);
-  }
-
-  Node* search_string = n.Argument(0);
+  Node* search_string = n.ArgumentOr(0, jsgraph()->UndefinedConstant());
   Node* position = n.ArgumentOr(1, jsgraph()->ZeroConstant());
 
   HeapObjectMatcher m(search_string);
@@ -6433,51 +6402,59 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
     ObjectRef target_ref = m.Ref(broker());
     if (target_ref.IsString()) {
       StringRef str = target_ref.AsString();
-      if (str.length().has_value() && str.length().value() == 1) {
+      if (str.length().has_value()) {
         receiver = effect = graph()->NewNode(
             simplified()->CheckString(p.feedback()), receiver, effect, control);
 
         position = effect = graph()->NewNode(
             simplified()->CheckSmi(p.feedback()), position, effect, control);
 
-        Node* string_length =
-            graph()->NewNode(simplified()->StringLength(), receiver);
-        Node* unsigned_position = graph()->NewNode(
-            simplified()->NumberMax(), position, jsgraph()->ZeroConstant());
-
-        Node* check = graph()->NewNode(simplified()->NumberLessThan(),
-                                       unsigned_position, string_length);
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
-                                        check, control);
-
-        Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-        Node* efalse = effect;
-        Node* vfalse = jsgraph()->FalseConstant();
-
-        Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-        Node* etrue = effect;
-        Node* vtrue;
-        {
-          Node* masked_position =
-              graph()->NewNode(simplified()->PoisonIndex(), unsigned_position);
-          Node* string_first = etrue =
-              graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
-                               masked_position, etrue, if_true);
-
-          Node* search_first = jsgraph()->Constant(str.GetFirstChar().value());
-          vtrue = graph()->NewNode(simplified()->NumberEqual(), string_first,
-                                   search_first);
+        if (str.length().value() == 0) {
+          Node* value = jsgraph()->TrueConstant();
+          ReplaceWithValue(node, value, effect, control);
+          return Replace(value);
         }
+        if (str.length().value() == 1) {
+          Node* string_length =
+              graph()->NewNode(simplified()->StringLength(), receiver);
+          Node* unsigned_position = graph()->NewNode(
+              simplified()->NumberMax(), position, jsgraph()->ZeroConstant());
+
+          Node* check = graph()->NewNode(simplified()->NumberLessThan(),
+                                         unsigned_position, string_length);
+          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
+                                          check, control);
+
+          Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+          Node* efalse = effect;
+          Node* vfalse = jsgraph()->FalseConstant();
+
+          Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+          Node* etrue = effect;
+          Node* vtrue;
+          {
+            Node* masked_position = graph()->NewNode(
+                simplified()->PoisonIndex(), unsigned_position);
+            Node* string_first = etrue =
+                graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+                                 masked_position, etrue, if_true);
+
+            Node* search_first =
+                jsgraph()->Constant(str.GetFirstChar().value());
+            vtrue = graph()->NewNode(simplified()->NumberEqual(), string_first,
+                                     search_first);
          }
 
-        control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-        Node* value =
-            graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                             vtrue, vfalse, control);
-        effect =
-            graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+          control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+          Node* value =
+              graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                               vtrue, vfalse, control);
+          effect =
+              graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
 
-        ReplaceWithValue(node, value, effect, control);
-        return Replace(value);
+          ReplaceWithValue(node, value, effect, control);
+          return Replace(value);
+        }
       }
     }
   }
@@ -6617,10 +6594,6 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
 }
 
 Reduction JSCallReducer::ReduceStringPrototypeIterator(Node* node) {
-  // TODO(jgruber): We could reduce here when generating native context
-  // independent code, if LowerJSCreateStringIterator were implemented in
-  // generic lowering.
-  if (broker()->is_native_context_independent()) return NoChange();
   JSCallNode n(node);
   CallParameters const& p = n.Parameters();
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -6745,11 +6718,6 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(Node* node) {
 }
 
 Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
-  // TODO(jgruber): We could reduce here when generating native context
-  // independent code, if LowerJSCreatePromise were implemented in generic
-  // lowering.
-  if (broker()->is_native_context_independent()) return NoChange();
-
   PromiseBuiltinReducerAssembler a(this, node, broker());
 
   // We only inline when we have the executor.
@@ -6764,12 +6732,11 @@ bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
   if (!inference->HaveMaps()) return false;
-  MapHandles const& receiver_maps = inference->GetMaps();
+  ZoneVector<MapRef> const& receiver_maps = inference->GetMaps();
 
   // Check whether all {receiver_maps} are JSPromise maps and
   // have the initial Promise.prototype as their [[Prototype]].
-  for (Handle<Map> map : receiver_maps) {
-    MapRef receiver_map = MakeRef(broker(), map);
+  for (const MapRef& receiver_map : receiver_maps) {
     if (!receiver_map.IsJSPromiseMap()) return false;
     base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
     if (!prototype.has_value() ||
@@ -6827,9 +6794,8 @@ Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
       isolate()->factory()->many_closures_cell();
   Callable const callable =
       Builtins::CallableFor(isolate(), shared.builtin_id());
-  Handle<CodeT> code =
-      broker()->CanonicalPersistentHandle(ToCodeT(*callable.code()));
-  return graph()->NewNode(javascript()->CreateClosure(shared.object(), code),
+  CodeTRef code = MakeRef(broker(), ToCodeT(*callable.code()));
+  return graph()->NewNode(javascript()->CreateClosure(shared, code),
                           jsgraph()->HeapConstant(feedback_cell), context,
                           effect, control);
 }
@@ -6849,7 +6815,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
 
   MapInference inference(broker(), receiver, effect);
   if (!DoPromiseChecks(&inference)) return inference.NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
 
   if (!dependencies()->DependOnPromiseHookProtector()) {
     return inference.NoChange();
@@ -6881,7 +6847,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
     // Allocate shared context for the closures below.
     context = etrue =
         graph()->NewNode(javascript()->CreateFunctionContext(
-                             native_context().scope_info().object(),
+                             native_context().scope_info(),
                              PromiseBuiltins::kPromiseFinallyContextLength -
                                  Context::MIN_CONTEXT_SLOTS,
                             FUNCTION_SCOPE),
@@ -6927,7 +6893,9 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
   // of the call to "then" below.
   {
     ZoneHandleSet<Map> maps;
-    for (Handle<Map> map : receiver_maps) maps.insert(map, graph()->zone());
+    for (const MapRef& map : receiver_maps) {
+      maps.insert(map.object(), graph()->zone());
+    }
     effect = graph()->NewNode(simplified()->MapGuard(maps), receiver, effect,
                               control);
   }
@@ -7007,7 +6975,8 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
   // doesn't escape to user JavaScript. So bake this information
   // into the graph such that subsequent passes can use the
   // information for further optimizations.
-  MapRef promise_map = native_context().promise_function().initial_map();
+  MapRef promise_map =
+      native_context().promise_function().initial_map(dependencies());
   effect = graph()->NewNode(
       simplified()->MapGuard(ZoneHandleSet<Map>(promise_map.object())),
       promise, effect, control);
@@ -7215,8 +7184,8 @@ Reduction JSCallReducer::ReduceMapPrototypeGet(Node* node) {
   JSCallNode n(node);
   if (n.ArgumentCount() != 1) return NoChange();
   Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
+  Effect effect{NodeProperties::GetEffectInput(node)};
+  Control control{NodeProperties::GetControlInput(node)};
   Node* key = NodeProperties::GetValueInput(node, 2);
 
   MapInference inference(broker(), receiver, effect);
@@ -7262,8 +7231,8 @@ Reduction JSCallReducer::ReduceMapPrototypeHas(Node* node) {
   JSCallNode n(node);
   if (n.ArgumentCount() != 1) return NoChange();
   Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
+  Effect effect{NodeProperties::GetEffectInput(node)};
+  Control control{NodeProperties::GetControlInput(node)};
   Node* key = NodeProperties::GetValueInput(node, 2);
 
   MapInference inference(broker(), receiver, effect);
@@ -7305,8 +7274,8 @@ Reduction JSCallReducer::ReduceCollectionIteration(
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
+  Effect effect{NodeProperties::GetEffectInput(node)};
+  Control control{NodeProperties::GetControlInput(node)};
 
   InstanceType type = InstanceTypeForCollectionKind(collection_kind);
   MapInference inference(broker(), receiver, effect);
@@ -7325,8 +7294,8 @@ Reduction JSCallReducer::ReduceCollectionPrototypeSize(
     Node* node, CollectionKind collection_kind) {
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
+  Effect effect{NodeProperties::GetEffectInput(node)};
+  Control control{NodeProperties::GetControlInput(node)};
 
   InstanceType type = InstanceTypeForCollectionKind(collection_kind);
   MapInference inference(broker(), receiver, effect);
@@ -7376,12 +7345,10 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
   {
     MapInference inference(broker(), receiver, effect);
     if (!inference.HaveMaps()) return NoChange();
-    MapHandles const& receiver_maps = inference.GetMaps();
-    receiver_instance_type =
-        MakeRef(broker(), receiver_maps[0]).instance_type();
+    ZoneVector<MapRef> const& receiver_maps = inference.GetMaps();
+    receiver_instance_type = receiver_maps[0].instance_type();
     for (size_t i = 1; i < receiver_maps.size(); ++i) {
-      if (MakeRef(broker(), receiver_maps[i]).instance_type() !=
-          receiver_instance_type) {
+      if (receiver_maps[i].instance_type() != receiver_instance_type) {
         return inference.NoChange();
       }
     }
@@ -7653,8 +7620,8 @@ Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
 
 Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
     Node* node, InstanceType instance_type, FieldAccess const& access) {
   Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
+  Effect effect{NodeProperties::GetEffectInput(node)};
+  Control control{NodeProperties::GetControlInput(node)};
 
   MapInference inference(broker(), receiver, effect);
   if (!inference.HaveMaps() ||
@@ -7901,8 +7868,8 @@ Reduction JSCallReducer::ReduceGlobalIsNaN(Node* node) {
 // ES6 section 20.3.4.10 Date.prototype.getTime ( )
 Reduction JSCallReducer::ReduceDatePrototypeGetTime(Node* node) {
   Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
+  Effect effect{NodeProperties::GetEffectInput(node)};
+  Control control{NodeProperties::GetControlInput(node)};
 
   MapInference inference(broker(), receiver, effect);
   if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAre(JS_DATE_TYPE)) {
@@ -7969,62 +7936,45 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
   // Only the initial JSRegExp map is valid here, since the following lastIndex
   // check as well as the lowered builtin call rely on a known location of the
   // lastIndex field.
-  Handle<Map> regexp_initial_map =
-      native_context().regexp_function().initial_map().object();
+  MapRef regexp_initial_map =
+      native_context().regexp_function().initial_map(dependencies());
 
   MapInference inference(broker(), regexp, effect);
   if (!inference.Is(regexp_initial_map)) return inference.NoChange();
-  MapHandles const& regexp_maps = inference.GetMaps();
+  ZoneVector<MapRef> const& regexp_maps = inference.GetMaps();
 
   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
-  if (broker()->is_concurrent_inlining()) {
-    // Obtain precomputed access infos from the broker.
-    for (auto map : regexp_maps) {
-      MapRef map_ref = MakeRef(broker(), map);
-      PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
-          map_ref, MakeRef(broker(), isolate()->factory()->exec_string()),
-          AccessMode::kLoad, dependencies());
-      access_infos.push_back(access_info);
-    }
-  } else {
-    // Compute property access info for "exec" on {resolution}.
-    access_info_factory.ComputePropertyAccessInfos(
-        MapHandles(regexp_maps.begin(), regexp_maps.end()),
-        factory()->exec_string(), AccessMode::kLoad, &access_infos);
+
+  for (const MapRef& map : regexp_maps) {
+    access_infos.push_back(broker()->GetPropertyAccessInfo(
+        map, MakeRef(broker(), isolate()->factory()->exec_string()),
+        AccessMode::kLoad, dependencies()));
   }
 
   PropertyAccessInfo ai_exec =
       access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
                                                            AccessMode::kLoad);
   if (ai_exec.IsInvalid()) return inference.NoChange();
+  if (!ai_exec.IsFastDataConstant()) return inference.NoChange();
 
-  // If "exec" has been modified on {regexp}, we can't do anything.
-  if (ai_exec.IsFastDataConstant()) {
-    Handle<JSObject> holder;
-    // Do not reduce if the exec method is not on the prototype chain.
-    if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
-
-    JSObjectRef holder_ref = MakeRef(broker(), holder);
+  // Do not reduce if the exec method is not on the prototype chain.
+  base::Optional<JSObjectRef> holder = ai_exec.holder();
+  if (!holder.has_value()) return inference.NoChange();
 
-    // Bail out if the exec method is not the original one.
-    base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
-        ai_exec.field_representation(), ai_exec.field_index(), dependencies());
-    if (!constant.has_value() ||
-        !constant->equals(native_context().regexp_exec_function())) {
-      return inference.NoChange();
-    }
-
-    // Add proper dependencies on the {regexp}s [[Prototype]]s.
-    dependencies()->DependOnStablePrototypeChains(
-        ai_exec.lookup_start_object_maps(), kStartAtPrototype,
-        MakeRef(broker(), holder));
-  } else {
-    // TODO(v8:11457) Support dictionary mode protoypes here.
+  // Bail out if the exec method is not the original one.
+  base::Optional<ObjectRef> constant = holder->GetOwnFastDataProperty(
+      ai_exec.field_representation(), ai_exec.field_index(), dependencies());
+  if (!constant.has_value() ||
+      !constant->equals(native_context().regexp_exec_function())) {
     return inference.NoChange();
   }
 
+  // Add proper dependencies on the {regexp}s [[Prototype]]s.
+  dependencies()->DependOnStablePrototypeChains(
+      ai_exec.lookup_start_object_maps(), kStartAtPrototype, holder.value());
+
   inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
                                       control, p.feedback());
 
@@ -8116,6 +8066,10 @@ Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
   return NoChange();
 }
 
+CompilationDependencies* JSCallReducer::dependencies() const {
+  return broker()->dependencies();
+}
+
 Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
 
 Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index b1ad8b5ba8..e9b09e3515 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -48,14 +48,12 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   using Flags = base::Flags<Flag>;
 
   JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
-                Zone* temp_zone, Flags flags,
-                CompilationDependencies* dependencies)
+                Zone* temp_zone, Flags flags)
       : AdvancedReducer(editor),
         jsgraph_(jsgraph),
         broker_(broker),
         temp_zone_(temp_zone),
-        flags_(flags),
-        dependencies_(dependencies) {}
+        flags_(flags) {}
 
   const char* reducer_name() const override { return "JSCallReducer"; }
 
@@ -72,6 +70,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
 
   bool has_wasm_calls() const { return has_wasm_calls_; }
 
+  CompilationDependencies* dependencies() const;
+
  private:
   Reduction ReduceBooleanConstructor(Node* node);
   Reduction ReduceCallApiFunction(Node* node,
@@ -256,13 +256,11 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
   Flags flags() const { return flags_; }
-  CompilationDependencies* dependencies() const { return dependencies_; }
 
   JSGraph* const jsgraph_;
   JSHeapBroker* const broker_;
   Zone* const temp_zone_;
   Flags const flags_;
-  CompilationDependencies* const dependencies_;
   std::set<Node*> waitlist_;
 
   // For preventing infinite recursion via ReduceJSCallWithArrayLikeOrSpread.
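
The js-call-reducer.h hunk above removes the stored dependencies_ member; the reducer now derives it from the broker via the new out-of-line accessor added to js-call-reducer.cc. Condensed, the shape of the change is (constructor signatures abbreviated, member initializers elided):

    // Before: dependencies injected and stored next to the broker.
    JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
                  Zone* temp_zone, Flags flags,
                  CompilationDependencies* dependencies);
    CompilationDependencies* dependencies() const { return dependencies_; }

    // After: a single source of truth, the broker owns the dependencies.
    JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
                  Zone* temp_zone, Flags flags);
    CompilationDependencies* dependencies() const {
      return broker()->dependencies();
    }

One fewer constructor parameter, and the reducer and broker can no longer disagree about which dependency set is in effect.
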
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index ecccf7e373..414977eb7d 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -389,12 +389,12 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
     DCHECK(closure_type.AsHeapConstant()->Ref().IsJSFunction());
     JSFunctionRef js_function =
         closure_type.AsHeapConstant()->Ref().AsJSFunction();
-    if (!js_function.has_initial_map()) return NoChange();
+    if (!js_function.has_initial_map(dependencies())) return NoChange();
 
     SlackTrackingPrediction slack_tracking_prediction =
         dependencies()->DependOnInitialMapInstanceSizePrediction(js_function);
 
-    MapRef initial_map = js_function.initial_map();
+    MapRef initial_map = js_function.initial_map(dependencies());
     DCHECK(initial_map.instance_type() == JS_GENERATOR_OBJECT_TYPE ||
            initial_map.instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
 
@@ -618,13 +618,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
   CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
   int const arity = static_cast<int>(p.arity());
-  base::Optional<AllocationSiteRef> site_ref;
-  {
-    Handle<AllocationSite> site;
-    if (p.site().ToHandle(&site)) {
-      site_ref = MakeRef(broker(), site);
-    }
-  }
+  base::Optional<AllocationSiteRef> site_ref = p.site(broker());
   AllocationType allocation = AllocationType::kYoung;
 
   base::Optional<MapRef> initial_map =
@@ -652,7 +646,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
   } else {
     PropertyCellRef array_constructor_protector =
         MakeRef(broker(), factory()->array_constructor_protector());
-    array_constructor_protector.SerializeAsProtector();
+    array_constructor_protector.CacheAsProtector();
     can_inline_call = array_constructor_protector.value().AsSmi() ==
                       Protectors::kProtectorValid;
   }
@@ -879,7 +873,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
   CreateBoundFunctionParameters const& p =
       CreateBoundFunctionParametersOf(node->op());
   int const arity = static_cast<int>(p.arity());
-  MapRef const map = MakeRef(broker(), p.map());
+  MapRef const map = p.map(broker());
   Node* bound_target_function = NodeProperties::GetValueInput(node, 0);
   Node* bound_this = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -920,9 +914,9 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
 Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
   JSCreateClosureNode n(node);
   CreateClosureParameters const& p = n.Parameters();
-  SharedFunctionInfoRef shared = MakeRef(broker(), p.shared_info());
+  SharedFunctionInfoRef shared = p.shared_info(broker());
   FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
-  HeapObjectRef code = MakeRef(broker(), p.code());
+  HeapObjectRef code = p.code(broker());
   Effect effect = n.effect();
   Control control = n.control();
   Node* context = n.context();
@@ -1060,7 +1054,8 @@ Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
   Node* effect = NodeProperties::GetEffectInput(node);
 
-  MapRef promise_map = native_context().promise_function().initial_map();
+  MapRef promise_map =
+      native_context().promise_function().initial_map(dependencies());
 
   AllocationBuilder a(jsgraph(), effect, graph()->start());
   a.Allocate(promise_map.instance_size());
@@ -1140,7 +1135,7 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
   Node* control = NodeProperties::GetControlInput(node);
 
   // Retrieve the initial map for the object.
-  MapRef map = native_context().object_function().initial_map();
+  MapRef map = native_context().object_function().initial_map(dependencies());
   DCHECK(!map.is_dictionary_map());
   DCHECK(!map.IsInobjectSlackTrackingInProgress());
   Node* js_object_map = jsgraph()->Constant(map);
@@ -1203,7 +1198,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
   const CreateFunctionContextParameters& parameters =
       CreateFunctionContextParametersOf(node->op());
-  ScopeInfoRef scope_info = MakeRef(broker(), parameters.scope_info());
+  ScopeInfoRef scope_info = parameters.scope_info(broker());
   int slot_count = parameters.slot_count();
   ScopeType scope_type = parameters.scope_type();
 
@@ -1243,7 +1238,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
 
 Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
-  ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
+  ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
   Node* extension = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
@@ -1264,7 +1259,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
-  ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
+  ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
   Node* exception = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
@@ -1285,7 +1280,7 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
-  ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
+  ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op());
   int const context_length = scope_info.ContextLength();
 
   // Use inline allocation for block contexts up to a size limit.
@@ -1313,10 +1308,12 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
 }
 
 namespace {
+
 base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
                                           HeapObjectRef prototype) {
   MapRef standard_map =
-      broker->target_native_context().object_function().initial_map();
+      broker->target_native_context().object_function().initial_map(
+          broker->dependencies());
   if (prototype.equals(standard_map.prototype().value())) {
     return standard_map;
   }
@@ -1329,6 +1326,7 @@ base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
   }
   return base::Optional<MapRef>();
 }
+
 }  // namespace
 
 Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
@@ -1886,7 +1884,8 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
 
 Node* JSCreateLowering::AllocateLiteralRegExp(
     Node* effect, Node* control, RegExpBoilerplateDescriptionRef boilerplate) {
-  MapRef initial_map = native_context().regexp_function().initial_map();
+  MapRef initial_map =
+      native_context().regexp_function().initial_map(dependencies());
 
   // Sanity check that JSRegExp object layout hasn't changed.
   STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index cc5d6aa69c..bbc47e45ad 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -239,15 +239,10 @@ namespace {
 // some cases - unlike the full builtin, the megamorphic builtin does fewer
 // checks and does not collect feedback.
 bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
+                                     base::Optional<NameRef> name,
                                      JSHeapBroker* broker) {
-  if (broker->is_native_context_independent()) {
-    // The decision to use the megamorphic load builtin is made based on
-    // current feedback, and is thus context-dependent. It cannot be used when
-    // generating NCI code.
-    return false;
-  }
-
-  ProcessedFeedback const& feedback = broker->GetFeedback(source);
+  ProcessedFeedback const& feedback =
+      broker->GetFeedbackForPropertyAccess(source, AccessMode::kLoad, name);
 
   if (feedback.kind() == ProcessedFeedback::kElementAccess) {
     return feedback.AsElementAccess().transition_groups().empty();
@@ -263,6 +258,7 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
   }
   UNREACHABLE();
 }
+
 }  // namespace
 
 void JSGenericLowering::LowerJSHasProperty(Node* node) {
@@ -290,14 +286,14 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
     n->InsertInput(zone(), 2,
                    jsgraph()->TaggedIndexConstant(p.feedback().index()));
     ReplaceWithBuiltinCall(
-        node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+        node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
                   ? Builtin::kKeyedLoadICTrampoline_Megamorphic
                  : Builtin::kKeyedLoadICTrampoline);
   } else {
     n->InsertInput(zone(), 2,
                    jsgraph()->TaggedIndexConstant(p.feedback().index()));
     ReplaceWithBuiltinCall(
-        node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+        node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
                  ? Builtin::kKeyedLoadIC_Megamorphic
                  : Builtin::kKeyedLoadIC);
   }
@@ -311,25 +307,25 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
   STATIC_ASSERT(n.FeedbackVectorIndex() == 1);
   if (!p.feedback().IsValid()) {
     n->RemoveInput(n.FeedbackVectorIndex());
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     ReplaceWithBuiltinCall(node, Builtin::kGetProperty);
   } else if (outer_state->opcode() != IrOpcode::kFrameState) {
     n->RemoveInput(n.FeedbackVectorIndex());
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 2,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
-    ReplaceWithBuiltinCall(
-        node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
-                  ? Builtin::kLoadICTrampoline_Megamorphic
-                  : Builtin::kLoadICTrampoline);
+    ReplaceWithBuiltinCall(node, ShouldUseMegamorphicLoadBuiltin(
+                                     p.feedback(), p.name(broker()), broker())
+                                     ? Builtin::kLoadICTrampoline_Megamorphic
+                                     : Builtin::kLoadICTrampoline);
   } else {
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 2,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
-    ReplaceWithBuiltinCall(
-        node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
-                  ? Builtin::kLoadIC_Megamorphic
-                  : Builtin::kLoadIC);
+    ReplaceWithBuiltinCall(node, ShouldUseMegamorphicLoadBuiltin(
+                                     p.feedback(), p.name(broker()), broker())
+                                     ? Builtin::kLoadIC_Megamorphic
+                                     : Builtin::kLoadIC);
   }
 }
 
@@ -354,7 +350,7 @@ void JSGenericLowering::LowerJSLoadNamedFromSuper(Node* node) {
   // be double-checked that the FeedbackVector parameter will be the
   // UndefinedConstant.
   DCHECK(p.feedback().IsValid());
-  node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.name()));
+  node->InsertInput(zone(), 2, jsgraph()->Constant(p.name(broker())));
   node->InsertInput(zone(), 3,
                     jsgraph()->TaggedIndexConstant(p.feedback().index()));
   ReplaceWithBuiltinCall(node, Builtin::kLoadSuperIC);
@@ -369,13 +365,13 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
   STATIC_ASSERT(n.FeedbackVectorIndex() == 0);
   if (outer_state->opcode() != IrOpcode::kFrameState) {
     n->RemoveInput(n.FeedbackVectorIndex());
-    node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 1,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
     ReplaceWithBuiltinCall(node, callable, flags);
   } else {
-    node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 1,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     Callable callable =
@@ -434,16 +430,16 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
   STATIC_ASSERT(n.FeedbackVectorIndex() == 2);
   if (!p.feedback().IsValid()) {
     n->RemoveInput(n.FeedbackVectorIndex());
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     ReplaceWithRuntimeCall(node, Runtime::kSetNamedProperty);
   } else if (outer_state->opcode() != IrOpcode::kFrameState) {
     n->RemoveInput(n.FeedbackVectorIndex());
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 3,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     ReplaceWithBuiltinCall(node, Builtin::kStoreICTrampoline);
   } else {
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 3,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     ReplaceWithBuiltinCall(node, Builtin::kStoreIC);
@@ -459,13 +455,13 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
   STATIC_ASSERT(n.FeedbackVectorIndex() == 2);
   if (outer_state->opcode() != IrOpcode::kFrameState) {
     n->RemoveInput(n.FeedbackVectorIndex());
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 3,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     Callable callable = CodeFactory::StoreOwnIC(isolate());
     ReplaceWithBuiltinCall(node, callable, flags);
   } else {
-    node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 1, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 3,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
@@ -481,12 +477,12 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
   STATIC_ASSERT(n.FeedbackVectorIndex() == 1);
   if (outer_state->opcode() != IrOpcode::kFrameState) {
     n->RemoveInput(n.FeedbackVectorIndex());
-    node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 2,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     ReplaceWithBuiltinCall(node, Builtin::kStoreGlobalICTrampoline);
   } else {
-    node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+    node->InsertInput(zone(), 0, jsgraph()->Constant(p.name(broker())));
     node->InsertInput(zone(), 2,
                       jsgraph()->TaggedIndexConstant(p.feedback().index()));
     ReplaceWithBuiltinCall(node, Builtin::kStoreGlobalIC);
@@ -591,12 +587,9 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
   DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
   Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
   Node* stub_arity = jsgraph()->Int32Constant(arity);
-  MaybeHandle<AllocationSite> const maybe_site = p.site();
-  Handle<AllocationSite> site;
-  DCHECK_IMPLIES(broker()->is_native_context_independent(),
-                 maybe_site.is_null());
-  Node* type_info = maybe_site.ToHandle(&site) ? jsgraph()->HeapConstant(site)
-                                               : jsgraph()->UndefinedConstant();
+  base::Optional<AllocationSiteRef> const site = p.site(broker());
+  Node* type_info = site.has_value() ? jsgraph()->Constant(site.value())
+                                     : jsgraph()->UndefinedConstant();
   Node* receiver = jsgraph()->UndefinedConstant();
   node->InsertInput(zone(), 0, stub_code);
   node->InsertInput(zone(), 3, stub_arity);
@@ -640,9 +633,9 @@ void JSGenericLowering::LowerJSRegExpTest(Node* node) {
 
 void JSGenericLowering::LowerJSCreateClosure(Node* node) {
   JSCreateClosureNode n(node);
   CreateClosureParameters const& p = n.Parameters();
-  Handle<SharedFunctionInfo> const shared_info = p.shared_info();
+  SharedFunctionInfoRef shared_info = p.shared_info(broker());
   STATIC_ASSERT(n.FeedbackCellIndex() == 0);
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
+  node->InsertInput(zone(), 0, jsgraph()->Constant(shared_info));
   node->RemoveInput(4);  // control
 
   // Use the FastNewClosure builtin only for functions allocated in new space.
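
Most hunks in this file are one rewrite: operator parameters that used to expose a Handle<T> (embedded with jsgraph()->HeapConstant(...)) now expose a ref when handed the broker, embedded with jsgraph()->Constant(...). A plausible accessor shape, assuming the parameter classes store the broker-free TinyRef introduced in the heap-refs.h hunks earlier; the actual storage in js-operator.h is not shown in this diff:

    // Hypothetical parameter storage illustrating the accessor change.
    class NamedAccess {
     public:
      // Rehydrate a full NameRef by pairing the stored data with a broker.
      NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); }

     private:
      NameTinyRef name_;  // no broker_ field, keeps the operator parameter small
    };

Call sites then change from jsgraph()->HeapConstant(p.name()) to jsgraph()->Constant(p.name(broker())), exactly as in the load/store lowerings above.
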
@@ -656,7 +649,7 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) { void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) { const CreateFunctionContextParameters& parameters = CreateFunctionContextParametersOf(node->op()); - Handle<ScopeInfo> scope_info = parameters.scope_info(); + ScopeInfoRef scope_info = parameters.scope_info(broker()); int slot_count = parameters.slot_count(); ScopeType scope_type = parameters.scope_type(); CallDescriptor::Flags flags = FrameStateFlagForCall(node); @@ -664,11 +657,11 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) { if (slot_count <= ConstructorBuiltins::MaximumFunctionContextSlots()) { Callable callable = CodeFactory::FastNewFunctionContext(isolate(), scope_type); - node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info)); + node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info)); node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count)); ReplaceWithBuiltinCall(node, callable, flags); } else { - node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info)); + node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info)); ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext); } } @@ -704,7 +697,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) { STATIC_ASSERT(n.FeedbackVectorIndex() == 0); node->InsertInput(zone(), 1, jsgraph()->TaggedIndexConstant(p.feedback().index())); - node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant())); + node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker()))); node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags())); // Use the CreateShallowArrayLiteral builtin only for shallow boilerplates @@ -720,8 +713,8 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) { void JSGenericLowering::LowerJSGetTemplateObject(Node* node) { JSGetTemplateObjectNode n(node); GetTemplateObjectParameters const& p = n.Parameters(); - SharedFunctionInfoRef shared = MakeRef(broker(), p.shared()); - TemplateObjectDescriptionRef description = MakeRef(broker(), p.description()); + SharedFunctionInfoRef shared = p.shared(broker()); + TemplateObjectDescriptionRef description = p.description(broker()); DCHECK_EQ(node->op()->ControlInputCount(), 1); node->RemoveInput(NodeProperties::FirstControlIndex(node)); @@ -755,7 +748,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) { STATIC_ASSERT(n.FeedbackVectorIndex() == 0); node->InsertInput(zone(), 1, jsgraph()->TaggedIndexConstant(p.feedback().index())); - node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant())); + node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker()))); node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags())); // Use the CreateShallowObjectLiteratal builtin only for shallow boilerplates @@ -789,47 +782,30 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) { STATIC_ASSERT(n.FeedbackVectorIndex() == 0); node->InsertInput(zone(), 1, jsgraph()->TaggedIndexConstant(p.feedback().index())); - node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant())); + node->InsertInput(zone(), 2, jsgraph()->Constant(p.constant(broker()))); node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags())); ReplaceWithBuiltinCall(node, Builtin::kCreateRegExpLiteral); } void JSGenericLowering::LowerJSCreateCatchContext(Node* node) { - Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op()); - node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info)); + ScopeInfoRef scope_info = ScopeInfoOf(broker(), 
node->op()); + node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info)); ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext); } void JSGenericLowering::LowerJSCreateWithContext(Node* node) { - Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op()); - node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info)); + ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op()); + node->InsertInput(zone(), 1, jsgraph()->Constant(scope_info)); ReplaceWithRuntimeCall(node, Runtime::kPushWithContext); } void JSGenericLowering::LowerJSCreateBlockContext(Node* node) { - Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op()); - node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info)); + ScopeInfoRef scope_info = ScopeInfoOf(broker(), node->op()); + node->InsertInput(zone(), 0, jsgraph()->Constant(scope_info)); ReplaceWithRuntimeCall(node, Runtime::kPushBlockContext); } -namespace { - -bool CollectCallAndConstructFeedback(JSHeapBroker* broker) { - // Call and construct feedback is a special case. Besides shape feedback, we - // also increment the call count, which is later used to make inlining - // decisions. The call count is only comparable/reliable if it is incremented - // for all calls inside a function. This is not the case in default turbofan - // mode, in which many calls may be inlined and will thus never reach generic - // lowering (where we insert the feedback-collecting builtin call). - // Therefore it should only be collected in native context independent code, - // where we 1. know every call will reach generic lowering, and 2. we must - // collect full feedback to properly tier up later. - return broker->is_native_context_independent(); -} - -} // namespace - // TODO(jgruber,v8:8888): Should this collect feedback? void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) { ConstructForwardVarargsParameters p = @@ -861,57 +837,22 @@ void JSGenericLowering::LowerJSConstruct(Node* node) { CallDescriptor::Flags flags = FrameStateFlagForCall(node); static constexpr int kReceiver = 1; - static constexpr int kMaybeFeedbackVector = 1; - if (CollectFeedbackInGenericLowering() && - CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) { - const int stack_argument_count = - arg_count + kReceiver + kMaybeFeedbackVector; - Callable callable = - Builtins::CallableFor(isolate(), Builtin::kConstruct_WithFeedback); - // If this fails, we might need to update the parameter reordering code - // to ensure that the additional arguments passed via stack are pushed - // between top of stack and JS arguments. - DCHECK_EQ(callable.descriptor().GetStackParameterCount(), - kMaybeFeedbackVector); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* stub_arity = jsgraph()->Int32Constant(arg_count); - Node* slot = jsgraph()->UintPtrConstant(p.feedback().index()); - Node* receiver = jsgraph()->UndefinedConstant(); - Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex()); - // Register argument inputs are followed by stack argument inputs (such as - // feedback_vector). Both are listed in ascending order. Note that - // the receiver is implicitly placed on the stack and is thus inserted - // between explicitly-specified register and stack arguments. - // TODO(jgruber): Implement a simpler way to specify these mutations. 
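The deleted CollectCallAndConstructFeedback helper carried the rationale for why call counts were only collected in native-context-independent code: the count feeds inlining decisions and is only comparable when every call site increments it, which default TurboFan (where many calls are inlined and never reach generic lowering) cannot guarantee. A toy model of that distortion, under the assumption that only calls reaching generic lowering are counted:

#include <cassert>

struct Counter { int observed = 0; };

// Only calls that reach the feedback-collecting builtin in generic lowering
// bump the count; inlined calls silently bypass it.
void RecordCall(Counter& c, bool reached_generic_lowering) {
  if (reached_generic_lowering) ++c.observed;
}

int main() {
  Counter c;
  const int real_calls = 100;
  for (int i = 0; i < real_calls; ++i) {
    bool inlined = (i % 2 == 0);  // half the call sites were inlined
    RecordCall(c, /*reached_generic_lowering=*/!inlined);
  }
  assert(c.observed == 50);  // the observed frequency is half the real one
  return 0;
}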
- node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 3, stub_arity); - node->InsertInput(zone(), 4, slot); - node->InsertInput(zone(), 5, feedback_vector); - node->InsertInput(zone(), 6, receiver); - // After: {code, target, new_target, arity, slot, vector, receiver, - // ...args}. - - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } else { - const int stack_argument_count = arg_count + kReceiver; - Callable callable = Builtins::CallableFor(isolate(), Builtin::kConstruct); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* stub_arity = jsgraph()->Int32Constant(arg_count); - Node* receiver = jsgraph()->UndefinedConstant(); - node->RemoveInput(n.FeedbackVectorIndex()); - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 3, stub_arity); - node->InsertInput(zone(), 4, receiver); + const int stack_argument_count = arg_count + kReceiver; + Callable callable = Builtins::CallableFor(isolate(), Builtin::kConstruct); + auto call_descriptor = Linkage::GetStubCallDescriptor( + zone(), callable.descriptor(), stack_argument_count, flags); + Node* stub_code = jsgraph()->HeapConstant(callable.code()); + Node* stub_arity = jsgraph()->Int32Constant(arg_count); + Node* receiver = jsgraph()->UndefinedConstant(); + node->RemoveInput(n.FeedbackVectorIndex()); + node->InsertInput(zone(), 0, stub_code); + node->InsertInput(zone(), 3, stub_arity); + node->InsertInput(zone(), 4, receiver); - // After: {code, target, new_target, arity, receiver, ...args}. + // After: {code, target, new_target, arity, receiver, ...args}. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); } void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) { @@ -923,58 +864,25 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) { static constexpr int kReceiver = 1; static constexpr int kArgumentList = 1; - static constexpr int kMaybeFeedbackVector = 1; - - if (CollectFeedbackInGenericLowering() && - CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) { - const int stack_argument_count = - arg_count - kArgumentList + kReceiver + kMaybeFeedbackVector; - Callable callable = Builtins::CallableFor( - isolate(), Builtin::kConstructWithArrayLike_WithFeedback); - // If this fails, we might need to update the parameter reordering code - // to ensure that the additional arguments passed via stack are pushed - // between top of stack and JS arguments. - DCHECK_EQ(callable.descriptor().GetStackParameterCount(), - kMaybeFeedbackVector); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* receiver = jsgraph()->UndefinedConstant(); - Node* slot = jsgraph()->UintPtrConstant(p.feedback().index()); - Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex()); - // Register argument inputs are followed by stack argument inputs (such as - // feedback_vector). Both are listed in ascending order. Note that - // the receiver is implicitly placed on the stack and is thus inserted - // between explicitly-specified register and stack arguments. - // TODO(jgruber): Implement a simpler way to specify these mutations. 
- node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 4, slot); - node->InsertInput(zone(), 5, feedback_vector); - node->InsertInput(zone(), 6, receiver); - // After: {code, target, new_target, arguments_list, slot, vector, - // receiver}. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } else { - const int stack_argument_count = arg_count - kArgumentList + kReceiver; - Callable callable = - Builtins::CallableFor(isolate(), Builtin::kConstructWithArrayLike); - // If this fails, we might need to update the parameter reordering code - // to ensure that the additional arguments passed via stack are pushed - // between top of stack and JS arguments. - DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* receiver = jsgraph()->UndefinedConstant(); - node->RemoveInput(n.FeedbackVectorIndex()); - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 4, receiver); + const int stack_argument_count = arg_count - kArgumentList + kReceiver; + Callable callable = + Builtins::CallableFor(isolate(), Builtin::kConstructWithArrayLike); + // If this fails, we might need to update the parameter reordering code + // to ensure that the additional arguments passed via stack are pushed + // between top of stack and JS arguments. + DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0); + auto call_descriptor = Linkage::GetStubCallDescriptor( + zone(), callable.descriptor(), stack_argument_count, flags); + Node* stub_code = jsgraph()->HeapConstant(callable.code()); + Node* receiver = jsgraph()->UndefinedConstant(); + node->RemoveInput(n.FeedbackVectorIndex()); + node->InsertInput(zone(), 0, stub_code); + node->InsertInput(zone(), 4, receiver); - // After: {code, target, new_target, arguments_list, receiver}. + // After: {code, target, new_target, arguments_list, receiver}. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); } void JSGenericLowering::LowerJSConstructWithSpread(Node* node) { @@ -986,80 +894,34 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) { static constexpr int kReceiver = 1; static constexpr int kTheSpread = 1; // Included in `arg_count`. - static constexpr int kMaybeFeedbackVector = 1; - - if (CollectFeedbackInGenericLowering() && - CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) { - const int stack_argument_count = - arg_count + kReceiver + kMaybeFeedbackVector; - Callable callable = Builtins::CallableFor( - isolate(), Builtin::kConstructWithSpread_WithFeedback); - // If this fails, we might need to update the parameter reordering code - // to ensure that the additional arguments passed via stack are pushed - // between top of stack and JS arguments. - DCHECK_EQ(callable.descriptor().GetStackParameterCount(), - kTheSpread + kMaybeFeedbackVector); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* slot = jsgraph()->UintPtrConstant(p.feedback().index()); - - // The single available register is needed for `slot`, thus `spread` remains - // on the stack here. 
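The surviving (non-feedback) branches compute each stub's stack argument count from a few constants: the implicit receiver is pushed on the stack, while the arguments list and the spread travel in registers and are therefore subtracted. A small sketch reproducing that arithmetic for the three construct lowerings, with the constants copied from the code above:

#include <cassert>

constexpr int kReceiver = 1;
constexpr int kArgumentList = 1;  // passed in a register
constexpr int kTheSpread = 1;     // included in arg_count, passed in a register

constexpr int StackArgsConstruct(int arg_count) {
  return arg_count + kReceiver;
}
constexpr int StackArgsConstructWithArrayLike(int arg_count) {
  return arg_count - kArgumentList + kReceiver;
}
constexpr int StackArgsConstructWithSpread(int arg_count) {
  return arg_count + kReceiver - kTheSpread;
}

int main() {
  static_assert(StackArgsConstruct(3) == 4);
  // arg_count already includes the arguments list / the spread:
  static_assert(StackArgsConstructWithArrayLike(1) == 1);
  static_assert(StackArgsConstructWithSpread(3) == 3);
  return 0;
}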
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread); - Node* receiver = jsgraph()->UndefinedConstant(); - Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex()); - Node* spread = node->RemoveInput(n.LastArgumentIndex()); - - // Register argument inputs are followed by stack argument inputs (such as - // feedback_vector). Both are listed in ascending order. Note that - // the receiver is implicitly placed on the stack and is thus inserted - // between explicitly-specified register and stack arguments. - // TODO(jgruber): Implement a simpler way to specify these mutations. - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 3, stub_arity); - node->InsertInput(zone(), 4, slot); - // Arguments in the stack should be inserted in reversed order, ie, the last - // arguments defined in the interface descriptor should be inserted first. - DCHECK_EQ(callable.descriptor().GetStackArgumentOrder(), - StackArgumentOrder::kJS); - node->InsertInput(zone(), 5, feedback_vector); - node->InsertInput(zone(), 6, spread); - node->InsertInput(zone(), 7, receiver); - // After: {code, target, new_target, arity, slot, vector, spread, receiver, - // ...args}. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } else { - const int stack_argument_count = arg_count + kReceiver - kTheSpread; - Callable callable = CodeFactory::ConstructWithSpread(isolate()); - // If this fails, we might need to update the parameter reordering code - // to ensure that the additional arguments passed via stack are pushed - // between top of stack and JS arguments. - DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); + const int stack_argument_count = arg_count + kReceiver - kTheSpread; + Callable callable = CodeFactory::ConstructWithSpread(isolate()); + // If this fails, we might need to update the parameter reordering code + // to ensure that the additional arguments passed via stack are pushed + // between top of stack and JS arguments. + DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0); + auto call_descriptor = Linkage::GetStubCallDescriptor( + zone(), callable.descriptor(), stack_argument_count, flags); + Node* stub_code = jsgraph()->HeapConstant(callable.code()); - // We pass the spread in a register, not on the stack. - Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread); - Node* receiver = jsgraph()->UndefinedConstant(); - DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex()); - node->RemoveInput(n.FeedbackVectorIndex()); - Node* spread = node->RemoveInput(n.LastArgumentIndex()); + // We pass the spread in a register, not on the stack. + Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread); + Node* receiver = jsgraph()->UndefinedConstant(); + DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex()); + node->RemoveInput(n.FeedbackVectorIndex()); + Node* spread = node->RemoveInput(n.LastArgumentIndex()); - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 3, stub_arity); - node->InsertInput(zone(), 4, spread); - node->InsertInput(zone(), 5, receiver); + node->InsertInput(zone(), 0, stub_code); + node->InsertInput(zone(), 3, stub_arity); + node->InsertInput(zone(), 4, spread); + node->InsertInput(zone(), 5, receiver); - // After: {code, target, new_target, arity, spread, receiver, ...args}. 
+ // After: {code, target, new_target, arity, spread, receiver, ...args}. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); } -// TODO(jgruber,v8:8888): Should this collect feedback? void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) { CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op()); int const arg_count = static_cast<int>(p.arity() - 2); @@ -1082,34 +944,17 @@ void JSGenericLowering::LowerJSCall(Node* node) { int const arg_count = p.arity_without_implicit_args(); ConvertReceiverMode const mode = p.convert_mode(); - Node* feedback_vector = n.feedback_vector(); node->RemoveInput(n.FeedbackVectorIndex()); - if (CollectFeedbackInGenericLowering() && - CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) { - Callable callable = CodeFactory::Call_WithFeedback(isolate(), mode); - CallDescriptor::Flags flags = FrameStateFlagForCall(node); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), arg_count + 1, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* stub_arity = jsgraph()->Int32Constant(arg_count); - Node* slot = jsgraph()->UintPtrConstant(p.feedback().index()); - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 2, stub_arity); - node->InsertInput(zone(), 3, slot); - node->InsertInput(zone(), 4, feedback_vector); - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } else { - Callable callable = CodeFactory::Call(isolate(), mode); - CallDescriptor::Flags flags = FrameStateFlagForCall(node); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), arg_count + 1, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* stub_arity = jsgraph()->Int32Constant(arg_count); - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 2, stub_arity); - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } + Callable callable = CodeFactory::Call(isolate(), mode); + CallDescriptor::Flags flags = FrameStateFlagForCall(node); + auto call_descriptor = Linkage::GetStubCallDescriptor( + zone(), callable.descriptor(), arg_count + 1, flags); + Node* stub_code = jsgraph()->HeapConstant(callable.code()); + Node* stub_arity = jsgraph()->Int32Constant(arg_count); + node->InsertInput(zone(), 0, stub_code); + node->InsertInput(zone(), 2, stub_arity); + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); } void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) { @@ -1122,55 +967,25 @@ void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) { static constexpr int kArgumentsList = 1; static constexpr int kReceiver = 1; - if (CollectFeedbackInGenericLowering() && - CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) { - const int stack_argument_count = arg_count - kArgumentsList + kReceiver; - Callable callable = Builtins::CallableFor( - isolate(), Builtin::kCallWithArrayLike_WithFeedback); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* receiver = n.receiver(); - Node* arguments_list = n.Argument(0); - Node* feedback_vector = n.feedback_vector(); - Node* slot = jsgraph()->UintPtrConstant(p.feedback().index()); - - // Shuffling inputs. - // Before: {target, receiver, arguments_list, vector}. 
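The before/after comments in LowerJSCallWithArrayLike describe an input shuffle over the node. A self-contained model of the same RemoveInput/InsertInput/ReplaceInput sequence over a plain vector, asserting the final layout the comment promises:

#include <cassert>
#include <string>
#include <vector>

using Inputs = std::vector<std::string>;

// Minimal stand-ins for Node::RemoveInput/InsertInput/ReplaceInput.
void RemoveInput(Inputs& in, size_t i) { in.erase(in.begin() + i); }
void InsertInput(Inputs& in, size_t i, std::string v) { in.insert(in.begin() + i, std::move(v)); }
void ReplaceInput(Inputs& in, size_t i, std::string v) { in[i] = std::move(v); }

int main() {
  // Before: {target, receiver, arguments_list, vector}.
  Inputs in = {"target", "receiver", "arguments_list", "vector"};
  std::string receiver = in[1];
  std::string arguments_list = in[2];
  RemoveInput(in, 3);           // drop the feedback vector
  InsertInput(in, 0, "code");   // stub code goes first
  ReplaceInput(in, 2, arguments_list);
  ReplaceInput(in, 3, receiver);
  // After: {code, target, arguments_list, receiver}.
  assert((in == Inputs{"code", "target", "arguments_list", "receiver"}));
  return 0;
}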
- - node->ReplaceInput(1, arguments_list); - node->ReplaceInput(2, feedback_vector); - node->ReplaceInput(3, receiver); - - // Now: {target, arguments_list, vector, receiver}. - - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 3, slot); - - // After: {code, target, arguments_list, slot, vector, receiver}. - - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } else { - const int stack_argument_count = arg_count - kArgumentsList + kReceiver; - Callable callable = CodeFactory::CallWithArrayLike(isolate()); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* receiver = n.receiver(); - Node* arguments_list = n.Argument(0); + const int stack_argument_count = arg_count - kArgumentsList + kReceiver; + Callable callable = CodeFactory::CallWithArrayLike(isolate()); + auto call_descriptor = Linkage::GetStubCallDescriptor( + zone(), callable.descriptor(), stack_argument_count, flags); + Node* stub_code = jsgraph()->HeapConstant(callable.code()); + Node* receiver = n.receiver(); + Node* arguments_list = n.Argument(0); - // Shuffling inputs. - // Before: {target, receiver, arguments_list, vector}. + // Shuffling inputs. + // Before: {target, receiver, arguments_list, vector}. - node->RemoveInput(n.FeedbackVectorIndex()); - node->InsertInput(zone(), 0, stub_code); - node->ReplaceInput(2, arguments_list); - node->ReplaceInput(3, receiver); + node->RemoveInput(n.FeedbackVectorIndex()); + node->InsertInput(zone(), 0, stub_code); + node->ReplaceInput(2, arguments_list); + node->ReplaceInput(3, receiver); - // After: {code, target, arguments_list, receiver}. + // After: {code, target, arguments_list, receiver}. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); } void JSGenericLowering::LowerJSCallWithSpread(Node* node) { @@ -1182,73 +997,33 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) { static constexpr int kReceiver = 1; static constexpr int kTheSpread = 1; - static constexpr int kMaybeFeedbackVector = 1; - - if (CollectFeedbackInGenericLowering() && - CollectCallAndConstructFeedback(broker()) && p.feedback().IsValid()) { - const int stack_argument_count = - arg_count - kTheSpread + kReceiver + kMaybeFeedbackVector; - Callable callable = - Builtins::CallableFor(isolate(), Builtin::kCallWithSpread_WithFeedback); - // If this fails, we might need to update the parameter reordering code - // to ensure that the additional arguments passed via stack are pushed - // between top of stack and JS arguments. - DCHECK_EQ(callable.descriptor().GetStackParameterCount(), - kMaybeFeedbackVector); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); - Node* slot = jsgraph()->UintPtrConstant(p.feedback().index()); - - // We pass the spread in a register, not on the stack. - Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread); - - // Register argument inputs are followed by stack argument inputs (such as - // feedback_vector). Both are listed in ascending order. Note that - // the receiver is implicitly placed on the stack and is thus inserted - // between explicitly-specified register and stack arguments. - // TODO(jgruber): Implement a simpler way to specify these mutations. - - // Shuffling inputs. 
- // Before: {target, receiver, ...args, spread, vector}. - Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex()); - Node* spread = node->RemoveInput(n.LastArgumentIndex()); - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 2, stub_arity); - node->InsertInput(zone(), 3, spread); - node->InsertInput(zone(), 4, slot); - node->InsertInput(zone(), 5, feedback_vector); - // After: {code, target, arity, spread, slot, vector, receiver, ...args}. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } else { - const int stack_argument_count = arg_count - kTheSpread + kReceiver; - Callable callable = CodeFactory::CallWithSpread(isolate()); - // If this fails, we might need to update the parameter reordering code - // to ensure that the additional arguments passed via stack are pushed - // between top of stack and JS arguments. - DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0); - auto call_descriptor = Linkage::GetStubCallDescriptor( - zone(), callable.descriptor(), stack_argument_count, flags); - Node* stub_code = jsgraph()->HeapConstant(callable.code()); + const int stack_argument_count = arg_count - kTheSpread + kReceiver; + Callable callable = CodeFactory::CallWithSpread(isolate()); + // If this fails, we might need to update the parameter reordering code + // to ensure that the additional arguments passed via stack are pushed + // between top of stack and JS arguments. + DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0); + auto call_descriptor = Linkage::GetStubCallDescriptor( + zone(), callable.descriptor(), stack_argument_count, flags); + Node* stub_code = jsgraph()->HeapConstant(callable.code()); - // We pass the spread in a register, not on the stack. - Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread); + // We pass the spread in a register, not on the stack. + Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread); - // Shuffling inputs. - // Before: {target, receiver, ...args, spread, vector}. + // Shuffling inputs. + // Before: {target, receiver, ...args, spread, vector}. - node->RemoveInput(n.FeedbackVectorIndex()); - Node* spread = node->RemoveInput(n.LastArgumentIndex()); + node->RemoveInput(n.FeedbackVectorIndex()); + Node* spread = node->RemoveInput(n.LastArgumentIndex()); - node->InsertInput(zone(), 0, stub_code); - node->InsertInput(zone(), 2, stub_arity); - node->InsertInput(zone(), 3, spread); + node->InsertInput(zone(), 0, stub_code); + node->InsertInput(zone(), 2, stub_arity); + node->InsertInput(zone(), 3, spread); - // After: {code, target, arity, spread, receiver, ...args}. + // After: {code, target, arity, spread, receiver, ...args}. 
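The kCallWithSpread shuffle pops both the feedback vector and the spread off the argument list (the spread moves to a register), then prepends code and arity. Modeled the same way as above, with hypothetical argument names arg0/arg1:

#include <cassert>
#include <string>
#include <vector>

int main() {
  using Inputs = std::vector<std::string>;
  // Before: {target, receiver, ...args, spread, vector}.
  Inputs in = {"target", "receiver", "arg0", "arg1", "spread", "vector"};
  in.pop_back();                        // RemoveInput(FeedbackVectorIndex)
  std::string spread = in.back();
  in.pop_back();                        // RemoveInput(LastArgumentIndex)
  in.insert(in.begin() + 0, "code");    // InsertInput(0, stub_code)
  in.insert(in.begin() + 2, "arity");   // InsertInput(2, arg_count - kTheSpread)
  in.insert(in.begin() + 3, spread);    // InsertInput(3, spread)
  // After: {code, target, arity, spread, receiver, ...args}.
  assert((in == Inputs{"code", "target", "arity", "spread", "receiver", "arg0", "arg1"}));
  return 0;
}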
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - } + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); } void JSGenericLowering::LowerJSCallRuntime(Node* node) { diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc index 024e4f147f..dc34bcae6d 100644 --- a/deps/v8/src/compiler/js-heap-broker.cc +++ b/deps/v8/src/compiler/js-heap-broker.cc @@ -19,6 +19,7 @@ #include "src/objects/feedback-cell.h" #include "src/objects/js-array-inl.h" #include "src/objects/literal-objects-inl.h" +#include "src/objects/map-updater.h" #include "src/objects/objects-inl.h" #include "src/objects/oddball.h" #include "src/objects/property-cell.h" @@ -54,8 +55,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone, feedback_(zone()), property_access_infos_(zone()), minimorphic_property_access_infos_(zone()), - typed_array_string_tags_(zone()), - serialized_functions_(zone()) { + typed_array_string_tags_(zone()) { // Note that this initialization of {refs_} with the minimal initial capacity // is redundant in the normal use case (concurrent compilation enabled, // standard objects to be serialized), as the map is going to be replaced @@ -124,10 +124,6 @@ void JSHeapBroker::Retire() { CHECK_EQ(mode_, kSerialized); TRACE(this, "Retiring"); mode_ = kRetired; - -#ifdef DEBUG - PrintRefsAnalysis(); -#endif // DEBUG } void JSHeapBroker::SetTargetNativeContextRef( @@ -170,40 +166,6 @@ StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) { } } -bool JSHeapBroker::ShouldBeSerializedForCompilation( - const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback, - const HintsVector& arguments) const { - if (serialized_functions_.size() >= kMaxSerializedFunctionsCacheSize) { - TRACE_BROKER_MISSING(this, - "opportunity - serialized functions cache is full."); - return false; - } - SerializedFunction function{shared, feedback}; - auto matching_functions = serialized_functions_.equal_range(function); - return std::find_if(matching_functions.first, matching_functions.second, - [&arguments](const auto& entry) { - return entry.second == arguments; - }) == matching_functions.second; -} - -void JSHeapBroker::SetSerializedForCompilation( - const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback, - const HintsVector& arguments) { - SerializedFunction function{shared, feedback}; - serialized_functions_.insert({function, arguments}); - TRACE(this, "Set function " << shared << " with " << feedback - << " as serialized for compilation"); -} - -bool JSHeapBroker::IsSerializedForCompilation( - const SharedFunctionInfoRef& shared, - const FeedbackVectorRef& feedback) const { - if (mode() == kDisabled) return true; - - SerializedFunction function = {shared, feedback}; - return serialized_functions_.find(function) != serialized_functions_.end(); -} - bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const { return IsArrayOrObjectPrototype(object.object()); } @@ -285,36 +247,37 @@ ElementAccessFeedback::transition_groups() const { } ElementAccessFeedback const& ElementAccessFeedback::Refine( - ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const { + JSHeapBroker* broker, ZoneVector<MapRef> const& inferred_maps) const { ElementAccessFeedback& refined_feedback = - *zone->New<ElementAccessFeedback>(zone, keyed_mode(), slot_kind()); + *broker->zone()->New<ElementAccessFeedback>(broker->zone(), keyed_mode(), + slot_kind()); if (inferred_maps.empty()) return refined_feedback; - 
ZoneUnorderedSet<Handle<Map>, Handle<Map>::hash, Handle<Map>::equal_to> - inferred(zone); + ZoneRefUnorderedSet<MapRef> inferred(broker->zone()); inferred.insert(inferred_maps.begin(), inferred_maps.end()); for (auto const& group : transition_groups()) { DCHECK(!group.empty()); - TransitionGroup new_group(zone); + TransitionGroup new_group(broker->zone()); for (size_t i = 1; i < group.size(); ++i) { - Handle<Map> source = group[i]; + MapRef source = MakeRefAssumeMemoryFence(broker, *group[i]); if (inferred.find(source) != inferred.end()) { - new_group.push_back(source); + new_group.push_back(source.object()); } } - Handle<Map> target = group.front(); + MapRef target = MakeRefAssumeMemoryFence(broker, *group.front()); bool const keep_target = inferred.find(target) != inferred.end() || new_group.size() > 1; if (keep_target) { - new_group.push_back(target); + new_group.push_back(target.object()); // The target must be at the front, the order of sources doesn't matter. std::swap(new_group[0], new_group[new_group.size() - 1]); } if (!new_group.empty()) { - DCHECK(new_group.size() == 1 || new_group.front().equals(target)); + DCHECK(new_group.size() == 1 || + new_group.front().equals(target.object())); refined_feedback.transition_groups_.push_back(std::move(new_group)); } } @@ -378,8 +341,8 @@ bool GlobalAccessFeedback::immutable() const { base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const { if (IsPropertyCell()) { - bool cell_serialized = property_cell().Serialize(); - CHECK(cell_serialized); // Can't fail on the main thread. + bool cell_cached = property_cell().Cache(); + CHECK(cell_cached); // Can't fail on the main thread. return property_cell().value(); } else if (IsScriptContextSlot() && immutable()) { return script_context().get(slot_index()); @@ -468,7 +431,7 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const { MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback( NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler, - ZoneVector<Handle<Map>> const& maps, bool has_migration_target_maps) + ZoneVector<MapRef> const& maps, bool has_migration_target_maps) : ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind), name_(name), handler_(handler), @@ -478,7 +441,7 @@ MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback( } NamedAccessFeedback::NamedAccessFeedback(NameRef const& name, - ZoneVector<Handle<Map>> const& maps, + ZoneVector<MapRef> const& maps, FeedbackSlotKind slot_kind) : ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) { DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) || @@ -510,46 +473,24 @@ ProcessedFeedback const& JSHeapBroker::GetFeedback( FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind( FeedbackSource const& source) const { - if (is_concurrent_inlining_) { - ProcessedFeedback const& processed = GetFeedback(source); - return processed.slot_kind(); - } + if (HasFeedback(source)) return GetFeedback(source).slot_kind(); FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); return nexus.kind(); } bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const { - return is_concurrent_inlining_ ? 
GetFeedback(source).IsInsufficient() - : FeedbackNexus(source.vector, source.slot, - feedback_nexus_config()) - .IsUninitialized(); + if (HasFeedback(source)) return GetFeedback(source).IsInsufficient(); + return FeedbackNexus(source.vector, source.slot, feedback_nexus_config()) + .IsUninitialized(); } namespace { -// Update deprecated maps, drop unupdatable ones and abandoned prototype maps. -void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) { - auto in = maps->begin(); - auto out = in; - auto end = maps->end(); - - for (; in != end; ++in) { - Handle<Map> map = *in; - if (Map::TryUpdate(isolate, map).ToHandle(&map) && - !map->is_abandoned_prototype_map()) { - DCHECK(!map->is_deprecated()); - *out = map; - ++out; - } - } - - // Remove everything between the last valid map and the end of the vector. - maps->erase(out, end); -} - +using MapRefAndHandler = std::pair<MapRef, MaybeObjectHandle>; MaybeObjectHandle TryGetMinimorphicHandler( - std::vector<MapAndHandler> const& maps_and_handlers, FeedbackSlotKind kind, - Handle<NativeContext> native_context, bool is_turboprop) { + ZoneVector<MapRefAndHandler> const& maps_and_handlers, + FeedbackSlotKind kind, NativeContextRef const& native_context, + bool is_turboprop) { if (!is_turboprop || !FLAG_turbo_dynamic_map_checks || !IsLoadICKind(kind)) { return MaybeObjectHandle(); } @@ -560,14 +501,14 @@ MaybeObjectHandle TryGetMinimorphicHandler( // polymorphic loads currently we don't inline the builtins even without // dynamic map checks. if (maps_and_handlers.size() == 1 && - *maps_and_handlers[0].first == - native_context->initial_array_prototype().map()) { + maps_and_handlers[0].first.equals( + native_context.initial_array_prototype().map())) { return MaybeObjectHandle(); } MaybeObjectHandle initial_handler; - for (MapAndHandler map_and_handler : maps_and_handlers) { - auto map = map_and_handler.first; + for (const MapRefAndHandler& map_and_handler : maps_and_handlers) { + MapRef map = map_and_handler.first; MaybeObjectHandle handler = map_and_handler.second; if (handler.is_null()) return MaybeObjectHandle(); DCHECK(!handler->IsCleared()); @@ -577,7 +518,7 @@ MaybeObjectHandle TryGetMinimorphicHandler( LoadHandler::Kind::kField) { return MaybeObjectHandle(); } - CHECK(!map->IsJSGlobalProxyMap()); + CHECK(!map.object()->IsJSGlobalProxyMap()); if (initial_handler.is_null()) { initial_handler = handler; } else if (!handler.is_identical_to(initial_handler)) { @@ -587,21 +528,15 @@ MaybeObjectHandle TryGetMinimorphicHandler( return initial_handler; } -bool HasMigrationTargets(const MapHandles& maps) { - for (Handle<Map> map : maps) { - if (map->is_migration_target()) return true; +bool HasMigrationTargets(const ZoneVector<MapRef>& maps) { + for (const MapRef& map : maps) { + if (map.is_migration_target()) return true; } return false; } } // namespace -bool JSHeapBroker::CanUseFeedback(const FeedbackNexus& nexus) const { - // TODO(jgruber,v8:8888): Currently, nci code does not use any - // feedback. This restriction will be relaxed in the future. 
- return !is_native_context_independent() && !nexus.IsUninitialized(); -} - const ProcessedFeedback& JSHeapBroker::NewInsufficientFeedback( FeedbackSlotKind kind) const { return *zone()->New<InsufficientFeedback>(kind); @@ -612,29 +547,45 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess( base::Optional<NameRef> static_name) { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); FeedbackSlotKind kind = nexus.kind(); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(kind); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(kind); - std::vector<MapAndHandler> maps_and_handlers; - nexus.ExtractMapsAndFeedback(&maps_and_handlers); - MapHandles maps; - for (auto const& entry : maps_and_handlers) { - maps.push_back(entry.first); + ZoneVector<MapRefAndHandler> maps_and_handlers(zone()); + ZoneVector<MapRef> maps(zone()); + { + std::vector<MapAndHandler> maps_and_handlers_unfiltered; + nexus.ExtractMapsAndFeedback(&maps_and_handlers_unfiltered); + + for (const MapAndHandler& map_and_handler : maps_and_handlers_unfiltered) { + MapRef map = MakeRefAssumeMemoryFence(this, *map_and_handler.first); + // May change concurrently at any time - must be guarded by a dependency + // if non-deprecation is important. + if (map.is_deprecated()) { + // TODO(ishell): support fast map updating if we enable it. + CHECK(!FLAG_fast_map_update); + base::Optional<Map> maybe_map = MapUpdater::TryUpdateNoLock( + isolate(), *map.object(), ConcurrencyMode::kConcurrent); + if (maybe_map.has_value()) { + map = MakeRefAssumeMemoryFence(this, maybe_map.value()); + } else { + continue; // Couldn't update the deprecated map. + } + } + if (map.is_abandoned_prototype_map()) continue; + maps_and_handlers.push_back({map, map_and_handler.second}); + maps.push_back(map); + } } base::Optional<NameRef> name = static_name.has_value() ? static_name : GetNameFeedback(nexus); MaybeObjectHandle handler = TryGetMinimorphicHandler( - maps_and_handlers, kind, target_native_context().object(), - is_turboprop()); + maps_and_handlers, kind, target_native_context(), is_turboprop()); if (!handler.is_null()) { return *zone()->New<MinimorphicLoadPropertyAccessFeedback>( - *name, kind, handler.object(), - ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), + *name, kind, CanonicalPersistentHandle(handler.object()), maps, HasMigrationTargets(maps)); } - FilterRelevantReceiverMaps(isolate(), &maps); - // If no maps were found for a non-megamorphic access, then our maps died // and we should soft-deopt. if (maps.empty() && nexus.ic_state() != MEGAMORPHIC) { @@ -644,8 +595,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess( if (name.has_value()) { // We rely on this invariant in JSGenericLowering. 
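The new map-filtering loop in ReadFeedbackForPropertyAccess replaces the deleted FilterRelevantReceiverMaps: deprecated maps are updated via MapUpdater::TryUpdateNoLock where possible and dropped otherwise, and abandoned prototype maps are dropped outright. A toy version with stand-in types; the "even id" update rule is invented for the sketch:

#include <cassert>
#include <optional>
#include <vector>

struct Map {
  bool deprecated = false;
  bool abandoned_prototype = false;
  int id = 0;
};

// Stand-in for MapUpdater::TryUpdateNoLock: pretend only even ids have an
// up-to-date migration target.
std::optional<Map> TryUpdate(const Map& m) {
  if (!m.deprecated) return m;
  if (m.id % 2 == 0) return Map{false, false, m.id + 1};
  return std::nullopt;
}

std::vector<Map> FilterMaps(const std::vector<Map>& in) {
  std::vector<Map> out;
  for (const Map& m : in) {
    std::optional<Map> updated = TryUpdate(m);
    if (!updated) continue;                      // couldn't update: drop it
    if (updated->abandoned_prototype) continue;  // drop abandoned prototype maps
    out.push_back(*updated);
  }
  return out;
}

int main() {
  std::vector<Map> maps = {{true, false, 1}, {true, false, 2}, {false, true, 3}};
  assert(FilterMaps(maps).size() == 1);  // only the updatable map survives
  return 0;
}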
DCHECK_IMPLIES(maps.empty(), nexus.ic_state() == MEGAMORPHIC); - return *zone()->New<NamedAccessFeedback>( - *name, ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), kind); + return *zone()->New<NamedAccessFeedback>(*name, maps, kind); } else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) { return ProcessFeedbackMapsForElementAccess( maps, KeyedAccessMode::FromNexus(nexus), kind); @@ -661,58 +611,52 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess( ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess( FeedbackSource const& source) { - FeedbackNexus nexus(source.vector, source.slot); + FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof || nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof || nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy || nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) { return *zone()->New<GlobalAccessFeedback>(nexus.kind()); } - Handle<Object> feedback_value(nexus.GetFeedback()->GetHeapObjectOrSmi(), - isolate()); + Handle<Object> feedback_value = + CanonicalPersistentHandle(nexus.GetFeedback()->GetHeapObjectOrSmi()); if (feedback_value->IsSmi()) { // The wanted name belongs to a script-scope variable and the feedback // tells us where to find its value. - int number = feedback_value->Number(); + int const number = feedback_value->Number(); int const script_context_index = FeedbackNexus::ContextIndexBits::decode(number); int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number); - bool const immutable = FeedbackNexus::ImmutabilityBit::decode(number); - Handle<Context> context = ScriptContextTable::GetContext( - isolate(), target_native_context().script_context_table().object(), - script_context_index); - { - ObjectRef contents = - MakeRef(this, handle(context->get(context_slot_index), isolate())); - CHECK(!contents.equals( - MakeRef<Object>(this, isolate()->factory()->the_hole_value()))); - } - ContextRef context_ref = MakeRef(this, context); - if (immutable) { - context_ref.get(context_slot_index); - } - return *zone()->New<GlobalAccessFeedback>(context_ref, context_slot_index, - immutable, nexus.kind()); + ContextRef context = MakeRefAssumeMemoryFence( + this, + target_native_context().script_context_table().object()->get_context( + script_context_index, kAcquireLoad)); + + base::Optional<ObjectRef> contents = context.get(context_slot_index); + if (contents.has_value()) CHECK(!contents->IsTheHole()); + + return *zone()->New<GlobalAccessFeedback>( + context, context_slot_index, + FeedbackNexus::ImmutabilityBit::decode(number), nexus.kind()); } CHECK(feedback_value->IsPropertyCell()); // The wanted name belongs (or did belong) to a property on the global // object and the feedback is the cell holding its value. 
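For script-scope variables the global-access feedback value is a packed Smi from which FeedbackNexus's ContextIndexBits, SlotIndexBits and ImmutabilityBit are decoded. An illustrative decoder follows; the field widths and bit order below are made up for the sketch, the real layout lives in FeedbackNexus:

#include <cassert>
#include <cstdint>

constexpr uint32_t kImmutabilityBits = 1;   // assumed width
constexpr uint32_t kSlotIndexBits = 15;     // assumed width

struct Decoded {
  bool immutable;
  uint32_t slot_index;
  uint32_t context_index;
};

// Unpack {context_index, slot_index, immutable} from one small integer.
Decoded Decode(uint32_t packed) {
  Decoded d;
  d.immutable = (packed & 1u) != 0;
  d.slot_index = (packed >> kImmutabilityBits) & ((1u << kSlotIndexBits) - 1u);
  d.context_index = packed >> (kImmutabilityBits + kSlotIndexBits);
  return d;
}

int main() {
  uint32_t packed = (7u << 16) | (42u << 1) | 1u;
  Decoded d = Decode(packed);
  assert(d.immutable && d.slot_index == 42 && d.context_index == 7);
  return 0;
}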
- PropertyCellRef cell = - MakeRef(this, Handle<PropertyCell>::cast(feedback_value)); - MakeRef(this, - Handle<PropertyCell>::cast(feedback_value)->value(kAcquireLoad)); - return *zone()->New<GlobalAccessFeedback>(cell, nexus.kind()); + return *zone()->New<GlobalAccessFeedback>( + MakeRefAssumeMemoryFence(this, + Handle<PropertyCell>::cast(feedback_value)), + nexus.kind()); } ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation( FeedbackSource const& source) const { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); BinaryOperationHint hint = nexus.GetBinaryOperationFeedback(); DCHECK_NE(hint, BinaryOperationHint::kNone); // Not uninitialized. return *zone()->New<BinaryOperationFeedback>(hint, nexus.kind()); @@ -721,7 +665,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation( ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation( FeedbackSource const& source) const { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); CompareOperationHint hint = nexus.GetCompareOperationFeedback(); DCHECK_NE(hint, CompareOperationHint::kNone); // Not uninitialized. return *zone()->New<CompareOperationFeedback>(hint, nexus.kind()); @@ -730,7 +674,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation( ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn( FeedbackSource const& source) const { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); ForInHint hint = nexus.GetForInFeedback(); DCHECK_NE(hint, ForInHint::kNone); // Not uninitialized. 
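Every Read* helper now opens with the same guard: nexus.IsUninitialized() maps to InsufficientFeedback, which callers translate into a "no hint" value. A compact stand-in for that contract (not the real ProcessedFeedback hierarchy):

#include <cassert>

enum class ForInHint { kNone, kEnumCacheKeys, kAny };

struct Nexus {
  bool uninitialized;
  ForInHint hint;
};

// Uninitialized feedback degrades to kNone; otherwise the recorded hint is
// returned (the real code DCHECKs that it is not kNone in that case).
ForInHint GetFeedbackForForIn(const Nexus& nexus) {
  if (nexus.uninitialized) return ForInHint::kNone;  // insufficient feedback
  return nexus.hint;
}

int main() {
  assert(GetFeedbackForForIn({true, ForInHint::kAny}) == ForInHint::kNone);
  assert(GetFeedbackForForIn({false, ForInHint::kAny}) == ForInHint::kAny);
  return 0;
}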
return *zone()->New<ForInFeedback>(hint, nexus.kind()); @@ -739,14 +683,14 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn( ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf( FeedbackSource const& source) { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); base::Optional<JSObjectRef> optional_constructor; { MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback(); Handle<JSObject> constructor; if (maybe_constructor.ToHandle(&constructor)) { - optional_constructor = MakeRef(this, constructor); + optional_constructor = MakeRefAssumeMemoryFence(this, *constructor); } } return *zone()->New<InstanceOfFeedback>(optional_constructor, nexus.kind()); @@ -755,63 +699,67 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf( ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral( FeedbackSource const& source) { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); HeapObject object; if (!nexus.GetFeedback()->GetHeapObject(&object)) { return NewInsufficientFeedback(nexus.kind()); } - AllocationSiteRef site = MakeRef(this, AllocationSite::cast(object)); - if (site.PointsToLiteral()) site.SerializeRecursive(); + AllocationSiteRef site = + MakeRefAssumeMemoryFence(this, AllocationSite::cast(object)); + if (!is_concurrent_inlining() && site.PointsToLiteral()) { + site.SerializeRecursive(NotConcurrentInliningTag{this}); + } return *zone()->New<LiteralFeedback>(site, nexus.kind()); } ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral( FeedbackSource const& source) { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); HeapObject object; if (!nexus.GetFeedback()->GetHeapObject(&object)) { return NewInsufficientFeedback(nexus.kind()); } - RegExpBoilerplateDescriptionRef boilerplate = MakeRef( - this, handle(RegExpBoilerplateDescription::cast(object), isolate())); - boilerplate.Serialize(); + RegExpBoilerplateDescriptionRef boilerplate = MakeRefAssumeMemoryFence( + this, RegExpBoilerplateDescription::cast(object)); + if (!is_concurrent_inlining()) { + boilerplate.Serialize(NotConcurrentInliningTag{this}); + } return *zone()->New<RegExpLiteralFeedback>(boilerplate, nexus.kind()); } ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject( FeedbackSource const& source) { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); - if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); HeapObject object; if (!nexus.GetFeedback()->GetHeapObject(&object)) { return NewInsufficientFeedback(nexus.kind()); } - JSArrayRef array = MakeRef(this, handle(JSArray::cast(object), isolate())); + JSArrayRef array = MakeRefAssumeMemoryFence(this, JSArray::cast(object)); return *zone()->New<TemplateObjectFeedback>(array, nexus.kind()); } ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall( FeedbackSource const& source) { FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config()); 
- if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind()); + if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind()); base::Optional<HeapObjectRef> target_ref; { MaybeObject maybe_target = nexus.GetFeedback(); HeapObject target_object; if (maybe_target->GetHeapObject(&target_object)) { - // TryMakeRef is used because the GC predicate may fail if the - // JSFunction was allocated too recently to be store-ordered. - target_ref = TryMakeRef(this, handle(target_object, isolate())); + target_ref = MakeRefAssumeMemoryFence(this, target_object); } } + float frequency = nexus.ComputeCallFrequency(); SpeculationMode mode = nexus.GetSpeculationMode(); CallFeedbackContent content = nexus.GetCallFeedbackContent(); @@ -821,9 +769,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall( BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation( FeedbackSource const& source) { - ProcessedFeedback const& feedback = - is_concurrent_inlining_ ? GetFeedback(source) - : ProcessFeedbackForBinaryOperation(source); + ProcessedFeedback const& feedback = ProcessFeedbackForBinaryOperation(source); return feedback.IsInsufficient() ? BinaryOperationHint::kNone : feedback.AsBinaryOperation().value(); } @@ -831,67 +777,19 @@ BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation( CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation( FeedbackSource const& source) { ProcessedFeedback const& feedback = - is_concurrent_inlining_ ? GetFeedback(source) - : ProcessFeedbackForCompareOperation(source); + ProcessFeedbackForCompareOperation(source); return feedback.IsInsufficient() ? CompareOperationHint::kNone : feedback.AsCompareOperation().value(); } ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) { - ProcessedFeedback const& feedback = is_concurrent_inlining_ - ? GetFeedback(source) - : ProcessFeedbackForForIn(source); + ProcessedFeedback const& feedback = ProcessFeedbackForForIn(source); return feedback.IsInsufficient() ? ForInHint::kNone : feedback.AsForIn().value(); } -ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess( - FeedbackSource const& source, AccessMode mode, - base::Optional<NameRef> static_name) { - return is_concurrent_inlining_ - ? GetFeedback(source) - : ProcessFeedbackForPropertyAccess(source, mode, static_name); -} - -ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf( - FeedbackSource const& source) { - return is_concurrent_inlining_ ? GetFeedback(source) - : ProcessFeedbackForInstanceOf(source); -} - -ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall( - FeedbackSource const& source) { - return is_concurrent_inlining_ ? GetFeedback(source) - : ProcessFeedbackForCall(source); -} - -ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess( - FeedbackSource const& source) { - return is_concurrent_inlining_ ? GetFeedback(source) - : ProcessFeedbackForGlobalAccess(source); -} - ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral( FeedbackSource const& source) { - return is_concurrent_inlining_ - ? GetFeedback(source) - : ProcessFeedbackForArrayOrObjectLiteral(source); -} - -ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral( - FeedbackSource const& source) { - return is_concurrent_inlining_ ? GetFeedback(source) - : ProcessFeedbackForRegExpLiteral(source); -} - -ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject( - FeedbackSource const& source) { - return is_concurrent_inlining_ ? 
GetFeedback(source) - : ProcessFeedbackForTemplateObject(source); -} - -ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral( - FeedbackSource const& source) { if (HasFeedback(source)) return GetFeedback(source); ProcessedFeedback const& feedback = ReadFeedbackForArrayOrObjectLiteral(source); @@ -899,7 +797,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral( return feedback; } -ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral( +ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral( FeedbackSource const& source) { if (HasFeedback(source)) return GetFeedback(source); ProcessedFeedback const& feedback = ReadFeedbackForRegExpLiteral(source); @@ -907,7 +805,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral( return feedback; } -ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForTemplateObject( +ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject( FeedbackSource const& source) { if (HasFeedback(source)) return GetFeedback(source); ProcessedFeedback const& feedback = ReadFeedbackForTemplateObject(source); @@ -939,7 +837,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForForIn( return feedback; } -ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess( +ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess( FeedbackSource const& source, AccessMode mode, base::Optional<NameRef> static_name) { if (HasFeedback(source)) return GetFeedback(source); @@ -949,7 +847,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess( return feedback; } -ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf( +ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf( FeedbackSource const& source) { if (HasFeedback(source)) return GetFeedback(source); ProcessedFeedback const& feedback = ReadFeedbackForInstanceOf(source); @@ -957,7 +855,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf( return feedback; } -ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall( +ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall( FeedbackSource const& source) { if (HasFeedback(source)) return GetFeedback(source); ProcessedFeedback const& feedback = ReadFeedbackForCall(source); @@ -965,7 +863,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall( return feedback; } -ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess( +ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess( FeedbackSource const& source) { if (HasFeedback(source)) return GetFeedback(source); ProcessedFeedback const& feedback = ReadFeedbackForGlobalAccess(source); @@ -974,21 +872,22 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess( } ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess( - MapHandles const& maps, KeyedAccessMode const& keyed_mode, + ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode, FeedbackSlotKind slot_kind) { DCHECK(!maps.empty()); // Collect possible transition targets. 
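With the Process*/Get* split removed, each GetFeedbackFor* body reduces to one memoization shape: return the cached ProcessedFeedback if present, otherwise read it from the nexus and cache it. A generic sketch with std::map standing in for the broker's feedback table:

#include <cassert>
#include <map>
#include <string>

struct FeedbackSource {
  int index;
  bool operator<(const FeedbackSource& o) const { return index < o.index; }
};

class Broker {
 public:
  // HasFeedback / GetFeedback / SetFeedback collapsed into one lookup-or-read.
  const std::string& GetFeedbackFor(FeedbackSource src) {
    auto it = cache_.find(src);
    if (it != cache_.end()) return it->second;                       // HasFeedback
    std::string feedback = ReadFeedback(src);                        // ReadFeedbackFor...
    return cache_.emplace(src, std::move(feedback)).first->second;   // SetFeedback
  }
  int reads = 0;

 private:
  std::string ReadFeedback(FeedbackSource src) {
    ++reads;
    return "feedback@" + std::to_string(src.index);
  }
  std::map<FeedbackSource, std::string> cache_;
};

int main() {
  Broker b;
  b.GetFeedbackFor({7});
  b.GetFeedbackFor({7});
  assert(b.reads == 1);  // the second call hits the cache
  return 0;
}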
MapHandles possible_transition_targets; possible_transition_targets.reserve(maps.size()); - for (Handle<Map> map : maps) { - MapRef map_ref = MakeRef(this, map); - map_ref.SerializeRootMap(); - - if (CanInlineElementAccess(map_ref) && - IsFastElementsKind(map->elements_kind()) && - GetInitialFastElementsKind() != map->elements_kind()) { - possible_transition_targets.push_back(map); + for (MapRef& map : maps) { + if (!is_concurrent_inlining()) { + map.SerializeRootMap(NotConcurrentInliningTag{this}); + } + + if (CanInlineElementAccess(map) && + IsFastElementsKind(map.elements_kind()) && + GetInitialFastElementsKind() != map.elements_kind()) { + possible_transition_targets.push_back(map.object()); } } @@ -1001,21 +900,28 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess( ZoneMap<Handle<Map>, TransitionGroup, HandleLess> transition_groups(zone()); // Separate the actual receiver maps and the possible transition sources. - for (Handle<Map> map : maps) { + for (const MapRef& map : maps) { + Map transition_target; + // Don't generate elements kind transitions from stable maps. - Map transition_target = map->is_stable() - ? Map() - : map->FindElementsKindTransitionedMap( - isolate(), possible_transition_targets); + if (!map.is_stable()) { + // The lock is needed for UnusedPropertyFields (called deep inside + // FindElementsKindTransitionedMap). + MapUpdaterGuardIfNeeded mumd_scope(this); + + transition_target = map.object()->FindElementsKindTransitionedMap( + isolate(), possible_transition_targets, ConcurrencyMode::kConcurrent); + } + if (transition_target.is_null()) { - TransitionGroup group(1, map, zone()); - transition_groups.insert({map, group}); + TransitionGroup group(1, map.object(), zone()); + transition_groups.insert({map.object(), group}); } else { - Handle<Map> target(transition_target, isolate()); + Handle<Map> target = CanonicalPersistentHandle(transition_target); TransitionGroup new_group(1, target, zone()); TransitionGroup& actual_group = transition_groups.insert({target, new_group}).first->second; - actual_group.push_back(map); + actual_group.push_back(map.object()); } } @@ -1052,31 +958,22 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback( FeedbackNexus const& nexus) { Name raw_name = nexus.GetName(); if (raw_name.is_null()) return base::nullopt; - return MakeRef(this, handle(raw_name, isolate())); + return MakeRefAssumeMemoryFence(this, raw_name); } PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo( MapRef map, NameRef name, AccessMode access_mode, - CompilationDependencies* dependencies, SerializationPolicy policy) { + CompilationDependencies* dependencies) { + DCHECK_NOT_NULL(dependencies); + PropertyAccessTarget target({map, name, access_mode}); auto it = property_access_infos_.find(target); if (it != property_access_infos_.end()) return it->second; - if (policy == SerializationPolicy::kAssumeSerialized && - !FLAG_turbo_concurrent_get_property_access_info) { - TRACE_BROKER_MISSING(this, "PropertyAccessInfo for " - << access_mode << " of property " << name - << " on map " << map); - return PropertyAccessInfo::Invalid(zone()); - } - - CHECK_NOT_NULL(dependencies); AccessInfoFactory factory(this, dependencies, zone()); - PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo( - map.object(), name.object(), access_mode); + PropertyAccessInfo access_info = + factory.ComputePropertyAccessInfo(map, name, access_mode); if (is_concurrent_inlining_) { - CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info, - mode() == 
kSerializing); TRACE(this, "Storing PropertyAccessInfo for " << access_mode << " of property " << name << " on map " << map); @@ -1087,17 +984,10 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo( MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo( MinimorphicLoadPropertyAccessFeedback const& feedback, - FeedbackSource const& source, SerializationPolicy policy) { + FeedbackSource const& source) { auto it = minimorphic_property_access_infos_.find(source); if (it != minimorphic_property_access_infos_.end()) return it->second; - if (policy == SerializationPolicy::kAssumeSerialized) { - TRACE_BROKER_MISSING(this, "MinimorphicLoadPropertyAccessInfo for slot " - << source.index() << " " - << MakeRef<Object>(this, source.vector)); - return MinimorphicLoadPropertyAccessInfo::Invalid(); - } - AccessInfoFactory factory(this, nullptr, zone()); MinimorphicLoadPropertyAccessInfo access_info = factory.ComputePropertyAccessInfo(feedback); diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h index cff68af67a..91b94bebb5 100644 --- a/deps/v8/src/compiler/js-heap-broker.h +++ b/deps/v8/src/compiler/js-heap-broker.h @@ -15,7 +15,6 @@ #include "src/compiler/heap-refs.h" #include "src/compiler/processed-feedback.h" #include "src/compiler/refs-map.h" -#include "src/compiler/serializer-hints.h" #include "src/execution/local-isolate.h" #include "src/handles/handles.h" #include "src/handles/persistent-handles.h" @@ -119,23 +118,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker { bool tracing_enabled() const { return tracing_enabled_; } bool is_concurrent_inlining() const { return is_concurrent_inlining_; } bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; } - bool is_native_context_independent() const { - // TODO(jgruber,v8:8888): Remove dependent code. - return false; - } - bool generate_full_feedback_collection() const { - // NCI code currently collects full feedback. - DCHECK_IMPLIES(is_native_context_independent(), - CollectFeedbackInGenericLowering()); - return is_native_context_independent(); - } bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; } NexusConfig feedback_nexus_config() const { - // TODO(mvstanton): when the broker gathers feedback on the background - // thread, this should return a local NexusConfig object which points - // to the associated LocalHeap. - return NexusConfig::FromMainThread(isolate()); + return IsMainThread() ? NexusConfig::FromMainThread(isolate()) + : NexusConfig::FromBackgroundThread( + isolate(), local_isolate()->heap()); } enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired }; @@ -183,12 +171,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker { bool HasFeedback(FeedbackSource const& source) const; void SetFeedback(FeedbackSource const& source, ProcessedFeedback const* feedback); - ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const; FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const; // TODO(neis): Move these into serializer when we're always in the background. 
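Both the GetFeedbackFor* accessors and GetPropertyAccessInfo above share one read-through-cache shape: probe a per-broker table, compute on a miss, store the result, and hand back a reference. A self-contained sketch of that shape, using std::unordered_map and stand-in types rather than the broker's zone-allocated structures:

#include <string>
#include <unordered_map>
#include <utility>

// Stand-in for V8's ProcessedFeedback; only the caching shape is modeled.
struct ProcessedFeedbackSketch {
  std::string payload;
};

class FeedbackCacheSketch {
 public:
  // Mirrors the Has/Get/Set triple: compute the feedback at most once per
  // slot, then serve every later request from the cache by reference.
  const ProcessedFeedbackSketch& GetFeedbackForCall(int slot) {
    auto it = cache_.find(slot);
    if (it != cache_.end()) return it->second;
    ProcessedFeedbackSketch feedback = ReadFeedbackForCall(slot);  // expensive
    return cache_.emplace(slot, std::move(feedback)).first->second;
  }

 private:
  ProcessedFeedbackSketch ReadFeedbackForCall(int slot) {
    return {"call feedback for slot " + std::to_string(slot)};
  }
  std::unordered_map<int, ProcessedFeedbackSketch> cache_;
};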
ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess( - MapHandles const& maps, KeyedAccessMode const& keyed_mode, + ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode, FeedbackSlotKind slot_kind); // Binary, comparison and for-in hints can be fully expressed via @@ -216,71 +203,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker { ProcessedFeedback const& ProcessFeedbackForBinaryOperation( FeedbackSource const& source); - ProcessedFeedback const& ProcessFeedbackForCall(FeedbackSource const& source); ProcessedFeedback const& ProcessFeedbackForCompareOperation( FeedbackSource const& source); ProcessedFeedback const& ProcessFeedbackForForIn( FeedbackSource const& source); - ProcessedFeedback const& ProcessFeedbackForGlobalAccess( - FeedbackSource const& source); - ProcessedFeedback const& ProcessFeedbackForInstanceOf( - FeedbackSource const& source); - ProcessedFeedback const& ProcessFeedbackForPropertyAccess( - FeedbackSource const& source, AccessMode mode, - base::Optional<NameRef> static_name); - ProcessedFeedback const& ProcessFeedbackForArrayOrObjectLiteral( - FeedbackSource const& source); - ProcessedFeedback const& ProcessFeedbackForRegExpLiteral( - FeedbackSource const& source); - ProcessedFeedback const& ProcessFeedbackForTemplateObject( - FeedbackSource const& source); bool FeedbackIsInsufficient(FeedbackSource const& source) const; base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus); - // If {policy} is {kAssumeSerialized} and the broker doesn't know about the - // combination of {map}, {name}, and {access_mode}, returns Invalid. PropertyAccessInfo GetPropertyAccessInfo( MapRef map, NameRef name, AccessMode access_mode, - CompilationDependencies* dependencies = nullptr, - SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); + CompilationDependencies* dependencies); MinimorphicLoadPropertyAccessInfo GetPropertyAccessInfo( MinimorphicLoadPropertyAccessFeedback const& feedback, - FeedbackSource const& source, - SerializationPolicy policy = SerializationPolicy::kAssumeSerialized); - - // Used to separate the problem of a concurrent GetPropertyAccessInfo (GPAI) - // from serialization. GPAI is currently called both during the serialization - // phase, and on the background thread. While some crucial objects (like - // JSObject) still must be serialized, we do the following: - // - Run GPAI during serialization to discover and serialize required objects. - // - After the serialization phase, clear cached property access infos. - // - On the background thread, rerun GPAI in a concurrent setting. The cache - // has been cleared, thus the actual logic runs again. - // Once all required object kinds no longer require serialization, this - // should be removed together with all GPAI calls during serialization. - void ClearCachedPropertyAccessInfos() { - CHECK(FLAG_turbo_concurrent_get_property_access_info); - property_access_infos_.clear(); - } - - // As above, clear cached ObjectData that can be reconstructed, i.e. is - // either never-serialized or background-serialized. 
- void ClearReconstructibleData(); + FeedbackSource const& source); StringRef GetTypedArrayStringTag(ElementsKind kind); - bool ShouldBeSerializedForCompilation(const SharedFunctionInfoRef& shared, - const FeedbackVectorRef& feedback, - const HintsVector& arguments) const; - void SetSerializedForCompilation(const SharedFunctionInfoRef& shared, - const FeedbackVectorRef& feedback, - const HintsVector& arguments); - bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared, - const FeedbackVectorRef& feedback) const; - bool IsMainThread() const { return local_isolate() == nullptr || local_isolate()->is_main_thread(); } @@ -404,13 +345,23 @@ class V8_EXPORT_PRIVATE JSHeapBroker { bool ObjectMayBeUninitialized(Object object) const; bool ObjectMayBeUninitialized(HeapObject object) const; + void set_dependencies(CompilationDependencies* dependencies) { + DCHECK_NOT_NULL(dependencies); + DCHECK_NULL(dependencies_); + dependencies_ = dependencies; + } + CompilationDependencies* dependencies() const { + DCHECK_NOT_NULL(dependencies_); + return dependencies_; + } + private: friend class HeapObjectRef; friend class ObjectRef; friend class ObjectData; friend class PropertyCellData; - bool CanUseFeedback(const FeedbackNexus& nexus) const; + ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const; const ProcessedFeedback& NewInsufficientFeedback(FeedbackSlotKind kind) const; // Bottleneck FeedbackNexus access here, for storage in the broker @@ -497,21 +448,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker { ZoneVector<ObjectData*> typed_array_string_tags_; - struct SerializedFunction { - SharedFunctionInfoRef shared; - FeedbackVectorRef feedback; - - bool operator<(const SerializedFunction& other) const { - if (shared.object().address() < other.shared.object().address()) { - return true; - } - if (shared.object().address() == other.shared.object().address()) { - return feedback.object().address() < other.feedback.object().address(); - } - return false; - } - }; - ZoneMultimap<SerializedFunction, HintsVector> serialized_functions_; + CompilationDependencies* dependencies_ = nullptr; // The MapUpdater mutex is used in recursive patterns; for example, // ComputePropertyAccessInfo may call itself recursively. 
Thus we need to diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc index 7c5585a4bc..5692d128a7 100644 --- a/deps/v8/src/compiler/js-heap-copy-reducer.cc +++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc @@ -27,26 +27,16 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; } Reduction JSHeapCopyReducer::Reduce(Node* node) { switch (node->opcode()) { - case IrOpcode::kCheckClosure: { - FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(node->op())); - base::Optional<FeedbackVectorRef> feedback_vector = cell.value(); - if (feedback_vector.has_value()) { - feedback_vector->Serialize(); - } - break; - } case IrOpcode::kHeapConstant: { ObjectRef object = MakeRef(broker(), HeapConstantOf(node->op())); - if (object.IsJSFunction()) object.AsJSFunction().Serialize(); if (object.IsJSObject()) { - object.AsJSObject().SerializeObjectCreateMap(); + object.AsJSObject().SerializeObjectCreateMap( + NotConcurrentInliningTag{broker()}); } break; } case IrOpcode::kJSCreateArray: { - CreateArrayParameters const& p = CreateArrayParametersOf(node->op()); - Handle<AllocationSite> site; - if (p.site().ToHandle(&site)) MakeRef(broker(), site); + CreateArrayParametersOf(node->op()).site(broker()); break; } case IrOpcode::kJSCreateArguments: { @@ -56,29 +46,29 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) { break; } case IrOpcode::kJSCreateBlockContext: { - MakeRef(broker(), ScopeInfoOf(node->op())); + USE(ScopeInfoOf(broker(), node->op())); break; } case IrOpcode::kJSCreateBoundFunction: { CreateBoundFunctionParameters const& p = CreateBoundFunctionParametersOf(node->op()); - MakeRef(broker(), p.map()); + p.map(broker()); break; } case IrOpcode::kJSCreateCatchContext: { - MakeRef(broker(), ScopeInfoOf(node->op())); + USE(ScopeInfoOf(broker(), node->op())); break; } case IrOpcode::kJSCreateClosure: { CreateClosureParameters const& p = CreateClosureParametersOf(node->op()); - MakeRef(broker(), p.shared_info()); - MakeRef(broker(), p.code()); + p.shared_info(broker()); + p.code(broker()); break; } case IrOpcode::kJSCreateEmptyLiteralArray: { FeedbackParameter const& p = FeedbackParameterOf(node->op()); if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback()); + broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback()); } break; } @@ -90,7 +80,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) { FeedbackParameter const& p = FeedbackParameterOf(node->op()); if (p.feedback().IsValid()) { // Unary ops are treated as binary ops with respect to feedback. 
- broker()->ProcessFeedbackForBinaryOperation(p.feedback()); + broker()->GetFeedbackForBinaryOperation(p.feedback()); } break; } @@ -109,7 +99,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) { case IrOpcode::kJSShiftRightLogical: { FeedbackParameter const& p = FeedbackParameterOf(node->op()); if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForBinaryOperation(p.feedback()); + broker()->GetFeedbackForBinaryOperation(p.feedback()); } break; } @@ -122,64 +112,64 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) { case IrOpcode::kJSStrictEqual: { FeedbackParameter const& p = FeedbackParameterOf(node->op()); if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForCompareOperation(p.feedback()); + broker()->GetFeedbackForCompareOperation(p.feedback()); } break; } case IrOpcode::kJSCreateFunctionContext: { CreateFunctionContextParameters const& p = CreateFunctionContextParametersOf(node->op()); - MakeRef(broker(), p.scope_info()); + p.scope_info(broker()); break; } case IrOpcode::kJSCreateLiteralArray: case IrOpcode::kJSCreateLiteralObject: { CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback()); + broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback()); } break; } case IrOpcode::kJSCreateLiteralRegExp: { CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op()); if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForRegExpLiteral(p.feedback()); + broker()->GetFeedbackForRegExpLiteral(p.feedback()); } break; } case IrOpcode::kJSGetTemplateObject: { GetTemplateObjectParameters const& p = GetTemplateObjectParametersOf(node->op()); - MakeRef(broker(), p.shared()); - MakeRef(broker(), p.description()); - broker()->ProcessFeedbackForTemplateObject(p.feedback()); + p.shared(broker()); + p.description(broker()); + broker()->GetFeedbackForTemplateObject(p.feedback()); break; } case IrOpcode::kJSCreateWithContext: { - MakeRef(broker(), ScopeInfoOf(node->op())); + USE(ScopeInfoOf(broker(), node->op())); break; } case IrOpcode::kJSLoadNamed: { NamedAccess const& p = NamedAccessOf(node->op()); - NameRef name = MakeRef(broker(), p.name()); + NameRef name = p.name(broker()); if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForPropertyAccess(p.feedback(), - AccessMode::kLoad, name); + broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad, + name); } break; } case IrOpcode::kJSLoadNamedFromSuper: { NamedAccess const& p = NamedAccessOf(node->op()); - NameRef name = MakeRef(broker(), p.name()); + NameRef name = p.name(broker()); if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForPropertyAccess(p.feedback(), - AccessMode::kLoad, name); + broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad, + name); } break; } case IrOpcode::kJSStoreNamed: { NamedAccess const& p = NamedAccessOf(node->op()); - MakeRef(broker(), p.name()); + p.name(broker()); break; } case IrOpcode::kStoreField: @@ -220,8 +210,8 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) { PropertyAccess const& p = PropertyAccessOf(node->op()); AccessMode access_mode = AccessMode::kLoad; if (p.feedback().IsValid()) { - broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode, - base::nullopt); + broker()->GetFeedbackForPropertyAccess(p.feedback(), access_mode, + base::nullopt); } break; } diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc index 8449a0b3d5..177f35c7a0 100644 --- 
a/deps/v8/src/compiler/js-inlining-heuristic.cc +++ b/deps/v8/src/compiler/js-inlining-heuristic.cc @@ -37,34 +37,21 @@ bool CanConsiderForInlining(JSHeapBroker* broker, } DCHECK(shared.HasBytecodeArray()); - if (!broker->IsSerializedForCompilation(shared, feedback_vector)) { - TRACE_BROKER_MISSING( - broker, "data for " << shared << " (not serialized for compilation)"); - TRACE("Cannot consider " << shared << " for inlining with " - << feedback_vector << " (missing data)"); - return false; - } TRACE("Considering " << shared << " for inlining with " << feedback_vector); return true; } bool CanConsiderForInlining(JSHeapBroker* broker, JSFunctionRef const& function) { - if (!function.has_feedback_vector()) { + if (!function.has_feedback_vector(broker->dependencies())) { TRACE("Cannot consider " << function << " for inlining (no feedback vector)"); return false; } - if (!function.serialized() || !function.serialized_code_and_feedback()) { - TRACE_BROKER_MISSING( - broker, "data for " << function << " (cannot consider for inlining)"); - TRACE("Cannot consider " << function << " for inlining (missing data)"); - return false; - } - - return CanConsiderForInlining(broker, function.shared(), - function.feedback_vector()); + return CanConsiderForInlining( + broker, function.shared(), + function.feedback_vector(broker->dependencies())); } } // namespace @@ -124,7 +111,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( JSCreateClosureNode n(callee); CreateClosureParameters const& p = n.Parameters(); FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker()); - SharedFunctionInfoRef shared_info = MakeRef(broker(), p.shared_info()); + SharedFunctionInfoRef shared_info = p.shared_info(broker()); out.shared_info = shared_info; if (feedback_cell.value().has_value() && CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) { @@ -819,6 +806,10 @@ void JSInliningHeuristic::PrintCandidates() { Graph* JSInliningHeuristic::graph() const { return jsgraph()->graph(); } +CompilationDependencies* JSInliningHeuristic::dependencies() const { + return broker()->dependencies(); +} + CommonOperatorBuilder* JSInliningHeuristic::common() const { return jsgraph()->common(); } diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h index 848efd2f57..af8e913a47 100644 --- a/deps/v8/src/compiler/js-inlining-heuristic.h +++ b/deps/v8/src/compiler/js-inlining-heuristic.h @@ -100,6 +100,7 @@ class JSInliningHeuristic final : public AdvancedReducer { JSGraph* jsgraph() const { return jsgraph_; } // TODO(neis): Make heap broker a component of JSGraph? JSHeapBroker* broker() const { return broker_; } + CompilationDependencies* dependencies() const; Isolate* isolate() const { return jsgraph_->isolate(); } SimplifiedOperatorBuilder* simplified() const; Mode mode() const { return mode_; } diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc index 150e409651..a17a43ecd2 100644 --- a/deps/v8/src/compiler/js-inlining.cc +++ b/deps/v8/src/compiler/js-inlining.cc @@ -305,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget( JSFunctionRef function = match.Ref(broker()).AsJSFunction(); // The function might have not been called yet. 
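With the serialization checks deleted, the JSFunction overload of CanConsiderForInlining above reduces to a two-stage gate: bail out when there is no feedback vector, otherwise defer to the SharedFunctionInfo overload. The same gate recurs in DetermineCallTarget just below. A boolean model of that control flow (the fields are stand-ins, not V8 members):

#include <cstdio>

struct FunctionModel {
  bool has_feedback_vector;
  bool shared_is_inlineable;  // summary of the SharedFunctionInfo checks
};

bool CanConsiderForInlining(const FunctionModel& f) {
  if (!f.has_feedback_vector) {
    std::puts("cannot consider for inlining (no feedback vector)");
    return false;
  }
  // Everything else is decided at the SharedFunctionInfo level.
  return f.shared_is_inlineable;
}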
- if (!function.has_feedback_vector()) { + if (!function.has_feedback_vector(broker()->dependencies())) { return base::nullopt; } @@ -355,11 +355,11 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node, if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) { JSFunctionRef function = match.Ref(broker()).AsJSFunction(); // This was already ensured by DetermineCallTarget - CHECK(function.has_feedback_vector()); + CHECK(function.has_feedback_vector(broker()->dependencies())); // The inlinee specializes to the context from the JSFunction object. *context_out = jsgraph()->Constant(function.context()); - return function.raw_feedback_cell(); + return function.raw_feedback_cell(broker()->dependencies()); } if (match.IsJSCreateClosure()) { @@ -520,20 +520,29 @@ Reduction JSInliner::ReduceJSCall(Node* node) { // always hold true. CHECK(shared_info->is_compiled()); - if (!broker()->is_concurrent_inlining() && info_->source_positions()) { - SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), - shared_info->object()); + if (info_->source_positions()) { + if (broker()->is_concurrent_inlining()) { + if (!shared_info->object()->AreSourcePositionsAvailable( + broker()->local_isolate_or_isolate())) { + // This case is expected to be very rare, since we generate source + // positions for all functions when debugging or profiling are turned + // on (see Isolate::NeedsDetailedOptimizedCodeLineInfo). Source + // positions should only be missing here if there is a race between 1) + // enabling/disabling the debugger/profiler, and 2) this compile job. + // In that case, we simply don't inline. + TRACE("Not inlining " << *shared_info << " into " << outer_shared_info + << " because source positions are missing."); + return NoChange(); + } + } else { + SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), + shared_info->object()); + } } // Determine the target's feedback vector and its context. Node* context; FeedbackCellRef feedback_cell = DetermineCallContext(node, &context); - if (!broker()->IsSerializedForCompilation(*shared_info, - *feedback_cell.value())) { - TRACE("Not inlining " << *shared_info << " into " << outer_shared_info - << " because it wasn't serialized for compilation."); - return NoChange(); - } TRACE("Inlining " << *shared_info << " into " << outer_shared_info << ((exception_target != nullptr) ? " (inside try-block)" @@ -683,7 +692,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) { // passed into this node has to be the callees context (loaded above). 
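The new source-position handling in ReduceJSCall above splits by compilation mode: a concurrent job may only check availability and must skip the candidate if positions are missing, while a main-thread job may still materialize them on demand. The decision logic, modeled over plain flags (all three parameters are stand-ins for the calls named in the diff):

enum class InlineDecision { kProceed, kDontInline };

// need_positions  : info_->source_positions()
// concurrent      : broker()->is_concurrent_inlining()
// positions_ready : AreSourcePositionsAvailable(...)
InlineDecision HandleSourcePositions(bool need_positions, bool concurrent,
                                     bool positions_ready) {
  if (!need_positions) return InlineDecision::kProceed;
  if (concurrent) {
    // Racing with the debugger/profiler: the background job must not
    // allocate source positions, so a missing table disables this inline.
    return positions_ready ? InlineDecision::kProceed
                           : InlineDecision::kDontInline;
  }
  // Main thread: safe to create the positions now before inlining.
  return InlineDecision::kProceed;
}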
if (node->opcode() == IrOpcode::kJSCall && is_sloppy(shared_info->language_mode()) && !shared_info->native()) { - Node* effect = NodeProperties::GetEffectInput(node); + Effect effect{NodeProperties::GetEffectInput(node)}; if (NodeProperties::CanBePrimitive(broker(), call.receiver(), effect)) { CallParameters const& p = CallParametersOf(node->op()); Node* global_proxy = jsgraph()->Constant( diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index 30cab3ae26..e03e0d41a3 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -35,19 +35,16 @@ namespace compiler { namespace { -bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) { - for (auto map : maps) { - MapRef map_ref = MakeRef(broker, map); - if (map_ref.IsHeapNumberMap()) return true; +bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) { + for (MapRef map : maps) { + if (map.IsHeapNumberMap()) return true; } return false; } -bool HasOnlyJSArrayMaps(JSHeapBroker* broker, - ZoneVector<Handle<Map>> const& maps) { - for (auto map : maps) { - MapRef map_ref = MakeRef(broker, map); - if (!map_ref.IsJSArrayMap()) return false; +bool HasOnlyJSArrayMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) { + for (MapRef map : maps) { + if (!map.IsJSArrayMap()) return false; } return true; } @@ -393,42 +390,30 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { // Check if the right hand side is a known {receiver}, or // we have feedback from the InstanceOfIC. - Handle<JSObject> receiver; + base::Optional<JSObjectRef> receiver; HeapObjectMatcher m(constructor); if (m.HasResolvedValue() && m.Ref(broker()).IsJSObject()) { - receiver = m.Ref(broker()).AsJSObject().object(); + receiver = m.Ref(broker()).AsJSObject(); } else if (p.feedback().IsValid()) { ProcessedFeedback const& feedback = broker()->GetFeedbackForInstanceOf(FeedbackSource(p.feedback())); if (feedback.IsInsufficient()) return NoChange(); - base::Optional<JSObjectRef> maybe_receiver = - feedback.AsInstanceOf().value(); - if (!maybe_receiver.has_value()) return NoChange(); - receiver = maybe_receiver->object(); + receiver = feedback.AsInstanceOf().value(); } else { return NoChange(); } - JSObjectRef receiver_ref = MakeRef(broker(), receiver); - MapRef receiver_map = receiver_ref.map(); + if (!receiver.has_value()) return NoChange(); - PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone()); - if (broker()->is_concurrent_inlining()) { - access_info = broker()->GetPropertyAccessInfo( - receiver_map, - MakeRef(broker(), isolate()->factory()->has_instance_symbol()), - AccessMode::kLoad, dependencies()); - } else { - AccessInfoFactory access_info_factory(broker(), dependencies(), - graph()->zone()); - access_info = access_info_factory.ComputePropertyAccessInfo( - receiver_map.object(), factory()->has_instance_symbol(), - AccessMode::kLoad); - } + MapRef receiver_map = receiver->map(); + NameRef name = MakeRef(broker(), isolate()->factory()->has_instance_symbol()); + PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( + receiver_map, name, AccessMode::kLoad, dependencies()); // TODO(v8:11457) Support dictionary mode holders here. 
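HasNumberMaps and HasOnlyJSArrayMaps above are an existence test and a universal test over the feedback maps; over a plain container they are exactly std::any_of and std::all_of. A sketch with a struct standing in for MapRef and its predicates:

#include <algorithm>
#include <vector>

struct MapModel {
  bool is_heap_number_map;
  bool is_js_array_map;
};

bool HasNumberMaps(const std::vector<MapModel>& maps) {
  return std::any_of(maps.begin(), maps.end(), [](const MapModel& m) {
    return m.is_heap_number_map;
  });
}

bool HasOnlyJSArrayMaps(const std::vector<MapModel>& maps) {
  return std::all_of(maps.begin(), maps.end(), [](const MapModel& m) {
    return m.is_js_array_map;
  });
}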
- if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) + if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) { return NoChange(); + } access_info.RecordDependencies(dependencies()); PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies()); @@ -456,26 +441,26 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { } if (access_info.IsFastDataConstant()) { - Handle<JSObject> holder; - bool found_on_proto = access_info.holder().ToHandle(&holder); - JSObjectRef holder_ref = - found_on_proto ? MakeRef(broker(), holder) : receiver_ref; + base::Optional<JSObjectRef> holder = access_info.holder(); + bool found_on_proto = holder.has_value(); + JSObjectRef holder_ref = found_on_proto ? holder.value() : receiver.value(); base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty( access_info.field_representation(), access_info.field_index(), dependencies()); if (!constant.has_value() || !constant->IsHeapObject() || - !constant->AsHeapObject().map().is_callable()) + !constant->AsHeapObject().map().is_callable()) { return NoChange(); + } if (found_on_proto) { dependencies()->DependOnStablePrototypeChains( access_info.lookup_start_object_maps(), kStartAtPrototype, - MakeRef(broker(), holder)); + holder.value()); } // Check that {constructor} is actually {receiver}. - constructor = - access_builder.BuildCheckValue(constructor, &effect, control, receiver); + constructor = access_builder.BuildCheckValue(constructor, &effect, control, + receiver->object()); // Monomorphic property access. access_builder.BuildCheckMaps(constructor, &effect, control, @@ -526,19 +511,21 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) { JSNativeContextSpecialization::InferHasInPrototypeChainResult JSNativeContextSpecialization::InferHasInPrototypeChain( - Node* receiver, Node* effect, HeapObjectRef const& prototype) { - ZoneHandleSet<Map> receiver_maps; + Node* receiver, Effect effect, HeapObjectRef const& prototype) { + ZoneRefUnorderedSet<MapRef> receiver_maps(zone()); NodeProperties::InferMapsResult result = NodeProperties::InferMapsUnsafe( broker(), receiver, effect, &receiver_maps); if (result == NodeProperties::kNoMaps) return kMayBeInPrototypeChain; + ZoneVector<MapRef> receiver_map_refs(zone()); + // Try to determine either that all of the {receiver_maps} have the given // {prototype} in their chain, or that none do. If we can't tell, return // kMayBeInPrototypeChain. bool all = true; bool none = true; - for (size_t i = 0; i < receiver_maps.size(); ++i) { - MapRef map = MakeRef(broker(), receiver_maps[i]); + for (MapRef map : receiver_maps) { + receiver_map_refs.push_back(map); if (result == NodeProperties::kUnreliableMaps && !map.is_stable()) { return kMayBeInPrototypeChain; } @@ -558,8 +545,9 @@ JSNativeContextSpecialization::InferHasInPrototypeChain( } map = map_prototype->map(); // TODO(v8:11457) Support dictionary mode prototypes here. - if (!map.is_stable() || map.is_dictionary_map()) + if (!map.is_stable() || map.is_dictionary_map()) { return kMayBeInPrototypeChain; + } if (map.oddball_type() == OddballType::kNull) { all = false; break; @@ -584,7 +572,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain( WhereToStart start = result == NodeProperties::kUnreliableMaps ?
kStartAtReceiver : kStartAtPrototype; - dependencies()->DependOnStablePrototypeChains(receiver_maps, start, + dependencies()->DependOnStablePrototypeChains(receiver_map_refs, start, last_prototype); } @@ -597,7 +585,7 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain( DCHECK_EQ(IrOpcode::kJSHasInPrototypeChain, node->opcode()); Node* value = NodeProperties::GetValueInput(node, 0); Node* prototype = NodeProperties::GetValueInput(node, 1); - Node* effect = NodeProperties::GetEffectInput(node); + Effect effect{NodeProperties::GetEffectInput(node)}; // Check if we can constant-fold the prototype chain walk // for the given {value} and the {prototype}. @@ -649,12 +637,12 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance( // Optimize if we currently know the "prototype" property. JSFunctionRef function = m.Ref(broker()).AsJSFunction(); - if (!function.serialized()) return NoChange(); // TODO(neis): Remove the has_prototype_slot condition once the broker is // always enabled. - if (!function.map().has_prototype_slot() || !function.has_prototype() || - function.PrototypeRequiresRuntimeLookup()) { + if (!function.map().has_prototype_slot() || + !function.has_instance_prototype(dependencies()) || + function.PrototypeRequiresRuntimeLookup(dependencies())) { return NoChange(); } @@ -677,9 +665,9 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) { Node* constructor = NodeProperties::GetValueInput(node, 0); Node* value = NodeProperties::GetValueInput(node, 1); Node* context = NodeProperties::GetContextInput(node); - Node* frame_state = NodeProperties::GetFrameStateInput(node); - Node* effect = NodeProperties::GetEffectInput(node); - Node* control = NodeProperties::GetControlInput(node); + FrameState frame_state{NodeProperties::GetFrameStateInput(node)}; + Effect effect{NodeProperties::GetEffectInput(node)}; + Control control{NodeProperties::GetControlInput(node)}; // Check if the {constructor} is the %Promise% function. HeapObjectMatcher m(constructor); @@ -712,38 +700,32 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) { Node* promise = NodeProperties::GetValueInput(node, 0); Node* resolution = NodeProperties::GetValueInput(node, 1); Node* context = NodeProperties::GetContextInput(node); - Node* effect = NodeProperties::GetEffectInput(node); - Node* control = NodeProperties::GetControlInput(node); + Effect effect{NodeProperties::GetEffectInput(node)}; + Control control{NodeProperties::GetControlInput(node)}; // Check if we know something about the {resolution}. MapInference inference(broker(), resolution, effect); if (!inference.HaveMaps()) return NoChange(); - MapHandles const& resolution_maps = inference.GetMaps(); + ZoneVector<MapRef> const& resolution_maps = inference.GetMaps(); // Compute property access info for "then" on {resolution}. ZoneVector<PropertyAccessInfo> access_infos(graph()->zone()); AccessInfoFactory access_info_factory(broker(), dependencies(), graph()->zone()); - if (!broker()->is_concurrent_inlining()) { - access_info_factory.ComputePropertyAccessInfos( - resolution_maps, factory()->then_string(), AccessMode::kLoad, - &access_infos); - } else { - // Obtain pre-computed access infos from the broker. 
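The all/none bookkeeping in InferHasInPrototypeChain above collapses per-receiver answers into a three-way result. A standalone model of that logic, walking integer chains instead of map prototypes; as in the real code, the input set must be non-empty for the result to be meaningful:

#include <algorithm>
#include <vector>

enum InferResult { kIsInChain, kIsNotInChain, kMayBeInChain };

InferResult InferHasInChain(const std::vector<std::vector<int>>& chains,
                            int prototype) {
  bool all = true;   // every receiver chain contained the prototype so far
  bool none = true;  // no receiver chain contained the prototype so far
  for (const std::vector<int>& chain : chains) {
    const bool found =
        std::find(chain.begin(), chain.end(), prototype) != chain.end();
    if (found) {
      none = false;
    } else {
      all = false;
    }
  }
  if (all) return kIsInChain;
  if (none) return kIsNotInChain;
  return kMayBeInChain;  // mixed evidence: cannot constant-fold the check
}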
- for (auto map : resolution_maps) { - MapRef map_ref = MakeRef(broker(), map); - access_infos.push_back(broker()->GetPropertyAccessInfo( - map_ref, MakeRef(broker(), isolate()->factory()->then_string()), - AccessMode::kLoad, dependencies())); - } + + for (const MapRef& map : resolution_maps) { + access_infos.push_back(broker()->GetPropertyAccessInfo( + map, MakeRef(broker(), isolate()->factory()->then_string()), + AccessMode::kLoad, dependencies())); } PropertyAccessInfo access_info = access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos, AccessMode::kLoad); // TODO(v8:11457) Support dictionary mode prototypes here. - if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) + if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) { return inference.NoChange(); + } // Only optimize when {resolution} definitely doesn't have a "then" property. if (!access_info.IsNotFound()) return inference.NoChange(); @@ -793,7 +775,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess( Node* node, Node* lookup_start_object, Node* receiver, Node* value, NameRef const& name, AccessMode access_mode, Node* key, PropertyCellRef const& property_cell, Node* effect) { - if (!property_cell.Serialize()) { + if (!property_cell.Cache()) { TRACE_BROKER_MISSING(broker(), "usable data for " << property_cell); return NoChange(); } @@ -1016,9 +998,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) { ReplaceWithValue(node, value, effect); return Replace(value); } else if (feedback.IsPropertyCell()) { - return ReduceGlobalAccess(node, nullptr, nullptr, nullptr, - MakeRef(broker(), p.name()), AccessMode::kLoad, - nullptr, feedback.property_cell()); + return ReduceGlobalAccess(node, nullptr, nullptr, nullptr, p.name(broker()), + AccessMode::kLoad, nullptr, + feedback.property_cell()); } else { DCHECK(feedback.IsMegamorphic()); return NoChange(); @@ -1047,9 +1029,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) { ReplaceWithValue(node, value, effect, control); return Replace(value); } else if (feedback.IsPropertyCell()) { - return ReduceGlobalAccess(node, nullptr, nullptr, value, - MakeRef(broker(), p.name()), AccessMode::kStore, - nullptr, feedback.property_cell()); + return ReduceGlobalAccess(node, nullptr, nullptr, value, p.name(broker()), + AccessMode::kStore, nullptr, + feedback.property_cell()); } else { DCHECK(feedback.IsMegamorphic()); return NoChange(); @@ -1081,11 +1063,7 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess( } MinimorphicLoadPropertyAccessInfo access_info = - broker()->GetPropertyAccessInfo( - feedback, source, - broker()->is_concurrent_inlining() - ? 
SerializationPolicy::kAssumeSerialized - : SerializationPolicy::kSerializeIfNeeded); + broker()->GetPropertyAccessInfo(feedback, source); if (access_info.IsInvalid()) return NoChange(); PropertyAccessBuilder access_builder(jsgraph(), broker(), nullptr); @@ -1095,8 +1073,8 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess( } ZoneHandleSet<Map> maps; - for (Handle<Map> map : feedback.maps()) { - maps.insert(map, graph()->zone()); + for (const MapRef& map : feedback.maps()) { + maps.insert(map.object(), graph()->zone()); } effect = graph()->NewNode( @@ -1131,9 +1109,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( STATIC_ASSERT(JSLoadNamedFromSuperNode::ReceiverIndex() == 0); Node* context = NodeProperties::GetContextInput(node); - Node* frame_state = NodeProperties::GetFrameStateInput(node); - Node* effect = NodeProperties::GetEffectInput(node); - Node* control = NodeProperties::GetControlInput(node); + FrameState frame_state{NodeProperties::GetFrameStateInput(node)}; + Effect effect{NodeProperties::GetEffectInput(node)}; + Control control{NodeProperties::GetControlInput(node)}; // receiver = the object we pass to the accessor (if any) as the "this" value. Node* receiver = NodeProperties::GetValueInput(node, 0); @@ -1150,9 +1128,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( } // Either infer maps from the graph or use the feedback. - ZoneVector<Handle<Map>> lookup_start_object_maps(zone()); + ZoneVector<MapRef> lookup_start_object_maps(zone()); if (!InferMaps(lookup_start_object, effect, &lookup_start_object_maps)) { - lookup_start_object_maps = feedback.maps(); + for (const MapRef& map : feedback.maps()) { + lookup_start_object_maps.push_back(map); + } } RemoveImpossibleMaps(lookup_start_object, &lookup_start_object_maps); @@ -1160,8 +1140,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( // contexts' global proxy, and turn that into a direct access to the // corresponding global object instead. if (lookup_start_object_maps.size() == 1) { - MapRef lookup_start_object_map = - MakeRef(broker(), lookup_start_object_maps[0]); + MapRef lookup_start_object_map = lookup_start_object_maps[0]; if (lookup_start_object_map.equals( native_context().global_proxy_object().map())) { if (!native_context().GlobalIsDetached()) { @@ -1180,14 +1159,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( ZoneVector<PropertyAccessInfo> access_infos(zone()); { ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone()); - for (Handle<Map> map_handle : lookup_start_object_maps) { - MapRef map = MakeRef(broker(), map_handle); + for (const MapRef& map : lookup_start_object_maps) { if (map.is_deprecated()) continue; PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( - map, feedback.name(), access_mode, dependencies(), - broker()->is_concurrent_inlining() - ? 
SerializationPolicy::kAssumeSerialized - : SerializationPolicy::kSerializeIfNeeded); + map, feedback.name(), access_mode, dependencies()); access_infos_for_feedback.push_back(access_info); } @@ -1246,12 +1221,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( Node* if_true = graph()->NewNode(common()->IfTrue(), branch); Node* etrue = effect; - Node* if_false = graph()->NewNode(common()->IfFalse(), branch); - Node* efalse = effect; - { - access_builder.BuildCheckMaps(receiver, &efalse, if_false, - access_info.lookup_start_object_maps()); - } + Control if_false{graph()->NewNode(common()->IfFalse(), branch)}; + Effect efalse = effect; + access_builder.BuildCheckMaps(receiver, &efalse, if_false, + access_info.lookup_start_object_maps()); control = graph()->NewNode(common()->Merge(2), if_true, if_false); effect = @@ -1319,11 +1292,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( Node* this_value = value; Node* this_lookup_start_object = lookup_start_object; Node* this_receiver = receiver; - Node* this_effect = effect; - Node* this_control = fallthrough_control; + Effect this_effect = effect; + Control this_control{fallthrough_control}; // Perform map check on {lookup_start_object}. - ZoneVector<Handle<Map>> const& lookup_start_object_maps = + ZoneVector<MapRef> const& lookup_start_object_maps = access_info.lookup_start_object_maps(); { // Whether to insert a dedicated MapGuard node into the @@ -1345,8 +1318,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( } else { // Explicitly branch on the {lookup_start_object_maps}. ZoneHandleSet<Map> maps; - for (Handle<Map> map : lookup_start_object_maps) { - maps.insert(map, graph()->zone()); + for (MapRef map : lookup_start_object_maps) { + maps.insert(map.object(), graph()->zone()); } Node* check = this_effect = graph()->NewNode(simplified()->CompareMaps(maps), @@ -1377,8 +1350,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( // Introduce a MapGuard to learn from this on the effect chain. if (insert_map_guard) { ZoneHandleSet<Map> maps; - for (auto lookup_start_object_map : lookup_start_object_maps) { - maps.insert(lookup_start_object_map, graph()->zone()); + for (MapRef map : lookup_start_object_maps) { + maps.insert(map.object(), graph()->zone()); } this_effect = graph()->NewNode(simplified()->MapGuard(maps), @@ -1464,7 +1437,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { JSLoadNamedNode n(node); NamedAccess const& p = n.Parameters(); Node* const receiver = n.object(); - NameRef name = MakeRef(broker(), p.name()); + NameRef name = p.name(broker()); // Check if we have a constant receiver. HeapObjectMatcher m(receiver); @@ -1474,11 +1447,11 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { name.equals(MakeRef(broker(), factory()->prototype_string()))) { // Optimize "prototype" property of functions. JSFunctionRef function = object.AsJSFunction(); - if (!function.serialized()) return NoChange(); // TODO(neis): Remove the has_prototype_slot condition once the broker is // always enabled. 
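Both the CompareMaps branch and the MapGuard above first flatten the MapRef list into a set of raw map handles via map.object(). The dedup step in isolation, with std::set and opaque pointers standing in for ZoneHandleSet<Map> and Handle<Map>:

#include <set>
#include <vector>

struct MapRefModel {
  const void* handle;  // stands in for the underlying Handle<Map>
  const void* object() const { return handle; }
};

std::set<const void*> ToHandleSet(const std::vector<MapRefModel>& maps) {
  std::set<const void*> set;
  for (const MapRefModel& map : maps) {
    set.insert(map.object());  // duplicates collapse, as in ZoneHandleSet
  }
  return set;
}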
- if (!function.map().has_prototype_slot() || !function.has_prototype() || - function.PrototypeRequiresRuntimeLookup()) { + if (!function.map().has_prototype_slot() || + !function.has_instance_prototype(dependencies()) || + function.PrototypeRequiresRuntimeLookup(dependencies())) { return NoChange(); } ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function); @@ -1504,7 +1477,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamedFromSuper( Node* node) { JSLoadNamedFromSuperNode n(node); NamedAccess const& p = n.Parameters(); - NameRef name = MakeRef(broker(), p.name()); + NameRef name = p.name(broker()); if (!p.feedback().IsValid()) return NoChange(); return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(), @@ -1522,7 +1495,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) { Control control = n.control(); // Load iterator property operator - Handle<Name> iterator_symbol = factory()->iterator_symbol(); + NameRef iterator_symbol = MakeRef(broker(), factory()->iterator_symbol()); const Operator* load_op = javascript()->LoadNamed(iterator_symbol, p.loadFeedback()); @@ -1599,17 +1572,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) { JSStoreNamedNode n(node); NamedAccess const& p = n.Parameters(); if (!p.feedback().IsValid()) return NoChange(); - return ReducePropertyAccess(node, nullptr, MakeRef(broker(), p.name()), - n.value(), FeedbackSource(p.feedback()), - AccessMode::kStore); + return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(), + FeedbackSource(p.feedback()), AccessMode::kStore); } Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) { JSStoreNamedOwnNode n(node); StoreNamedOwnParameters const& p = n.Parameters(); if (!p.feedback().IsValid()) return NoChange(); - return ReducePropertyAccess(node, nullptr, MakeRef(broker(), p.name()), - n.value(), FeedbackSource(p.feedback()), + return ReducePropertyAccess(node, nullptr, p.name(broker()), n.value(), + FeedbackSource(p.feedback()), AccessMode::kStoreInLiteral); } @@ -1642,6 +1614,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString( } namespace { + base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker, Node* receiver) { HeapObjectMatcher m(receiver); @@ -1652,34 +1625,34 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker, if (typed_array.is_on_heap()) return base::nullopt; return typed_array; } + } // namespace void JSNativeContextSpecialization::RemoveImpossibleMaps( - Node* object, ZoneVector<Handle<Map>>* maps) const { + Node* object, ZoneVector<MapRef>* maps) const { base::Optional<MapRef> root_map = InferRootMap(object); if (root_map.has_value() && !root_map->is_abandoned_prototype_map()) { - maps->erase( - std::remove_if(maps->begin(), maps->end(), - [root_map, this](Handle<Map> map) { - MapRef map_ref = MakeRef(broker(), map); - return map_ref.is_abandoned_prototype_map() || - (map_ref.FindRootMap().has_value() && - !map_ref.FindRootMap()->equals(*root_map)); - }), - maps->end()); + maps->erase(std::remove_if(maps->begin(), maps->end(), + [root_map](const MapRef& map) { + return map.is_abandoned_prototype_map() || + (map.FindRootMap().has_value() && + !map.FindRootMap()->equals(*root_map)); + }), + maps->end()); } } // Possibly refine the feedback using inferred map information from the graph. 
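RemoveImpossibleMaps above is the classic erase-remove idiom: std::remove_if partitions the surviving maps to the front of the vector, and erase chops the rejected tail. The same filter with the root-map comparison reduced to an integer field (a sketch, not the MapRef API):

#include <algorithm>
#include <vector>

struct InferredMap {
  int root_map;  // stands in for MapRef::FindRootMap()
  bool abandoned_prototype_map;
};

void RemoveImpossibleMaps(int inferred_root, std::vector<InferredMap>* maps) {
  maps->erase(std::remove_if(maps->begin(), maps->end(),
                             [inferred_root](const InferredMap& map) {
                               return map.abandoned_prototype_map ||
                                      map.root_map != inferred_root;
                             }),
              maps->end());
}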
ElementAccessFeedback const& JSNativeContextSpecialization::TryRefineElementAccessFeedback( - ElementAccessFeedback const& feedback, Node* receiver, Node* effect) const { + ElementAccessFeedback const& feedback, Node* receiver, + Effect effect) const { AccessMode access_mode = feedback.keyed_mode().access_mode(); bool use_inference = access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas; if (!use_inference) return feedback; - ZoneVector<Handle<Map>> inferred_maps(zone()); + ZoneVector<MapRef> inferred_maps(zone()); if (!InferMaps(receiver, effect, &inferred_maps)) return feedback; RemoveImpossibleMaps(receiver, &inferred_maps); @@ -1687,7 +1660,7 @@ JSNativeContextSpecialization::TryRefineElementAccessFeedback( // impossible maps when a target is kept only because more than one of its // sources was inferred. Think of a way to completely rule out impossible // maps. - return feedback.Refine(inferred_maps, zone()); + return feedback.Refine(broker(), inferred_maps); } Reduction JSNativeContextSpecialization::ReduceElementAccess( @@ -1705,10 +1678,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( JSHasPropertyNode::ObjectIndex() == 0); Node* receiver = NodeProperties::GetValueInput(node, 0); - Node* effect = NodeProperties::GetEffectInput(node); - Node* control = NodeProperties::GetControlInput(node); - Node* frame_state = - NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead()); + Effect effect{NodeProperties::GetEffectInput(node)}; + Control control{NodeProperties::GetControlInput(node)}; // TODO(neis): It's odd that we do optimizations below that don't really care // about the feedback, but we don't do them when the feedback is megamorphic. @@ -1749,8 +1720,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // the zone allocation of this vector. ZoneVector<MapRef> prototype_maps(zone()); for (ElementAccessInfo const& access_info : access_infos) { - for (Handle<Map> map : access_info.lookup_start_object_maps()) { - MapRef receiver_map = MakeRef(broker(), map); + for (MapRef receiver_map : access_info.lookup_start_object_maps()) { // If the {receiver_map} has a prototype and its elements backing // store is either holey, or we have a potentially growing store, // then we need to check that all prototypes have stable maps with @@ -1793,11 +1763,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( ElementAccessInfo access_info = access_infos.front(); // Perform possible elements kind transitions. - MapRef transition_target = - MakeRef(broker(), access_info.lookup_start_object_maps().front()); - for (auto source : access_info.transition_sources()) { + MapRef transition_target = access_info.lookup_start_object_maps().front(); + for (MapRef transition_source : access_info.transition_sources()) { DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1); - MapRef transition_source = MakeRef(broker(), source); effect = graph()->NewNode( simplified()->TransitionElementsKind(ElementsTransition( IsSimpleMapChangeTransition(transition_source.elements_kind(), @@ -1813,6 +1781,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // elements kind transition above. This is because those operators // don't have the kNoWrite flag on it, even though they are not // observable by JavaScript. 
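TryRefineElementAccessFeedback above is a guarded pipeline: only loads and has-checks may refine, and only when graph-based map inference (followed by the impossible-map filter) yields anything; otherwise the original feedback passes through untouched. Its control flow over stand-in types:

#include <optional>
#include <vector>

enum class AccessMode { kLoad, kStore, kHas };

struct FeedbackModel {
  std::vector<int> maps;  // integers stand in for MapRefs
};

FeedbackModel TryRefine(const FeedbackModel& feedback, AccessMode mode,
                        const std::optional<std::vector<int>>& inferred) {
  const bool use_inference =
      mode == AccessMode::kLoad || mode == AccessMode::kHas;
  if (!use_inference) return feedback;         // stores keep raw feedback
  if (!inferred.has_value()) return feedback;  // inference found nothing
  return FeedbackModel{*inferred};  // stands in for feedback.Refine(...)
}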
+ Node* frame_state = + NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead()); effect = graph()->NewNode(common()->Checkpoint(), frame_state, effect, control); @@ -1841,14 +1811,12 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( Node* this_receiver = receiver; Node* this_value = value; Node* this_index = index; - Node* this_effect = effect; - Node* this_control = fallthrough_control; + Effect this_effect = effect; + Control this_control{fallthrough_control}; // Perform possible elements kind transitions. - MapRef transition_target = - MakeRef(broker(), access_info.lookup_start_object_maps().front()); - for (auto source : access_info.transition_sources()) { - MapRef transition_source = MakeRef(broker(), source); + MapRef transition_target = access_info.lookup_start_object_maps().front(); + for (MapRef transition_source : access_info.transition_sources()) { DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1); this_effect = graph()->NewNode( simplified()->TransitionElementsKind(ElementsTransition( @@ -1861,7 +1829,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( } // Perform map check(s) on {receiver}. - ZoneVector<Handle<Map>> const& receiver_maps = + ZoneVector<MapRef> const& receiver_maps = access_info.lookup_start_object_maps(); if (j == access_infos.size() - 1) { // Last map check on the fallthrough control path, do a @@ -1872,8 +1840,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( } else { // Explicitly branch on the {receiver_maps}. ZoneHandleSet<Map> maps; - for (Handle<Map> map : receiver_maps) { - maps.insert(map, graph()->zone()); + for (MapRef map : receiver_maps) { + maps.insert(map.object(), graph()->zone()); } Node* check = this_effect = graph()->NewNode(simplified()->CompareMaps(maps), receiver, @@ -2213,14 +2181,13 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall( Node* receiver, ConvertReceiverMode receiver_mode, Node* context, Node* frame_state, Node** effect, Node** control, ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) { - ObjectRef constant = MakeRef(broker(), access_info.constant()); + ObjectRef constant = access_info.constant().value(); if (access_info.IsDictionaryProtoAccessorConstant()) { // For fast mode holders we recorded dependencies in BuildPropertyLoad. - for (const Handle<Map> map : access_info.lookup_start_object_maps()) { + for (const MapRef map : access_info.lookup_start_object_maps()) { dependencies()->DependOnConstantInDictionaryPrototypeChain( - MakeRef(broker(), map), MakeRef(broker(), access_info.name()), - constant, PropertyKind::kAccessor); + map, access_info.name(), constant, PropertyKind::kAccessor); } } @@ -2235,10 +2202,9 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall( receiver_mode), target, receiver, feedback, context, frame_state, *effect, *control); } else { - Node* holder = access_info.holder().is_null() - ? receiver - : jsgraph()->Constant(MakeRef( - broker(), access_info.holder().ToHandleChecked())); + Node* holder = access_info.holder().has_value() + ? 
jsgraph()->Constant(access_info.holder().value()) + : receiver; value = InlineApiCall(receiver, holder, frame_state, nullptr, effect, control, constant.AsFunctionTemplateInfo()); } @@ -2258,7 +2224,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall( Node* receiver, Node* value, Node* context, Node* frame_state, Node** effect, Node** control, ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) { - ObjectRef constant = MakeRef(broker(), access_info.constant()); + ObjectRef constant = access_info.constant().value(); Node* target = jsgraph()->Constant(constant); // Introduce the call to the setter function. if (constant.IsJSFunction()) { @@ -2270,10 +2236,9 @@ void JSNativeContextSpecialization::InlinePropertySetterCall( target, receiver, value, feedback, context, frame_state, *effect, *control); } else { - Node* holder = access_info.holder().is_null() - ? receiver - : jsgraph()->Constant(MakeRef( - broker(), access_info.holder().ToHandleChecked())); + Node* holder = access_info.holder().has_value() + ? jsgraph()->Constant(access_info.holder().value()) + : receiver; InlineApiCall(receiver, holder, frame_state, value, effect, control, constant.AsFunctionTemplateInfo()); } @@ -2347,12 +2312,11 @@ JSNativeContextSpecialization::BuildPropertyLoad( Node* effect, Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) { // Determine actual holder and perform prototype chain checks. - Handle<JSObject> holder; - if (access_info.holder().ToHandle(&holder) && - !access_info.HasDictionaryHolder()) { + base::Optional<JSObjectRef> holder = access_info.holder(); + if (holder.has_value() && !access_info.HasDictionaryHolder()) { dependencies()->DependOnStablePrototypeChains( access_info.lookup_start_object_maps(), kStartAtPrototype, - MakeRef(broker(), holder)); + holder.value()); } // Generate the actual property access. @@ -2369,8 +2333,7 @@ JSNativeContextSpecialization::BuildPropertyLoad( InlinePropertyGetterCall(receiver, receiver_mode, context, frame_state, &effect, &control, if_exceptions, access_info); } else if (access_info.IsModuleExport()) { - Node* cell = - jsgraph()->Constant(MakeRef(broker(), access_info.constant()).AsCell()); + Node* cell = jsgraph()->Constant(access_info.constant().value().AsCell()); value = effect = graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()), cell, effect, control); @@ -2402,11 +2365,11 @@ JSNativeContextSpecialization::BuildPropertyTest( DCHECK(!access_info.HasDictionaryHolder()); // Determine actual holder and perform prototype chain checks. - Handle<JSObject> holder; - if (access_info.holder().ToHandle(&holder)) { + base::Optional<JSObjectRef> holder = access_info.holder(); + if (holder.has_value()) { dependencies()->DependOnStablePrototypeChains( access_info.lookup_start_object_maps(), kStartAtPrototype, - MakeRef(broker(), holder)); + holder.value()); } Node* value = access_info.IsNotFound() ? jsgraph()->FalseConstant() @@ -2444,13 +2407,13 @@ JSNativeContextSpecialization::BuildPropertyStore( Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info, AccessMode access_mode) { // Determine actual holder and perform prototype chain checks. 
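The holder selection rewritten above (MaybeHandle to base::Optional) is the plain optional-with-fallback shape shared by the getter and setter paths. A sketch with an opaque node type:

#include <optional>

struct NodeModel {
  int id;  // opaque stand-in for a compiler Node*
};

// Prefer the constant holder found on the prototype chain; otherwise the
// receiver itself acts as the holder.
NodeModel SelectHolder(const std::optional<NodeModel>& holder,
                       NodeModel receiver) {
  return holder.has_value() ? holder.value() : receiver;
}

For a value type like this, holder.value_or(receiver) says the same thing in one call; the branch form matches the diff only because the real code must build a Constant node in the holder case.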
- Handle<JSObject> holder; PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies()); - if (access_info.holder().ToHandle(&holder)) { + base::Optional<JSObjectRef> holder = access_info.holder(); + if (holder.has_value()) { DCHECK_NE(AccessMode::kStoreInLiteral, access_mode); dependencies()->DependOnStablePrototypeChains( access_info.lookup_start_object_maps(), kStartAtPrototype, - MakeRef(broker(), holder)); + holder.value()); } DCHECK(!access_info.IsNotFound()); @@ -2571,13 +2534,14 @@ JSNativeContextSpecialization::BuildPropertyStore( } else if (field_representation == MachineRepresentation::kTaggedPointer) { - Handle<Map> field_map; - if (access_info.field_map().ToHandle(&field_map)) { + base::Optional<MapRef> field_map = access_info.field_map(); + if (field_map.has_value()) { // Emit a map check for the value. - effect = graph()->NewNode( - simplified()->CheckMaps(CheckMapsFlag::kNone, - ZoneHandleSet<Map>(field_map)), - value, effect, control); + effect = + graph()->NewNode(simplified()->CheckMaps( + CheckMapsFlag::kNone, + ZoneHandleSet<Map>(field_map->object())), + value, effect, control); } else { // Ensure that {value} is a HeapObject. value = effect = graph()->NewNode(simplified()->CheckHeapObject(), @@ -2603,11 +2567,11 @@ JSNativeContextSpecialization::BuildPropertyStore( UNREACHABLE(); } // Check if we need to perform a transitioning store. - Handle<Map> transition_map; - if (access_info.transition_map().ToHandle(&transition_map)) { + base::Optional<MapRef> transition_map = access_info.transition_map(); + if (transition_map.has_value()) { // Check if we need to grow the properties backing store // with this transitioning store. - MapRef transition_map_ref = MakeRef(broker(), transition_map); + MapRef transition_map_ref = transition_map.value(); MapRef original_map = transition_map_ref.GetBackPointer().AsMap(); if (original_map.UnusedPropertyFields() == 0) { DCHECK(!field_index.is_inobject()); @@ -2674,7 +2638,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral( Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) { DCHECK_EQ(IrOpcode::kJSToObject, node->opcode()); Node* receiver = NodeProperties::GetValueInput(node, 0); - Node* effect = NodeProperties::GetEffectInput(node); + Effect effect{NodeProperties::GetEffectInput(node)}; MapInference inference(broker(), receiver, effect); if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAreJSReceiver()) { @@ -2709,7 +2673,7 @@ JSNativeContextSpecialization::BuildElementAccess( // TODO(bmeurer): We currently specialize based on elements kind. We should // also be able to properly support strings and other JSObjects here. ElementsKind elements_kind = access_info.elements_kind(); - ZoneVector<Handle<Map>> const& receiver_maps = + ZoneVector<MapRef> const& receiver_maps = access_info.lookup_start_object_maps(); if (IsTypedArrayElementsKind(elements_kind)) { @@ -3451,12 +3415,11 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name, } bool JSNativeContextSpecialization::CanTreatHoleAsUndefined( - ZoneVector<Handle<Map>> const& receiver_maps) { + ZoneVector<MapRef> const& receiver_maps) { // Check if all {receiver_maps} have one of the initial Array.prototype // or Object.prototype objects as their prototype (in any of the current // native contexts, as the global Array protector works isolate-wide). 
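In BuildPropertyStore above, a present transition_map marks a transitioning store, and the properties backing store must grow exactly when the pre-transition (back-pointer) map has no unused property fields left. That decision isolated over stand-in types:

#include <optional>

struct MapWithSlack {
  int unused_property_fields;  // stands in for MapRef::UnusedPropertyFields()
};

bool NeedsBackingStoreGrowth(const std::optional<MapWithSlack>& transition_map,
                             const MapWithSlack& original_map) {
  // Only a transitioning store adds a field; growth is required precisely
  // when the original map has no slack slots remaining.
  return transition_map.has_value() &&
         original_map.unused_property_fields == 0;
}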
- for (Handle<Map> map : receiver_maps) { - MapRef receiver_map = MakeRef(broker(), map); + for (MapRef receiver_map : receiver_maps) { ObjectRef receiver_prototype = receiver_map.prototype().value(); if (!receiver_prototype.IsJSObject() || !broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) { @@ -3468,25 +3431,24 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined( return dependencies()->DependOnNoElementsProtector(); } -bool JSNativeContextSpecialization::InferMaps( - Node* object, Node* effect, ZoneVector<Handle<Map>>* maps) const { - ZoneHandleSet<Map> map_set; +bool JSNativeContextSpecialization::InferMaps(Node* object, Effect effect, + ZoneVector<MapRef>* maps) const { + ZoneRefUnorderedSet<MapRef> map_set(broker()->zone()); NodeProperties::InferMapsResult result = NodeProperties::InferMapsUnsafe(broker(), object, effect, &map_set); if (result == NodeProperties::kReliableMaps) { - for (size_t i = 0; i < map_set.size(); ++i) { - maps->push_back(map_set[i]); + for (const MapRef& map : map_set) { + maps->push_back(map); } return true; } else if (result == NodeProperties::kUnreliableMaps) { // For untrusted maps, we can still use the information // if the maps are stable. - for (size_t i = 0; i < map_set.size(); ++i) { - MapRef map = MakeRef(broker(), map_set[i]); + for (const MapRef& map : map_set) { if (!map.is_stable()) return false; } - for (size_t i = 0; i < map_set.size(); ++i) { - maps->push_back(map_set[i]); + for (const MapRef& map : map_set) { + maps->push_back(map); } return true; } diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h index 0a6d1e9536..7f67a4d67c 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.h +++ b/deps/v8/src/compiler/js-native-context-specialization.h @@ -210,17 +210,16 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final // Checks if we can turn the hole into undefined when loading an element // from an object with one of the {receiver_maps}; sets up appropriate // code dependencies and might use the array protector cell. - bool CanTreatHoleAsUndefined(ZoneVector<Handle<Map>> const& receiver_maps); + bool CanTreatHoleAsUndefined(ZoneVector<MapRef> const& receiver_maps); - void RemoveImpossibleMaps(Node* object, ZoneVector<Handle<Map>>* maps) const; + void RemoveImpossibleMaps(Node* object, ZoneVector<MapRef>* maps) const; ElementAccessFeedback const& TryRefineElementAccessFeedback( ElementAccessFeedback const& feedback, Node* receiver, - Node* effect) const; + Effect effect) const; // Try to infer maps for the given {object} at the current {effect}. - bool InferMaps(Node* object, Node* effect, - ZoneVector<Handle<Map>>* maps) const; + bool InferMaps(Node* object, Effect effect, ZoneVector<MapRef>* maps) const; // Try to infer a root map for the {object} independent of the current program // location. 
@@ -235,7 +234,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final kMayBeInPrototypeChain }; InferHasInPrototypeChainResult InferHasInPrototypeChain( - Node* receiver, Node* effect, HeapObjectRef const& prototype); + Node* receiver, Effect effect, HeapObjectRef const& prototype); Node* BuildLoadPrototypeFromObject(Node* object, Node* effect, Node* control); diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc index 6bd0efd389..a06416b6f2 100644 --- a/deps/v8/src/compiler/js-operator.cc +++ b/deps/v8/src/compiler/js-operator.cc @@ -28,6 +28,12 @@ constexpr Operator::Properties BinopProperties(Operator::Opcode opcode) { : Operator::kNoProperties; } +template <class T> +Address AddressOrNull(base::Optional<T> ref) { + if (!ref.has_value()) return kNullAddress; + return ref->object().address(); +} + } // namespace namespace js_node_wrapper_utils { @@ -177,15 +183,10 @@ ContextAccess const& ContextAccessOf(Operator const* op) { return OpParameter<ContextAccess>(op); } -CreateFunctionContextParameters::CreateFunctionContextParameters( - Handle<ScopeInfo> scope_info, int slot_count, ScopeType scope_type) - : scope_info_(scope_info), - slot_count_(slot_count), - scope_type_(scope_type) {} - bool operator==(CreateFunctionContextParameters const& lhs, CreateFunctionContextParameters const& rhs) { - return lhs.scope_info().location() == rhs.scope_info().location() && + return lhs.scope_info_.object().location() == + rhs.scope_info_.object().location() && lhs.slot_count() == rhs.slot_count() && lhs.scope_type() == rhs.scope_type(); } @@ -196,7 +197,7 @@ bool operator!=(CreateFunctionContextParameters const& lhs, } size_t hash_value(CreateFunctionContextParameters const& parameters) { - return base::hash_combine(parameters.scope_info().location(), + return base::hash_combine(parameters.scope_info_.object().location(), parameters.slot_count(), static_cast<int>(parameters.scope_type())); } @@ -214,7 +215,7 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf( bool operator==(StoreNamedOwnParameters const& lhs, StoreNamedOwnParameters const& rhs) { - return lhs.name().location() == rhs.name().location() && + return lhs.name_.object().location() == rhs.name_.object().location() && lhs.feedback() == rhs.feedback(); } @@ -224,12 +225,12 @@ bool operator!=(StoreNamedOwnParameters const& lhs, } size_t hash_value(StoreNamedOwnParameters const& p) { - return base::hash_combine(p.name().location(), + return base::hash_combine(p.name_.object().location(), FeedbackSource::Hash()(p.feedback())); } std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) { - return os << Brief(*p.name()); + return os << Brief(*p.name_.object()); } StoreNamedOwnParameters const& StoreNamedOwnParametersOf(const Operator* op) { @@ -264,7 +265,7 @@ FeedbackParameter const& FeedbackParameterOf(const Operator* op) { } bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) { - return lhs.name().location() == rhs.name().location() && + return lhs.name_.object().location() == rhs.name_.object().location() && lhs.language_mode() == rhs.language_mode() && lhs.feedback() == rhs.feedback(); } @@ -276,13 +277,13 @@ bool operator!=(NamedAccess const& lhs, NamedAccess const& rhs) { size_t hash_value(NamedAccess const& p) { - return base::hash_combine(p.name().location(), p.language_mode(), + return base::hash_combine(p.name_.object().location(), p.language_mode(), FeedbackSource::Hash()(p.feedback())); } std::ostream& operator<<(std::ostream& os, 
NamedAccess const& p) { - return os << Brief(*p.name()) << ", " << p.language_mode(); + return os << Brief(*p.name_.object()) << ", " << p.language_mode(); } @@ -326,7 +327,7 @@ size_t hash_value(PropertyAccess const& p) { bool operator==(LoadGlobalParameters const& lhs, LoadGlobalParameters const& rhs) { - return lhs.name().location() == rhs.name().location() && + return lhs.name_.object().location() == rhs.name_.object().location() && lhs.feedback() == rhs.feedback() && lhs.typeof_mode() == rhs.typeof_mode(); } @@ -339,13 +340,14 @@ bool operator!=(LoadGlobalParameters const& lhs, size_t hash_value(LoadGlobalParameters const& p) { - return base::hash_combine(p.name().location(), + return base::hash_combine(p.name_.object().location(), static_cast<int>(p.typeof_mode())); } std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) { - return os << Brief(*p.name()) << ", " << static_cast<int>(p.typeof_mode()); + return os << Brief(*p.name_.object()) << ", " + << static_cast<int>(p.typeof_mode()); } @@ -358,7 +360,7 @@ const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op) { bool operator==(StoreGlobalParameters const& lhs, StoreGlobalParameters const& rhs) { return lhs.language_mode() == rhs.language_mode() && - lhs.name().location() == rhs.name().location() && + lhs.name_.object().location() == rhs.name_.object().location() && lhs.feedback() == rhs.feedback(); } @@ -370,13 +372,13 @@ bool operator!=(StoreGlobalParameters const& lhs, size_t hash_value(StoreGlobalParameters const& p) { - return base::hash_combine(p.language_mode(), p.name().location(), + return base::hash_combine(p.language_mode(), p.name_.object().location(), FeedbackSource::Hash()(p.feedback())); } std::ostream& operator<<(std::ostream& os, StoreGlobalParameters const& p) { - return os << p.language_mode() << ", " << Brief(*p.name()); + return os << p.language_mode() << ", " << Brief(*p.name_.object()); } @@ -391,11 +393,10 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op) { return OpParameter<CreateArgumentsType>(op); } - bool operator==(CreateArrayParameters const& lhs, CreateArrayParameters const& rhs) { return lhs.arity() == rhs.arity() && - lhs.site().address() == rhs.site().address(); + AddressOrNull(lhs.site_) == AddressOrNull(rhs.site_); } @@ -406,14 +407,15 @@ bool operator!=(CreateArrayParameters const& lhs, size_t hash_value(CreateArrayParameters const& p) { - return base::hash_combine(p.arity(), p.site().address()); + return base::hash_combine(p.arity(), AddressOrNull(p.site_)); } std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) { os << p.arity(); - Handle<AllocationSite> site; - if (p.site().ToHandle(&site)) os << ", " << Brief(*site); + if (p.site_.has_value()) { + os << ", " << Brief(*p.site_->object()); + } return os; } @@ -477,7 +479,7 @@ const CreateCollectionIteratorParameters& CreateCollectionIteratorParametersOf( bool operator==(CreateBoundFunctionParameters const& lhs, CreateBoundFunctionParameters const& rhs) { return lhs.arity() == rhs.arity() && - lhs.map().location() == rhs.map().location(); + lhs.map_.object().location() == rhs.map_.object().location(); } bool operator!=(CreateBoundFunctionParameters const& lhs, @@ -486,13 +488,13 @@ bool operator!=(CreateBoundFunctionParameters const& lhs, } size_t hash_value(CreateBoundFunctionParameters const& p) { - return base::hash_combine(p.arity(), p.map().location()); + return base::hash_combine(p.arity(), p.map_.object().location()); } std::ostream& operator<<(std::ostream& 
os, CreateBoundFunctionParameters const& p) { os << p.arity(); - if (!p.map().is_null()) os << ", " << Brief(*p.map()); + if (!p.map_.object().is_null()) os << ", " << Brief(*p.map_.object()); return os; } @@ -504,8 +506,9 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf( bool operator==(GetTemplateObjectParameters const& lhs, GetTemplateObjectParameters const& rhs) { - return lhs.description().location() == rhs.description().location() && - lhs.shared().location() == rhs.shared().location() && + return lhs.description_.object().location() == + rhs.description_.object().location() && + lhs.shared_.object().location() == rhs.shared_.object().location() && lhs.feedback() == rhs.feedback(); } @@ -515,13 +518,15 @@ bool operator!=(GetTemplateObjectParameters const& lhs, } size_t hash_value(GetTemplateObjectParameters const& p) { - return base::hash_combine(p.description().location(), p.shared().location(), + return base::hash_combine(p.description_.object().location(), + p.shared_.object().location(), FeedbackSource::Hash()(p.feedback())); } std::ostream& operator<<(std::ostream& os, GetTemplateObjectParameters const& p) { - return os << Brief(*p.description()) << ", " << Brief(*p.shared()); + return os << Brief(*p.description_.object()) << ", " + << Brief(*p.shared_.object()); } const GetTemplateObjectParameters& GetTemplateObjectParametersOf( @@ -533,8 +538,9 @@ const GetTemplateObjectParameters& GetTemplateObjectParametersOf( bool operator==(CreateClosureParameters const& lhs, CreateClosureParameters const& rhs) { return lhs.allocation() == rhs.allocation() && - lhs.code().location() == rhs.code().location() && - lhs.shared_info().location() == rhs.shared_info().location(); + lhs.code_.object().location() == rhs.code_.object().location() && + lhs.shared_info_.object().location() == + rhs.shared_info_.object().location(); } @@ -545,13 +551,14 @@ bool operator!=(CreateClosureParameters const& lhs, size_t hash_value(CreateClosureParameters const& p) { - return base::hash_combine(p.allocation(), p.shared_info().location()); + return base::hash_combine(p.allocation(), p.code_.object().location(), + p.shared_info_.object().location()); } std::ostream& operator<<(std::ostream& os, CreateClosureParameters const& p) { - return os << p.allocation() << ", " << Brief(*p.shared_info()) << ", " - << Brief(*p.code()); + return os << p.allocation() << ", " << Brief(*p.shared_info_.object()) << ", " + << Brief(*p.code_.object()); } @@ -563,7 +570,8 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) { bool operator==(CreateLiteralParameters const& lhs, CreateLiteralParameters const& rhs) { - return lhs.constant().location() == rhs.constant().location() && + return lhs.constant_.object().location() == + rhs.constant_.object().location() && lhs.feedback() == rhs.feedback() && lhs.length() == rhs.length() && lhs.flags() == rhs.flags(); } @@ -576,14 +584,15 @@ bool operator!=(CreateLiteralParameters const& lhs, size_t hash_value(CreateLiteralParameters const& p) { - return base::hash_combine(p.constant().location(), + return base::hash_combine(p.constant_.object().location(), FeedbackSource::Hash()(p.feedback()), p.length(), p.flags()); } std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) { - return os << Brief(*p.constant()) << ", " << p.length() << ", " << p.flags(); + return os << Brief(*p.constant_.object()) << ", " << p.length() << ", " + << p.flags(); } @@ -983,7 +992,7 @@ const Operator* JSOperatorBuilder::ConstructWithSpread( 
parameters); // parameter } -const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name, +const Operator* JSOperatorBuilder::LoadNamed(const NameRef& name, const FeedbackSource& feedback) { static constexpr int kObject = 1; static constexpr int kFeedbackVector = 1; @@ -997,7 +1006,7 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name, } const Operator* JSOperatorBuilder::LoadNamedFromSuper( - Handle<Name> name, const FeedbackSource& feedback) { + const NameRef& name, const FeedbackSource& feedback) { static constexpr int kReceiver = 1; static constexpr int kHomeObject = 1; static constexpr int kFeedbackVector = 1; @@ -1090,7 +1099,7 @@ int RestoreRegisterIndexOf(const Operator* op) { } const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode, - Handle<Name> name, + const NameRef& name, FeedbackSource const& feedback) { static constexpr int kObject = 1; static constexpr int kValue = 1; @@ -1115,7 +1124,7 @@ const Operator* JSOperatorBuilder::StoreProperty( } const Operator* JSOperatorBuilder::StoreNamedOwn( - Handle<Name> name, FeedbackSource const& feedback) { + const NameRef& name, FeedbackSource const& feedback) { static constexpr int kObject = 1; static constexpr int kValue = 1; static constexpr int kFeedbackVector = 1; @@ -1142,7 +1151,7 @@ const Operator* JSOperatorBuilder::CreateGeneratorObject() { 2, 1, 1, 1, 1, 0); // counts } -const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name, +const Operator* JSOperatorBuilder::LoadGlobal(const NameRef& name, const FeedbackSource& feedback, TypeofMode typeof_mode) { static constexpr int kFeedbackVector = 1; @@ -1156,7 +1165,7 @@ const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name, } const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode, - const Handle<Name>& name, + const NameRef& name, const FeedbackSource& feedback) { static constexpr int kValue = 1; static constexpr int kFeedbackVector = 1; @@ -1235,7 +1244,7 @@ const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) { } const Operator* JSOperatorBuilder::CreateArray( - size_t arity, MaybeHandle<AllocationSite> site) { + size_t arity, base::Optional<AllocationSiteRef> site) { // constructor, new_target, arg1, ..., argN int const value_input_count = static_cast<int>(arity) + 2; CreateArrayParameters parameters(arity, site); @@ -1275,7 +1284,7 @@ const Operator* JSOperatorBuilder::CreateCollectionIterator( } const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity, - Handle<Map> map) { + const MapRef& map) { // bound_target_function, bound_this, arg1, ..., argN int const value_input_count = static_cast<int>(arity) + 2; CreateBoundFunctionParameters parameters(arity, map); @@ -1287,7 +1296,7 @@ const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity, } const Operator* JSOperatorBuilder::CreateClosure( - Handle<SharedFunctionInfo> shared_info, Handle<CodeT> code, + const SharedFunctionInfoRef& shared_info, const CodeTRef& code, AllocationType allocation) { static constexpr int kFeedbackCell = 1; static constexpr int kArity = kFeedbackCell; @@ -1300,7 +1309,7 @@ const Operator* JSOperatorBuilder::CreateClosure( } const Operator* JSOperatorBuilder::CreateLiteralArray( - Handle<ArrayBoilerplateDescription> description, + const ArrayBoilerplateDescriptionRef& description, FeedbackSource const& feedback, int literal_flags, int number_of_elements) { CreateLiteralParameters parameters(description, feedback, number_of_elements, literal_flags); @@ -1334,7 +1343,7 @@ 
const Operator* JSOperatorBuilder::CreateArrayFromIterable() { } const Operator* JSOperatorBuilder::CreateLiteralObject( - Handle<ObjectBoilerplateDescription> constant_properties, + const ObjectBoilerplateDescriptionRef& constant_properties, FeedbackSource const& feedback, int literal_flags, int number_of_properties) { CreateLiteralParameters parameters(constant_properties, feedback, @@ -1348,8 +1357,8 @@ const Operator* JSOperatorBuilder::CreateLiteralObject( } const Operator* JSOperatorBuilder::GetTemplateObject( - Handle<TemplateObjectDescription> description, - Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback) { + const TemplateObjectDescriptionRef& description, + const SharedFunctionInfoRef& shared, FeedbackSource const& feedback) { GetTemplateObjectParameters parameters(description, shared, feedback); return zone()->New<Operator1<GetTemplateObjectParameters>>( // -- IrOpcode::kJSGetTemplateObject, // opcode @@ -1388,7 +1397,7 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() { } const Operator* JSOperatorBuilder::CreateLiteralRegExp( - Handle<String> constant_pattern, FeedbackSource const& feedback, + const StringRef& constant_pattern, FeedbackSource const& feedback, int literal_flags) { CreateLiteralParameters parameters(constant_pattern, feedback, -1, literal_flags); @@ -1401,7 +1410,7 @@ const Operator* JSOperatorBuilder::CreateLiteralRegExp( } const Operator* JSOperatorBuilder::CreateFunctionContext( - Handle<ScopeInfo> scope_info, int slot_count, ScopeType scope_type) { + const ScopeInfoRef& scope_info, int slot_count, ScopeType scope_type) { CreateFunctionContextParameters parameters(scope_info, slot_count, scope_type); return zone()->New<Operator1<CreateFunctionContextParameters>>( // -- @@ -1412,37 +1421,53 @@ const Operator* JSOperatorBuilder::CreateFunctionContext( } const Operator* JSOperatorBuilder::CreateCatchContext( - const Handle<ScopeInfo>& scope_info) { - return zone()->New<Operator1<Handle<ScopeInfo>>>( + const ScopeInfoRef& scope_info) { + return zone()->New<Operator1<ScopeInfoTinyRef>>( IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode "JSCreateCatchContext", // name 1, 1, 1, 1, 1, 2, // counts - scope_info); // parameter + ScopeInfoTinyRef{scope_info}); // parameter } const Operator* JSOperatorBuilder::CreateWithContext( - const Handle<ScopeInfo>& scope_info) { - return zone()->New<Operator1<Handle<ScopeInfo>>>( + const ScopeInfoRef& scope_info) { + return zone()->New<Operator1<ScopeInfoTinyRef>>( IrOpcode::kJSCreateWithContext, Operator::kNoProperties, // opcode "JSCreateWithContext", // name 1, 1, 1, 1, 1, 2, // counts - scope_info); // parameter + ScopeInfoTinyRef{scope_info}); // parameter } const Operator* JSOperatorBuilder::CreateBlockContext( - const Handle<ScopeInfo>& scope_info) { - return zone()->New<Operator1<Handle<ScopeInfo>>>( // -- + const ScopeInfoRef& scope_info) { + return zone()->New<Operator1<ScopeInfoTinyRef>>( // -- IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode "JSCreateBlockContext", // name 0, 1, 1, 1, 1, 2, // counts - scope_info); // parameter + ScopeInfoTinyRef{scope_info}); // parameter } -Handle<ScopeInfo> ScopeInfoOf(const Operator* op) { +ScopeInfoRef ScopeInfoOf(JSHeapBroker* broker, const Operator* op) { DCHECK(IrOpcode::kJSCreateBlockContext == op->opcode() || IrOpcode::kJSCreateWithContext == op->opcode() || IrOpcode::kJSCreateCatchContext == op->opcode()); - return OpParameter<Handle<ScopeInfo>>(op); + return 
OpParameter<ScopeInfoTinyRef>(op).AsRef(broker); +} + +bool operator==(ScopeInfoTinyRef const& lhs, ScopeInfoTinyRef const& rhs) { + return lhs.object().location() == rhs.object().location(); +} + +bool operator!=(ScopeInfoTinyRef const& lhs, ScopeInfoTinyRef const& rhs) { + return !(lhs == rhs); +} + +size_t hash_value(ScopeInfoTinyRef const& ref) { + return reinterpret_cast<size_t>(ref.object().location()); +} + +std::ostream& operator<<(std::ostream& os, ScopeInfoTinyRef const& ref) { + return os << Brief(*ref.object()); } #undef CACHED_OP_LIST diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h index 5bc2734023..260e366af8 100644 --- a/deps/v8/src/compiler/js-operator.h +++ b/deps/v8/src/compiler/js-operator.h @@ -363,28 +363,33 @@ V8_EXPORT_PRIVATE ContextAccess const& ContextAccessOf(Operator const*); // is used as a parameter by the JSCreateFunctionContext operator. class CreateFunctionContextParameters final { public: - CreateFunctionContextParameters(Handle<ScopeInfo> scope_info, int slot_count, - ScopeType scope_type); + CreateFunctionContextParameters(const ScopeInfoRef& scope_info, + int slot_count, ScopeType scope_type) + : scope_info_(scope_info), + slot_count_(slot_count), + scope_type_(scope_type) {} - Handle<ScopeInfo> scope_info() const { return scope_info_; } + ScopeInfoRef scope_info(JSHeapBroker* broker) const { + return scope_info_.AsRef(broker); + } int slot_count() const { return slot_count_; } ScopeType scope_type() const { return scope_type_; } private: - Handle<ScopeInfo> scope_info_; + const ScopeInfoTinyRef scope_info_; int const slot_count_; ScopeType const scope_type_; -}; -bool operator==(CreateFunctionContextParameters const& lhs, - CreateFunctionContextParameters const& rhs); -bool operator!=(CreateFunctionContextParameters const& lhs, - CreateFunctionContextParameters const& rhs); + friend bool operator==(CreateFunctionContextParameters const& lhs, + CreateFunctionContextParameters const& rhs); + friend bool operator!=(CreateFunctionContextParameters const& lhs, + CreateFunctionContextParameters const& rhs); -size_t hash_value(CreateFunctionContextParameters const& parameters); + friend size_t hash_value(CreateFunctionContextParameters const& parameters); -std::ostream& operator<<(std::ostream& os, - CreateFunctionContextParameters const& parameters); + friend std::ostream& operator<<( + std::ostream& os, CreateFunctionContextParameters const& parameters); +}; CreateFunctionContextParameters const& CreateFunctionContextParametersOf( Operator const*); @@ -392,23 +397,24 @@ CreateFunctionContextParameters const& CreateFunctionContextParametersOf( // Defines parameters for JSStoreNamedOwn operator. 
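The ScopeInfoTinyRef plumbing above, and the StoreNamedOwnParameters class that follows, introduce the pattern used throughout this header: operator parameters store a cheap TinyRef (essentially a bare handle), and callers must present the JSHeapBroker to rehydrate a full Ref. A schematic of the idea, with Handle, Broker, and Ref reduced to illustrative stand-ins rather than V8's real definitions:

    // Simplified stand-ins; V8's actual Handle/Ref types are richer.
    template <class T>
    struct Handle {
      T** location_;
      T** location() const { return location_; }
    };
    struct Broker {};
    template <class T>
    struct Ref {
      Handle<T> handle;
      Broker* broker;
    };

    // A "tiny" ref keeps only the handle. It is cheap to embed in an
    // operator parameter and cannot be dereferenced without a broker.
    template <class T>
    class TinyRef {
     public:
      explicit TinyRef(Handle<T> object) : object_(object) {}
      Handle<T> object() const { return object_; }
      Ref<T> AsRef(Broker* broker) const { return Ref<T>{object_, broker}; }

     private:
      Handle<T> object_;
    };

    // Equality keys off the handle location, which is how the parameter
    // classes in this header compare their stored refs.
    template <class T>
    bool operator==(const TinyRef<T>& lhs, const TinyRef<T>& rhs) {
      return lhs.object().location() == rhs.object().location();
    }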
class StoreNamedOwnParameters final { public: - StoreNamedOwnParameters(Handle<Name> name, FeedbackSource const& feedback) + StoreNamedOwnParameters(const NameRef& name, FeedbackSource const& feedback) : name_(name), feedback_(feedback) {} - Handle<Name> name() const { return name_; } + NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); } FeedbackSource const& feedback() const { return feedback_; } private: - Handle<Name> const name_; + const NameTinyRef name_; FeedbackSource const feedback_; -}; - -bool operator==(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&); -bool operator!=(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&); -size_t hash_value(StoreNamedOwnParameters const&); - -std::ostream& operator<<(std::ostream&, StoreNamedOwnParameters const&); + friend bool operator==(StoreNamedOwnParameters const&, + StoreNamedOwnParameters const&); + friend bool operator!=(StoreNamedOwnParameters const&, + StoreNamedOwnParameters const&); + friend size_t hash_value(StoreNamedOwnParameters const&); + friend std::ostream& operator<<(std::ostream&, + StoreNamedOwnParameters const&); +}; const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op); @@ -439,26 +445,26 @@ const FeedbackParameter& FeedbackParameterOf(const Operator* op); // used as a parameter by the JSLoadNamed and JSStoreNamed operators. class NamedAccess final { public: - NamedAccess(LanguageMode language_mode, Handle<Name> name, + NamedAccess(LanguageMode language_mode, const NameRef& name, FeedbackSource const& feedback) : name_(name), feedback_(feedback), language_mode_(language_mode) {} - Handle<Name> name() const { return name_; } + NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); } LanguageMode language_mode() const { return language_mode_; } FeedbackSource const& feedback() const { return feedback_; } private: - Handle<Name> const name_; + const NameTinyRef name_; FeedbackSource const feedback_; LanguageMode const language_mode_; -}; -bool operator==(NamedAccess const&, NamedAccess const&); -bool operator!=(NamedAccess const&, NamedAccess const&); + friend bool operator==(NamedAccess const&, NamedAccess const&); + friend bool operator!=(NamedAccess const&, NamedAccess const&); -size_t hash_value(NamedAccess const&); + friend size_t hash_value(NamedAccess const&); -std::ostream& operator<<(std::ostream&, NamedAccess const&); + friend std::ostream& operator<<(std::ostream&, NamedAccess const&); +}; const NamedAccess& NamedAccessOf(const Operator* op); @@ -467,27 +473,29 @@ const NamedAccess& NamedAccessOf(const Operator* op); // used as a parameter by JSLoadGlobal operator. 
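A second recurring move, visible in StoreNamedOwnParameters above and in the LoadGlobalParameters class below: the comparison, hash, and streaming operators become friends so they can read the private TinyRef member directly, while the public accessor is broker-gated. Hashing and printing therefore never need broker access; only code that wants the real object does. A compressed sketch of that shape (Broker and NameTiny are stand-ins, not V8 types):

    #include <cstddef>
    #include <functional>

    struct Broker {};
    struct NameTiny {
      const void* location;  // stand-in for Handle<Name>::location()
    };

    class ParamsLike {
     public:
      explicit ParamsLike(NameTiny name) : name_(name) {}

      // Getting the real name requires broker access.
      NameTiny name(Broker* broker) const { return name_; }

     private:
      NameTiny name_;

      // Friends may compare and hash the stored ref without a broker.
      friend bool operator==(const ParamsLike& a, const ParamsLike& b) {
        return a.name_.location == b.name_.location;
      }
      friend size_t hash_value(const ParamsLike& p) {
        return std::hash<const void*>()(p.name_.location);
      }
    };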
class LoadGlobalParameters final { public: - LoadGlobalParameters(const Handle<Name>& name, const FeedbackSource& feedback, + LoadGlobalParameters(const NameRef& name, const FeedbackSource& feedback, TypeofMode typeof_mode) : name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {} - const Handle<Name>& name() const { return name_; } + NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); } TypeofMode typeof_mode() const { return typeof_mode_; } const FeedbackSource& feedback() const { return feedback_; } private: - const Handle<Name> name_; + const NameTinyRef name_; const FeedbackSource feedback_; const TypeofMode typeof_mode_; -}; -bool operator==(LoadGlobalParameters const&, LoadGlobalParameters const&); -bool operator!=(LoadGlobalParameters const&, LoadGlobalParameters const&); + friend bool operator==(LoadGlobalParameters const&, + LoadGlobalParameters const&); + friend bool operator!=(LoadGlobalParameters const&, + LoadGlobalParameters const&); -size_t hash_value(LoadGlobalParameters const&); + friend size_t hash_value(LoadGlobalParameters const&); -std::ostream& operator<<(std::ostream&, LoadGlobalParameters const&); + friend std::ostream& operator<<(std::ostream&, LoadGlobalParameters const&); +}; const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op); @@ -497,26 +505,27 @@ const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op); class StoreGlobalParameters final { public: StoreGlobalParameters(LanguageMode language_mode, - const FeedbackSource& feedback, - const Handle<Name>& name) + const FeedbackSource& feedback, const NameRef& name) : language_mode_(language_mode), name_(name), feedback_(feedback) {} LanguageMode language_mode() const { return language_mode_; } FeedbackSource const& feedback() const { return feedback_; } - Handle<Name> const& name() const { return name_; } + NameRef name(JSHeapBroker* broker) const { return name_.AsRef(broker); } private: LanguageMode const language_mode_; - Handle<Name> const name_; + const NameTinyRef name_; FeedbackSource const feedback_; -}; -bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&); -bool operator!=(StoreGlobalParameters const&, StoreGlobalParameters const&); + friend bool operator==(StoreGlobalParameters const&, + StoreGlobalParameters const&); + friend bool operator!=(StoreGlobalParameters const&, + StoreGlobalParameters const&); -size_t hash_value(StoreGlobalParameters const&); + friend size_t hash_value(StoreGlobalParameters const&); -std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&); + friend std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&); +}; const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op); @@ -555,24 +564,26 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op); // used as parameter by JSCreateArray operators. 
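CreateArrayParameters, defined next, is the one case where the stored ref is optional: an array creation may have no allocation site. Its equality and hash therefore go through the AddressOrNull helper added near the top of js-operator.cc, which maps the empty optional to kNullAddress so that "no site" forms a single, distinct equivalence class. A sketch of that behavior with std::optional stand-ins (SiteRef here is illustrative):

    #include <cstdint>
    #include <optional>

    using Address = uintptr_t;
    constexpr Address kNullAddress = 0;

    struct SiteRef {
      Address address;  // stand-in for ref->object().address()
    };

    // Mirrors AddressOrNull: an absent site compares and hashes as
    // kNullAddress.
    Address AddressOrNull(std::optional<SiteRef> ref) {
      if (!ref.has_value()) return kNullAddress;
      return ref->address;
    }

    bool SameSite(std::optional<SiteRef> a, std::optional<SiteRef> b) {
      return AddressOrNull(a) == AddressOrNull(b);
    }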
class CreateArrayParameters final { public: - explicit CreateArrayParameters(size_t arity, MaybeHandle<AllocationSite> site) + CreateArrayParameters(size_t arity, base::Optional<AllocationSiteRef> site) : arity_(arity), site_(site) {} size_t arity() const { return arity_; } - MaybeHandle<AllocationSite> site() const { return site_; } + base::Optional<AllocationSiteRef> site(JSHeapBroker* broker) const { + return AllocationSiteTinyRef::AsOptionalRef(broker, site_); + } private: size_t const arity_; - MaybeHandle<AllocationSite> const site_; + base::Optional<AllocationSiteTinyRef> const site_; + + friend bool operator==(CreateArrayParameters const&, + CreateArrayParameters const&); + friend bool operator!=(CreateArrayParameters const&, + CreateArrayParameters const&); + friend size_t hash_value(CreateArrayParameters const&); + friend std::ostream& operator<<(std::ostream&, CreateArrayParameters const&); }; -bool operator==(CreateArrayParameters const&, CreateArrayParameters const&); -bool operator!=(CreateArrayParameters const&, CreateArrayParameters const&); - -size_t hash_value(CreateArrayParameters const&); - -std::ostream& operator<<(std::ostream&, CreateArrayParameters const&); - const CreateArrayParameters& CreateArrayParametersOf(const Operator* op); // Defines shared information for the array iterator that should be created. @@ -635,25 +646,26 @@ const CreateCollectionIteratorParameters& CreateCollectionIteratorParametersOf( // This is used as parameter by JSCreateBoundFunction operators. class CreateBoundFunctionParameters final { public: - CreateBoundFunctionParameters(size_t arity, Handle<Map> map) + CreateBoundFunctionParameters(size_t arity, const MapRef& map) : arity_(arity), map_(map) {} size_t arity() const { return arity_; } - Handle<Map> map() const { return map_; } + MapRef map(JSHeapBroker* broker) const { return map_.AsRef(broker); } private: size_t const arity_; - Handle<Map> const map_; -}; + const MapTinyRef map_; -bool operator==(CreateBoundFunctionParameters const&, - CreateBoundFunctionParameters const&); -bool operator!=(CreateBoundFunctionParameters const&, - CreateBoundFunctionParameters const&); + friend bool operator==(CreateBoundFunctionParameters const&, + CreateBoundFunctionParameters const&); + friend bool operator!=(CreateBoundFunctionParameters const&, + CreateBoundFunctionParameters const&); -size_t hash_value(CreateBoundFunctionParameters const&); + friend size_t hash_value(CreateBoundFunctionParameters const&); -std::ostream& operator<<(std::ostream&, CreateBoundFunctionParameters const&); + friend std::ostream& operator<<(std::ostream&, + CreateBoundFunctionParameters const&); +}; const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf( const Operator* op); @@ -662,54 +674,64 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf( // used as a parameter by JSCreateClosure operators. 
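The CreateClosureParameters class below also shows the knock-on effect at use sites: accessors that used to hand back a Handle now take the broker, so a reducer writes ccp.shared_info(broker()) where it previously wrote MakeRef(broker(), ccp.shared_info()) (see the ReduceJSCall hunk in js-typed-lowering.cc further down). Note too that its hash_value now mixes in the code location alongside shared_info, matching the fields its operator== compares. Sketched usage, with hypothetical stand-in types:

    struct Broker {};
    struct SharedFunctionInfoRefLike {};

    // Stand-in for CreateClosureParameters' broker-threaded accessor.
    struct ClosureParams {
      SharedFunctionInfoRefLike shared_info(Broker* broker) const {
        return SharedFunctionInfoRefLike{};
      }
    };

    // Before: shared = MakeRef(broker, ccp.shared_info());
    // After:  the parameter object materializes the ref itself.
    SharedFunctionInfoRefLike GetShared(const ClosureParams& ccp,
                                        Broker* broker) {
      return ccp.shared_info(broker);
    }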
class CreateClosureParameters final { public: - CreateClosureParameters(Handle<SharedFunctionInfo> shared_info, - Handle<CodeT> code, AllocationType allocation) + CreateClosureParameters(const SharedFunctionInfoRef& shared_info, + const CodeTRef& code, AllocationType allocation) : shared_info_(shared_info), code_(code), allocation_(allocation) {} - Handle<SharedFunctionInfo> shared_info() const { return shared_info_; } - Handle<CodeT> code() const { return code_; } + SharedFunctionInfoRef shared_info(JSHeapBroker* broker) const { + return shared_info_.AsRef(broker); + } + CodeTRef code(JSHeapBroker* broker) const { return code_.AsRef(broker); } AllocationType allocation() const { return allocation_; } private: - Handle<SharedFunctionInfo> const shared_info_; - Handle<CodeT> const code_; + const SharedFunctionInfoTinyRef shared_info_; + const CodeTTinyRef code_; AllocationType const allocation_; -}; -bool operator==(CreateClosureParameters const&, CreateClosureParameters const&); -bool operator!=(CreateClosureParameters const&, CreateClosureParameters const&); + friend bool operator==(CreateClosureParameters const&, + CreateClosureParameters const&); + friend bool operator!=(CreateClosureParameters const&, + CreateClosureParameters const&); -size_t hash_value(CreateClosureParameters const&); + friend size_t hash_value(CreateClosureParameters const&); -std::ostream& operator<<(std::ostream&, CreateClosureParameters const&); + friend std::ostream& operator<<(std::ostream&, + CreateClosureParameters const&); +}; const CreateClosureParameters& CreateClosureParametersOf(const Operator* op); class GetTemplateObjectParameters final { public: - GetTemplateObjectParameters(Handle<TemplateObjectDescription> description, - Handle<SharedFunctionInfo> shared, + GetTemplateObjectParameters(const TemplateObjectDescriptionRef& description, + const SharedFunctionInfoRef& shared, FeedbackSource const& feedback) : description_(description), shared_(shared), feedback_(feedback) {} - Handle<TemplateObjectDescription> description() const { return description_; } - Handle<SharedFunctionInfo> shared() const { return shared_; } + TemplateObjectDescriptionRef description(JSHeapBroker* broker) const { + return description_.AsRef(broker); + } + SharedFunctionInfoRef shared(JSHeapBroker* broker) const { + return shared_.AsRef(broker); + } FeedbackSource const& feedback() const { return feedback_; } private: - Handle<TemplateObjectDescription> const description_; - Handle<SharedFunctionInfo> const shared_; + const TemplateObjectDescriptionTinyRef description_; + const SharedFunctionInfoTinyRef shared_; FeedbackSource const feedback_; -}; -bool operator==(GetTemplateObjectParameters const&, - GetTemplateObjectParameters const&); -bool operator!=(GetTemplateObjectParameters const&, - GetTemplateObjectParameters const&); + friend bool operator==(GetTemplateObjectParameters const&, + GetTemplateObjectParameters const&); + friend bool operator!=(GetTemplateObjectParameters const&, + GetTemplateObjectParameters const&); -size_t hash_value(GetTemplateObjectParameters const&); + friend size_t hash_value(GetTemplateObjectParameters const&); -std::ostream& operator<<(std::ostream&, GetTemplateObjectParameters const&); + friend std::ostream& operator<<(std::ostream&, + GetTemplateObjectParameters const&); +}; const GetTemplateObjectParameters& GetTemplateObjectParametersOf( const Operator* op); @@ -719,31 +741,36 @@ const GetTemplateObjectParameters& GetTemplateObjectParametersOf( // JSCreateLiteralRegExp operators. 
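A single CreateLiteralParameters class, defined next, backs JSCreateLiteralArray, JSCreateLiteralObject, and JSCreateLiteralRegExp: the constant is held as a HeapObjectTinyRef, and the length field is overloaded (element or property count for array and object literals, -1 for regexps, as the CreateLiteralRegExp factory in js-operator.cc passes). A stand-in sketch of that convention (types here are illustrative):

    struct HeapConstant {
      const char* brief;  // stand-in for a boilerplate description/pattern
    };

    struct LiteralParams {
      HeapConstant constant;
      int length;  // number of elements/properties, or -1 for a regexp
      int flags;
    };

    LiteralParams ForArrayLiteral(HeapConstant desc, int num_elements,
                                  int flags) {
      return {desc, num_elements, flags};
    }

    LiteralParams ForRegExpLiteral(HeapConstant pattern, int flags) {
      return {pattern, -1, flags};  // regexp literals carry no length
    }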
class CreateLiteralParameters final { public: - CreateLiteralParameters(Handle<HeapObject> constant, + CreateLiteralParameters(const HeapObjectRef& constant, FeedbackSource const& feedback, int length, int flags) : constant_(constant), feedback_(feedback), length_(length), flags_(flags) {} - Handle<HeapObject> constant() const { return constant_; } + HeapObjectRef constant(JSHeapBroker* broker) const { + return constant_.AsRef(broker); + } FeedbackSource const& feedback() const { return feedback_; } int length() const { return length_; } int flags() const { return flags_; } private: - Handle<HeapObject> const constant_; + const HeapObjectTinyRef constant_; FeedbackSource const feedback_; int const length_; int const flags_; -}; -bool operator==(CreateLiteralParameters const&, CreateLiteralParameters const&); -bool operator!=(CreateLiteralParameters const&, CreateLiteralParameters const&); + friend bool operator==(CreateLiteralParameters const&, + CreateLiteralParameters const&); + friend bool operator!=(CreateLiteralParameters const&, + CreateLiteralParameters const&); -size_t hash_value(CreateLiteralParameters const&); + friend size_t hash_value(CreateLiteralParameters const&); -std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&); + friend std::ostream& operator<<(std::ostream&, + CreateLiteralParameters const&); +}; const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op); @@ -857,7 +884,16 @@ int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT; int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT; int RestoreRegisterIndexOf(const Operator* op) V8_WARN_UNUSED_RESULT; -Handle<ScopeInfo> ScopeInfoOf(const Operator* op) V8_WARN_UNUSED_RESULT; +ScopeInfoRef ScopeInfoOf(JSHeapBroker* broker, + const Operator* op) V8_WARN_UNUSED_RESULT; + +bool operator==(ScopeInfoTinyRef const&, ScopeInfoTinyRef const&); +bool operator!=(ScopeInfoTinyRef const&, ScopeInfoTinyRef const&); + +size_t hash_value(ScopeInfoTinyRef const&); + +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, + ScopeInfoTinyRef const&); // Interface for building JavaScript-level operators, e.g. directly from the // AST. 
Most operators have no parameters, thus can be globally shared for all @@ -904,13 +940,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* Create(); const Operator* CreateArguments(CreateArgumentsType type); - const Operator* CreateArray(size_t arity, MaybeHandle<AllocationSite> site); + const Operator* CreateArray(size_t arity, + base::Optional<AllocationSiteRef> site); const Operator* CreateArrayIterator(IterationKind); const Operator* CreateAsyncFunctionObject(int register_count); const Operator* CreateCollectionIterator(CollectionKind, IterationKind); - const Operator* CreateBoundFunction(size_t arity, Handle<Map> map); + const Operator* CreateBoundFunction(size_t arity, const MapRef& map); const Operator* CreateClosure( - Handle<SharedFunctionInfo> shared_info, Handle<CodeT> code, + const SharedFunctionInfoRef& shared_info, const CodeTRef& code, AllocationType allocation = AllocationType::kYoung); const Operator* CreateIterResultObject(); const Operator* CreateStringIterator(); @@ -919,25 +956,25 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* CreatePromise(); const Operator* CreateTypedArray(); const Operator* CreateLiteralArray( - Handle<ArrayBoilerplateDescription> constant, + const ArrayBoilerplateDescriptionRef& constant, FeedbackSource const& feedback, int literal_flags, int number_of_elements); const Operator* CreateEmptyLiteralArray(FeedbackSource const& feedback); const Operator* CreateArrayFromIterable(); const Operator* CreateEmptyLiteralObject(); const Operator* CreateLiteralObject( - Handle<ObjectBoilerplateDescription> constant, + const ObjectBoilerplateDescriptionRef& constant, FeedbackSource const& feedback, int literal_flags, int number_of_properties); const Operator* CloneObject(FeedbackSource const& feedback, int literal_flags); - const Operator* CreateLiteralRegExp(Handle<String> constant_pattern, + const Operator* CreateLiteralRegExp(const StringRef& constant_pattern, FeedbackSource const& feedback, int literal_flags); const Operator* GetTemplateObject( - Handle<TemplateObjectDescription> description, - Handle<SharedFunctionInfo> shared, FeedbackSource const& feedback); + const TemplateObjectDescriptionRef& description, + const SharedFunctionInfoRef& shared, FeedbackSource const& feedback); const Operator* CallForwardVarargs(size_t arity, uint32_t start_index); const Operator* Call( @@ -978,16 +1015,17 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final FeedbackSource const& feedback = FeedbackSource()); const Operator* LoadProperty(FeedbackSource const& feedback); - const Operator* LoadNamed(Handle<Name> name, FeedbackSource const& feedback); - const Operator* LoadNamedFromSuper(Handle<Name> name, + const Operator* LoadNamed(const NameRef& name, + FeedbackSource const& feedback); + const Operator* LoadNamedFromSuper(const NameRef& name, FeedbackSource const& feedback); const Operator* StoreProperty(LanguageMode language_mode, FeedbackSource const& feedback); - const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name, + const Operator* StoreNamed(LanguageMode language_mode, const NameRef& name, FeedbackSource const& feedback); - const Operator* StoreNamedOwn(Handle<Name> name, + const Operator* StoreNamedOwn(const NameRef& name, FeedbackSource const& feedback); const Operator* StoreDataPropertyInLiteral(const FeedbackSource& feedback); const Operator* StoreInArrayLiteral(const FeedbackSource& feedback); @@ -1000,11 +1038,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* 
CreateGeneratorObject(); - const Operator* LoadGlobal(const Handle<Name>& name, + const Operator* LoadGlobal(const NameRef& name, const FeedbackSource& feedback, TypeofMode typeof_mode = TypeofMode::kNotInside); - const Operator* StoreGlobal(LanguageMode language_mode, - const Handle<Name>& name, + const Operator* StoreGlobal(LanguageMode language_mode, const NameRef& name, const FeedbackSource& feedback); const Operator* HasContextExtension(size_t depth); @@ -1051,11 +1088,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* RejectPromise(); const Operator* ResolvePromise(); - const Operator* CreateFunctionContext(Handle<ScopeInfo> scope_info, + const Operator* CreateFunctionContext(const ScopeInfoRef& scope_info, int slot_count, ScopeType scope_type); - const Operator* CreateCatchContext(const Handle<ScopeInfo>& scope_info); - const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info); - const Operator* CreateBlockContext(const Handle<ScopeInfo>& scpope_info); + const Operator* CreateCatchContext(const ScopeInfoRef& scope_info); + const Operator* CreateWithContext(const ScopeInfoRef& scope_info); + const Operator* CreateBlockContext(const ScopeInfoRef& scope_info); const Operator* ObjectIsArray(); const Operator* ParseInt(); diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc index 833afaf31d..e986ef1baf 100644 --- a/deps/v8/src/compiler/js-typed-lowering.cc +++ b/deps/v8/src/compiler/js-typed-lowering.cc @@ -10,6 +10,7 @@ #include "src/codegen/interface-descriptors-inl.h" #include "src/compiler/access-builder.h" #include "src/compiler/allocation-builder.h" +#include "src/compiler/compilation-dependencies.h" #include "src/compiler/graph-assembler.h" #include "src/compiler/js-graph.h" #include "src/compiler/js-heap-broker.h" @@ -595,7 +596,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) { PropertyCellRef string_length_protector = MakeRef(broker(), factory()->string_length_protector()); - string_length_protector.SerializeAsProtector(); + string_length_protector.CacheAsProtector(); if (string_length_protector.value().AsSmi() == Protectors::kProtectorValid) { @@ -1172,7 +1173,7 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) { JSLoadNamedNode n(node); Node* receiver = n.object(); Type receiver_type = NodeProperties::GetType(receiver); - NameRef name = MakeRef(broker(), NamedAccessOf(node->op()).name()); + NameRef name = NamedAccessOf(node->op()).name(broker()); NameRef length_str = MakeRef(broker(), factory()->length_string()); // Optimize "length" property of strings. if (name.equals(length_str) && receiver_type.Is(Type::String())) { @@ -1622,11 +1623,6 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) { // Only optimize [[Construct]] here if {function} is a Constructor. if (!function.map().is_constructor()) return NoChange(); - if (!function.serialized()) { - TRACE_BROKER_MISSING(broker(), "data for function " << function); - return NoChange(); - } - // Patch {node} to an indirect call via the {function}'s construct stub.
bool use_builtin_construct_stub = function.shared().construct_as_builtin(); CodeRef code = MakeRef( @@ -1704,22 +1700,14 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) { if (target_type.IsHeapConstant() && target_type.AsHeapConstant()->Ref().IsJSFunction()) { function = target_type.AsHeapConstant()->Ref().AsJSFunction(); - - if (!function->serialized()) { - TRACE_BROKER_MISSING(broker(), "data for function " << *function); - return NoChange(); - } shared = function->shared(); } else if (target->opcode() == IrOpcode::kJSCreateClosure) { CreateClosureParameters const& ccp = JSCreateClosureNode{target}.Parameters(); - shared = MakeRef(broker(), ccp.shared_info()); + shared = ccp.shared_info(broker()); } else if (target->opcode() == IrOpcode::kCheckClosure) { FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op())); - base::Optional<FeedbackVectorRef> feedback_vector = cell.value(); - if (feedback_vector.has_value()) { - shared = feedback_vector->shared_function_info(); - } + shared = cell.shared_function_info(); } if (shared.has_value()) { @@ -2086,7 +2074,7 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) { Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) { DCHECK_EQ(IrOpcode::kJSLoadMessage, node->opcode()); ExternalReference const ref = - ExternalReference::address_of_pending_message_obj(isolate()); + ExternalReference::address_of_pending_message(isolate()); node->ReplaceInput(0, jsgraph()->ExternalConstant(ref)); NodeProperties::ChangeOp(node, simplified()->LoadMessage()); return Changed(node); @@ -2095,7 +2083,7 @@ Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) { Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) { DCHECK_EQ(IrOpcode::kJSStoreMessage, node->opcode()); ExternalReference const ref = - ExternalReference::address_of_pending_message_obj(isolate()); + ExternalReference::address_of_pending_message(isolate()); Node* value = NodeProperties::GetValueInput(node, 0); node->ReplaceInput(0, jsgraph()->ExternalConstant(ref)); node->ReplaceInput(1, value); @@ -2356,16 +2344,7 @@ Reduction JSTypedLowering::ReduceJSResolvePromise(Node* node) { } Reduction JSTypedLowering::Reduce(Node* node) { - const IrOpcode::Value opcode = node->opcode(); - if (broker()->generate_full_feedback_collection() && - IrOpcode::IsFeedbackCollectingOpcode(opcode)) { - // In NCI code, it is not valid to reduce feedback-collecting JS opcodes - // into non-feedback-collecting lower-level opcodes; missed feedback would - // result in soft deopts. 
- return NoChange(); - } - - switch (opcode) { + switch (node->opcode()) { case IrOpcode::kJSEqual: return ReduceJSEqual(node); case IrOpcode::kJSStrictEqual: @@ -2469,18 +2448,18 @@ Reduction JSTypedLowering::Reduce(Node* node) { Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); } - Graph* JSTypedLowering::graph() const { return jsgraph()->graph(); } +CompilationDependencies* JSTypedLowering::dependencies() const { + return broker()->dependencies(); +} Isolate* JSTypedLowering::isolate() const { return jsgraph()->isolate(); } - JSOperatorBuilder* JSTypedLowering::javascript() const { return jsgraph()->javascript(); } - CommonOperatorBuilder* JSTypedLowering::common() const { return jsgraph()->common(); } diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h index 9fa6e01a93..4c0031df3d 100644 --- a/deps/v8/src/compiler/js-typed-lowering.h +++ b/deps/v8/src/compiler/js-typed-lowering.h @@ -20,6 +20,7 @@ namespace compiler { // Forward declarations. class CommonOperatorBuilder; +class CompilationDependencies; class JSGraph; class JSOperatorBuilder; class SimplifiedOperatorBuilder; @@ -93,6 +94,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } JSHeapBroker* broker() const { return broker_; } + CompilationDependencies* dependencies() const; Isolate* isolate() const; JSOperatorBuilder* javascript() const; CommonOperatorBuilder* common() const; diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc index ee56a665db..e184534ed7 100644 --- a/deps/v8/src/compiler/loop-analysis.cc +++ b/deps/v8/src/compiler/loop-analysis.cc @@ -543,8 +543,9 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter, return loop_tree; } +#if V8_ENABLE_WEBASSEMBLY // static -ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader( +ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader( Node* loop_header, Zone* zone, size_t max_size) { auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone); std::vector<Node*> queue; @@ -580,6 +581,12 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader( loop_header); // All uses are outside the loop, do nothing. break; + case IrOpcode::kCall: + case IrOpcode::kTailCall: + case IrOpcode::kJSWasmCall: + case IrOpcode::kJSCall: + // Call nodes are considered to have unbounded size, i.e. >max_size. + return nullptr; default: for (Node* use : node->uses()) { if (visited->count(use) == 0) queue.push_back(use); @@ -614,6 +621,7 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader( return visited; } +#endif // V8_ENABLE_WEBASSEMBLY bool LoopFinder::HasMarkedExits(LoopTree* loop_tree, const LoopTree::Loop* loop) { diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h index 49db12fef3..e928e5a779 100644 --- a/deps/v8/src/compiler/loop-analysis.h +++ b/deps/v8/src/compiler/loop-analysis.h @@ -179,16 +179,19 @@ class V8_EXPORT_PRIVATE LoopFinder { static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop); - // Find all nodes of a loop given its header node. Will exit early once the - // current loop size exceeds {max_size}. This is a very restricted version of - // BuildLoopTree. - // Assumptions: +#if V8_ENABLE_WEBASSEMBLY + // Find all nodes of the loop headed by {loop_header}. Returns {nullptr} + // if the loop size in Nodes exceeds {max_size}.
In that context, function + // calls are considered to have unbounded size, so if the loop contains a + // function call, {nullptr} is always returned. + // This is a very restricted version of BuildLoopTree and makes the following + // assumptions: // 1) All loop exits of the loop are marked with LoopExit, LoopExitEffect, // and LoopExitValue nodes. // 2) There are no nested loops within this loop. - static ZoneUnorderedSet<Node*>* FindUnnestedLoopFromHeader(Node* loop_header, - Zone* zone, - size_t max_size); + static ZoneUnorderedSet<Node*>* FindSmallUnnestedLoopFromHeader( + Node* loop_header, Zone* zone, size_t max_size); +#endif }; // Copies a range of nodes any number of times. diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc index 3d61d70b02..411c6d4cb3 100644 --- a/deps/v8/src/compiler/machine-operator.cc +++ b/deps/v8/src/compiler/machine-operator.cc @@ -147,8 +147,8 @@ size_t hash_value(StoreLaneParameters params) { } std::ostream& operator<<(std::ostream& os, StoreLaneParameters params) { - return os << "(" << params.kind << " " << params.rep << " " << params.laneidx - << ")"; + return os << "(" << params.kind << " " << params.rep << " " + << static_cast<unsigned int>(params.laneidx) << ")"; } StoreLaneParameters const& StoreLaneParametersOf(Operator const* op) { diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc index b6c96163c6..f6f87cd62e 100644 --- a/deps/v8/src/compiler/map-inference.cc +++ b/deps/v8/src/compiler/map-inference.cc @@ -9,15 +9,14 @@ #include "src/compiler/js-graph.h" #include "src/compiler/simplified-operator.h" #include "src/objects/map-inl.h" -#include "src/zone/zone-handle-set.h" namespace v8 { namespace internal { namespace compiler { -MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect) - : broker_(broker), object_(object) { - ZoneHandleSet<Map> maps; +MapInference::MapInference(JSHeapBroker* broker, Node* object, Effect effect) + : broker_(broker), object_(object), maps_(broker->zone()) { + ZoneRefUnorderedSet<MapRef> maps(broker->zone()); auto result = NodeProperties::InferMapsUnsafe(broker_, object_, effect, &maps); maps_.insert(maps_.end(), maps.begin(), maps.end()); @@ -67,9 +66,8 @@ bool MapInference::AllOfInstanceTypesUnsafe( std::function<bool(InstanceType)> f) const { CHECK(HaveMaps()); - auto instance_type = [this, f](Handle<Map> map) { - MapRef map_ref = MakeRef(broker_, map); - return f(map_ref.instance_type()); + auto instance_type = [f](const MapRef& map) { + return f(map.instance_type()); }; return std::all_of(maps_.begin(), maps_.end(), instance_type); } @@ -78,22 +76,21 @@ bool MapInference::AnyOfInstanceTypesUnsafe( std::function<bool(InstanceType)> f) const { CHECK(HaveMaps()); - auto instance_type = [this, f](Handle<Map> map) { - MapRef map_ref = MakeRef(broker_, map); - return f(map_ref.instance_type()); + auto instance_type = [f](const MapRef& map) { + return f(map.instance_type()); }; return std::any_of(maps_.begin(), maps_.end(), instance_type); } -MapHandles const& MapInference::GetMaps() { +ZoneVector<MapRef> const& MapInference::GetMaps() { SetNeedGuardIfUnreliable(); return maps_; } -bool MapInference::Is(Handle<Map> expected_map) { +bool MapInference::Is(const MapRef& expected_map) { if (!HaveMaps()) return false; - const MapHandles& maps = GetMaps(); + const ZoneVector<MapRef>& maps = GetMaps(); if (maps.size() != 1) return false; return maps[0].equals(expected_map); } @@ -104,7 +101,9 @@ void
MapInference::InsertMapChecks(JSGraph* jsgraph, Effect* effect, CHECK(HaveMaps()); CHECK(feedback.IsValid()); ZoneHandleSet<Map> maps; - for (Handle<Map> map : maps_) maps.insert(map, jsgraph->graph()->zone()); + for (const MapRef& map : maps_) { + maps.insert(map.object(), jsgraph->graph()->zone()); + } *effect = jsgraph->graph()->NewNode( jsgraph->simplified()->CheckMaps(CheckMapsFlag::kNone, maps, feedback), object_, *effect, control); @@ -133,14 +132,11 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies, const FeedbackSource& feedback) { if (Safe()) return true; - auto is_stable = [this](Handle<Map> map) { - MapRef map_ref = MakeRef(broker_, map); - return map_ref.is_stable(); - }; + auto is_stable = [](const MapRef& map) { return map.is_stable(); }; if (dependencies != nullptr && std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) { - for (Handle<Map> map : maps_) { - dependencies->DependOnStableMap(MakeRef(broker_, map)); + for (const MapRef& map : maps_) { + dependencies->DependOnStableMap(map); } SetGuarded(); return true; diff --git a/deps/v8/src/compiler/map-inference.h b/deps/v8/src/compiler/map-inference.h index a1e2efbc22..e1392b6805 100644 --- a/deps/v8/src/compiler/map-inference.h +++ b/deps/v8/src/compiler/map-inference.h @@ -34,7 +34,7 @@ class Node; // reliable). class MapInference { public: - MapInference(JSHeapBroker* broker, Node* object, Node* effect); + MapInference(JSHeapBroker* broker, Node* object, Effect effect); // The destructor checks that the information has been made reliable (if // necessary) and force-crashes if not. @@ -52,10 +52,10 @@ class MapInference { // These queries require a guard. (Even instance types are generally not // reliable because of how the representation of a string can change.) - V8_WARN_UNUSED_RESULT MapHandles const& GetMaps(); + V8_WARN_UNUSED_RESULT ZoneVector<MapRef> const& GetMaps(); V8_WARN_UNUSED_RESULT bool AllOfInstanceTypes( std::function<bool(InstanceType)> f); - V8_WARN_UNUSED_RESULT bool Is(Handle<Map> expected_map); + V8_WARN_UNUSED_RESULT bool Is(const MapRef& expected_map); // These methods provide a guard. // @@ -83,7 +83,7 @@ class MapInference { JSHeapBroker* const broker_; Node* const object_; - MapHandles maps_; + ZoneVector<MapRef> maps_; enum { kReliableOrGuarded, kUnreliableDontNeedGuard, diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc index ac113ddd70..9673a51844 100644 --- a/deps/v8/src/compiler/memory-lowering.cc +++ b/deps/v8/src/compiler/memory-lowering.cc @@ -15,6 +15,10 @@ #include "src/compiler/simplified-operator.h" #include "src/roots/roots-inl.h" +#if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/wasm-linkage.h" +#include "src/wasm/wasm-objects.h" +#endif namespace v8 { namespace internal { namespace compiler { @@ -100,6 +104,32 @@ Reduction MemoryLowering::Reduce(Node* node) { } } +void MemoryLowering::EnsureAllocateOperator() { + if (allocate_operator_.is_set()) return; + + auto descriptor = AllocateDescriptor{}; + StubCallMode mode = isolate_ != nullptr ? 
StubCallMode::kCallCodeObject + : StubCallMode::kCallBuiltinPointer; + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph_zone(), descriptor, descriptor.GetStackParameterCount(), + CallDescriptor::kCanUseRoots, Operator::kNoThrow, mode); + allocate_operator_.set(common()->Call(call_descriptor)); +} + +#if V8_ENABLE_WEBASSEMBLY +Node* MemoryLowering::GetWasmInstanceNode() { + if (wasm_instance_node_.is_set()) return wasm_instance_node_.get(); + for (Node* use : graph()->start()->uses()) { + if (use->opcode() == IrOpcode::kParameter && + ParameterIndexOf(use->op()) == wasm::kWasmInstanceParameterIndex) { + wasm_instance_node_.set(use); + return use; + } + } + UNREACHABLE(); // The instance node must have been created before. +} +#endif // V8_ENABLE_WEBASSEMBLY + #define __ gasm()-> Reduction MemoryLowering::ReduceAllocateRaw( @@ -123,29 +153,82 @@ Reduction MemoryLowering::ReduceAllocateRaw( gasm()->InitializeEffectControl(effect, control); Node* allocate_builtin; - if (allocation_type == AllocationType::kYoung) { - if (allow_large_objects == AllowLargeObjects::kTrue) { - allocate_builtin = __ AllocateInYoungGenerationStubConstant(); + if (isolate_ != nullptr) { + if (allocation_type == AllocationType::kYoung) { + if (allow_large_objects == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInYoungGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant(); + } } else { - allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant(); + if (allow_large_objects == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInOldGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInOldGenerationStubConstant(); + } } } else { - if (allow_large_objects == AllowLargeObjects::kTrue) { - allocate_builtin = __ AllocateInOldGenerationStubConstant(); + // This lowering is used by Wasm, where we compile isolate-independent + // code. Builtin calls simply encode the target builtin ID, which will + // be patched to the builtin's address later. +#if V8_ENABLE_WEBASSEMBLY + Builtin builtin; + if (allocation_type == AllocationType::kYoung) { + if (allow_large_objects == AllowLargeObjects::kTrue) { + builtin = Builtin::kAllocateInYoungGeneration; + } else { + builtin = Builtin::kAllocateRegularInYoungGeneration; + } } else { - allocate_builtin = __ AllocateRegularInOldGenerationStubConstant(); + if (allow_large_objects == AllowLargeObjects::kTrue) { + builtin = Builtin::kAllocateInOldGeneration; + } else { + builtin = Builtin::kAllocateRegularInOldGeneration; + } } + static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi"); + allocate_builtin = + graph()->NewNode(common()->NumberConstant(static_cast<int>(builtin))); +#else + UNREACHABLE(); +#endif } // Determine the top/limit addresses. - Node* top_address = __ ExternalConstant( - allocation_type == AllocationType::kYoung - ? ExternalReference::new_space_allocation_top_address(isolate()) - : ExternalReference::old_space_allocation_top_address(isolate())); - Node* limit_address = __ ExternalConstant( - allocation_type == AllocationType::kYoung - ? ExternalReference::new_space_allocation_limit_address(isolate()) - : ExternalReference::old_space_allocation_limit_address(isolate())); + Node* top_address; + Node* limit_address; + if (isolate_ != nullptr) { + top_address = __ ExternalConstant( + allocation_type == AllocationType::kYoung + ? 
ExternalReference::new_space_allocation_top_address(isolate()) + : ExternalReference::old_space_allocation_top_address(isolate())); + limit_address = __ ExternalConstant( + allocation_type == AllocationType::kYoung + ? ExternalReference::new_space_allocation_limit_address(isolate()) + : ExternalReference::old_space_allocation_limit_address(isolate())); + } else { + // Wasm mode: producing isolate-independent code, loading the isolate + // address at runtime. +#if V8_ENABLE_WEBASSEMBLY + Node* instance_node = GetWasmInstanceNode(); + int top_address_offset = + allocation_type == AllocationType::kYoung + ? WasmInstanceObject::kNewAllocationTopAddressOffset + : WasmInstanceObject::kOldAllocationTopAddressOffset; + int limit_address_offset = + allocation_type == AllocationType::kYoung + ? WasmInstanceObject::kNewAllocationLimitAddressOffset + : WasmInstanceObject::kOldAllocationLimitAddressOffset; + top_address = + __ Load(MachineType::Pointer(), instance_node, + __ IntPtrConstant(top_address_offset - kHeapObjectTag)); + limit_address = + __ Load(MachineType::Pointer(), instance_node, + __ IntPtrConstant(limit_address_offset - kHeapObjectTag)); +#else + UNREACHABLE(); +#endif // V8_ENABLE_WEBASSEMBLY + } // Check if we can fold this allocation into a previous allocation represented // by the incoming {state}. @@ -216,13 +299,7 @@ Reduction MemoryLowering::ReduceAllocateRaw( __ Bind(&call_runtime); { - if (!allocate_operator_.is_set()) { - auto descriptor = AllocateDescriptor{}; - auto call_descriptor = Linkage::GetStubCallDescriptor( - graph_zone(), descriptor, descriptor.GetStackParameterCount(), - CallDescriptor::kCanUseRoots, Operator::kNoThrow); - allocate_operator_.set(common()->Call(call_descriptor)); - } + EnsureAllocateOperator(); Node* vfalse = __ BitcastTaggedToWord( __ Call(allocate_operator_.get(), allocate_builtin, size)); vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag)); @@ -277,13 +354,7 @@ Reduction MemoryLowering::ReduceAllocateRaw( __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag)))); __ Bind(&call_runtime); - if (!allocate_operator_.is_set()) { - auto descriptor = AllocateDescriptor{}; - auto call_descriptor = Linkage::GetStubCallDescriptor( - graph_zone(), descriptor, descriptor.GetStackParameterCount(), - CallDescriptor::kCanUseRoots, Operator::kNoThrow); - allocate_operator_.set(common()->Call(call_descriptor)); - } + EnsureAllocateOperator(); __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size)); __ Bind(&done); diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h index 1ebbf40bc8..1fbe18abff 100644 --- a/deps/v8/src/compiler/memory-lowering.h +++ b/deps/v8/src/compiler/memory-lowering.h @@ -113,6 +113,8 @@ class MemoryLowering final : public Reducer { Reduction ReduceLoadMap(Node* encoded_pointer); Node* ComputeIndex(ElementAccess const& access, Node* node); bool NeedsPoisoning(LoadSensitivity load_sensitivity) const; + void EnsureAllocateOperator(); + Node* GetWasmInstanceNode(); Graph* graph() const { return graph_; } Isolate* isolate() const { return isolate_; } @@ -123,6 +125,7 @@ class MemoryLowering final : public Reducer { JSGraphAssembler* gasm() const { return graph_assembler_; } SetOncePointer<const Operator> allocate_operator_; + SetOncePointer<Node> wasm_instance_node_; Isolate* isolate_; Zone* zone_; Graph* graph_; diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h index 9e577eb183..c29f4dfe98 100644 --- a/deps/v8/src/compiler/node-aux-data.h +++ 
b/deps/v8/src/compiler/node-aux-data.h @@ -16,21 +16,26 @@ namespace compiler { class Node; template <class T> -T DefaultConstruct() { +T DefaultConstruct(Zone* zone) { return T(); } -template <class T, T def() = DefaultConstruct<T>> +template <class T> +T ZoneConstruct(Zone* zone) { + return T(zone); +} + +template <class T, T def(Zone*) = DefaultConstruct<T>> class NodeAuxData { public: - explicit NodeAuxData(Zone* zone) : aux_data_(zone) {} + explicit NodeAuxData(Zone* zone) : zone_(zone), aux_data_(zone) {} explicit NodeAuxData(size_t initial_size, Zone* zone) - : aux_data_(initial_size, zone) {} + : zone_(zone), aux_data_(initial_size, def(zone), zone) {} // Update entry. Returns true iff entry was changed. bool Set(Node* node, T const& data) { size_t const id = node->id(); - if (id >= aux_data_.size()) aux_data_.resize(id + 1, def()); + if (id >= aux_data_.size()) aux_data_.resize(id + 1, def(zone_)); if (aux_data_[id] != data) { aux_data_[id] = data; return true; @@ -40,7 +45,7 @@ class NodeAuxData { T Get(Node* node) const { size_t const id = node->id(); - return (id < aux_data_.size()) ? aux_data_[id] : def(); + return (id < aux_data_.size()) ? aux_data_[id] : def(zone_); } class const_iterator; @@ -50,10 +55,11 @@ class NodeAuxData { const_iterator end() const; private: + Zone* zone_; ZoneVector<T> aux_data_; }; -template <class T, T def()> +template <class T, T def(Zone*)> class NodeAuxData<T, def>::const_iterator { public: using iterator_category = std::forward_iterator_tag; @@ -87,13 +93,13 @@ class NodeAuxData<T, def>::const_iterator { size_t current_; }; -template <class T, T def()> +template <class T, T def(Zone*)> typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::begin() const { return typename NodeAuxData<T, def>::const_iterator(&aux_data_, 0); } -template <class T, T def()> +template <class T, T def(Zone*)> typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::end() const { return typename NodeAuxData<T, def>::const_iterator(&aux_data_, aux_data_.size()); diff --git a/deps/v8/src/compiler/node-origin-table.h b/deps/v8/src/compiler/node-origin-table.h index c4b2ec3cf1..899c62d411 100644 --- a/deps/v8/src/compiler/node-origin-table.h +++ b/deps/v8/src/compiler/node-origin-table.h @@ -136,7 +136,10 @@ class V8_EXPORT_PRIVATE NodeOriginTable final NodeOrigin current_origin_; const char* current_phase_name_; - NodeAuxData<NodeOrigin, NodeOrigin::Unknown> table_; + static NodeOrigin UnknownNodeOrigin(Zone* zone) { + return NodeOrigin::Unknown(); + } + NodeAuxData<NodeOrigin, UnknownNodeOrigin> table_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc index 26fbed0abb..8cc6bfee63 100644 --- a/deps/v8/src/compiler/node-properties.cc +++ b/deps/v8/src/compiler/node-properties.cc @@ -332,12 +332,9 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker, mnewtarget.Ref(broker).IsJSFunction()) { ObjectRef target = mtarget.Ref(broker); JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction(); - if (newtarget.map().has_prototype_slot() && newtarget.has_initial_map()) { - if (!newtarget.serialized()) { - TRACE_BROKER_MISSING(broker, "initial map on " << newtarget); - return base::nullopt; - } - MapRef initial_map = newtarget.initial_map(); + if (newtarget.map().has_prototype_slot() && + newtarget.has_initial_map(broker->dependencies())) { + MapRef initial_map = newtarget.initial_map(broker->dependencies()); if (initial_map.GetConstructor().equals(target)) { 
DCHECK(target.AsJSFunction().map().is_constructor()); DCHECK(newtarget.map().is_constructor()); @@ -348,10 +345,32 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker, return base::nullopt; } +namespace { + +// TODO(jgruber): Remove the intermediate ZoneHandleSet and then this function. +ZoneRefUnorderedSet<MapRef> ToRefSet(JSHeapBroker* broker, + const ZoneHandleSet<Map>& handles) { + ZoneRefUnorderedSet<MapRef> refs = + ZoneRefUnorderedSet<MapRef>(broker->zone()); + for (Handle<Map> handle : handles) { + refs.insert(MakeRefAssumeMemoryFence(broker, *handle)); + } + return refs; +} + +ZoneRefUnorderedSet<MapRef> RefSetOf(JSHeapBroker* broker, const MapRef& ref) { + ZoneRefUnorderedSet<MapRef> refs = + ZoneRefUnorderedSet<MapRef>(broker->zone()); + refs.insert(ref); + return refs; +} + +} // namespace + // static NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( - JSHeapBroker* broker, Node* receiver, Node* effect, - ZoneHandleSet<Map>* maps_return) { + JSHeapBroker* broker, Node* receiver, Effect effect, + ZoneRefUnorderedSet<MapRef>* maps_out) { HeapObjectMatcher m(receiver); if (m.HasResolvedValue()) { HeapObjectRef receiver = m.Ref(broker); @@ -367,7 +386,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( if (receiver.map().is_stable()) { // The {receiver_map} is only reliable when we install a stability // code dependency. - *maps_return = ZoneHandleSet<Map>(receiver.map().object()); + *maps_out = RefSetOf(broker, receiver.map()); return kUnreliableMaps; } } @@ -378,7 +397,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( case IrOpcode::kMapGuard: { Node* const object = GetValueInput(effect, 0); if (IsSame(receiver, object)) { - *maps_return = MapGuardMapsOf(effect->op()); + *maps_out = ToRefSet(broker, MapGuardMapsOf(effect->op())); return result; } break; @@ -386,7 +405,8 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( case IrOpcode::kCheckMaps: { Node* const object = GetValueInput(effect, 0); if (IsSame(receiver, object)) { - *maps_return = CheckMapsParametersOf(effect->op()).maps(); + *maps_out = + ToRefSet(broker, CheckMapsParametersOf(effect->op()).maps()); return result; } break; @@ -395,7 +415,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( if (IsSame(receiver, effect)) { base::Optional<MapRef> initial_map = GetJSCreateMap(broker, receiver); if (initial_map.has_value()) { - *maps_return = ZoneHandleSet<Map>(initial_map->object()); + *maps_out = RefSetOf(broker, initial_map.value()); return result; } // We reached the allocation of the {receiver}. 
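The ToRefSet/RefSetOf helpers above bridge the two map-set representations during this migration: the CheckMaps and MapGuard operators still carry a ZoneHandleSet<Map>, while callers of InferMapsUnsafe now receive a ZoneRefUnorderedSet<MapRef>. A minimal sketch of the same conversion pattern, assuming the surrounding V8 headers (ZoneHandleSet, ZoneRefUnorderedSet, MakeRefAssumeMemoryFence); the generic helper name is illustrative, not part of the change:

template <typename T, typename Ref>
ZoneRefUnorderedSet<Ref> HandlesToRefs(JSHeapBroker* broker,
                                       ZoneHandleSet<T> const& handles) {
  // Allocate the result in the broker's zone, mirroring ToRefSet above.
  ZoneRefUnorderedSet<Ref> refs(broker->zone());
  for (Handle<T> handle : handles) {
    // Wrap each raw handle as a broker ref, as ToRefSet does for maps.
    refs.insert(MakeRefAssumeMemoryFence(broker, *handle));
  }
  return refs;
}

Keeping the conversion confined to these helpers gives the TODO above a single deletion point once the intermediate ZoneHandleSet representation is gone.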
@@ -406,10 +426,10 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( } case IrOpcode::kJSCreatePromise: { if (IsSame(receiver, effect)) { - *maps_return = ZoneHandleSet<Map>(broker->target_native_context() - .promise_function() - .initial_map() - .object()); + *maps_out = RefSetOf( + broker, + broker->target_native_context().promise_function().initial_map( + broker->dependencies())); return result; } break; @@ -424,7 +444,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe( Node* const value = GetValueInput(effect, 1); HeapObjectMatcher m(value); if (m.HasResolvedValue()) { - *maps_return = ZoneHandleSet<Map>(m.Ref(broker).AsMap().object()); + *maps_out = RefSetOf(broker, m.Ref(broker).AsMap()); return result; } } @@ -503,7 +523,7 @@ bool NodeProperties::NoObservableSideEffectBetween(Node* effect, // static bool NodeProperties::CanBePrimitive(JSHeapBroker* broker, Node* receiver, - Node* effect) { + Effect effect) { switch (receiver->opcode()) { #define CASE(Opcode) case IrOpcode::k##Opcode: JS_CONSTRUCT_OP_LIST(CASE) @@ -528,7 +548,7 @@ bool NodeProperties::CanBePrimitive(JSHeapBroker* broker, Node* receiver, // static bool NodeProperties::CanBeNullOrUndefined(JSHeapBroker* broker, Node* receiver, - Node* effect) { + Effect effect) { if (CanBePrimitive(broker, receiver, effect)) { switch (receiver->opcode()) { case IrOpcode::kCheckInternalizedString: diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h index 50f3a17136..2d4c16370b 100644 --- a/deps/v8/src/compiler/node-properties.h +++ b/deps/v8/src/compiler/node-properties.h @@ -6,6 +6,7 @@ #define V8_COMPILER_NODE_PROPERTIES_H_ #include "src/common/globals.h" +#include "src/compiler/heap-refs.h" #include "src/compiler/node.h" #include "src/compiler/operator-properties.h" #include "src/compiler/types.h" @@ -219,9 +220,9 @@ class V8_EXPORT_PRIVATE NodeProperties { kUnreliableMaps // Maps might have changed (side-effect). }; // DO NOT USE InferMapsUnsafe IN NEW CODE. Use MapInference instead. - static InferMapsResult InferMapsUnsafe(JSHeapBroker* broker, Node* object, - Node* effect, - ZoneHandleSet<Map>* maps); + static InferMapsResult InferMapsUnsafe(JSHeapBroker* broker, Node* receiver, + Effect effect, + ZoneRefUnorderedSet<MapRef>* maps_out); // Return the initial map of the new-target if the allocation can be inlined. static base::Optional<MapRef> GetJSCreateMap(JSHeapBroker* broker, @@ -236,12 +237,12 @@ class V8_EXPORT_PRIVATE NodeProperties { // definitely a JavaScript object); might walk up the {effect} chain to // find map checks on {receiver}. static bool CanBePrimitive(JSHeapBroker* broker, Node* receiver, - Node* effect); + Effect effect); // Returns true if the {receiver} can be null or undefined. Might walk // up the {effect} chain to find map checks for {receiver}. static bool CanBeNullOrUndefined(JSHeapBroker* broker, Node* receiver, - Node* effect); + Effect effect); // --------------------------------------------------------------------------- // Context. 
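Many signatures in this change (InferMapsUnsafe, CanBePrimitive, CanBeNullOrUndefined, the MapInference constructor, the PropertyAccessBuilder methods below) now take the typed Effect and Control wrappers instead of a raw Node*. The wrappers live in src/compiler/node.h; they are thin Node* holders whose explicit constructors make it a compile-time error to pass an arbitrary value node where an effect or control edge is expected. A rough sketch of the idea (illustrative; the real classes carry SLOW_DCHECKs and a few special cases):

class NodeWrapper {
 public:
  explicit constexpr NodeWrapper(Node* node) : node_(node) {}
  operator Node*() const { return node_; }  // implicit out, explicit in
  Node* operator->() const { return node_; }

 private:
  Node* node_;
};

class Effect : public NodeWrapper {
 public:
  explicit constexpr Effect(Node* node) : NodeWrapper(node) {
    // A real implementation would check here that {node} produces an
    // effect output, catching miswired graphs at construction time.
  }
};

class Control : public NodeWrapper {
 public:
  explicit constexpr Control(Node* node) : NodeWrapper(node) {}
};

Because the conversion back to Node* is implicit, call sites only pay at the boundary where an Effect or Control is first constructed; everything downstream keeps working on plain nodes.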
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h index 84e905b812..1373ff5f25 100644 --- a/deps/v8/src/compiler/persistent-map.h +++ b/deps/v8/src/compiler/persistent-map.h @@ -387,9 +387,11 @@ void PersistentMap<Key, Value, Hasher>::Set(Key key, Value value) { if (old->more) { *more = *old->more; } else { - (*more)[old->key_value.key()] = old->key_value.value(); + more->erase(old->key_value.key()); + more->emplace(old->key_value.key(), old->key_value.value()); } - (*more)[key] = value; + more->erase(key); + more->emplace(key, value); } size_t size = sizeof(FocusedTree) + std::max(0, length - 1) * sizeof(const FocusedTree*); diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index 676f338cf4..e802cd7268 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -69,7 +69,6 @@ #include "src/compiler/schedule.h" #include "src/compiler/scheduler.h" #include "src/compiler/select-lowering.h" -#include "src/compiler/serializer-for-background-compilation.h" #include "src/compiler/simplified-lowering.h" #include "src/compiler/simplified-operator-reducer.h" #include "src/compiler/simplified-operator.h" @@ -147,9 +146,6 @@ class PipelineData { may_have_unverifiable_graph_(false), zone_stats_(zone_stats), pipeline_statistics_(pipeline_statistics), - roots_relative_addressing_enabled_( - !isolate->serializer_enabled() && - !isolate->IsGeneratingEmbeddedBuiltins()), graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone), graph_zone_(graph_zone_scope_.zone()), instruction_zone_scope_(zone_stats_, kInstructionZoneName), @@ -551,7 +547,7 @@ class PipelineData { code_generator_ = new CodeGenerator( codegen_zone(), frame(), linkage, sequence(), info(), isolate(), osr_helper_, start_source_position_, jump_optimization_info_, - info()->GetPoisoningMitigationLevel(), assembler_options_, + info()->GetPoisoningMitigationLevel(), assembler_options(), info_->builtin(), max_unoptimized_frame_height(), max_pushed_argument_count(), FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr); @@ -571,10 +567,6 @@ class PipelineData { const char* debug_name() const { return debug_name_.get(); } - bool roots_relative_addressing_enabled() { - return roots_relative_addressing_enabled_; - } - const ProfileDataFromFile* profile_data() const { return profile_data_; } void set_profile_data(const ProfileDataFromFile* profile_data) { profile_data_ = profile_data; @@ -615,7 +607,6 @@ class PipelineData { CodeGenerator* code_generator_ = nullptr; Typer* typer_ = nullptr; Typer::Flags typer_flags_ = Typer::kNoFlags; - bool roots_relative_addressing_enabled_ = false; // All objects in the following group of fields are allocated in graph_zone_. // They are all set to nullptr when the graph_zone_ is destroyed. @@ -683,8 +674,8 @@ class PipelineImpl final { template <typename Phase, typename... Args> void Run(Args&&... args); - // Step A.1. Serialize the data needed for the compilation front-end. - void Serialize(); + // Step A.1. Initialize the heap broker. + void InitializeHeapBroker(); // Step A.2. Run the graph creation and initial optimization passes. bool CreateGraph(); @@ -1212,10 +1203,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl( if (compilation_info()->is_osr()) data_.InitializeOsrHelper(); - // Serialize() and CreateGraph() may already use IsPendingAllocation. + // InitializeHeapBroker() and CreateGraph() may already use + // IsPendingAllocation. 
isolate->heap()->PublishPendingAllocations(); - pipeline_.Serialize(); + pipeline_.InitializeHeapBroker(); if (!data_.broker()->is_concurrent_inlining()) { if (!pipeline_.CreateGraph()) { @@ -1354,10 +1346,10 @@ struct GraphBuilderPhase { CallFrequency frequency(1.0f); BuildGraphFromBytecode( data->broker(), temp_zone, closure.shared(), - closure.raw_feedback_cell(), data->info()->osr_offset(), - data->jsgraph(), frequency, data->source_positions(), - SourcePosition::kNotInlined, data->info()->code_kind(), flags, - &data->info()->tick_counter(), + closure.raw_feedback_cell(data->dependencies()), + data->info()->osr_offset(), data->jsgraph(), frequency, + data->source_positions(), SourcePosition::kNotInlined, + data->info()->code_kind(), flags, &data->info()->tick_counter(), ObserveNodeInfo{data->observe_node_manager(), data->info()->node_observer()}); } @@ -1385,8 +1377,7 @@ struct InliningPhase { call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls; } JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(), - temp_zone, call_reducer_flags, - data->dependencies()); + temp_zone, call_reducer_flags); JSContextSpecialization context_specialization( &graph_reducer, data->jsgraph(), data->broker(), data->specialization_context(), @@ -1548,42 +1539,6 @@ struct CopyMetadataForConcurrentCompilePhase { } }; -struct SerializationPhase { - DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Serialization) - - void Run(PipelineData* data, Zone* temp_zone) { - SerializerForBackgroundCompilationFlags flags; - if (data->info()->bailout_on_uninitialized()) { - flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized; - } - if (data->info()->source_positions()) { - flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions; - } - if (data->info()->analyze_environment_liveness()) { - flags |= - SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness; - } - if (data->info()->inlining()) { - flags |= SerializerForBackgroundCompilationFlag::kEnableTurboInlining; - } - RunSerializerForBackgroundCompilation( - data->zone_stats(), data->broker(), data->dependencies(), - data->info()->closure(), flags, data->info()->osr_offset()); - if (data->specialization_context().IsJust()) { - MakeRef(data->broker(), - data->specialization_context().FromJust().context); - } - if (FLAG_turbo_concurrent_get_property_access_info) { - data->broker()->ClearCachedPropertyAccessInfos(); - data->dependencies()->ClearForConcurrentGetPropertyAccessInfo(); - } - if (FLAG_stress_concurrent_inlining) { - // Force re-serialization from the background thread. - data->broker()->ClearReconstructibleData(); - } - } -}; - struct TypedLoweringPhase { DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering) @@ -1717,11 +1672,12 @@ struct WasmLoopUnrollingPhase { std::vector<compiler::WasmLoopInfo>* loop_infos) { for (WasmLoopInfo& loop_info : *loop_infos) { if (loop_info.is_innermost) { - ZoneUnorderedSet<Node*>* loop = LoopFinder::FindUnnestedLoopFromHeader( - loop_info.header, temp_zone, - // Only discover the loop until its size is the maximum unrolled - // size for its depth. - maximum_unrollable_size(loop_info.nesting_depth)); + ZoneUnorderedSet<Node*>* loop = + LoopFinder::FindSmallUnnestedLoopFromHeader( + loop_info.header, temp_zone, + // Only discover the loop until its size is the maximum unrolled + // size for its depth. 
+ maximum_unrollable_size(loop_info.nesting_depth)); UnrollLoop(loop_info.header, loop, loop_info.nesting_depth, data->graph(), data->common(), temp_zone, data->source_positions(), data->node_origins()); @@ -2246,7 +2202,7 @@ struct InstructionSelectionPhase { FLAG_turbo_instruction_scheduling ? InstructionSelector::kEnableScheduling : InstructionSelector::kDisableScheduling, - data->roots_relative_addressing_enabled() + data->assembler_options().enable_root_relative_access ? InstructionSelector::kEnableRootsRelativeAddressing : InstructionSelector::kDisableRootsRelativeAddressing, data->info()->GetPoisoningMitigationLevel(), @@ -2666,8 +2622,8 @@ void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) { } } -void PipelineImpl::Serialize() { - PipelineData* data = this->data_; +void PipelineImpl::InitializeHeapBroker() { + PipelineData* data = data_; data->BeginPhaseKind("V8.TFBrokerInitAndSerialization"); @@ -2691,7 +2647,6 @@ void PipelineImpl::Serialize() { data->broker()->SetTargetNativeContextRef(data->native_context()); if (data->broker()->is_concurrent_inlining()) { Run<HeapBrokerInitializationPhase>(); - Run<SerializationPhase>(); data->broker()->StopSerializing(); } data->EndPhaseKind(); @@ -3362,7 +3317,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting( CompilationHandleScope compilation_scope(isolate, info); CanonicalHandleScope canonical(isolate, info); info->ReopenHandlesInNewHandleScope(isolate); - pipeline.Serialize(); + pipeline.InitializeHeapBroker(); // Emulating the proper pipeline, we call CreateGraph on different places // (i.e before or after creating a LocalIsolateScope) depending on // is_concurrent_inlining. diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h index 78163a23a7..832fc441da 100644 --- a/deps/v8/src/compiler/processed-feedback.h +++ b/deps/v8/src/compiler/processed-feedback.h @@ -153,7 +153,7 @@ class ElementAccessFeedback : public ProcessedFeedback { // [e0, e1] [e0, e1] // ElementAccessFeedback const& Refine( - ZoneVector<Handle<Map>> const& inferred_maps, Zone* zone) const; + JSHeapBroker* broker, ZoneVector<MapRef> const& inferred_maps) const; private: KeyedAccessMode const keyed_mode_; @@ -162,15 +162,15 @@ class ElementAccessFeedback : public ProcessedFeedback { class NamedAccessFeedback : public ProcessedFeedback { public: - NamedAccessFeedback(NameRef const& name, ZoneVector<Handle<Map>> const& maps, + NamedAccessFeedback(NameRef const& name, ZoneVector<MapRef> const& maps, FeedbackSlotKind slot_kind); NameRef const& name() const { return name_; } - ZoneVector<Handle<Map>> const& maps() const { return maps_; } + ZoneVector<MapRef> const& maps() const { return maps_; } private: NameRef const name_; - ZoneVector<Handle<Map>> const maps_; + ZoneVector<MapRef> const maps_; }; class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback { @@ -178,19 +178,19 @@ class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback { MinimorphicLoadPropertyAccessFeedback(NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler, - ZoneVector<Handle<Map>> const& maps, + ZoneVector<MapRef> const& maps, bool has_migration_target_maps); NameRef const& name() const { return name_; } bool is_monomorphic() const { return maps_.size() == 1; } Handle<Object> handler() const { return handler_; } - ZoneVector<Handle<Map>> const& maps() const { return maps_; } + ZoneVector<MapRef> const& maps() const { return maps_; } bool has_migration_target_maps() const { return 
has_migration_target_maps_; } private: NameRef const name_; Handle<Object> const handler_; - ZoneVector<Handle<Map>> const maps_; + ZoneVector<MapRef> const maps_; bool const has_migration_target_maps_; }; diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc index b1ad17a1c4..a64521d6f6 100644 --- a/deps/v8/src/compiler/property-access-builder.cc +++ b/deps/v8/src/compiler/property-access-builder.cc @@ -34,31 +34,28 @@ SimplifiedOperatorBuilder* PropertyAccessBuilder::simplified() const { return jsgraph()->simplified(); } -bool HasOnlyStringMaps(JSHeapBroker* broker, - ZoneVector<Handle<Map>> const& maps) { - for (auto map : maps) { - MapRef map_ref = MakeRef(broker, map); - if (!map_ref.IsStringMap()) return false; +bool HasOnlyStringMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) { + for (MapRef map : maps) { + if (!map.IsStringMap()) return false; } return true; } namespace { -bool HasOnlyNumberMaps(JSHeapBroker* broker, - ZoneVector<Handle<Map>> const& maps) { - for (auto map : maps) { - MapRef map_ref = MakeRef(broker, map); - if (map_ref.instance_type() != HEAP_NUMBER_TYPE) return false; +bool HasOnlyNumberMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps) { + for (MapRef map : maps) { + if (map.instance_type() != HEAP_NUMBER_TYPE) return false; } return true; } } // namespace -bool PropertyAccessBuilder::TryBuildStringCheck( - JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps, Node** receiver, - Node** effect, Node* control) { +bool PropertyAccessBuilder::TryBuildStringCheck(JSHeapBroker* broker, + ZoneVector<MapRef> const& maps, + Node** receiver, Effect* effect, + Control control) { if (HasOnlyStringMaps(broker, maps)) { // Monormorphic string access (ignoring the fact that there are multiple // String maps). @@ -70,9 +67,10 @@ bool PropertyAccessBuilder::TryBuildStringCheck( return false; } -bool PropertyAccessBuilder::TryBuildNumberCheck( - JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps, Node** receiver, - Node** effect, Node* control) { +bool PropertyAccessBuilder::TryBuildNumberCheck(JSHeapBroker* broker, + ZoneVector<MapRef> const& maps, + Node** receiver, Effect* effect, + Control control) { if (HasOnlyNumberMaps(broker, maps)) { // Monomorphic number access (we also deal with Smis here). 
*receiver = *effect = @@ -83,15 +81,15 @@ bool PropertyAccessBuilder::TryBuildNumberCheck( return false; } -void PropertyAccessBuilder::BuildCheckMaps( - Node* object, Node** effect, Node* control, - ZoneVector<Handle<Map>> const& maps) { +void PropertyAccessBuilder::BuildCheckMaps(Node* object, Effect* effect, + Control control, + ZoneVector<MapRef> const& maps) { HeapObjectMatcher m(object); if (m.HasResolvedValue()) { MapRef object_map = m.Ref(broker()).map(); if (object_map.is_stable()) { - for (Handle<Map> map : maps) { - if (MakeRef(broker(), map).equals(object_map)) { + for (MapRef map : maps) { + if (map.equals(object_map)) { dependencies()->DependOnStableMap(object_map); return; } @@ -100,10 +98,9 @@ void PropertyAccessBuilder::BuildCheckMaps( } ZoneHandleSet<Map> map_set; CheckMapsFlags flags = CheckMapsFlag::kNone; - for (Handle<Map> map : maps) { - MapRef object_map = MakeRef(broker(), map); - map_set.insert(object_map.object(), graph()->zone()); - if (object_map.is_migration_target()) { + for (MapRef map : maps) { + map_set.insert(map.object(), graph()->zone()); + if (map.is_migration_target()) { flags |= CheckMapsFlag::kTryMigrateInstance; } } @@ -127,9 +124,9 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Effect* effect, Node* PropertyAccessBuilder::ResolveHolder( PropertyAccessInfo const& access_info, Node* lookup_start_object) { - Handle<JSObject> holder; - if (access_info.holder().ToHandle(&holder)) { - return jsgraph()->Constant(MakeRef(broker(), holder)); + base::Optional<JSObjectRef> holder = access_info.holder(); + if (holder.has_value()) { + return jsgraph()->Constant(holder.value()); } return lookup_start_object; } @@ -155,29 +152,27 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant( DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL); DCHECK(access_info.IsDictionaryProtoDataConstant()); - JSObjectRef holder = - MakeRef(broker(), access_info.holder().ToHandleChecked()); InternalIndex index = access_info.dictionary_index(); base::Optional<ObjectRef> value = - holder.GetOwnDictionaryProperty(index, dependencies()); + access_info.holder()->GetOwnDictionaryProperty(index, dependencies()); if (!value) return {}; - for (Handle<Map> map : access_info.lookup_start_object_maps()) { + for (MapRef map : access_info.lookup_start_object_maps()) { + Handle<Map> map_handle = map.object(); // Non-JSReceivers that passed AccessInfoFactory::ComputePropertyAccessInfo // must have different lookup start map. - if (!map->IsJSReceiverMap()) { + if (!map_handle->IsJSReceiverMap()) { // Perform the implicit ToObject for primitives here. // Implemented according to ES6 section 7.3.2 GetV (V, P). JSFunction constructor = Map::GetConstructorFunction( - *map, *broker()->target_native_context().object()) + *map_handle, *broker()->target_native_context().object()) .value(); - map = MakeRef(broker(), constructor.initial_map()).object(); - DCHECK(map->IsJSObjectMap()); + map = MakeRef(broker(), constructor.initial_map()); + DCHECK(map.object()->IsJSObjectMap()); } dependencies()->DependOnConstantInDictionaryPrototypeChain( - MakeRef(broker(), map), MakeRef(broker(), access_info.name()), - value.value(), PropertyKind::kData); + map, access_info.name(), value.value(), PropertyKind::kData); } return jsgraph()->Constant(value.value()); @@ -189,9 +184,10 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField( if (!access_info.IsFastDataConstant()) return nullptr; // First, determine if we have a constant holder to load from. 
- Handle<JSObject> holder; + base::Optional<JSObjectRef> holder = access_info.holder(); + // If {access_info} has a holder, just use it. - if (!access_info.holder().ToHandle(&holder)) { + if (!holder.has_value()) { // Otherwise, try to match the {lookup_start_object} as a constant. HeapObjectMatcher m(lookup_start_object); if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSObject()) return nullptr; @@ -199,26 +195,22 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField( // Let us make sure the actual map of the constant lookup_start_object is // among the maps in {access_info}. MapRef lookup_start_object_map = m.Ref(broker()).map(); - if (std::find_if( - access_info.lookup_start_object_maps().begin(), - access_info.lookup_start_object_maps().end(), [&](Handle<Map> map) { - return MakeRef(broker(), map).equals(lookup_start_object_map); - }) == access_info.lookup_start_object_maps().end()) { + if (std::find_if(access_info.lookup_start_object_maps().begin(), + access_info.lookup_start_object_maps().end(), + [&](MapRef map) { + return map.equals(lookup_start_object_map); + }) == access_info.lookup_start_object_maps().end()) { // The map of the lookup_start_object is not in the feedback, let us bail // out. return nullptr; } - holder = m.Ref(broker()).AsJSObject().object(); + holder = m.Ref(broker()).AsJSObject(); } - JSObjectRef holder_ref = MakeRef(broker(), holder); - base::Optional<ObjectRef> value = holder_ref.GetOwnFastDataProperty( - access_info.field_representation(), access_info.field_index(), - dependencies()); - if (!value.has_value()) { - return nullptr; - } - return jsgraph()->Constant(*value); + base::Optional<ObjectRef> value = + holder->GetOwnFastDataProperty(access_info.field_representation(), + access_info.field_index(), dependencies()); + return value.has_value() ? jsgraph()->Constant(*value) : nullptr; } Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name, @@ -333,12 +325,11 @@ Node* PropertyAccessBuilder::BuildLoadDataField( field_representation == MachineRepresentation::kCompressedPointer) { // Remember the map of the field value, if its map is stable. This is // used by the LoadElimination to eliminate map checks on the result. - Handle<Map> field_map; - if (access_info.field_map().ToHandle(&field_map)) { - MapRef field_map_ref = MakeRef(broker(), field_map); - if (field_map_ref.is_stable()) { - dependencies()->DependOnStableMap(field_map_ref); - field_access.map = field_map; + base::Optional<MapRef> field_map = access_info.field_map(); + if (field_map.has_value()) { + if (field_map->is_stable()) { + dependencies()->DependOnStableMap(field_map.value()); + field_access.map = field_map->object(); } } } diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h index 69518d9a52..d86037a578 100644 --- a/deps/v8/src/compiler/property-access-builder.h +++ b/deps/v8/src/compiler/property-access-builder.h @@ -36,25 +36,15 @@ class PropertyAccessBuilder { // Builds the appropriate string check if the maps are only string // maps. - bool TryBuildStringCheck(JSHeapBroker* broker, - ZoneVector<Handle<Map>> const& maps, Node** receiver, - Node** effect, Node* control); + bool TryBuildStringCheck(JSHeapBroker* broker, ZoneVector<MapRef> const& maps, + Node** receiver, Effect* effect, Control control); // Builds a number check if all maps are number maps. 
- bool TryBuildNumberCheck(JSHeapBroker* broker, - ZoneVector<Handle<Map>> const& maps, Node** receiver, - Node** effect, Node* control); - - // TODO(jgruber): Remove the untyped version once all uses are - // updated. - void BuildCheckMaps(Node* object, Node** effect, Node* control, - ZoneVector<Handle<Map>> const& maps); + bool TryBuildNumberCheck(JSHeapBroker* broker, ZoneVector<MapRef> const& maps, + Node** receiver, Effect* effect, Control control); + void BuildCheckMaps(Node* object, Effect* effect, Control control, - ZoneVector<Handle<Map>> const& maps) { - Node* e = *effect; - Node* c = control; - BuildCheckMaps(object, &e, c, maps); - *effect = e; - } + ZoneVector<MapRef> const& maps); + Node* BuildCheckValue(Node* receiver, Effect* effect, Control control, Handle<HeapObject> value); @@ -106,8 +96,7 @@ class PropertyAccessBuilder { CompilationDependencies* dependencies_; }; -bool HasOnlyStringMaps(JSHeapBroker* broker, - ZoneVector<Handle<Map>> const& maps); +bool HasOnlyStringMaps(JSHeapBroker* broker, ZoneVector<MapRef> const& maps); } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc deleted file mode 100644 index 6978c6de6e..0000000000 --- a/deps/v8/src/compiler/serializer-for-background-compilation.cc +++ /dev/null @@ -1,3605 +0,0 @@ -// Copyright 2018 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/compiler/serializer-for-background-compilation.h" - -#include <sstream> - -#include "src/base/optional.h" -#include "src/compiler/access-info.h" -#include "src/compiler/bytecode-analysis.h" -#include "src/compiler/compilation-dependencies.h" -#include "src/compiler/js-heap-broker.h" -#include "src/compiler/serializer-hints.h" -#include "src/compiler/zone-stats.h" -#include "src/handles/handles-inl.h" -#include "src/ic/call-optimization.h" -#include "src/interpreter/bytecode-array-iterator.h" -#include "src/objects/code.h" -#include "src/objects/js-array-inl.h" -#include "src/objects/js-regexp-inl.h" -#include "src/objects/literal-objects-inl.h" -#include "src/objects/shared-function-info-inl.h" -#include "src/objects/template-objects-inl.h" -#include "src/zone/zone-containers.h" -#include "src/zone/zone.h" - -namespace v8 { -namespace internal { -namespace compiler { - -#define KILL_ENVIRONMENT_LIST(V) \ - V(Abort) \ - V(ReThrow) \ - V(Throw) - -#define CLEAR_ACCUMULATOR_LIST(V) \ - V(CallRuntime) \ - V(CloneObject) \ - V(CreateArrayFromIterable) \ - V(CreateEmptyObjectLiteral) \ - V(CreateMappedArguments) \ - V(CreateRestParameter) \ - V(CreateUnmappedArguments) \ - V(DeletePropertySloppy) \ - V(DeletePropertyStrict) \ - V(ForInContinue) \ - V(ForInEnumerate) \ - V(ForInStep) \ - V(LogicalNot) \ - V(SetPendingMessage) \ - V(TestNull) \ - V(TestReferenceEqual) \ - V(TestTypeOf) \ - V(TestUndefined) \ - V(TestUndetectable) \ - V(ToBooleanLogicalNot) \ - V(ToName) \ - V(ToString) \ - V(TypeOf) - -#define UNCONDITIONAL_JUMPS_LIST(V) \ - V(Jump) \ - V(JumpConstant) \ - V(JumpLoop) - -#define CONDITIONAL_JUMPS_LIST(V) \ - V(JumpIfFalse) \ - V(JumpIfFalseConstant) \ - V(JumpIfJSReceiver) \ - V(JumpIfJSReceiverConstant) \ - V(JumpIfNotNull) \ - V(JumpIfNotNullConstant) \ - V(JumpIfNotUndefined) \ - V(JumpIfNotUndefinedConstant) \ - V(JumpIfNull) \ - V(JumpIfNullConstant) \ - V(JumpIfToBooleanFalse) \ - V(JumpIfToBooleanFalseConstant) \ - 
V(JumpIfToBooleanTrue) \ - V(JumpIfToBooleanTrueConstant) \ - V(JumpIfTrue) \ - V(JumpIfTrueConstant) \ - V(JumpIfUndefined) \ - V(JumpIfUndefinedConstant) \ - V(JumpIfUndefinedOrNull) \ - V(JumpIfUndefinedOrNullConstant) - -#define IGNORED_BYTECODE_LIST(V) \ - V(CallRuntimeForPair) \ - V(CollectTypeProfile) \ - V(DebugBreak0) \ - V(DebugBreak1) \ - V(DebugBreak2) \ - V(DebugBreak3) \ - V(DebugBreak4) \ - V(DebugBreak5) \ - V(DebugBreak6) \ - V(DebugBreakExtraWide) \ - V(DebugBreakWide) \ - V(Debugger) \ - V(IncBlockCounter) \ - V(ResumeGenerator) \ - V(SuspendGenerator) \ - V(ThrowIfNotSuperConstructor) \ - V(ThrowSuperAlreadyCalledIfNotHole) \ - V(ThrowSuperNotCalledIfHole) \ - V(ToObject) - -#define UNREACHABLE_BYTECODE_LIST(V) \ - V(ExtraWide) \ - V(Illegal) \ - V(Wide) - -#define BINARY_OP_LIST(V) \ - V(Add) \ - V(AddSmi) \ - V(BitwiseAnd) \ - V(BitwiseAndSmi) \ - V(BitwiseOr) \ - V(BitwiseOrSmi) \ - V(BitwiseXor) \ - V(BitwiseXorSmi) \ - V(Div) \ - V(DivSmi) \ - V(Exp) \ - V(ExpSmi) \ - V(Mod) \ - V(ModSmi) \ - V(Mul) \ - V(MulSmi) \ - V(ShiftLeft) \ - V(ShiftLeftSmi) \ - V(ShiftRight) \ - V(ShiftRightSmi) \ - V(ShiftRightLogical) \ - V(ShiftRightLogicalSmi) \ - V(Sub) \ - V(SubSmi) - -#define UNARY_OP_LIST(V) \ - V(BitwiseNot) \ - V(Dec) \ - V(Inc) \ - V(Negate) - -#define COMPARE_OP_LIST(V) \ - V(TestEqual) \ - V(TestEqualStrict) \ - V(TestGreaterThan) \ - V(TestGreaterThanOrEqual) \ - V(TestLessThan) \ - V(TestLessThanOrEqual) - -#define SUPPORTED_BYTECODE_LIST(V) \ - V(CallAnyReceiver) \ - V(CallJSRuntime) \ - V(CallProperty) \ - V(CallProperty0) \ - V(CallProperty1) \ - V(CallProperty2) \ - V(CallUndefinedReceiver) \ - V(CallUndefinedReceiver0) \ - V(CallUndefinedReceiver1) \ - V(CallUndefinedReceiver2) \ - V(CallWithSpread) \ - V(Construct) \ - V(ConstructWithSpread) \ - V(CreateArrayLiteral) \ - V(CreateBlockContext) \ - V(CreateCatchContext) \ - V(CreateClosure) \ - V(CreateEmptyArrayLiteral) \ - V(CreateEvalContext) \ - V(CreateFunctionContext) \ - V(CreateObjectLiteral) \ - V(CreateRegExpLiteral) \ - V(CreateWithContext) \ - V(ForInNext) \ - V(ForInPrepare) \ - V(GetIterator) \ - V(GetSuperConstructor) \ - V(GetTemplateObject) \ - V(InvokeIntrinsic) \ - V(LdaConstant) \ - V(LdaContextSlot) \ - V(LdaCurrentContextSlot) \ - V(LdaImmutableContextSlot) \ - V(LdaImmutableCurrentContextSlot) \ - V(LdaModuleVariable) \ - V(LdaFalse) \ - V(LdaGlobal) \ - V(LdaGlobalInsideTypeof) \ - V(LdaKeyedProperty) \ - V(LdaLookupContextSlot) \ - V(LdaLookupContextSlotInsideTypeof) \ - V(LdaLookupGlobalSlot) \ - V(LdaLookupGlobalSlotInsideTypeof) \ - V(LdaLookupSlot) \ - V(LdaLookupSlotInsideTypeof) \ - V(LdaNamedProperty) \ - V(LdaNamedPropertyFromSuper) \ - V(LdaNull) \ - V(Ldar) \ - V(LdaSmi) \ - V(LdaTheHole) \ - V(LdaTrue) \ - V(LdaUndefined) \ - V(LdaZero) \ - V(Mov) \ - V(PopContext) \ - V(PushContext) \ - V(Return) \ - V(StaContextSlot) \ - V(StaCurrentContextSlot) \ - V(StaDataPropertyInLiteral) \ - V(StaGlobal) \ - V(StaInArrayLiteral) \ - V(StaKeyedProperty) \ - V(StaLookupSlot) \ - V(StaModuleVariable) \ - V(StaNamedOwnProperty) \ - V(StaNamedProperty) \ - V(Star) \ - V(SwitchOnGeneratorState) \ - V(SwitchOnSmiNoFeedback) \ - V(TestIn) \ - V(TestInstanceOf) \ - V(ThrowReferenceErrorIfHole) \ - V(ToNumber) \ - V(ToNumeric) \ - BINARY_OP_LIST(V) \ - COMPARE_OP_LIST(V) \ - CLEAR_ACCUMULATOR_LIST(V) \ - CONDITIONAL_JUMPS_LIST(V) \ - IGNORED_BYTECODE_LIST(V) \ - KILL_ENVIRONMENT_LIST(V) \ - UNARY_OP_LIST(V) \ - UNCONDITIONAL_JUMPS_LIST(V) \ - UNREACHABLE_BYTECODE_LIST(V) - -struct 
HintsImpl : public ZoneObject { - explicit HintsImpl(Zone* zone) : zone_(zone) {} - - ConstantsSet constants_; - MapsSet maps_; - VirtualClosuresSet virtual_closures_; - VirtualContextsSet virtual_contexts_; - VirtualBoundFunctionsSet virtual_bound_functions_; - - Zone* const zone_; -}; - -void Hints::EnsureAllocated(Zone* zone, bool check_zone_equality) { - if (IsAllocated()) { - if (check_zone_equality) CHECK_EQ(zone, impl_->zone_); - // ... else {zone} lives no longer than {impl_->zone_} but we have no way of - // checking that. - } else { - impl_ = zone->New<HintsImpl>(zone); - } - DCHECK(IsAllocated()); -} - -struct VirtualBoundFunction { - Hints const bound_target; - HintsVector const bound_arguments; - - VirtualBoundFunction(Hints const& target, const HintsVector& arguments) - : bound_target(target), bound_arguments(arguments) {} - - bool operator==(const VirtualBoundFunction& other) const { - if (bound_arguments.size() != other.bound_arguments.size()) return false; - if (bound_target != other.bound_target) return false; - - for (size_t i = 0; i < bound_arguments.size(); ++i) { - if (bound_arguments[i] != other.bound_arguments[i]) return false; - } - return true; - } -}; - -// A VirtualClosure is a SharedFunctionInfo and a FeedbackVector, plus -// Hints about the context in which a closure will be created from them. -class VirtualClosure { - public: - VirtualClosure(Handle<JSFunction> function, Isolate* isolate, Zone* zone); - - VirtualClosure(Handle<SharedFunctionInfo> shared, - Handle<FeedbackVector> feedback_vector, - Hints const& context_hints); - - Handle<SharedFunctionInfo> shared() const { return shared_; } - Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; } - Hints const& context_hints() const { return context_hints_; } - - bool operator==(const VirtualClosure& other) const { - // A feedback vector is never used for more than one SFI. There might, - // however, be two virtual closures with the same SFI and vector, but - // different context hints. crbug.com/1024282 has a link to a document - // describing why the context_hints_ might be different in that case. - DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_), - shared_.equals(other.shared_)); - return feedback_vector_.equals(other.feedback_vector_) && - context_hints_ == other.context_hints_; - } - - private: - Handle<SharedFunctionInfo> const shared_; - Handle<FeedbackVector> const feedback_vector_; - Hints const context_hints_; -}; - -// A CompilationSubject is a VirtualClosure, optionally with a matching -// concrete closure. -class CompilationSubject { - public: - explicit CompilationSubject(VirtualClosure virtual_closure) - : virtual_closure_(virtual_closure), closure_() {} - - // The zone parameter is to correctly initialize the virtual closure, - // which contains zone-allocated context information. - CompilationSubject(Handle<JSFunction> closure, Isolate* isolate, Zone* zone); - - const VirtualClosure& virtual_closure() const { return virtual_closure_; } - MaybeHandle<JSFunction> closure() const { return closure_; } - - private: - VirtualClosure const virtual_closure_; - MaybeHandle<JSFunction> const closure_; -}; - -// A Callee is either a JSFunction (which may not have a feedback vector), or a -// VirtualClosure. Note that this is different from CompilationSubject, which -// always has a VirtualClosure. 
-class Callee { - public: - explicit Callee(Handle<JSFunction> jsfunction) - : jsfunction_(jsfunction), virtual_closure_() {} - explicit Callee(VirtualClosure const& virtual_closure) - : jsfunction_(), virtual_closure_(virtual_closure) {} - - Handle<SharedFunctionInfo> shared(Isolate* isolate) const { - return virtual_closure_.has_value() - ? virtual_closure_->shared() - : handle(jsfunction_.ToHandleChecked()->shared(), isolate); - } - - bool HasFeedbackVector() const { - Handle<JSFunction> function; - return virtual_closure_.has_value() || - jsfunction_.ToHandleChecked()->has_feedback_vector(); - } - - CompilationSubject ToCompilationSubject(Isolate* isolate, Zone* zone) const { - CHECK(HasFeedbackVector()); - return virtual_closure_.has_value() - ? CompilationSubject(*virtual_closure_) - : CompilationSubject(jsfunction_.ToHandleChecked(), isolate, - zone); - } - - private: - MaybeHandle<JSFunction> const jsfunction_; - base::Optional<VirtualClosure> const virtual_closure_; -}; - -// If a list of arguments (hints) is shorter than the function's parameter -// count, this enum expresses what we know about the missing arguments. -enum MissingArgumentsPolicy { - kMissingArgumentsAreUndefined, // ... as in the JS undefined value - kMissingArgumentsAreUnknown, -}; - -// The SerializerForBackgroundCompilation makes sure that the relevant function -// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later -// optimizations in the compiler, is copied to the heap broker. -class SerializerForBackgroundCompilation { - public: - SerializerForBackgroundCompilation( - ZoneStats* zone_stats, JSHeapBroker* broker, - CompilationDependencies* dependencies, Handle<JSFunction> closure, - SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset); - Hints Run(); // NOTE: Returns empty for an - // already-serialized function. - - class Environment; - - private: - SerializerForBackgroundCompilation( - ZoneStats* zone_stats, JSHeapBroker* broker, - CompilationDependencies* dependencies, CompilationSubject function, - base::Optional<Hints> new_target, const HintsVector& arguments, - MissingArgumentsPolicy padding, - SerializerForBackgroundCompilationFlags flags, int nesting_level); - - bool BailoutOnUninitialized(ProcessedFeedback const& feedback); - - void TraverseBytecode(); - -#define DECLARE_VISIT_BYTECODE(name, ...) \ - void Visit##name(interpreter::BytecodeArrayIterator* iterator); - SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE) -#undef DECLARE_VISIT_BYTECODE - - void VisitShortStar(interpreter::Register reg); - - Hints& register_hints(interpreter::Register reg); - - // Return a vector containing the hints for the given register range (in - // order). Also prepare these hints for feedback backpropagation by allocating - // any that aren't yet allocated. - HintsVector PrepareArgumentsHints(interpreter::Register first, size_t count); - - // Like above except that the hints have to be given directly. - template <typename... MoreHints> - HintsVector PrepareArgumentsHints(Hints* hints, MoreHints... 
more); - - void ProcessCalleeForCallOrConstruct(Callee const& callee, - base::Optional<Hints> new_target, - const HintsVector& arguments, - SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, - Hints* result_hints); - void ProcessCalleeForCallOrConstruct(Handle<Object> callee, - base::Optional<Hints> new_target, - const HintsVector& arguments, - SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, - Hints* result_hints); - void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target, - HintsVector* arguments, FeedbackSlot slot, - MissingArgumentsPolicy padding); - void ProcessCallOrConstructRecursive(Hints const& callee, - base::Optional<Hints> new_target, - const HintsVector& arguments, - SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, - Hints* result_hints); - void ProcessNewTargetForConstruct(Hints const& new_target, - Hints* result_hints); - void ProcessCallVarArgs( - ConvertReceiverMode receiver_mode, Hints const& callee, - interpreter::Register first_reg, int reg_count, FeedbackSlot slot, - MissingArgumentsPolicy padding = kMissingArgumentsAreUndefined); - void ProcessApiCall(Handle<SharedFunctionInfo> target, - const HintsVector& arguments); - void ProcessReceiverMapForApiCall(FunctionTemplateInfoRef target, - Handle<Map> receiver); - void ProcessBuiltinCall(Handle<SharedFunctionInfo> target, - base::Optional<Hints> new_target, - const HintsVector& arguments, - SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, Hints* result_hints); - - void ProcessJump(interpreter::BytecodeArrayIterator* iterator); - - void ProcessKeyedPropertyAccess(Hints* receiver, Hints const& key, - FeedbackSlot slot, AccessMode access_mode, - bool honor_bailout_on_uninitialized); - void ProcessNamedPropertyAccess(Hints* receiver, NameRef const& name, - FeedbackSlot slot, AccessMode access_mode); - void ProcessNamedSuperPropertyAccess(Hints* receiver, NameRef const& name, - FeedbackSlot slot, - AccessMode access_mode); - void ProcessNamedAccess(Hints* receiver, NamedAccessFeedback const& feedback, - AccessMode access_mode, Hints* result_hints); - void ProcessNamedSuperAccess(Hints* receiver, - NamedAccessFeedback const& feedback, - AccessMode access_mode, Hints* result_hints); - void ProcessElementAccess(Hints const& receiver, Hints const& key, - ElementAccessFeedback const& feedback, - AccessMode access_mode); - void ProcessMinimorphicPropertyAccess( - MinimorphicLoadPropertyAccessFeedback const& feedback, - FeedbackSource const& source); - - void ProcessModuleVariableAccess( - interpreter::BytecodeArrayIterator* iterator); - - void ProcessHintsForObjectCreate(Hints const& prototype); - void ProcessMapHintsForPromises(Hints const& receiver_hints); - void ProcessHintsForPromiseResolve(Hints const& resolution_hints); - void ProcessHintsForHasInPrototypeChain(Hints const& instance_hints); - void ProcessHintsForRegExpTest(Hints const& regexp_hints); - PropertyAccessInfo ProcessMapForRegExpTest(MapRef map); - void ProcessHintsForFunctionBind(Hints const& receiver_hints); - void ProcessHintsForObjectGetPrototype(Hints const& object_hints); - void ProcessConstantForOrdinaryHasInstance(HeapObjectRef const& constructor, - bool* walk_prototypes); - void ProcessConstantForInstanceOf(ObjectRef const& constant, - bool* walk_prototypes); - void ProcessHintsForOrdinaryHasInstance(Hints const& constructor_hints, - Hints const& instance_hints); - - void ProcessGlobalAccess(FeedbackSlot slot, bool is_load); - - void 
ProcessCompareOperation(FeedbackSlot slot); - void ProcessForIn(FeedbackSlot slot); - void ProcessUnaryOrBinaryOperation(FeedbackSlot slot, - bool honor_bailout_on_uninitialized); - - void ProcessMapForNamedPropertyAccess( - Hints* receiver, base::Optional<MapRef> receiver_map, - MapRef lookup_start_object_map, NameRef const& name, - AccessMode access_mode, base::Optional<JSObjectRef> concrete_receiver, - Hints* result_hints); - - void ProcessCreateContext(interpreter::BytecodeArrayIterator* iterator, - int scopeinfo_operand_index); - - enum ContextProcessingMode { - kIgnoreSlot, - kSerializeSlot, - }; - - void ProcessContextAccess(Hints const& context_hints, int slot, int depth, - ContextProcessingMode mode, - Hints* result_hints = nullptr); - void ProcessImmutableLoad(ContextRef const& context, int slot, - ContextProcessingMode mode, - Hints* new_accumulator_hints); - void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator); - void ProcessLdaLookupContextSlot( - interpreter::BytecodeArrayIterator* iterator); - - // Performs extension lookups for [0, depth) like - // BytecodeGraphBuilder::CheckContextExtensions(). - void ProcessCheckContextExtensions(int depth); - - Hints RunChildSerializer(CompilationSubject function, - base::Optional<Hints> new_target, - const HintsVector& arguments, - MissingArgumentsPolicy padding); - - // When (forward-)branching bytecodes are encountered, e.g. a conditional - // jump, we call ContributeToJumpTargetEnvironment to "remember" the current - // environment, associated with the jump target offset. When serialization - // eventually reaches that offset, we call IncorporateJumpTargetEnvironment to - // merge that environment back into whatever is the current environment then. - // Note: Since there may be multiple jumps to the same target, - // ContributeToJumpTargetEnvironment may actually do a merge as well. - void ContributeToJumpTargetEnvironment(int target_offset); - void IncorporateJumpTargetEnvironment(int target_offset); - - VirtualClosure function() const { return function_; } - - Hints& return_value_hints() { return return_value_hints_; } - - Handle<FeedbackVector> feedback_vector() const; - Handle<BytecodeArray> bytecode_array() const; - - JSHeapBroker* broker() const { return broker_; } - CompilationDependencies* dependencies() const { return dependencies_; } - Zone* zone() { return zone_scope_.zone(); } - Environment* environment() const { return environment_; } - SerializerForBackgroundCompilationFlags flags() const { return flags_; } - BytecodeOffset osr_offset() const { return osr_offset_; } - const BytecodeAnalysis& bytecode_analysis() { return *bytecode_analysis_; } - - JSHeapBroker* const broker_; - CompilationDependencies* const dependencies_; - ZoneStats::Scope zone_scope_; - SerializerForBackgroundCompilationFlags const flags_; - // Instead of storing the virtual_closure here, we could extract it from the - // {closure_hints_} but that would be cumbersome. 
- VirtualClosure const function_; - BytecodeOffset const osr_offset_; - base::Optional<BytecodeAnalysis> bytecode_analysis_; - ZoneUnorderedMap<int, Environment*> jump_target_environments_; - Environment* const environment_; - HintsVector const arguments_; - Hints return_value_hints_; - Hints closure_hints_; - - int nesting_level_ = 0; -}; - -void RunSerializerForBackgroundCompilation( - ZoneStats* zone_stats, JSHeapBroker* broker, - CompilationDependencies* dependencies, Handle<JSFunction> closure, - SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset) { - SerializerForBackgroundCompilation serializer( - zone_stats, broker, dependencies, closure, flags, osr_offset); - serializer.Run(); -} - -using BytecodeArrayIterator = interpreter::BytecodeArrayIterator; - -VirtualClosure::VirtualClosure(Handle<SharedFunctionInfo> shared, - Handle<FeedbackVector> feedback_vector, - Hints const& context_hints) - : shared_(shared), - feedback_vector_(feedback_vector), - context_hints_(context_hints) { - // The checked invariant rules out recursion and thus avoids complexity. - CHECK(context_hints_.virtual_closures().IsEmpty()); -} - -VirtualClosure::VirtualClosure(Handle<JSFunction> function, Isolate* isolate, - Zone* zone) - : shared_(handle(function->shared(), isolate)), - feedback_vector_(function->feedback_vector(), isolate), - context_hints_( - Hints::SingleConstant(handle(function->context(), isolate), zone)) { - // The checked invariant rules out recursion and thus avoids complexity. - CHECK(context_hints_.virtual_closures().IsEmpty()); -} - -CompilationSubject::CompilationSubject(Handle<JSFunction> closure, - Isolate* isolate, Zone* zone) - : virtual_closure_(closure, isolate, zone), closure_(closure) { - CHECK(closure->has_feedback_vector()); -} - -Hints Hints::Copy(Zone* zone) const { - if (!IsAllocated()) return *this; - Hints result; - result.EnsureAllocated(zone); - result.impl_->constants_ = impl_->constants_; - result.impl_->maps_ = impl_->maps_; - result.impl_->virtual_contexts_ = impl_->virtual_contexts_; - result.impl_->virtual_closures_ = impl_->virtual_closures_; - result.impl_->virtual_bound_functions_ = impl_->virtual_bound_functions_; - return result; -} - -bool Hints::operator==(Hints const& other) const { - if (impl_ == other.impl_) return true; - if (IsEmpty() && other.IsEmpty()) return true; - return IsAllocated() && other.IsAllocated() && - constants() == other.constants() && - virtual_closures() == other.virtual_closures() && - maps() == other.maps() && - virtual_contexts() == other.virtual_contexts() && - virtual_bound_functions() == other.virtual_bound_functions(); -} - -bool Hints::operator!=(Hints const& other) const { return !(*this == other); } - -#ifdef ENABLE_SLOW_DCHECKS -bool Hints::Includes(Hints const& other) const { - if (impl_ == other.impl_ || other.IsEmpty()) return true; - return IsAllocated() && constants().Includes(other.constants()) && - virtual_closures().Includes(other.virtual_closures()) && - maps().Includes(other.maps()); -} -#endif - -Hints Hints::SingleConstant(Handle<Object> constant, Zone* zone) { - Hints result; - result.AddConstant(constant, zone, nullptr); - return result; -} - -Hints Hints::SingleMap(Handle<Map> map, Zone* zone) { - Hints result; - result.AddMap(map, zone, nullptr); - return result; -} - -ConstantsSet Hints::constants() const { - return IsAllocated() ? impl_->constants_ : ConstantsSet(); -} - -MapsSet Hints::maps() const { return IsAllocated() ? 
impl_->maps_ : MapsSet(); } - -VirtualClosuresSet Hints::virtual_closures() const { - return IsAllocated() ? impl_->virtual_closures_ : VirtualClosuresSet(); -} - -VirtualContextsSet Hints::virtual_contexts() const { - return IsAllocated() ? impl_->virtual_contexts_ : VirtualContextsSet(); -} - -VirtualBoundFunctionsSet Hints::virtual_bound_functions() const { - return IsAllocated() ? impl_->virtual_bound_functions_ - : VirtualBoundFunctionsSet(); -} - -void Hints::AddVirtualContext(VirtualContext const& virtual_context, Zone* zone, - JSHeapBroker* broker) { - EnsureAllocated(zone); - if (impl_->virtual_contexts_.Size() >= kMaxHintsSize) { - TRACE_BROKER_MISSING(broker, - "opportunity - limit for virtual contexts reached."); - return; - } - impl_->virtual_contexts_.Add(virtual_context, impl_->zone_); -} - -void Hints::AddConstant(Handle<Object> constant, Zone* zone, - JSHeapBroker* broker) { - EnsureAllocated(zone); - if (impl_->constants_.Size() >= kMaxHintsSize) { - TRACE_BROKER_MISSING(broker, "opportunity - limit for constants reached."); - return; - } - impl_->constants_.Add(constant, impl_->zone_); -} - -void Hints::AddMap(Handle<Map> map, Zone* zone, JSHeapBroker* broker, - bool check_zone_equality) { - EnsureAllocated(zone, check_zone_equality); - if (impl_->maps_.Size() >= kMaxHintsSize) { - TRACE_BROKER_MISSING(broker, "opportunity - limit for maps reached."); - return; - } - impl_->maps_.Add(map, impl_->zone_); -} - -void Hints::AddVirtualClosure(VirtualClosure const& virtual_closure, Zone* zone, - JSHeapBroker* broker) { - EnsureAllocated(zone); - if (impl_->virtual_closures_.Size() >= kMaxHintsSize) { - TRACE_BROKER_MISSING(broker, - "opportunity - limit for virtual closures reached."); - return; - } - impl_->virtual_closures_.Add(virtual_closure, impl_->zone_); -} - -void Hints::AddVirtualBoundFunction(VirtualBoundFunction const& bound_function, - Zone* zone, JSHeapBroker* broker) { - EnsureAllocated(zone); - if (impl_->virtual_bound_functions_.Size() >= kMaxHintsSize) { - TRACE_BROKER_MISSING( - broker, "opportunity - limit for virtual bound functions reached."); - return; - } - // TODO(mslekova): Consider filtering the hints in the added bound function, - // for example: a) Remove any non-JS(Bound)Function constants, b) Truncate the - // argument vector the formal parameter count. - impl_->virtual_bound_functions_.Add(bound_function, impl_->zone_); -} - -void Hints::Add(Hints const& other, Zone* zone, JSHeapBroker* broker) { - if (impl_ == other.impl_ || other.IsEmpty()) return; - EnsureAllocated(zone); - if (!Union(other)) { - TRACE_BROKER_MISSING(broker, "opportunity - hints limit reached."); - } -} - -Hints Hints::CopyToParentZone(Zone* zone, JSHeapBroker* broker) const { - if (!IsAllocated()) return *this; - - Hints result; - - for (auto const& x : constants()) result.AddConstant(x, zone, broker); - for (auto const& x : maps()) result.AddMap(x, zone, broker); - for (auto const& x : virtual_contexts()) - result.AddVirtualContext(x, zone, broker); - - // Adding hints from a child serializer run means copying data out from - // a zone that's being destroyed. VirtualClosures and VirtualBoundFunction - // have zone allocated data, so we've got to make a deep copy to eliminate - // traces of the dying zone. 
-  for (auto const& x : virtual_closures()) {
-    VirtualClosure new_virtual_closure(
-        x.shared(), x.feedback_vector(),
-        x.context_hints().CopyToParentZone(zone, broker));
-    result.AddVirtualClosure(new_virtual_closure, zone, broker);
-  }
-  for (auto const& x : virtual_bound_functions()) {
-    HintsVector new_arguments_hints(zone);
-    for (auto hint : x.bound_arguments) {
-      new_arguments_hints.push_back(hint.CopyToParentZone(zone, broker));
-    }
-    VirtualBoundFunction new_bound_function(
-        x.bound_target.CopyToParentZone(zone, broker), new_arguments_hints);
-    result.AddVirtualBoundFunction(new_bound_function, zone, broker);
-  }
-
-  return result;
-}
-
-bool Hints::IsEmpty() const {
-  if (!IsAllocated()) return true;
-  return constants().IsEmpty() && maps().IsEmpty() &&
-         virtual_closures().IsEmpty() && virtual_contexts().IsEmpty() &&
-         virtual_bound_functions().IsEmpty();
-}
-
-std::ostream& operator<<(std::ostream& out,
-                         const VirtualContext& virtual_context) {
-  out << "Distance " << virtual_context.distance << " from "
-      << Brief(*virtual_context.context) << std::endl;
-  return out;
-}
-
-std::ostream& operator<<(std::ostream& out, const Hints& hints);
-
-std::ostream& operator<<(std::ostream& out,
-                         const VirtualClosure& virtual_closure) {
-  out << Brief(*virtual_closure.shared()) << std::endl;
-  out << Brief(*virtual_closure.feedback_vector()) << std::endl;
-  if (!virtual_closure.context_hints().IsEmpty()) {
-    out << virtual_closure.context_hints() << "):" << std::endl;
-  }
-  return out;
-}
-
-std::ostream& operator<<(std::ostream& out,
-                         const VirtualBoundFunction& virtual_bound_function) {
-  out << std::endl << "    Target: " << virtual_bound_function.bound_target;
-  out << "    Arguments:" << std::endl;
-  for (auto hint : virtual_bound_function.bound_arguments) {
-    out << "    " << hint;
-  }
-  return out;
-}
-
-std::ostream& operator<<(std::ostream& out, const Hints& hints) {
-  out << "(impl_ = " << hints.impl_ << ")\n";
-  for (Handle<Object> constant : hints.constants()) {
-    out << "  constant " << Brief(*constant) << std::endl;
-  }
-  for (Handle<Map> map : hints.maps()) {
-    out << "  map " << Brief(*map) << std::endl;
-  }
-  for (VirtualClosure const& virtual_closure : hints.virtual_closures()) {
-    out << "  virtual closure " << virtual_closure << std::endl;
-  }
-  for (VirtualContext const& virtual_context : hints.virtual_contexts()) {
-    out << "  virtual context " << virtual_context << std::endl;
-  }
-  for (VirtualBoundFunction const& virtual_bound_function :
-       hints.virtual_bound_functions()) {
-    out << "  virtual bound function " << virtual_bound_function << std::endl;
-  }
-  return out;
-}
-
-void Hints::Reset(Hints* other, Zone* zone) {
-  other->EnsureShareable(zone);
-  *this = *other;
-  DCHECK(IsAllocated());
-}
-
-class SerializerForBackgroundCompilation::Environment : public ZoneObject {
- public:
-  Environment(Zone* zone, Isolate* isolate, CompilationSubject function);
-  Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
-              base::Optional<Hints> new_target, const HintsVector& arguments,
-              MissingArgumentsPolicy padding);
-
-  bool IsDead() const { return !alive_; }
-
-  void Kill() {
-    DCHECK(!IsDead());
-    alive_ = false;
-    DCHECK(IsDead());
-  }
-
-  void Resurrect() {
-    DCHECK(IsDead());
-    alive_ = true;
-    DCHECK(!IsDead());
-  }
-
-  // Merge {other} into {this} environment (leaving {other} unmodified).
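-  // Merging is a pointwise union: e.g. parameter hints {a} merged with
-  // hints {b} yield {a, b}, and a dead environment simply adopts {other}'s
-  // hints wholesale. Each hint category is capped at kMaxHintsSize; hints
-  // beyond the cap are dropped (see Hints::Union).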
- void Merge(Environment* other, Zone* zone, JSHeapBroker* broker); - - Hints const& current_context_hints() const { return current_context_hints_; } - Hints const& accumulator_hints() const { return accumulator_hints_; } - - Hints& current_context_hints() { return current_context_hints_; } - Hints& accumulator_hints() { return accumulator_hints_; } - Hints& register_hints(interpreter::Register reg); - - private: - friend std::ostream& operator<<(std::ostream& out, const Environment& env); - - Hints current_context_hints_; - Hints accumulator_hints_; - - HintsVector parameters_hints_; // First parameter is the receiver. - HintsVector locals_hints_; - - bool alive_ = true; -}; - -SerializerForBackgroundCompilation::Environment::Environment( - Zone* zone, Isolate* isolate, CompilationSubject function) - : parameters_hints_(function.virtual_closure() - .shared() - ->GetBytecodeArray(isolate) - .parameter_count(), - Hints(), zone), - locals_hints_(function.virtual_closure() - .shared() - ->GetBytecodeArray(isolate) - .register_count(), - Hints(), zone) { - // Consume the virtual_closure's context hint information. - current_context_hints_ = function.virtual_closure().context_hints(); -} - -SerializerForBackgroundCompilation::Environment::Environment( - Zone* zone, Isolate* isolate, CompilationSubject function, - base::Optional<Hints> new_target, const HintsVector& arguments, - MissingArgumentsPolicy padding) - : Environment(zone, isolate, function) { - // Set the hints for the actually passed arguments, at most up to - // the parameter_count. - for (size_t i = 0; i < std::min(arguments.size(), parameters_hints_.size()); - ++i) { - parameters_hints_[i] = arguments[i]; - } - - if (padding == kMissingArgumentsAreUndefined) { - Hints const undefined_hint = - Hints::SingleConstant(isolate->factory()->undefined_value(), zone); - for (size_t i = arguments.size(); i < parameters_hints_.size(); ++i) { - parameters_hints_[i] = undefined_hint; - } - } else { - DCHECK_EQ(padding, kMissingArgumentsAreUnknown); - } - - // Set hints for new_target. - interpreter::Register new_target_reg = - function.virtual_closure() - .shared() - ->GetBytecodeArray(isolate) - .incoming_new_target_or_generator_register(); - if (new_target_reg.is_valid()) { - Hints& hints = register_hints(new_target_reg); - CHECK(hints.IsEmpty()); - if (new_target.has_value()) hints = *new_target; - } -} - -Hints& SerializerForBackgroundCompilation::register_hints( - interpreter::Register reg) { - if (reg.is_function_closure()) return closure_hints_; - return environment()->register_hints(reg); -} - -Hints& SerializerForBackgroundCompilation::Environment::register_hints( - interpreter::Register reg) { - if (reg.is_current_context()) return current_context_hints_; - if (reg.is_parameter()) { - return parameters_hints_[reg.ToParameterIndex( - static_cast<int>(parameters_hints_.size()))]; - } - DCHECK(!reg.is_function_closure()); - CHECK_LT(reg.index(), locals_hints_.size()); - return locals_hints_[reg.index()]; -} - -void SerializerForBackgroundCompilation::Environment::Merge( - Environment* other, Zone* zone, JSHeapBroker* broker) { - // {other} is guaranteed to have the same layout because it comes from an - // earlier bytecode in the same function. 
- DCHECK_EQ(parameters_hints_.size(), other->parameters_hints_.size()); - DCHECK_EQ(locals_hints_.size(), other->locals_hints_.size()); - - if (IsDead()) { - parameters_hints_ = other->parameters_hints_; - locals_hints_ = other->locals_hints_; - current_context_hints_ = other->current_context_hints_; - accumulator_hints_ = other->accumulator_hints_; - Resurrect(); - } else { - for (size_t i = 0; i < parameters_hints_.size(); ++i) { - parameters_hints_[i].Merge(other->parameters_hints_[i], zone, broker); - } - for (size_t i = 0; i < locals_hints_.size(); ++i) { - locals_hints_[i].Merge(other->locals_hints_[i], zone, broker); - } - current_context_hints_.Merge(other->current_context_hints_, zone, broker); - accumulator_hints_.Merge(other->accumulator_hints_, zone, broker); - } - - CHECK(!IsDead()); -} - -bool Hints::Union(Hints const& other) { - CHECK(IsAllocated()); - if (impl_->constants_.Size() + other.constants().Size() > kMaxHintsSize || - impl_->maps_.Size() + other.maps().Size() > kMaxHintsSize || - impl_->virtual_closures_.Size() + other.virtual_closures().Size() > - kMaxHintsSize || - impl_->virtual_contexts_.Size() + other.virtual_contexts().Size() > - kMaxHintsSize || - impl_->virtual_bound_functions_.Size() + - other.virtual_bound_functions().Size() > - kMaxHintsSize) { - return false; - } - Zone* zone = impl_->zone_; - impl_->constants_.Union(other.constants(), zone); - impl_->maps_.Union(other.maps(), zone); - impl_->virtual_closures_.Union(other.virtual_closures(), zone); - impl_->virtual_contexts_.Union(other.virtual_contexts(), zone); - impl_->virtual_bound_functions_.Union(other.virtual_bound_functions(), zone); - return true; -} - -void Hints::Merge(Hints const& other, Zone* zone, JSHeapBroker* broker) { - if (impl_ == other.impl_) { - return; - } - if (!IsAllocated()) { - *this = other.Copy(zone); - DCHECK(IsAllocated()); - return; - } - *this = this->Copy(zone); - if (!Union(other)) { - TRACE_BROKER_MISSING(broker, "opportunity - hints limit reached."); - } - DCHECK(IsAllocated()); -} - -std::ostream& operator<<( - std::ostream& out, - const SerializerForBackgroundCompilation::Environment& env) { - std::ostringstream output_stream; - - if (env.IsDead()) { - output_stream << "dead\n"; - } else { - output_stream << "alive\n"; - for (size_t i = 0; i < env.parameters_hints_.size(); ++i) { - Hints const& hints = env.parameters_hints_[i]; - if (!hints.IsEmpty()) { - if (i == 0) { - output_stream << "Hints for <this>: "; - } else { - output_stream << "Hints for a" << i - 1 << ": "; - } - output_stream << hints; - } - } - for (size_t i = 0; i < env.locals_hints_.size(); ++i) { - Hints const& hints = env.locals_hints_[i]; - if (!hints.IsEmpty()) { - output_stream << "Hints for r" << i << ": " << hints; - } - } - } - - if (!env.current_context_hints().IsEmpty()) { - output_stream << "Hints for <context>: " << env.current_context_hints(); - } - if (!env.accumulator_hints().IsEmpty()) { - output_stream << "Hints for <accumulator>: " << env.accumulator_hints(); - } - - out << output_stream.str(); - return out; -} - -SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( - ZoneStats* zone_stats, JSHeapBroker* broker, - CompilationDependencies* dependencies, Handle<JSFunction> closure, - SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset) - : broker_(broker), - dependencies_(dependencies), - zone_scope_(zone_stats, ZONE_NAME), - flags_(flags), - function_(closure, broker->isolate(), zone()), - osr_offset_(osr_offset), - 
jump_target_environments_(zone()), - environment_(zone()->New<Environment>( - zone(), broker_->isolate(), - CompilationSubject(closure, broker_->isolate(), zone()))), - arguments_(zone()) { - closure_hints_.AddConstant(closure, zone(), broker_); - JSFunctionRef closure_ref = MakeRef(broker, closure); - closure_ref.Serialize(); - closure_ref.SerializeCodeAndFeedback(); - - TRACE_BROKER(broker_, "Hints for <closure>: " << closure_hints_); - TRACE_BROKER(broker_, "Initial environment:\n" << *environment_); -} - -SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( - ZoneStats* zone_stats, JSHeapBroker* broker, - CompilationDependencies* dependencies, CompilationSubject function, - base::Optional<Hints> new_target, const HintsVector& arguments, - MissingArgumentsPolicy padding, - SerializerForBackgroundCompilationFlags flags, int nesting_level) - : broker_(broker), - dependencies_(dependencies), - zone_scope_(zone_stats, ZONE_NAME), - flags_(flags), - function_(function.virtual_closure()), - osr_offset_(BytecodeOffset::None()), - jump_target_environments_(zone()), - environment_(zone()->New<Environment>(zone(), broker_->isolate(), - function, new_target, arguments, - padding)), - arguments_(arguments), - nesting_level_(nesting_level) { - Handle<JSFunction> closure; - if (function.closure().ToHandle(&closure)) { - closure_hints_.AddConstant(closure, zone(), broker); - JSFunctionRef closure_ref = MakeRef(broker, closure); - closure_ref.Serialize(); - closure_ref.SerializeCodeAndFeedback(); - } else { - closure_hints_.AddVirtualClosure(function.virtual_closure(), zone(), - broker); - } - - TRACE_BROKER(broker_, "Hints for <closure>: " << closure_hints_); - TRACE_BROKER(broker_, "Initial environment:\n" << *environment_); -} - -bool SerializerForBackgroundCompilation::BailoutOnUninitialized( - ProcessedFeedback const& feedback) { - DCHECK(!environment()->IsDead()); - if (!(flags() & - SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) { - return false; - } - if (!osr_offset().IsNone()) { - // Exclude OSR from this optimization because we might end up skipping the - // OSR entry point. TODO(neis): Support OSR? 
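-    // (Consequently, OSR compilations proceed even on insufficient
-    // feedback rather than risk losing the entry point.)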
- return false; - } - if (feedback.IsInsufficient()) { - environment()->Kill(); - return true; - } - return false; -} - -Hints SerializerForBackgroundCompilation::Run() { - TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run"); - if (nesting_level_ >= FLAG_max_serializer_nesting) { - TRACE_BROKER_MISSING( - broker(), - "opportunity - Reached max nesting level for " - "SerializerForBackgroundCompilation::Run, bailing out.\n"); - return Hints(); - } - - TRACE_BROKER_MEMORY(broker(), "[serializer start] Broker zone usage: " - << broker()->zone()->allocation_size()); - SharedFunctionInfoRef shared = MakeRef(broker(), function().shared()); - FeedbackVectorRef feedback_vector_ref = MakeRef(broker(), feedback_vector()); - if (!broker()->ShouldBeSerializedForCompilation(shared, feedback_vector_ref, - arguments_)) { - TRACE_BROKER(broker(), - "opportunity - Already ran serializer for SharedFunctionInfo " - << Brief(*shared.object()) << ", bailing out.\n"); - return Hints(); - } - - { - HintsVector arguments_copy_in_broker_zone(broker()->zone()); - for (auto const& hints : arguments_) { - arguments_copy_in_broker_zone.push_back( - hints.CopyToParentZone(broker()->zone(), broker())); - } - broker()->SetSerializedForCompilation(shared, feedback_vector_ref, - arguments_copy_in_broker_zone); - } - - // We eagerly call the {EnsureSourcePositionsAvailable} for all serialized - // SFIs while still on the main thread. Source positions will later be used - // by JSInliner::ReduceJSCall. - if (flags() & - SerializerForBackgroundCompilationFlag::kCollectSourcePositions) { - SharedFunctionInfo::EnsureSourcePositionsAvailable(broker()->isolate(), - shared.object()); - } - - feedback_vector_ref.Serialize(); - TraverseBytecode(); - - if (return_value_hints().IsEmpty()) { - TRACE_BROKER(broker(), "Return value hints: none"); - } else { - TRACE_BROKER(broker(), "Return value hints: " << return_value_hints()); - } - TRACE_BROKER_MEMORY(broker(), "[serializer end] Broker zone usage: " - << broker()->zone()->allocation_size()); - return return_value_hints(); -} - -class HandlerRangeMatcher { - public: - HandlerRangeMatcher(BytecodeArrayIterator const& bytecode_iterator, - Handle<BytecodeArray> bytecode_array) - : bytecode_iterator_(bytecode_iterator) { - HandlerTable table(*bytecode_array); - for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) { - ranges_.insert({table.GetRangeStart(i), table.GetRangeEnd(i), - table.GetRangeHandler(i)}); - } - ranges_iterator_ = ranges_.cbegin(); - } - - using OffsetReporter = std::function<void(int handler_offset)>; - - void HandlerOffsetForCurrentPosition(const OffsetReporter& offset_reporter) { - CHECK(!bytecode_iterator_.done()); - const int current_offset = bytecode_iterator_.current_offset(); - - // Remove outdated try ranges from the stack. - while (!stack_.empty()) { - const int end = stack_.top().end; - if (end < current_offset) { - stack_.pop(); - } else { - break; - } - } - - // Advance the iterator and maintain the stack. 
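-    // Ranges are ordered by ascending start offset (see the Range
-    // comparator below), so a single forward sweep suffices. E.g. for
-    // nested try ranges [0,10] and [2,8], at offset 2 both are pushed
-    // and the stack top is the innermost enclosing handler.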
-    while (ranges_iterator_ != ranges_.cend() &&
-           ranges_iterator_->start <= current_offset) {
-      if (ranges_iterator_->end >= current_offset) {
-        stack_.push(*ranges_iterator_);
-        if (ranges_iterator_->start == current_offset) {
-          offset_reporter(ranges_iterator_->handler);
-        }
-      }
-      ranges_iterator_++;
-    }
-
-    if (!stack_.empty() && stack_.top().start < current_offset) {
-      offset_reporter(stack_.top().handler);
-    }
-  }
-
- private:
-  BytecodeArrayIterator const& bytecode_iterator_;
-
-  struct Range {
-    int start;
-    int end;
-    int handler;
-    friend bool operator<(const Range& a, const Range& b) {
-      if (a.start < b.start) return true;
-      if (a.start == b.start) {
-        if (a.end < b.end) return true;
-        CHECK_GT(a.end, b.end);
-      }
-      return false;
-    }
-  };
-  std::set<Range> ranges_;
-  std::set<Range>::const_iterator ranges_iterator_;
-  std::stack<Range> stack_;
-};
-
-Handle<FeedbackVector> SerializerForBackgroundCompilation::feedback_vector()
-    const {
-  return function().feedback_vector();
-}
-
-Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
-    const {
-  return handle(function().shared()->GetBytecodeArray(broker()->isolate()),
-                broker()->isolate());
-}
-
-void SerializerForBackgroundCompilation::TraverseBytecode() {
-  bytecode_analysis_.emplace(bytecode_array(), zone(), osr_offset(), false);
-
-  BytecodeArrayIterator iterator(bytecode_array());
-  HandlerRangeMatcher try_start_matcher(iterator, bytecode_array());
-
-  for (; !iterator.done(); iterator.Advance()) {
-    int const current_offset = iterator.current_offset();
-
-    // TODO(mvstanton): we might want to ignore the current environment if we
-    // are at the start of a catch handler.
-    IncorporateJumpTargetEnvironment(current_offset);
-
-    TRACE_BROKER(broker(),
-                 "Handling bytecode: " << current_offset << " "
-                                       << iterator.current_bytecode());
-    TRACE_BROKER(broker(), "Current environment: " << *environment());
-
-    if (environment()->IsDead()) {
-      continue;  // Skip this bytecode since TF won't generate code for it.
-    }
-
-    auto save_handler_environments = [&](int handler_offset) {
-      auto it = jump_target_environments_.find(handler_offset);
-      if (it == jump_target_environments_.end()) {
-        ContributeToJumpTargetEnvironment(handler_offset);
-        TRACE_BROKER(broker(),
-                     "Handler offset for current pos: " << handler_offset);
-      }
-    };
-    try_start_matcher.HandlerOffsetForCurrentPosition(
-        save_handler_environments);
-
-    if (bytecode_analysis().IsLoopHeader(current_offset)) {
-      // Graph builder might insert jumps to resume targets in the loop body.
-      LoopInfo const& loop_info =
-          bytecode_analysis().GetLoopInfoFor(current_offset);
-      for (const auto& target : loop_info.resume_jump_targets()) {
-        ContributeToJumpTargetEnvironment(target.target_offset());
-      }
-    }
-
-    interpreter::Bytecode current_bytecode = iterator.current_bytecode();
-    switch (current_bytecode) {
-#define DEFINE_BYTECODE_CASE(name)     \
-  case interpreter::Bytecode::k##name: \
-    Visit##name(&iterator);            \
-    break;
-      SUPPORTED_BYTECODE_LIST(DEFINE_BYTECODE_CASE)
-#undef DEFINE_BYTECODE_CASE
-
-#define DEFINE_SHORT_STAR_CASE(Name, ...) \
-  case interpreter::Bytecode::k##Name:
-      SHORT_STAR_BYTECODE_LIST(DEFINE_SHORT_STAR_CASE)
-#undef DEFINE_SHORT_STAR_CASE
-        VisitShortStar(interpreter::Register::FromShortStar(current_bytecode));
-        break;
-    }
-  }
-}
-
-void SerializerForBackgroundCompilation::VisitGetIterator(
-    BytecodeArrayIterator* iterator) {
-  Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
-  FeedbackSlot load_slot = iterator->GetSlotOperand(1);
-  FeedbackSlot call_slot = iterator->GetSlotOperand(2);
-
-  Handle<Name> name = broker()->isolate()->factory()->iterator_symbol();
-  ProcessNamedPropertyAccess(receiver, MakeRef(broker(), name), load_slot,
-                             AccessMode::kLoad);
-  if (environment()->IsDead()) return;
-
-  Hints callee;
-  HintsVector args = PrepareArgumentsHints(receiver);
-
-  ProcessCallOrConstruct(callee, base::nullopt, &args, call_slot,
-                         kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
-    BytecodeArrayIterator* iterator) {
-  interpreter::Register dst = iterator->GetRegisterOperand(0);
-  Hints result_hints;
-  for (auto constant : environment()->accumulator_hints().constants()) {
-    // For JSNativeContextSpecialization::ReduceJSGetSuperConstructor.
-    if (!constant->IsJSFunction()) continue;
-    MapRef map = MakeRef(broker(), handle(HeapObject::cast(*constant).map(),
-                                          broker()->isolate()));
-    map.SerializePrototype();
-    ObjectRef proto = map.prototype().value();
-    if (proto.IsHeapObject() && proto.AsHeapObject().map().is_constructor()) {
-      result_hints.AddConstant(proto.object(), zone(), broker());
-    }
-  }
-  register_hints(dst) = result_hints;
-}
-
-void SerializerForBackgroundCompilation::VisitGetTemplateObject(
-    BytecodeArrayIterator* iterator) {
-  MakeRef(broker(),
-          Handle<TemplateObjectDescription>::cast(
-              iterator->GetConstantForIndexOperand(0, broker()->isolate())));
-  FeedbackSlot slot = iterator->GetSlotOperand(1);
-  FeedbackSource source(feedback_vector(), slot);
-
-  ProcessedFeedback const& feedback =
-      broker()->ProcessFeedbackForTemplateObject(source);
-  if (feedback.IsInsufficient()) {
-    environment()->accumulator_hints() = Hints();
-  } else {
-    JSArrayRef template_object = feedback.AsTemplateObject().value();
-    environment()->accumulator_hints() =
-        Hints::SingleConstant(template_object.object(), zone());
-  }
-}
-
-void SerializerForBackgroundCompilation::VisitLdaTrue(
-    BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints() = Hints::SingleConstant(
-      broker()->isolate()->factory()->true_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaFalse(
-    BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints() = Hints::SingleConstant(
-      broker()->isolate()->factory()->false_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaTheHole(
-    BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints() = Hints::SingleConstant(
-      broker()->isolate()->factory()->the_hole_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaUndefined(
-    BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints() = Hints::SingleConstant(
-      broker()->isolate()->factory()->undefined_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaNull(
-    BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints() = Hints::SingleConstant(
-      broker()->isolate()->factory()->null_value(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitLdaZero(
-    BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints() =
Hints::SingleConstant( - handle(Smi::FromInt(0), broker()->isolate()), zone()); -} - -void SerializerForBackgroundCompilation::VisitLdaSmi( - BytecodeArrayIterator* iterator) { - Handle<Smi> smi(Smi::FromInt(iterator->GetImmediateOperand(0)), - broker()->isolate()); - environment()->accumulator_hints() = Hints::SingleConstant(smi, zone()); -} - -void SerializerForBackgroundCompilation::VisitInvokeIntrinsic( - BytecodeArrayIterator* iterator) { - Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0); - // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and - // JSNativeContextSpecialization::ReduceJSResolvePromise. - switch (functionId) { - case Runtime::kInlineAsyncFunctionResolve: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncFunctionResolve)); - interpreter::Register first_reg = iterator->GetRegisterOperand(1); - size_t reg_count = iterator->GetRegisterCountOperand(2); - CHECK_EQ(reg_count, 3); - HintsVector args = PrepareArgumentsHints(first_reg, reg_count); - Hints const& resolution_hints = args[1]; // The resolution object. - ProcessHintsForPromiseResolve(resolution_hints); - return; - } - case Runtime::kInlineAsyncGeneratorReject: - case Runtime::kAsyncGeneratorReject: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncGeneratorReject)); - break; - } - case Runtime::kInlineAsyncGeneratorResolve: - case Runtime::kAsyncGeneratorResolve: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncGeneratorResolve)); - break; - } - case Runtime::kInlineAsyncGeneratorYield: - case Runtime::kAsyncGeneratorYield: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncGeneratorYield)); - break; - } - case Runtime::kInlineAsyncGeneratorAwaitUncaught: - case Runtime::kAsyncGeneratorAwaitUncaught: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncGeneratorAwaitUncaught)); - break; - } - case Runtime::kInlineAsyncGeneratorAwaitCaught: - case Runtime::kAsyncGeneratorAwaitCaught: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncGeneratorAwaitCaught)); - break; - } - case Runtime::kInlineAsyncFunctionAwaitUncaught: - case Runtime::kAsyncFunctionAwaitUncaught: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncFunctionAwaitUncaught)); - break; - } - case Runtime::kInlineAsyncFunctionAwaitCaught: - case Runtime::kAsyncFunctionAwaitCaught: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncFunctionAwaitCaught)); - break; - } - case Runtime::kInlineAsyncFunctionReject: - case Runtime::kAsyncFunctionReject: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncFunctionReject)); - break; - } - case Runtime::kAsyncFunctionResolve: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kAsyncFunctionResolve)); - break; - } - case Runtime::kInlineCopyDataProperties: - case Runtime::kCopyDataProperties: { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kCopyDataProperties)); - break; - } - default: { - break; - } - } - environment()->accumulator_hints() = Hints(); -} - -void SerializerForBackgroundCompilation::VisitLdaConstant( - BytecodeArrayIterator* iterator) { - Handle<Object> constant = - iterator->GetConstantForIndexOperand(0, broker()->isolate()); - // TODO(v8:7790): FixedArrays still need to be serialized until they are - // 
moved to kNeverSerialized.
-  if (!broker()->is_concurrent_inlining() || constant->IsFixedArray()) {
-    MakeRef(broker(), constant);
-  }
-  environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
-}
-
-void SerializerForBackgroundCompilation::VisitPushContext(
-    BytecodeArrayIterator* iterator) {
-  register_hints(iterator->GetRegisterOperand(0))
-      .Reset(&environment()->current_context_hints(), zone());
-  environment()->current_context_hints().Reset(
-      &environment()->accumulator_hints(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitPopContext(
-    BytecodeArrayIterator* iterator) {
-  environment()->current_context_hints().Reset(
-      &register_hints(iterator->GetRegisterOperand(0)), zone());
-}
-
-void SerializerForBackgroundCompilation::ProcessImmutableLoad(
-    ContextRef const& context_ref, int slot, ContextProcessingMode mode,
-    Hints* result_hints) {
-  DCHECK_EQ(mode, kSerializeSlot);
-  base::Optional<ObjectRef> slot_value = context_ref.get(slot);
-
-  // If requested, record the object as a hint for the result value.
-  if (result_hints != nullptr && slot_value.has_value()) {
-    result_hints->AddConstant(slot_value.value().object(), zone(), broker());
-  }
-}
-
-void SerializerForBackgroundCompilation::ProcessContextAccess(
-    Hints const& context_hints, int slot, int depth, ContextProcessingMode mode,
-    Hints* result_hints) {
-  // This function is for JSContextSpecialization::ReduceJSLoadContext and
-  // ReduceJSStoreContext. Those reductions attempt to eliminate as many
-  // loads as possible by making use of constant Context objects. In the
-  // case of an immutable load, ReduceJSLoadContext even attempts to load
-  // the value at {slot}, replacing the load with a constant.
-  for (auto x : context_hints.constants()) {
-    if (x->IsContext()) {
-      // Walk this context to the given depth and serialize the slot found.
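-      // E.g. with depth 2 the walk follows Context::previous() twice:
-      //   ctx -> ctx.previous() -> ctx.previous().previous()
-      // {remaining_depth} only reaches 0 if the context chain is long
-      // enough; a shorter chain simply results in no load being processed.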
- ContextRef context_ref = MakeRef(broker(), Handle<Context>::cast(x)); - size_t remaining_depth = depth; - context_ref = context_ref.previous(&remaining_depth); - if (remaining_depth == 0 && mode != kIgnoreSlot) { - ProcessImmutableLoad(context_ref, slot, mode, result_hints); - } - } - } - for (auto x : context_hints.virtual_contexts()) { - if (x.distance <= static_cast<unsigned int>(depth)) { - ContextRef context_ref = - MakeRef(broker(), Handle<Context>::cast(x.context)); - size_t remaining_depth = depth - x.distance; - context_ref = context_ref.previous(&remaining_depth); - if (remaining_depth == 0 && mode != kIgnoreSlot) { - ProcessImmutableLoad(context_ref, slot, mode, result_hints); - } - } - } -} - -void SerializerForBackgroundCompilation::VisitLdaContextSlot( - BytecodeArrayIterator* iterator) { - Hints const& context_hints = register_hints(iterator->GetRegisterOperand(0)); - const int slot = iterator->GetIndexOperand(1); - const int depth = iterator->GetUnsignedImmediateOperand(2); - Hints new_accumulator_hints; - ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot, - &new_accumulator_hints); - environment()->accumulator_hints() = new_accumulator_hints; -} - -void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot( - BytecodeArrayIterator* iterator) { - const int slot = iterator->GetIndexOperand(0); - const int depth = 0; - Hints const& context_hints = environment()->current_context_hints(); - Hints new_accumulator_hints; - ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot, - &new_accumulator_hints); - environment()->accumulator_hints() = new_accumulator_hints; -} - -void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot( - BytecodeArrayIterator* iterator) { - const int slot = iterator->GetIndexOperand(1); - const int depth = iterator->GetUnsignedImmediateOperand(2); - Hints const& context_hints = register_hints(iterator->GetRegisterOperand(0)); - Hints new_accumulator_hints; - ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, - &new_accumulator_hints); - environment()->accumulator_hints() = new_accumulator_hints; -} - -void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot( - BytecodeArrayIterator* iterator) { - const int slot = iterator->GetIndexOperand(0); - const int depth = 0; - Hints const& context_hints = environment()->current_context_hints(); - Hints new_accumulator_hints; - ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, - &new_accumulator_hints); - environment()->accumulator_hints() = new_accumulator_hints; -} - -void SerializerForBackgroundCompilation::ProcessModuleVariableAccess( - BytecodeArrayIterator* iterator) { - const int slot = Context::EXTENSION_INDEX; - const int depth = iterator->GetUnsignedImmediateOperand(1); - Hints const& context_hints = environment()->current_context_hints(); - - Hints result_hints; - ProcessContextAccess(context_hints, slot, depth, kSerializeSlot, - &result_hints); - for (Handle<Object> constant : result_hints.constants()) { - MakeRef(broker(), constant); - } -} - -void SerializerForBackgroundCompilation::VisitLdaModuleVariable( - BytecodeArrayIterator* iterator) { - ProcessModuleVariableAccess(iterator); -} - -void SerializerForBackgroundCompilation::VisitStaModuleVariable( - BytecodeArrayIterator* iterator) { - ProcessModuleVariableAccess(iterator); -} - -void SerializerForBackgroundCompilation::VisitStaLookupSlot( - BytecodeArrayIterator* iterator) { - MakeRef(broker(), - iterator->GetConstantForIndexOperand(0, 
broker()->isolate()));
-  environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitStaContextSlot(
-    BytecodeArrayIterator* iterator) {
-  const int slot = iterator->GetIndexOperand(1);
-  const int depth = iterator->GetUnsignedImmediateOperand(2);
-  Hints const& hints = register_hints(iterator->GetRegisterOperand(0));
-  ProcessContextAccess(hints, slot, depth, kIgnoreSlot);
-}
-
-void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot(
-    BytecodeArrayIterator* iterator) {
-  const int slot = iterator->GetIndexOperand(0);
-  const int depth = 0;
-  Hints const& context_hints = environment()->current_context_hints();
-  ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
-}
-
-void SerializerForBackgroundCompilation::VisitLdar(
-    BytecodeArrayIterator* iterator) {
-  environment()->accumulator_hints().Reset(
-      &register_hints(iterator->GetRegisterOperand(0)), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitStar(
-    BytecodeArrayIterator* iterator) {
-  interpreter::Register reg = iterator->GetRegisterOperand(0);
-  register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitShortStar(
-    interpreter::Register reg) {
-  register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitMov(
-    BytecodeArrayIterator* iterator) {
-  interpreter::Register src = iterator->GetRegisterOperand(0);
-  interpreter::Register dst = iterator->GetRegisterOperand(1);
-  register_hints(dst).Reset(&register_hints(src), zone());
-}
-
-void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
-    BytecodeArrayIterator* iterator) {
-  Handle<String> constant_pattern = Handle<String>::cast(
-      iterator->GetConstantForIndexOperand(0, broker()->isolate()));
-  MakeRef(broker(), constant_pattern);
-  FeedbackSlot slot = iterator->GetSlotOperand(1);
-  FeedbackSource source(feedback_vector(), slot);
-  broker()->ProcessFeedbackForRegExpLiteral(source);
-  environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitCreateArrayLiteral(
-    BytecodeArrayIterator* iterator) {
-  Handle<ArrayBoilerplateDescription> array_boilerplate_description =
-      Handle<ArrayBoilerplateDescription>::cast(
-          iterator->GetConstantForIndexOperand(0, broker()->isolate()));
-  MakeRef(broker(), array_boilerplate_description);
-  FeedbackSlot slot = iterator->GetSlotOperand(1);
-  FeedbackSource source(feedback_vector(), slot);
-  broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
-  environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitCreateEmptyArrayLiteral(
-    BytecodeArrayIterator* iterator) {
-  FeedbackSlot slot = iterator->GetSlotOperand(0);
-  FeedbackSource source(feedback_vector(), slot);
-  broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
-  environment()->accumulator_hints() = Hints();
-}
-
-void SerializerForBackgroundCompilation::VisitCreateObjectLiteral(
-    BytecodeArrayIterator* iterator) {
-  Handle<ObjectBoilerplateDescription> constant_properties =
-      Handle<ObjectBoilerplateDescription>::cast(
-          iterator->GetConstantForIndexOperand(0, broker()->isolate()));
-  MakeRef(broker(), constant_properties);
-  FeedbackSlot slot = iterator->GetSlotOperand(1);
-  FeedbackSource source(feedback_vector(), slot);
-  broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
-  environment()->accumulator_hints() = Hints();
-}
-
-void
SerializerForBackgroundCompilation::VisitCreateFunctionContext( - BytecodeArrayIterator* iterator) { - ProcessCreateContext(iterator, 0); -} - -void SerializerForBackgroundCompilation::VisitCreateBlockContext( - BytecodeArrayIterator* iterator) { - ProcessCreateContext(iterator, 0); -} - -void SerializerForBackgroundCompilation::VisitCreateEvalContext( - BytecodeArrayIterator* iterator) { - ProcessCreateContext(iterator, 0); -} - -void SerializerForBackgroundCompilation::VisitCreateWithContext( - BytecodeArrayIterator* iterator) { - ProcessCreateContext(iterator, 1); -} - -void SerializerForBackgroundCompilation::VisitCreateCatchContext( - BytecodeArrayIterator* iterator) { - ProcessCreateContext(iterator, 1); -} - -void SerializerForBackgroundCompilation::VisitForInNext( - BytecodeArrayIterator* iterator) { - FeedbackSlot slot = iterator->GetSlotOperand(3); - ProcessForIn(slot); -} - -void SerializerForBackgroundCompilation::VisitForInPrepare( - BytecodeArrayIterator* iterator) { - FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessForIn(slot); -} - -void SerializerForBackgroundCompilation::ProcessCreateContext( - interpreter::BytecodeArrayIterator* iterator, int scopeinfo_operand_index) { - Hints const& current_context_hints = environment()->current_context_hints(); - Hints result_hints; - - // For each constant context, we must create a virtual context from - // it of distance one. - for (auto x : current_context_hints.constants()) { - if (x->IsContext()) { - Handle<Context> as_context(Handle<Context>::cast(x)); - result_hints.AddVirtualContext(VirtualContext(1, as_context), zone(), - broker()); - } - } - - // For each virtual context, we must create a virtual context from - // it of distance {existing distance} + 1. - for (auto x : current_context_hints.virtual_contexts()) { - result_hints.AddVirtualContext(VirtualContext(x.distance + 1, x.context), - zone(), broker()); - } - - environment()->accumulator_hints() = result_hints; -} - -void SerializerForBackgroundCompilation::VisitCreateClosure( - BytecodeArrayIterator* iterator) { - Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast( - iterator->GetConstantForIndexOperand(0, broker()->isolate())); - Handle<FeedbackCell> feedback_cell = - feedback_vector()->GetClosureFeedbackCell(iterator->GetIndexOperand(1)); - MakeRef(broker(), feedback_cell); - Handle<Object> cell_value(feedback_cell->value(), broker()->isolate()); - MakeRef(broker(), cell_value); - - Hints result_hints; - if (cell_value->IsFeedbackVector()) { - VirtualClosure virtual_closure(shared, - Handle<FeedbackVector>::cast(cell_value), - environment()->current_context_hints()); - result_hints.AddVirtualClosure(virtual_closure, zone(), broker()); - } - environment()->accumulator_hints() = result_hints; -} - -void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver( - BytecodeArrayIterator* iterator) { - Hints const& callee = register_hints(iterator->GetRegisterOperand(0)); - interpreter::Register first_reg = iterator->GetRegisterOperand(1); - int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2)); - FeedbackSlot slot = iterator->GetSlotOperand(3); - ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg, - reg_count, slot); -} - -void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver0( - BytecodeArrayIterator* iterator) { - Hints const& callee = register_hints(iterator->GetRegisterOperand(0)); - FeedbackSlot slot = iterator->GetSlotOperand(1); - - Hints const receiver = Hints::SingleConstant( 
-      broker()->isolate()->factory()->undefined_value(), zone());
-  HintsVector parameters({receiver}, zone());
-
-  ProcessCallOrConstruct(callee, base::nullopt, &parameters, slot,
-                         kMissingArgumentsAreUndefined);
-}
-
-namespace {
-void PrepareArgumentsHintsInternal(Zone* zone, HintsVector* args) {}
-
-template <typename... MoreHints>
-void PrepareArgumentsHintsInternal(Zone* zone, HintsVector* args, Hints* hints,
-                                   MoreHints... more) {
-  hints->EnsureShareable(zone);
-  args->push_back(*hints);
-  PrepareArgumentsHintsInternal(zone, args, more...);
-}
-}  // namespace
-
-template <typename... MoreHints>
-HintsVector SerializerForBackgroundCompilation::PrepareArgumentsHints(
-    Hints* hints, MoreHints... more) {
-  HintsVector args(zone());
-  PrepareArgumentsHintsInternal(zone(), &args, hints, more...);
-  return args;
-}
-
-HintsVector SerializerForBackgroundCompilation::PrepareArgumentsHints(
-    interpreter::Register first, size_t count) {
-  HintsVector result(zone());
-  const int reg_base = first.index();
-  for (int i = 0; i < static_cast<int>(count); ++i) {
-    Hints& hints = register_hints(interpreter::Register(reg_base + i));
-    hints.EnsureShareable(zone());
-    result.push_back(hints);
-  }
-  return result;
-}
-
-void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver1(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  Hints* arg0 = &register_hints(iterator->GetRegisterOperand(1));
-  FeedbackSlot slot = iterator->GetSlotOperand(2);
-
-  Hints receiver = Hints::SingleConstant(
-      broker()->isolate()->factory()->undefined_value(), zone());
-  HintsVector args = PrepareArgumentsHints(&receiver, arg0);
-
-  ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
-                         kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallUndefinedReceiver2(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  Hints* arg0 = &register_hints(iterator->GetRegisterOperand(1));
-  Hints* arg1 = &register_hints(iterator->GetRegisterOperand(2));
-  FeedbackSlot slot = iterator->GetSlotOperand(3);
-
-  Hints receiver = Hints::SingleConstant(
-      broker()->isolate()->factory()->undefined_value(), zone());
-  HintsVector args = PrepareArgumentsHints(&receiver, arg0, arg1);
-
-  ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
-                         kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallAnyReceiver(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
-  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
-  FeedbackSlot slot = iterator->GetSlotOperand(3);
-  ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
-                     slot);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
-  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
-  FeedbackSlot slot = iterator->GetSlotOperand(3);
-  ProcessCallVarArgs(ConvertReceiverMode::kNotNullOrUndefined, callee,
-                     first_reg, reg_count, slot);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty0(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  Hints* receiver = &register_hints(iterator->GetRegisterOperand(1));
-  FeedbackSlot slot = iterator->GetSlotOperand(2);
-
-  HintsVector args = PrepareArgumentsHints(receiver);
-
-  ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
-                         kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty1(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  Hints* receiver = &register_hints(iterator->GetRegisterOperand(1));
-  Hints* arg0 = &register_hints(iterator->GetRegisterOperand(2));
-  FeedbackSlot slot = iterator->GetSlotOperand(3);
-
-  HintsVector args = PrepareArgumentsHints(receiver, arg0);
-
-  ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
-                         kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallProperty2(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  Hints* receiver = &register_hints(iterator->GetRegisterOperand(1));
-  Hints* arg0 = &register_hints(iterator->GetRegisterOperand(2));
-  Hints* arg1 = &register_hints(iterator->GetRegisterOperand(3));
-  FeedbackSlot slot = iterator->GetSlotOperand(4);
-
-  HintsVector args = PrepareArgumentsHints(receiver, arg0, arg1);
-
-  ProcessCallOrConstruct(callee, base::nullopt, &args, slot,
-                         kMissingArgumentsAreUndefined);
-}
-
-void SerializerForBackgroundCompilation::VisitCallWithSpread(
-    BytecodeArrayIterator* iterator) {
-  Hints const& callee = register_hints(iterator->GetRegisterOperand(0));
-  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
-  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
-  FeedbackSlot slot = iterator->GetSlotOperand(3);
-  ProcessCallVarArgs(ConvertReceiverMode::kAny, callee, first_reg, reg_count,
-                     slot, kMissingArgumentsAreUnknown);
-}
-
-void SerializerForBackgroundCompilation::VisitCallJSRuntime(
-    BytecodeArrayIterator* iterator) {
-  const int runtime_index = iterator->GetNativeContextIndexOperand(0);
-  ObjectRef constant =
-      broker()->target_native_context().get(runtime_index).value();
-  Hints const callee = Hints::SingleConstant(constant.object(), zone());
-  interpreter::Register first_reg = iterator->GetRegisterOperand(1);
-  int reg_count = static_cast<int>(iterator->GetRegisterCountOperand(2));
-  ProcessCallVarArgs(ConvertReceiverMode::kNullOrUndefined, callee, first_reg,
-                     reg_count, FeedbackSlot::Invalid());
-}
-
-Hints SerializerForBackgroundCompilation::RunChildSerializer(
-    CompilationSubject function, base::Optional<Hints> new_target,
-    const HintsVector& arguments, MissingArgumentsPolicy padding) {
-  SerializerForBackgroundCompilation child_serializer(
-      zone_scope_.zone_stats(), broker(), dependencies(), function, new_target,
-      arguments, padding, flags(), nesting_level_ + 1);
-  Hints result = child_serializer.Run();
-  // The Hints returned by the call to Run are allocated in the zone
-  // created by the child serializer. Adding those hints to a hints
-  // object created in our zone will preserve the information.
- return result.CopyToParentZone(zone(), broker()); -} - -void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct( - Callee const& callee, base::Optional<Hints> new_target, - const HintsVector& arguments, SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, Hints* result_hints) { - Handle<SharedFunctionInfo> shared = callee.shared(broker()->isolate()); - if (shared->IsApiFunction()) { - ProcessApiCall(shared, arguments); - DCHECK_NE( - shared->GetInlineability(broker()->isolate(), broker()->is_turboprop()), - SharedFunctionInfo::kIsInlineable); - } else if (shared->HasBuiltinId()) { - ProcessBuiltinCall(shared, new_target, arguments, speculation_mode, padding, - result_hints); - DCHECK_NE( - shared->GetInlineability(broker()->isolate(), broker()->is_turboprop()), - SharedFunctionInfo::kIsInlineable); - } else if ((flags() & - SerializerForBackgroundCompilationFlag::kEnableTurboInlining) && - shared->GetInlineability(broker()->isolate(), - broker()->is_turboprop()) == - SharedFunctionInfo::kIsInlineable && - callee.HasFeedbackVector()) { - CompilationSubject subject = - callee.ToCompilationSubject(broker()->isolate(), zone()); - result_hints->Add( - RunChildSerializer(subject, new_target, arguments, padding), zone(), - broker()); - } -} - -namespace { -// Returns the innermost bound target and inserts all bound arguments and -// {original_arguments} into {expanded_arguments} in the appropriate order. -JSReceiverRef UnrollBoundFunction(JSBoundFunctionRef const& bound_function, - JSHeapBroker* broker, - const HintsVector& original_arguments, - HintsVector* expanded_arguments, Zone* zone) { - DCHECK(expanded_arguments->empty()); - - JSReceiverRef target = bound_function.AsJSReceiver(); - HintsVector reversed_bound_arguments(zone); - for (; target.IsJSBoundFunction(); - target = target.AsJSBoundFunction().bound_target_function().value()) { - for (int i = target.AsJSBoundFunction().bound_arguments().length() - 1; - i >= 0; --i) { - Hints const arg = Hints::SingleConstant( - target.AsJSBoundFunction().bound_arguments().get(i).object(), zone); - reversed_bound_arguments.push_back(arg); - } - Hints const arg = Hints::SingleConstant( - target.AsJSBoundFunction().bound_this().value().object(), zone); - reversed_bound_arguments.push_back(arg); - } - - expanded_arguments->insert(expanded_arguments->end(), - reversed_bound_arguments.rbegin(), - reversed_bound_arguments.rend()); - expanded_arguments->insert(expanded_arguments->end(), - original_arguments.begin(), - original_arguments.end()); - - return target; -} -} // namespace - -void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct( - Handle<Object> callee, base::Optional<Hints> new_target, - const HintsVector& arguments, SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, Hints* result_hints) { - const HintsVector* actual_arguments = &arguments; - HintsVector expanded_arguments(zone()); - if (callee->IsJSBoundFunction()) { - JSBoundFunctionRef bound_function = - MakeRef(broker(), Handle<JSBoundFunction>::cast(callee)); - if (!bound_function.Serialize()) return; - callee = UnrollBoundFunction(bound_function, broker(), arguments, - &expanded_arguments, zone()) - .object(); - actual_arguments = &expanded_arguments; - } - if (!callee->IsJSFunction()) return; - - JSFunctionRef function = MakeRef(broker(), Handle<JSFunction>::cast(callee)); - function.Serialize(); - Callee new_callee(function.object()); - ProcessCalleeForCallOrConstruct(new_callee, new_target, *actual_arguments, - 
speculation_mode, padding, result_hints); -} - -void SerializerForBackgroundCompilation::ProcessCallOrConstruct( - Hints callee, base::Optional<Hints> new_target, HintsVector* arguments, - FeedbackSlot slot, MissingArgumentsPolicy padding) { - SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation; - - if (!slot.IsInvalid()) { - FeedbackSource source(feedback_vector(), slot); - ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForCall(source); - if (BailoutOnUninitialized(feedback)) return; - - if (!feedback.IsInsufficient()) { - // Incorporate feedback into hints copy to simplify processing. - // TODO(neis): Modify the original hints instead? - speculation_mode = feedback.AsCall().speculation_mode(); - // Incorporate target feedback into hints copy to simplify processing. - base::Optional<HeapObjectRef> target = feedback.AsCall().target(); - if (target.has_value() && - (target->map().is_callable() || target->IsFeedbackCell())) { - callee = callee.Copy(zone()); - // TODO(mvstanton): if the map isn't callable then we have an allocation - // site, and it may make sense to add the Array JSFunction constant. - if (new_target.has_value()) { - // Construct; feedback is new_target, which often is also the callee. - new_target = new_target->Copy(zone()); - new_target->AddConstant(target->object(), zone(), broker()); - callee.AddConstant(target->object(), zone(), broker()); - } else { - // Call; target is feedback cell or callee. - if (target->IsFeedbackCell() && target->AsFeedbackCell().value()) { - FeedbackVectorRef vector = *target->AsFeedbackCell().value(); - vector.Serialize(); - VirtualClosure virtual_closure( - vector.shared_function_info().object(), vector.object(), - Hints()); - callee.AddVirtualClosure(virtual_closure, zone(), broker()); - } else { - callee.AddConstant(target->object(), zone(), broker()); - } - } - } - } - } - - Hints result_hints_from_new_target; - if (new_target.has_value()) { - ProcessNewTargetForConstruct(*new_target, &result_hints_from_new_target); - // These hints are a good guess at the resulting object, so they are useful - // for both the accumulator and the constructor call's receiver. The latter - // is still missing completely in {arguments} so add it now. - arguments->insert(arguments->begin(), result_hints_from_new_target); - } - - // For JSNativeContextSpecialization::InferRootMap - Hints new_accumulator_hints = result_hints_from_new_target.Copy(zone()); - - ProcessCallOrConstructRecursive(callee, new_target, *arguments, - speculation_mode, padding, - &new_accumulator_hints); - environment()->accumulator_hints() = new_accumulator_hints; -} - -void SerializerForBackgroundCompilation::ProcessCallOrConstructRecursive( - Hints const& callee, base::Optional<Hints> new_target, - const HintsVector& arguments, SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, Hints* result_hints) { - // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct. - for (auto constant : callee.constants()) { - ProcessCalleeForCallOrConstruct(constant, new_target, arguments, - speculation_mode, padding, result_hints); - } - - // For JSCallReducer::ReduceJSCall and JSCallReducer::ReduceJSConstruct. 
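-  // (Bound-function hints are handled by the loop further below: the
-  // recorded bound receiver and bound arguments are prepended to the
-  // call-site arguments before recursing, mirroring what
-  // UnrollBoundFunction does for constant JSBoundFunctions.)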
- for (auto hint : callee.virtual_closures()) { - ProcessCalleeForCallOrConstruct(Callee(hint), new_target, arguments, - speculation_mode, padding, result_hints); - } - - for (auto hint : callee.virtual_bound_functions()) { - HintsVector new_arguments = hint.bound_arguments; - new_arguments.insert(new_arguments.end(), arguments.begin(), - arguments.end()); - ProcessCallOrConstructRecursive(hint.bound_target, new_target, - new_arguments, speculation_mode, padding, - result_hints); - } -} - -void SerializerForBackgroundCompilation::ProcessNewTargetForConstruct( - Hints const& new_target_hints, Hints* result_hints) { - for (Handle<Object> target : new_target_hints.constants()) { - if (target->IsJSBoundFunction()) { - // Unroll the bound function. - while (target->IsJSBoundFunction()) { - target = handle( - Handle<JSBoundFunction>::cast(target)->bound_target_function(), - broker()->isolate()); - } - } - if (target->IsJSFunction()) { - Handle<JSFunction> new_target(Handle<JSFunction>::cast(target)); - if (new_target->has_prototype_slot(broker()->isolate()) && - new_target->has_initial_map()) { - result_hints->AddMap( - handle(new_target->initial_map(), broker()->isolate()), zone(), - broker()); - } - } - } - - for (auto const& virtual_bound_function : - new_target_hints.virtual_bound_functions()) { - ProcessNewTargetForConstruct(virtual_bound_function.bound_target, - result_hints); - } -} - -void SerializerForBackgroundCompilation::ProcessCallVarArgs( - ConvertReceiverMode receiver_mode, Hints const& callee, - interpreter::Register first_reg, int reg_count, FeedbackSlot slot, - MissingArgumentsPolicy padding) { - HintsVector args = PrepareArgumentsHints(first_reg, reg_count); - // The receiver is either given in the first register or it is implicitly - // the {undefined} value. - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - args.insert(args.begin(), - Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), zone())); - } - ProcessCallOrConstruct(callee, base::nullopt, &args, slot, padding); -} - -void SerializerForBackgroundCompilation::ProcessApiCall( - Handle<SharedFunctionInfo> target, const HintsVector& arguments) { - for (const auto b : - {Builtin::kCallFunctionTemplate_CheckAccess, - Builtin::kCallFunctionTemplate_CheckCompatibleReceiver, - Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver}) { - MakeRef(broker(), broker()->isolate()->builtins()->code_handle(b)); - } - FunctionTemplateInfoRef target_template_info = - MakeRef(broker(), - FunctionTemplateInfo::cast(target->function_data(kAcquireLoad))); - if (!target_template_info.has_call_code()) return; - target_template_info.SerializeCallCode(); - - if (target_template_info.accept_any_receiver() && - target_template_info.is_signature_undefined()) { - return; - } - - if (arguments.empty()) return; - Hints const& receiver_hints = arguments[0]; - for (auto hint : receiver_hints.constants()) { - if (hint->IsUndefined()) { - // The receiver is the global proxy. 
- Handle<JSGlobalProxy> global_proxy = - broker()->target_native_context().global_proxy_object().object(); - ProcessReceiverMapForApiCall( - target_template_info, - handle(global_proxy->map(), broker()->isolate())); - continue; - } - - if (!hint->IsJSReceiver()) continue; - Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint)); - - ProcessReceiverMapForApiCall(target_template_info, - handle(receiver->map(), broker()->isolate())); - } - - for (auto receiver_map : receiver_hints.maps()) { - ProcessReceiverMapForApiCall(target_template_info, receiver_map); - } -} - -void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall( - FunctionTemplateInfoRef target, Handle<Map> receiver) { - if (!receiver->is_access_check_needed()) { - MapRef receiver_map = MakeRef(broker(), receiver); - TRACE_BROKER(broker(), "Serializing holder for target: " << target); - target.LookupHolderOfExpectedType(receiver_map, - SerializationPolicy::kSerializeIfNeeded); - } -} - -void SerializerForBackgroundCompilation::ProcessHintsForObjectCreate( - Hints const& prototype) { - for (Handle<Object> constant_handle : prototype.constants()) { - ObjectRef constant = MakeRef(broker(), constant_handle); - if (constant.IsJSObject()) constant.AsJSObject().SerializeObjectCreateMap(); - } -} - -void SerializerForBackgroundCompilation::ProcessBuiltinCall( - Handle<SharedFunctionInfo> target, base::Optional<Hints> new_target, - const HintsVector& arguments, SpeculationMode speculation_mode, - MissingArgumentsPolicy padding, Hints* result_hints) { - DCHECK(target->HasBuiltinId()); - const Builtin builtin = target->builtin_id(); - const char* name = Builtins::name(builtin); - TRACE_BROKER(broker(), "Serializing for call to builtin " << name); - switch (builtin) { - case Builtin::kObjectCreate: { - if (arguments.size() >= 2) { - ProcessHintsForObjectCreate(arguments[1]); - } else { - ProcessHintsForObjectCreate(Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), zone())); - } - break; - } - case Builtin::kPromisePrototypeCatch: { - // For JSCallReducer::ReducePromisePrototypeCatch. - if (speculation_mode != SpeculationMode::kDisallowSpeculation) { - if (arguments.size() >= 1) { - ProcessMapHintsForPromises(arguments[0]); - } - } - break; - } - case Builtin::kPromisePrototypeFinally: { - // For JSCallReducer::ReducePromisePrototypeFinally. - if (speculation_mode != SpeculationMode::kDisallowSpeculation) { - if (arguments.size() >= 1) { - ProcessMapHintsForPromises(arguments[0]); - } - MakeRef( - broker(), - broker()->isolate()->factory()->promise_catch_finally_shared_fun()); - MakeRef( - broker(), - broker()->isolate()->factory()->promise_then_finally_shared_fun()); - } - break; - } - case Builtin::kPromisePrototypeThen: { - // For JSCallReducer::ReducePromisePrototypeThen. - if (speculation_mode != SpeculationMode::kDisallowSpeculation) { - if (arguments.size() >= 1) { - ProcessMapHintsForPromises(arguments[0]); - } - } - break; - } - case Builtin::kPromiseResolveTrampoline: - // For JSCallReducer::ReducePromiseInternalResolve and - // JSNativeContextSpecialization::ReduceJSResolvePromise. - if (arguments.size() >= 1) { - Hints const resolution_hints = - arguments.size() >= 2 - ? arguments[1] - : Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), - zone()); - ProcessHintsForPromiseResolve(resolution_hints); - } - break; - case Builtin::kRegExpPrototypeTest: - case Builtin::kRegExpPrototypeTestFast: - // For JSCallReducer::ReduceRegExpPrototypeTest. 
- if (arguments.size() >= 1 && - speculation_mode != SpeculationMode::kDisallowSpeculation) { - Hints const& regexp_hints = arguments[0]; - ProcessHintsForRegExpTest(regexp_hints); - } - break; - case Builtin::kArrayEvery: - case Builtin::kArrayFilter: - case Builtin::kArrayForEach: - case Builtin::kArrayPrototypeFind: - case Builtin::kArrayPrototypeFindIndex: - case Builtin::kArrayMap: - case Builtin::kArraySome: - if (arguments.size() >= 2 && - speculation_mode != SpeculationMode::kDisallowSpeculation) { - Hints const& callback = arguments[1]; - // "Call(callbackfn, T, « kValue, k, O »)" - HintsVector new_arguments(zone()); - new_arguments.push_back( - arguments.size() < 3 - ? Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), zone()) - : arguments[2]); // T - new_arguments.push_back(Hints()); // kValue - new_arguments.push_back(Hints()); // k - new_arguments.push_back(arguments[0]); // O - for (auto constant : callback.constants()) { - ProcessCalleeForCallOrConstruct( - constant, base::nullopt, new_arguments, speculation_mode, - kMissingArgumentsAreUndefined, result_hints); - } - for (auto virtual_closure : callback.virtual_closures()) { - ProcessCalleeForCallOrConstruct( - Callee(virtual_closure), base::nullopt, new_arguments, - speculation_mode, kMissingArgumentsAreUndefined, result_hints); - } - } - break; - case Builtin::kArrayReduce: - case Builtin::kArrayReduceRight: - if (arguments.size() >= 2 && - speculation_mode != SpeculationMode::kDisallowSpeculation) { - Hints const& callback = arguments[1]; - // "Call(callbackfn, undefined, « accumulator, kValue, k, O »)" - HintsVector new_arguments(zone()); - new_arguments.push_back(Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), zone())); - new_arguments.push_back(Hints()); // accumulator - new_arguments.push_back(Hints()); // kValue - new_arguments.push_back(Hints()); // k - new_arguments.push_back(arguments[0]); // O - for (auto constant : callback.constants()) { - ProcessCalleeForCallOrConstruct( - constant, base::nullopt, new_arguments, speculation_mode, - kMissingArgumentsAreUndefined, result_hints); - } - for (auto virtual_closure : callback.virtual_closures()) { - ProcessCalleeForCallOrConstruct( - Callee(virtual_closure), base::nullopt, new_arguments, - speculation_mode, kMissingArgumentsAreUndefined, result_hints); - } - } - break; - case Builtin::kFunctionPrototypeApply: - if (arguments.size() >= 1) { - // Drop hints for all arguments except the user-given receiver. - Hints const new_receiver = - arguments.size() >= 2 - ? 
arguments[1] - : Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), - zone()); - HintsVector new_arguments({new_receiver}, zone()); - for (auto constant : arguments[0].constants()) { - ProcessCalleeForCallOrConstruct( - constant, base::nullopt, new_arguments, speculation_mode, - kMissingArgumentsAreUnknown, result_hints); - } - for (auto const& virtual_closure : arguments[0].virtual_closures()) { - ProcessCalleeForCallOrConstruct( - Callee(virtual_closure), base::nullopt, new_arguments, - speculation_mode, kMissingArgumentsAreUnknown, result_hints); - } - } - break; - case Builtin::kPromiseConstructor: - if (arguments.size() >= 1) { - // "Call(executor, undefined, « resolvingFunctions.[[Resolve]], - // resolvingFunctions.[[Reject]] »)" - HintsVector new_arguments( - {Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), zone())}, - zone()); - for (auto constant : arguments[0].constants()) { - ProcessCalleeForCallOrConstruct( - constant, base::nullopt, new_arguments, - SpeculationMode::kDisallowSpeculation, - kMissingArgumentsAreUnknown, result_hints); - } - for (auto const& virtual_closure : arguments[0].virtual_closures()) { - ProcessCalleeForCallOrConstruct( - Callee(virtual_closure), base::nullopt, new_arguments, - SpeculationMode::kDisallowSpeculation, - kMissingArgumentsAreUnknown, result_hints); - } - } - MakeRef(broker(), broker() - ->isolate() - ->factory() - ->promise_capability_default_reject_shared_fun()); - MakeRef(broker(), broker() - ->isolate() - ->factory() - ->promise_capability_default_resolve_shared_fun()); - - break; - case Builtin::kFunctionPrototypeCall: - if (arguments.size() >= 1) { - HintsVector new_arguments(arguments.begin() + 1, arguments.end(), - zone()); - for (auto constant : arguments[0].constants()) { - ProcessCalleeForCallOrConstruct(constant, base::nullopt, - new_arguments, speculation_mode, - padding, result_hints); - } - for (auto const& virtual_closure : arguments[0].virtual_closures()) { - ProcessCalleeForCallOrConstruct( - Callee(virtual_closure), base::nullopt, new_arguments, - speculation_mode, padding, result_hints); - } - } - break; - case Builtin::kReflectApply: - if (arguments.size() >= 2) { - // Drop hints for all arguments except the user-given receiver. - Hints const new_receiver = - arguments.size() >= 3 - ? arguments[2] - : Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), - zone()); - HintsVector new_arguments({new_receiver}, zone()); - for (auto constant : arguments[1].constants()) { - ProcessCalleeForCallOrConstruct( - constant, base::nullopt, new_arguments, speculation_mode, - kMissingArgumentsAreUnknown, result_hints); - } - for (auto const& virtual_closure : arguments[1].virtual_closures()) { - ProcessCalleeForCallOrConstruct( - Callee(virtual_closure), base::nullopt, new_arguments, - speculation_mode, kMissingArgumentsAreUnknown, result_hints); - } - } - break; - - case Builtin::kReflectConstruct: - if (arguments.size() >= 2) { - for (auto constant : arguments[1].constants()) { - if (constant->IsJSFunction()) { - MakeRef(broker(), Handle<JSFunction>::cast(constant)).Serialize(); - } - } - } - break; - case Builtin::kObjectPrototypeIsPrototypeOf: - if (arguments.size() >= 2) { - ProcessHintsForHasInPrototypeChain(arguments[1]); - } - break; - case Builtin::kFunctionPrototypeHasInstance: - // For JSCallReducer::ReduceFunctionPrototypeHasInstance. 
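The Array-builtin cases above reconstruct the argument list the callback will eventually observe, following the spec shape Call(callbackfn, T, « kValue, k, O »). A standalone sketch of that assembly with the same hypothetical Hint type (the real code pushes Hints objects into a zone-allocated HintsVector):

#include <string>
#include <vector>

struct Hint {
  std::string desc;  // Hypothetical stand-in for V8's Hints.
};

// Builds « thisArg, kValue, k, O » for an Array.prototype.map/forEach-style
// callback. The caller guarantees outer_args has at least two entries
// (receiver and callback), matching the size check in the cases above.
std::vector<Hint> CallbackArguments(const std::vector<Hint>& outer_args) {
  std::vector<Hint> result;
  result.push_back(outer_args.size() >= 3 ? outer_args[2]
                                          : Hint{"undefined"});  // T
  result.push_back(Hint{});         // kValue: unknown per-element value
  result.push_back(Hint{});         // k: unknown index
  result.push_back(outer_args[0]);  // O: the receiver array
  return result;
}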
- if (arguments.size() >= 2) { - ProcessHintsForOrdinaryHasInstance(arguments[0], arguments[1]); - } - break; - case Builtin::kFastFunctionPrototypeBind: - if (arguments.size() >= 1 && - speculation_mode != SpeculationMode::kDisallowSpeculation) { - Hints const& bound_target = arguments[0]; - ProcessHintsForFunctionBind(bound_target); - HintsVector new_arguments(arguments.begin() + 1, arguments.end(), - zone()); - result_hints->AddVirtualBoundFunction( - VirtualBoundFunction(bound_target, new_arguments), zone(), - broker()); - - broker() - ->target_native_context() - .bound_function_with_constructor_map() - .SerializePrototype(); - broker() - ->target_native_context() - .bound_function_without_constructor_map() - .SerializePrototype(); - } - break; - case Builtin::kObjectGetPrototypeOf: - case Builtin::kReflectGetPrototypeOf: - if (arguments.size() >= 2) { - ProcessHintsForObjectGetPrototype(arguments[1]); - } else { - Hints const undefined_hint = Hints::SingleConstant( - broker()->isolate()->factory()->undefined_value(), zone()); - ProcessHintsForObjectGetPrototype(undefined_hint); - } - break; - case Builtin::kObjectPrototypeGetProto: - if (arguments.size() >= 1) { - ProcessHintsForObjectGetPrototype(arguments[0]); - } - break; - case Builtin::kMapIteratorPrototypeNext: - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kOrderedHashTableHealIndex)); - MakeRef<FixedArray>( - broker(), broker()->isolate()->factory()->empty_ordered_hash_map()); - break; - case Builtin::kSetIteratorPrototypeNext: - MakeRef(broker(), broker()->isolate()->builtins()->code_handle( - Builtin::kOrderedHashTableHealIndex)); - MakeRef<FixedArray>( - broker(), broker()->isolate()->factory()->empty_ordered_hash_set()); - break; - default: - break; - } -} - -void SerializerForBackgroundCompilation::ProcessHintsForOrdinaryHasInstance( - Hints const& constructor_hints, Hints const& instance_hints) { - bool walk_prototypes = false; - for (Handle<Object> constructor : constructor_hints.constants()) { - // For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance. - if (constructor->IsHeapObject()) { - ProcessConstantForOrdinaryHasInstance( - MakeRef(broker(), Handle<HeapObject>::cast(constructor)), - &walk_prototypes); - } - } - // For JSNativeContextSpecialization::ReduceJSHasInPrototypeChain. 
- if (walk_prototypes) ProcessHintsForHasInPrototypeChain(instance_hints); -} - -void SerializerForBackgroundCompilation::ProcessHintsForHasInPrototypeChain( - Hints const& instance_hints) { - auto processMap = [&](Handle<Map> map_handle) { - MapRef map = MakeRef(broker(), map_handle); - while (map.IsJSObjectMap()) { - map.SerializePrototype(); - map = map.prototype().value().map(); - } - }; - - for (auto hint : instance_hints.constants()) { - if (!hint->IsHeapObject()) continue; - Handle<HeapObject> object(Handle<HeapObject>::cast(hint)); - processMap(handle(object->map(), broker()->isolate())); - } - for (auto map_hint : instance_hints.maps()) { - processMap(map_hint); - } -} - -void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve( - Hints const& resolution_hints) { - auto processMap = [&](Handle<Map> map) { - broker()->GetPropertyAccessInfo( - MakeRef(broker(), map), - MakeRef(broker(), broker()->isolate()->factory()->then_string()), - AccessMode::kLoad, dependencies(), - SerializationPolicy::kSerializeIfNeeded); - }; - - for (auto hint : resolution_hints.constants()) { - if (!hint->IsHeapObject()) continue; - Handle<HeapObject> resolution(Handle<HeapObject>::cast(hint)); - processMap(handle(resolution->map(), broker()->isolate())); - } - for (auto map_hint : resolution_hints.maps()) { - processMap(map_hint); - } -} - -void SerializerForBackgroundCompilation::ProcessMapHintsForPromises( - Hints const& receiver_hints) { - // We need to serialize the prototypes on each receiver map. - for (auto constant : receiver_hints.constants()) { - if (!constant->IsJSPromise()) continue; - Handle<Map> map(Handle<HeapObject>::cast(constant)->map(), - broker()->isolate()); - MakeRef(broker(), map).SerializePrototype(); - } - for (auto map : receiver_hints.maps()) { - if (!map->IsJSPromiseMap()) continue; - MakeRef(broker(), map).SerializePrototype(); - } -} - -PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest( - MapRef map) { - PropertyAccessInfo ai_exec = broker()->GetPropertyAccessInfo( - map, MakeRef(broker(), broker()->isolate()->factory()->exec_string()), - AccessMode::kLoad, dependencies(), - SerializationPolicy::kSerializeIfNeeded); - - Handle<JSObject> holder; - if (ai_exec.IsFastDataConstant() && ai_exec.holder().ToHandle(&holder)) { - // The property is on the prototype chain. - JSObjectRef holder_ref = MakeRef(broker(), holder); - holder_ref.GetOwnFastDataProperty(ai_exec.field_representation(), - ai_exec.field_index(), nullptr, - SerializationPolicy::kSerializeIfNeeded); - } - return ai_exec; -} - -void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest( - Hints const& regexp_hints) { - for (auto hint : regexp_hints.constants()) { - if (!hint->IsJSRegExp()) continue; - Handle<JSObject> regexp(Handle<JSObject>::cast(hint)); - Handle<Map> regexp_map(regexp->map(), broker()->isolate()); - PropertyAccessInfo ai_exec = - ProcessMapForRegExpTest(MakeRef(broker(), regexp_map)); - Handle<JSObject> holder; - if (ai_exec.IsFastDataConstant() && !ai_exec.holder().ToHandle(&holder)) { - // The property is on the object itself. 
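ProcessHintsForHasInPrototypeChain above advances map by map until the chain leaves JSObject territory. A standalone sketch of that walk over a toy map type (the fields are hypothetical; the real loop also serializes each prototype along the way):

// Toy stand-in for v8::internal::Map; fields are hypothetical.
struct ToyMap {
  bool is_js_object_map;
  const ToyMap* prototype_map;  // Map of the prototype, or nullptr at the end.
};

// Visits every map on the prototype chain, stopping at the first map that is
// not a JSObject map (e.g. a null prototype), like processMap above.
template <typename Visitor>
void WalkPrototypeChain(const ToyMap* map, Visitor&& visit) {
  while (map != nullptr && map->is_js_object_map) {
    visit(*map);
    map = map->prototype_map;
  }
}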
- JSObjectRef holder_ref = MakeRef(broker(), regexp); - holder_ref.GetOwnFastDataProperty( - ai_exec.field_representation(), ai_exec.field_index(), nullptr, - SerializationPolicy::kSerializeIfNeeded); - } - } - - for (auto map : regexp_hints.maps()) { - if (!map->IsJSRegExpMap()) continue; - ProcessMapForRegExpTest(MakeRef(broker(), map)); - } -} - -namespace { -void ProcessMapForFunctionBind(MapRef map) { - map.SerializePrototype(); - int min_nof_descriptors = std::max({JSFunction::kLengthDescriptorIndex, - JSFunction::kNameDescriptorIndex}) + - 1; - if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) { - map.SerializeOwnDescriptor( - InternalIndex(JSFunctionOrBoundFunction::kLengthDescriptorIndex)); - map.SerializeOwnDescriptor( - InternalIndex(JSFunctionOrBoundFunction::kNameDescriptorIndex)); - } -} -} // namespace - -void SerializerForBackgroundCompilation::ProcessHintsForFunctionBind( - Hints const& receiver_hints) { - for (auto constant : receiver_hints.constants()) { - if (constant->IsJSFunction()) { - JSFunctionRef function = - MakeRef(broker(), Handle<JSFunction>::cast(constant)); - function.Serialize(); - ProcessMapForFunctionBind(function.map()); - } else if (constant->IsJSBoundFunction()) { - JSBoundFunctionRef function = - MakeRef(broker(), Handle<JSBoundFunction>::cast(constant)); - function.Serialize(); - ProcessMapForFunctionBind(function.map()); - } - } - - for (auto map : receiver_hints.maps()) { - if (!map->IsJSFunctionMap() && !map->IsJSBoundFunctionMap()) continue; - ProcessMapForFunctionBind(MakeRef(broker(), map)); - } -} - -void SerializerForBackgroundCompilation::ProcessHintsForObjectGetPrototype( - Hints const& object_hints) { - for (auto constant : object_hints.constants()) { - if (!constant->IsHeapObject()) continue; - HeapObjectRef object = - MakeRef(broker(), Handle<HeapObject>::cast(constant)); - object.map().SerializePrototype(); - } - - for (auto map : object_hints.maps()) { - MakeRef(broker(), map).SerializePrototype(); - } -} - -void SerializerForBackgroundCompilation::ContributeToJumpTargetEnvironment( - int target_offset) { - auto it = jump_target_environments_.find(target_offset); - if (it == jump_target_environments_.end()) { - jump_target_environments_[target_offset] = - zone()->New<Environment>(*environment()); - } else { - it->second->Merge(environment(), zone(), broker()); - } -} - -void SerializerForBackgroundCompilation::IncorporateJumpTargetEnvironment( - int target_offset) { - auto it = jump_target_environments_.find(target_offset); - if (it != jump_target_environments_.end()) { - environment()->Merge(it->second, zone(), broker()); - jump_target_environments_.erase(it); - } -} - -void SerializerForBackgroundCompilation::ProcessJump( - interpreter::BytecodeArrayIterator* iterator) { - int jump_target = iterator->GetJumpTargetOffset(); - if (iterator->current_offset() < jump_target) { - ContributeToJumpTargetEnvironment(jump_target); - } -} - -void SerializerForBackgroundCompilation::VisitReturn( - BytecodeArrayIterator* iterator) { - return_value_hints().Add(environment()->accumulator_hints(), zone(), - broker()); - environment()->Kill(); -} - -void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback( - interpreter::BytecodeArrayIterator* iterator) { - interpreter::JumpTableTargetOffsets targets = - iterator->GetJumpTableTargetOffsets(); - for (interpreter::JumpTableTargetOffset target : targets) { - ContributeToJumpTargetEnvironment(target.target_offset); - } -} - -void 
SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState( - interpreter::BytecodeArrayIterator* iterator) { - for (const auto& target : bytecode_analysis().resume_jump_targets()) { - ContributeToJumpTargetEnvironment(target.target_offset()); - } -} - -void SerializerForBackgroundCompilation::VisitConstruct( - BytecodeArrayIterator* iterator) { - Hints& new_target = environment()->accumulator_hints(); - Hints const& callee = register_hints(iterator->GetRegisterOperand(0)); - interpreter::Register first_reg = iterator->GetRegisterOperand(1); - size_t reg_count = iterator->GetRegisterCountOperand(2); - FeedbackSlot slot = iterator->GetSlotOperand(3); - - HintsVector args = PrepareArgumentsHints(first_reg, reg_count); - - ProcessCallOrConstruct(callee, new_target, &args, slot, - kMissingArgumentsAreUndefined); -} - -void SerializerForBackgroundCompilation::VisitConstructWithSpread( - BytecodeArrayIterator* iterator) { - Hints const& new_target = environment()->accumulator_hints(); - Hints const& callee = register_hints(iterator->GetRegisterOperand(0)); - interpreter::Register first_reg = iterator->GetRegisterOperand(1); - size_t reg_count = iterator->GetRegisterCountOperand(2); - FeedbackSlot slot = iterator->GetSlotOperand(3); - - DCHECK_GT(reg_count, 0); - reg_count--; // Pop the spread element. - HintsVector args = PrepareArgumentsHints(first_reg, reg_count); - - ProcessCallOrConstruct(callee, new_target, &args, slot, - kMissingArgumentsAreUnknown); -} - -void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot, - bool is_load) { - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(feedback_vector(), slot); - ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForGlobalAccess(source); - - if (is_load) { - Hints result_hints; - if (feedback.kind() == ProcessedFeedback::kGlobalAccess) { - // We may be able to contribute to accumulator constant hints. - base::Optional<ObjectRef> value = - feedback.AsGlobalAccess().GetConstantHint(); - if (value.has_value()) { - result_hints.AddConstant(value->object(), zone(), broker()); - } - } else { - DCHECK(feedback.IsInsufficient()); - } - environment()->accumulator_hints() = result_hints; - } -} - -void SerializerForBackgroundCompilation::VisitLdaGlobal( - BytecodeArrayIterator* iterator) { - MakeRef(broker(), - iterator->GetConstantForIndexOperand(0, broker()->isolate())); - FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessGlobalAccess(slot, true); -} - -void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof( - BytecodeArrayIterator* iterator) { - VisitLdaGlobal(iterator); -} - -void SerializerForBackgroundCompilation::VisitLdaLookupSlot( - BytecodeArrayIterator* iterator) { - MakeRef(broker(), - iterator->GetConstantForIndexOperand(0, broker()->isolate())); - environment()->accumulator_hints() = Hints(); -} - -void SerializerForBackgroundCompilation::VisitLdaLookupSlotInsideTypeof( - BytecodeArrayIterator* iterator) { - MakeRef(broker(), - iterator->GetConstantForIndexOperand(0, broker()->isolate())); - environment()->accumulator_hints() = Hints(); -} - -void SerializerForBackgroundCompilation::ProcessCheckContextExtensions( - int depth) { - // for BytecodeGraphBuilder::CheckContextExtensions. 
- Hints const& context_hints = environment()->current_context_hints(); - for (int i = 0; i < depth; i++) { - ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i, - kSerializeSlot); - } -} - -void SerializerForBackgroundCompilation::ProcessLdaLookupGlobalSlot( - BytecodeArrayIterator* iterator) { - ProcessCheckContextExtensions(iterator->GetUnsignedImmediateOperand(2)); - // TODO(neis): BytecodeGraphBilder may insert a JSLoadGlobal. - VisitLdaGlobal(iterator); -} - -void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot( - BytecodeArrayIterator* iterator) { - ProcessLdaLookupGlobalSlot(iterator); -} - -void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof( - BytecodeArrayIterator* iterator) { - ProcessLdaLookupGlobalSlot(iterator); -} - -void SerializerForBackgroundCompilation::VisitStaGlobal( - BytecodeArrayIterator* iterator) { - MakeRef(broker(), - iterator->GetConstantForIndexOperand(0, broker()->isolate())); - FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessGlobalAccess(slot, false); -} - -void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot( - BytecodeArrayIterator* iterator) { - const int slot_index = iterator->GetIndexOperand(1); - const int depth = iterator->GetUnsignedImmediateOperand(2); - MakeRef(broker(), - iterator->GetConstantForIndexOperand(0, broker()->isolate())); - ProcessCheckContextExtensions(depth); - environment()->accumulator_hints() = Hints(); - ProcessContextAccess(environment()->current_context_hints(), slot_index, - depth, kIgnoreSlot); -} - -void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot( - BytecodeArrayIterator* iterator) { - ProcessLdaLookupContextSlot(iterator); -} - -void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof( - BytecodeArrayIterator* iterator) { - ProcessLdaLookupContextSlot(iterator); -} - -void SerializerForBackgroundCompilation::ProcessCompareOperation( - FeedbackSlot slot) { - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(function().feedback_vector(), slot); - ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForCompareOperation(source); - if (BailoutOnUninitialized(feedback)) return; - environment()->accumulator_hints() = Hints(); -} - -void SerializerForBackgroundCompilation::ProcessForIn(FeedbackSlot slot) { - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(feedback_vector(), slot); - ProcessedFeedback const& feedback = broker()->ProcessFeedbackForForIn(source); - if (BailoutOnUninitialized(feedback)) return; - environment()->accumulator_hints() = Hints(); -} - -void SerializerForBackgroundCompilation::ProcessUnaryOrBinaryOperation( - FeedbackSlot slot, bool honor_bailout_on_uninitialized) { - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(feedback_vector(), slot); - // Internally V8 uses binary op feedback also for unary ops. 
- ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForBinaryOperation(source); - if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) { - return; - } - environment()->accumulator_hints() = Hints(); -} - -void SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess( - Hints* receiver, base::Optional<MapRef> receiver_map, - MapRef lookup_start_object_map, NameRef const& name, AccessMode access_mode, - base::Optional<JSObjectRef> concrete_receiver, Hints* result_hints) { - DCHECK_IMPLIES(concrete_receiver.has_value(), receiver_map.has_value()); - - { - Handle<Map> map; - if (!Map::TryUpdate(broker()->isolate(), lookup_start_object_map.object()) - .ToHandle(&map) || - map->is_abandoned_prototype_map()) { - return; - } - lookup_start_object_map = MakeRef(broker(), map); - } - CHECK(!lookup_start_object_map.is_deprecated()); - - // For JSNativeContextSpecialization::InferRootMap - lookup_start_object_map.SerializeRootMap(); - - // For JSNativeContextSpecialization::ReduceNamedAccess. - JSGlobalProxyRef global_proxy = - broker()->target_native_context().global_proxy_object(); - JSGlobalObjectRef global_object = - broker()->target_native_context().global_object(); - if (lookup_start_object_map.equals(global_proxy.map())) { - base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell( - name, SerializationPolicy::kSerializeIfNeeded); - if (cell.has_value()) { - CHECK(cell->Serialize()); - if (access_mode == AccessMode::kLoad) { - result_hints->AddConstant( - handle(cell->object()->value(), broker()->isolate()), zone(), - broker()); - } - } - } - - PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( - lookup_start_object_map, name, access_mode, dependencies(), - SerializationPolicy::kSerializeIfNeeded); - - // For JSNativeContextSpecialization::InlinePropertySetterCall - // and InlinePropertyGetterCall. - if ((access_info.IsFastAccessorConstant() || - access_info.IsDictionaryProtoAccessorConstant()) && - !access_info.constant().is_null()) { - if (access_info.constant()->IsJSFunction()) { - JSFunctionRef function = - MakeRef(broker(), Handle<JSFunction>::cast(access_info.constant())); - - if (receiver_map.has_value()) { - // For JSCallReducer and JSInlining(Heuristic). - HintsVector arguments( - {Hints::SingleMap(receiver_map->object(), zone())}, zone()); - // In the case of a setter any added result hints won't make sense, but - // they will be ignored anyways by Process*PropertyAccess due to the - // access mode not being kLoad. - ProcessCalleeForCallOrConstruct( - function.object(), base::nullopt, arguments, - SpeculationMode::kDisallowSpeculation, - kMissingArgumentsAreUndefined, result_hints); - - // For JSCallReducer::ReduceCallApiFunction. - Handle<SharedFunctionInfo> sfi = function.shared().object(); - if (sfi->IsApiFunction()) { - FunctionTemplateInfoRef fti_ref = - MakeRef(broker(), sfi->get_api_func_data()); - if (fti_ref.has_call_code()) { - fti_ref.SerializeCallCode(); - ProcessReceiverMapForApiCall(fti_ref, receiver_map->object()); - } - } - } - } else if (access_info.constant()->IsJSBoundFunction()) { - // For JSCallReducer::ReduceJSCall. 
- JSBoundFunctionRef function = MakeRef( - broker(), Handle<JSBoundFunction>::cast(access_info.constant())); - function.Serialize(); - } else { - FunctionTemplateInfoRef fti = MakeRef( - broker(), FunctionTemplateInfo::cast(*access_info.constant())); - if (fti.has_call_code()) fti.SerializeCallCode(); - } - } else if (access_info.IsModuleExport()) { - // For JSNativeContextSpecialization::BuildPropertyLoad - DCHECK(!access_info.constant().is_null()); - MakeRef(broker(), Handle<Cell>::cast(access_info.constant())); - } - - switch (access_mode) { - case AccessMode::kLoad: - // For PropertyAccessBuilder::TryBuildLoadConstantDataField and - // PropertyAccessBuilder::BuildLoadDictPrototypeConstant - if (access_info.IsFastDataConstant() || - access_info.IsDictionaryProtoDataConstant()) { - base::Optional<JSObjectRef> holder; - Handle<JSObject> prototype; - if (access_info.holder().ToHandle(&prototype)) { - holder = MakeRef(broker(), prototype); - } else { - CHECK_IMPLIES(concrete_receiver.has_value(), - concrete_receiver->map().equals(*receiver_map)); - holder = concrete_receiver; - } - - if (holder.has_value()) { - SerializationPolicy policy = SerializationPolicy::kSerializeIfNeeded; - base::Optional<ObjectRef> constant = - access_info.IsFastDataConstant() - ? holder->GetOwnFastDataProperty( - access_info.field_representation(), - access_info.field_index(), nullptr, policy) - : holder->GetOwnDictionaryProperty( - access_info.dictionary_index(), nullptr, policy); - if (constant.has_value()) { - result_hints->AddConstant(constant->object(), zone(), broker()); - } - } - } - break; - case AccessMode::kStore: - case AccessMode::kStoreInLiteral: - // For MapInference (StoreField case). - if (access_info.IsDataField() || access_info.IsFastDataConstant()) { - Handle<Map> transition_map; - if (access_info.transition_map().ToHandle(&transition_map)) { - MapRef map_ref = MakeRef(broker(), transition_map); - TRACE_BROKER(broker(), "Propagating transition map " - << map_ref << " to receiver hints."); - receiver->AddMap(transition_map, zone(), broker_, false); - } - } - break; - case AccessMode::kHas: - break; - } -} - -void SerializerForBackgroundCompilation::ProcessMinimorphicPropertyAccess( - MinimorphicLoadPropertyAccessFeedback const& feedback, - FeedbackSource const& source) { - broker()->GetPropertyAccessInfo(feedback, source, - SerializationPolicy::kSerializeIfNeeded); -} - -void SerializerForBackgroundCompilation::VisitLdaKeyedProperty( - BytecodeArrayIterator* iterator) { - Hints const& key = environment()->accumulator_hints(); - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kLoad, true); -} - -void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess( - Hints* receiver, Hints const& key, FeedbackSlot slot, - AccessMode access_mode, bool honor_bailout_on_uninitialized) { - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(feedback_vector(), slot); - ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForPropertyAccess(source, access_mode, - base::nullopt); - if (honor_bailout_on_uninitialized && BailoutOnUninitialized(feedback)) { - return; - } - - Hints new_accumulator_hints; - switch (feedback.kind()) { - case ProcessedFeedback::kElementAccess: - ProcessElementAccess(*receiver, key, feedback.AsElementAccess(), - access_mode); - break; - case ProcessedFeedback::kNamedAccess: - ProcessNamedAccess(receiver, 
feedback.AsNamedAccess(), access_mode, - &new_accumulator_hints); - break; - case ProcessedFeedback::kInsufficient: - break; - default: - UNREACHABLE(); - } - - if (access_mode == AccessMode::kLoad) { - environment()->accumulator_hints() = new_accumulator_hints; - } -} - -void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( - Hints* receiver, NameRef const& name, FeedbackSlot slot, - AccessMode access_mode) { - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(feedback_vector(), slot); - ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name); - if (BailoutOnUninitialized(feedback)) return; - - Hints new_accumulator_hints; - switch (feedback.kind()) { - case ProcessedFeedback::kNamedAccess: - DCHECK(name.equals(feedback.AsNamedAccess().name())); - ProcessNamedAccess(receiver, feedback.AsNamedAccess(), access_mode, - &new_accumulator_hints); - break; - case ProcessedFeedback::kMinimorphicPropertyAccess: - DCHECK(name.equals(feedback.AsMinimorphicPropertyAccess().name())); - ProcessMinimorphicPropertyAccess(feedback.AsMinimorphicPropertyAccess(), - source); - break; - case ProcessedFeedback::kInsufficient: - break; - default: - UNREACHABLE(); - } - - if (access_mode == AccessMode::kLoad) { - environment()->accumulator_hints() = new_accumulator_hints; - } -} - -void SerializerForBackgroundCompilation::ProcessNamedSuperPropertyAccess( - Hints* receiver, NameRef const& name, FeedbackSlot slot, - AccessMode access_mode) { - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(feedback_vector(), slot); - ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name); - if (BailoutOnUninitialized(feedback)) return; - - Hints new_accumulator_hints; - switch (feedback.kind()) { - case ProcessedFeedback::kNamedAccess: - DCHECK(name.equals(feedback.AsNamedAccess().name())); - ProcessNamedSuperAccess(receiver, feedback.AsNamedAccess(), access_mode, - &new_accumulator_hints); - break; - case ProcessedFeedback::kMinimorphicPropertyAccess: - DCHECK(name.equals(feedback.AsMinimorphicPropertyAccess().name())); - ProcessMinimorphicPropertyAccess(feedback.AsMinimorphicPropertyAccess(), - source); - break; - case ProcessedFeedback::kInsufficient: - break; - default: - UNREACHABLE(); - } - - if (access_mode == AccessMode::kLoad) { - environment()->accumulator_hints() = new_accumulator_hints; - } -} - -void SerializerForBackgroundCompilation::ProcessNamedAccess( - Hints* receiver, NamedAccessFeedback const& feedback, - AccessMode access_mode, Hints* result_hints) { - for (Handle<Map> map : feedback.maps()) { - MapRef map_ref = MakeRef(broker(), map); - TRACE_BROKER(broker(), "Propagating feedback map " - << map_ref << " to receiver hints."); - receiver->AddMap(map, zone(), broker_, false); - } - - for (Handle<Map> map : receiver->maps()) { - MapRef map_ref = MakeRef(broker(), map); - ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref, - feedback.name(), access_mode, - base::nullopt, result_hints); - } - - for (Handle<Object> hint : receiver->constants()) { - ObjectRef object = MakeRef(broker(), hint); - if (access_mode == AccessMode::kLoad && object.IsJSObject()) { - MapRef map_ref = object.AsJSObject().map(); - ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref, - feedback.name(), access_mode, - object.AsJSObject(), result_hints); - } - // For JSNativeContextSpecialization::ReduceJSLoadNamed. 
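Earlier in ProcessMapForNamedPropertyAccess the lookup-start map is first migrated to its up-to-date version (Map::TryUpdate) and abandoned prototype maps are skipped. A standalone sketch of that gatekeeping step, with hypothetical fields in place of the real map state:

#include <optional>

// Hypothetical snapshot of the map state that matters for the early-out.
struct ToyMap {
  bool deprecated;
  bool abandoned_prototype_map;
  const ToyMap* updated;  // Migration target when deprecated, else nullptr.
};

// Returns the map to analyze, or nullopt when the access should simply be
// skipped, mirroring the early return at the top of
// ProcessMapForNamedPropertyAccess.
std::optional<const ToyMap*> MapForAnalysis(const ToyMap* map) {
  if (map->deprecated) {
    if (map->updated == nullptr) return std::nullopt;  // TryUpdate failed.
    map = map->updated;
  }
  if (map->abandoned_prototype_map) return std::nullopt;
  return map;
}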
- if (access_mode == AccessMode::kLoad && object.IsJSFunction() && - feedback.name().equals(MakeRef( - broker(), broker()->isolate()->factory()->prototype_string()))) { - JSFunctionRef function = object.AsJSFunction(); - function.Serialize(); - if (result_hints != nullptr && function.has_prototype()) { - result_hints->AddConstant(function.prototype().object(), zone(), - broker()); - } - } - // TODO(neis): Also record accumulator hint for string.length and maybe - // more? - } -} - -void SerializerForBackgroundCompilation::ProcessNamedSuperAccess( - Hints* receiver, NamedAccessFeedback const& feedback, - AccessMode access_mode, Hints* result_hints) { - MapsSet receiver_maps = receiver->maps(); - for (Handle<Map> receiver_map : receiver_maps) { - MapRef receiver_map_ref = MakeRef(broker(), receiver_map); - for (Handle<Map> feedback_map : feedback.maps()) { - MapRef feedback_map_ref = MakeRef(broker(), feedback_map); - ProcessMapForNamedPropertyAccess( - receiver, receiver_map_ref, feedback_map_ref, feedback.name(), - access_mode, base::nullopt, result_hints); - } - } - if (receiver_maps.IsEmpty()) { - for (Handle<Map> feedback_map : feedback.maps()) { - MapRef feedback_map_ref = MakeRef(broker(), feedback_map); - ProcessMapForNamedPropertyAccess( - receiver, base::nullopt, feedback_map_ref, feedback.name(), - access_mode, base::nullopt, result_hints); - } - } -} - -void SerializerForBackgroundCompilation::ProcessElementAccess( - Hints const& receiver, Hints const& key, - ElementAccessFeedback const& feedback, AccessMode access_mode) { - for (auto const& group : feedback.transition_groups()) { - for (Handle<Map> map_handle : group) { - MapRef map = MakeRef(broker(), map_handle); - switch (access_mode) { - case AccessMode::kHas: - case AccessMode::kLoad: - map.SerializePrototype(); - break; - case AccessMode::kStore: - map.SerializeForElementStore(); - break; - case AccessMode::kStoreInLiteral: - // This operation is fairly local and simple, nothing to serialize. - break; - } - } - } - - for (Handle<Object> hint : receiver.constants()) { - ObjectRef receiver_ref = MakeRef(broker(), hint); - - // For JSNativeContextSpecialization::InferRootMap - if (receiver_ref.IsHeapObject()) { - receiver_ref.AsHeapObject().map().SerializeRootMap(); - } - - // For JSNativeContextSpecialization::ReduceElementAccess. - if (receiver_ref.IsJSTypedArray()) { - receiver_ref.AsJSTypedArray().Serialize(); - } - - // For JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant. - if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) { - for (Handle<Object> hint : key.constants()) { - ObjectRef key_ref = MakeRef(broker(), hint); - // TODO(neis): Do this for integer-HeapNumbers too? - if (key_ref.IsSmi() && key_ref.AsSmi() >= 0) { - base::Optional<ObjectRef> element; - if (receiver_ref.IsJSObject()) { - JSObjectRef jsobject_ref = receiver_ref.AsJSObject(); - jsobject_ref.SerializeElements(); - element = receiver_ref.AsJSObject().GetOwnConstantElement( - jsobject_ref.elements(kRelaxedLoad).value(), key_ref.AsSmi(), - nullptr, SerializationPolicy::kSerializeIfNeeded); - if (!element.has_value() && receiver_ref.IsJSArray()) { - // We didn't find a constant element, but if the receiver is a - // cow-array we can exploit the fact that any future write to the - // element will replace the whole elements storage. 
- JSArrayRef array_ref = receiver_ref.AsJSArray(); - array_ref.GetOwnCowElement( - array_ref.elements(kRelaxedLoad).value(), key_ref.AsSmi(), - SerializationPolicy::kSerializeIfNeeded); - } - } else if (receiver_ref.IsString()) { - element = receiver_ref.AsString().GetCharAsStringOrUndefined( - key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded); - } - } - } - } - } - - // For JSNativeContextSpecialization::InferRootMap - for (Handle<Map> map : receiver.maps()) { - MapRef map_ref = MakeRef(broker(), map); - map_ref.SerializeRootMap(); - } -} - -void SerializerForBackgroundCompilation::VisitLdaNamedProperty( - BytecodeArrayIterator* iterator) { - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - NameRef name = - MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand( - 1, broker()->isolate()))); - FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kLoad); -} - -void SerializerForBackgroundCompilation::VisitLdaNamedPropertyFromSuper( - BytecodeArrayIterator* iterator) { - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - NameRef name = - MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand( - 1, broker()->isolate()))); - FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessNamedSuperPropertyAccess(receiver, name, slot, AccessMode::kLoad); -} - -void SerializerForBackgroundCompilation::VisitStaNamedProperty( - BytecodeArrayIterator* iterator) { - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - NameRef name = - MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand( - 1, broker()->isolate()))); - FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStore); -} - -void SerializerForBackgroundCompilation::VisitStaNamedOwnProperty( - BytecodeArrayIterator* iterator) { - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - NameRef name = - MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand( - 1, broker()->isolate()))); - FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStoreInLiteral); -} - -void SerializerForBackgroundCompilation::VisitTestIn( - BytecodeArrayIterator* iterator) { - Hints* receiver = &environment()->accumulator_hints(); - Hints const& key = register_hints(iterator->GetRegisterOperand(0)); - FeedbackSlot slot = iterator->GetSlotOperand(1); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kHas, false); -} - -// For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance. 
-void SerializerForBackgroundCompilation::ProcessConstantForOrdinaryHasInstance( - HeapObjectRef const& constructor, bool* walk_prototypes) { - if (constructor.IsJSBoundFunction()) { - constructor.AsJSBoundFunction().Serialize(); - ProcessConstantForInstanceOf( - constructor.AsJSBoundFunction().bound_target_function().value(), - walk_prototypes); - } else if (constructor.IsJSFunction()) { - constructor.AsJSFunction().Serialize(); - *walk_prototypes = - *walk_prototypes || - (constructor.map().has_prototype_slot() && - constructor.AsJSFunction().has_prototype() && - !constructor.AsJSFunction().PrototypeRequiresRuntimeLookup()); - } -} - -void SerializerForBackgroundCompilation::ProcessConstantForInstanceOf( - ObjectRef const& constructor, bool* walk_prototypes) { - if (!constructor.IsHeapObject()) return; - HeapObjectRef constructor_heap_object = constructor.AsHeapObject(); - - PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo( - constructor_heap_object.map(), - MakeRef(broker(), broker()->isolate()->factory()->has_instance_symbol()), - AccessMode::kLoad, dependencies(), - SerializationPolicy::kSerializeIfNeeded); - - if (access_info.IsNotFound()) { - ProcessConstantForOrdinaryHasInstance(constructor_heap_object, - walk_prototypes); - } else if (access_info.IsFastDataConstant()) { - Handle<JSObject> holder; - bool found_on_proto = access_info.holder().ToHandle(&holder); - JSObjectRef holder_ref = - found_on_proto ? MakeRef(broker(), holder) : constructor.AsJSObject(); - base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty( - access_info.field_representation(), access_info.field_index(), nullptr, - SerializationPolicy::kSerializeIfNeeded); - CHECK(constant.has_value()); - if (constant->IsJSFunction()) { - JSFunctionRef function = constant->AsJSFunction(); - function.Serialize(); - if (function.shared().HasBuiltinId() && - function.shared().builtin_id() == - Builtin::kFunctionPrototypeHasInstance) { - // For JSCallReducer::ReduceFunctionPrototypeHasInstance. - ProcessConstantForOrdinaryHasInstance(constructor_heap_object, - walk_prototypes); - } - } - } -} - -void SerializerForBackgroundCompilation::VisitTestInstanceOf( - BytecodeArrayIterator* iterator) { - Hints const& lhs = register_hints(iterator->GetRegisterOperand(0)); - Hints rhs = environment()->accumulator_hints(); - FeedbackSlot slot = iterator->GetSlotOperand(1); - - if (slot.IsInvalid() || feedback_vector().is_null()) return; - FeedbackSource source(feedback_vector(), slot); - ProcessedFeedback const& feedback = - broker()->ProcessFeedbackForInstanceOf(source); - - // Incorporate feedback (about rhs) into hints copy to simplify processing. - // TODO(neis): Propagate into original hints? 
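ProcessConstantForInstanceOf above encodes the instanceof dispatch: when the constructor's map has no @@hasInstance property, or when @@hasInstance is the default Function.prototype[Symbol.hasInstance] builtin, the lowering falls back to OrdinaryHasInstance and may still need to analyze the instance's prototype chain. A standalone decision sketch (the enum is illustrative, not a V8 type):

// Illustrative classification of the @@hasInstance lookup result.
enum class HasInstanceLookup {
  kNotFound,        // No @@hasInstance property on the constructor.
  kDefaultBuiltin,  // Function.prototype[Symbol.hasInstance].
  kCustom,          // A user-defined handler.
};

// True when instanceof reduces to OrdinaryHasInstance, i.e. when a
// prototype-chain walk may still be required, as in the code above.
bool FallsBackToOrdinaryHasInstance(HasInstanceLookup lookup) {
  switch (lookup) {
    case HasInstanceLookup::kNotFound:
    case HasInstanceLookup::kDefaultBuiltin:
      return true;
    case HasInstanceLookup::kCustom:
      return false;
  }
  return false;  // Unreachable; silences -Wreturn-type.
}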
- if (!feedback.IsInsufficient()) { - InstanceOfFeedback const& rhs_feedback = feedback.AsInstanceOf(); - if (rhs_feedback.value().has_value()) { - rhs = rhs.Copy(zone()); - Handle<JSObject> constructor = rhs_feedback.value()->object(); - rhs.AddConstant(constructor, zone(), broker()); - } - } - - bool walk_prototypes = false; - for (Handle<Object> constant : rhs.constants()) { - ProcessConstantForInstanceOf(MakeRef(broker(), constant), &walk_prototypes); - } - if (walk_prototypes) ProcessHintsForHasInPrototypeChain(lhs); - - environment()->accumulator_hints() = Hints(); -} - -void SerializerForBackgroundCompilation::VisitToNumeric( - BytecodeArrayIterator* iterator) { - FeedbackSlot slot = iterator->GetSlotOperand(0); - ProcessUnaryOrBinaryOperation(slot, false); -} - -void SerializerForBackgroundCompilation::VisitToNumber( - BytecodeArrayIterator* iterator) { - FeedbackSlot slot = iterator->GetSlotOperand(0); - ProcessUnaryOrBinaryOperation(slot, false); -} - -void SerializerForBackgroundCompilation::VisitThrowReferenceErrorIfHole( - BytecodeArrayIterator* iterator) { - MakeRef(broker(), - iterator->GetConstantForIndexOperand(0, broker()->isolate())); -} - -void SerializerForBackgroundCompilation::VisitStaKeyedProperty( - BytecodeArrayIterator* iterator) { - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - Hints const& key = register_hints(iterator->GetRegisterOperand(1)); - FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStore, true); -} - -void SerializerForBackgroundCompilation::VisitStaInArrayLiteral( - BytecodeArrayIterator* iterator) { - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - Hints const& key = register_hints(iterator->GetRegisterOperand(1)); - FeedbackSlot slot = iterator->GetSlotOperand(2); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral, - true); -} - -void SerializerForBackgroundCompilation::VisitStaDataPropertyInLiteral( - BytecodeArrayIterator* iterator) { - Hints* receiver = &register_hints(iterator->GetRegisterOperand(0)); - Hints const& key = register_hints(iterator->GetRegisterOperand(1)); - FeedbackSlot slot = iterator->GetSlotOperand(3); - ProcessKeyedPropertyAccess(receiver, key, slot, AccessMode::kStoreInLiteral, - false); -} - -#define DEFINE_CLEAR_ACCUMULATOR(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - environment()->accumulator_hints() = Hints(); \ - } -CLEAR_ACCUMULATOR_LIST(DEFINE_CLEAR_ACCUMULATOR) -#undef DEFINE_CLEAR_ACCUMULATOR - -#define DEFINE_CONDITIONAL_JUMP(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - ProcessJump(iterator); \ - } -CONDITIONAL_JUMPS_LIST(DEFINE_CONDITIONAL_JUMP) -#undef DEFINE_CONDITIONAL_JUMP - -#define DEFINE_UNCONDITIONAL_JUMP(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - ProcessJump(iterator); \ - environment()->Kill(); \ - } -UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP) -#undef DEFINE_UNCONDITIONAL_JUMP - -#define DEFINE_IGNORE(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) {} -IGNORED_BYTECODE_LIST(DEFINE_IGNORE) -#undef DEFINE_IGNORE - -#define DEFINE_UNREACHABLE(name, ...) 
\ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - UNREACHABLE(); \ - } -UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE) -#undef DEFINE_UNREACHABLE - -#define DEFINE_KILL(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - environment()->Kill(); \ - } -KILL_ENVIRONMENT_LIST(DEFINE_KILL) -#undef DEFINE_KILL - -#define DEFINE_BINARY_OP(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - FeedbackSlot slot = iterator->GetSlotOperand(1); \ - ProcessUnaryOrBinaryOperation(slot, true); \ - } -BINARY_OP_LIST(DEFINE_BINARY_OP) -#undef DEFINE_BINARY_OP - -#define DEFINE_COMPARE_OP(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - FeedbackSlot slot = iterator->GetSlotOperand(1); \ - ProcessCompareOperation(slot); \ - } -COMPARE_OP_LIST(DEFINE_COMPARE_OP) -#undef DEFINE_COMPARE_OP - -#define DEFINE_UNARY_OP(name, ...) \ - void SerializerForBackgroundCompilation::Visit##name( \ - BytecodeArrayIterator* iterator) { \ - FeedbackSlot slot = iterator->GetSlotOperand(0); \ - ProcessUnaryOrBinaryOperation(slot, true); \ - } -UNARY_OP_LIST(DEFINE_UNARY_OP) -#undef DEFINE_UNARY_OP - -#undef BINARY_OP_LIST -#undef CLEAR_ACCUMULATOR_LIST -#undef COMPARE_OP_LIST -#undef CONDITIONAL_JUMPS_LIST -#undef IGNORED_BYTECODE_LIST -#undef KILL_ENVIRONMENT_LIST -#undef SUPPORTED_BYTECODE_LIST -#undef UNARY_OP_LIST -#undef UNCONDITIONAL_JUMPS_LIST -#undef UNREACHABLE_BYTECODE_LIST - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h deleted file mode 100644 index f01e73452e..0000000000 --- a/deps/v8/src/compiler/serializer-for-background-compilation.h +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_ -#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_ - -#include "src/handles/handles.h" - -namespace v8 { -namespace internal { - -class BytecodeOffset; -class Zone; - -namespace compiler { - -class CompilationDependencies; -class JSHeapBroker; -class ZoneStats; - -enum class SerializerForBackgroundCompilationFlag : uint8_t { - kBailoutOnUninitialized = 1 << 0, - kCollectSourcePositions = 1 << 1, - kAnalyzeEnvironmentLiveness = 1 << 2, - kEnableTurboInlining = 1 << 3, -}; -using SerializerForBackgroundCompilationFlags = - base::Flags<SerializerForBackgroundCompilationFlag>; - -void RunSerializerForBackgroundCompilation( - ZoneStats* zone_stats, JSHeapBroker* broker, - CompilationDependencies* dependencies, Handle<JSFunction> closure, - SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset); - -} // namespace compiler -} // namespace internal -} // namespace v8 - -#endif // V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_ diff --git a/deps/v8/src/compiler/serializer-hints.h b/deps/v8/src/compiler/serializer-hints.h deleted file mode 100644 index 4cb1309832..0000000000 --- a/deps/v8/src/compiler/serializer-hints.h +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2019 the V8 project authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// This file defines the hints classed gathered temporarily by the -// SerializerForBackgroundCompilation while it's analysing the bytecode -// and copying the necessary data to the JSHeapBroker for further usage -// by the reducers that run on the background thread. - -#ifndef V8_COMPILER_SERIALIZER_HINTS_H_ -#define V8_COMPILER_SERIALIZER_HINTS_H_ - -#include "src/compiler/functional-list.h" -#include "src/handles/handles.h" -#include "src/zone/zone-containers.h" - -namespace v8 { -namespace internal { - -class Context; -class Object; -class Map; - -namespace compiler { - -template <typename T, typename EqualTo> -class FunctionalSet { - public: - void Add(T const& elem, Zone* zone) { - for (auto const& l : data_) { - if (equal_to(l, elem)) return; - } - data_.PushFront(elem, zone); - } - - void Union(FunctionalSet<T, EqualTo> other, Zone* zone) { - if (!data_.TriviallyEquals(other.data_)) { - // Choose the larger side as tail. - if (data_.Size() < other.data_.Size()) std::swap(data_, other.data_); - for (auto const& elem : other.data_) Add(elem, zone); - } - } - - bool IsEmpty() const { return data_.begin() == data_.end(); } - - // Warning: quadratic time complexity. - bool Includes(FunctionalSet<T, EqualTo> const& other) const { - return std::all_of(other.begin(), other.end(), [&](T const& other_elem) { - return std::any_of(this->begin(), this->end(), [&](T const& this_elem) { - return equal_to(this_elem, other_elem); - }); - }); - } - bool operator==(const FunctionalSet<T, EqualTo>& other) const { - return this->data_.TriviallyEquals(other.data_) || - (this->data_.Size() == other.data_.Size() && this->Includes(other) && - other.Includes(*this)); - } - bool operator!=(const FunctionalSet<T, EqualTo>& other) const { - return !(*this == other); - } - - size_t Size() const { return data_.Size(); } - - using iterator = typename FunctionalList<T>::iterator; - - iterator begin() const { return data_.begin(); } - iterator end() const { return data_.end(); } - - private: - static EqualTo equal_to; - FunctionalList<T> data_; -}; - -template <typename T, typename EqualTo> -EqualTo FunctionalSet<T, EqualTo>::equal_to; - -struct VirtualContext { - unsigned int distance; - Handle<Context> context; - - VirtualContext(unsigned int distance_in, Handle<Context> context_in) - : distance(distance_in), context(context_in) { - CHECK_GT(distance, 0); - } - bool operator==(const VirtualContext& other) const { - return context.equals(other.context) && distance == other.distance; - } -}; - -class VirtualClosure; -struct VirtualBoundFunction; - -using ConstantsSet = FunctionalSet<Handle<Object>, Handle<Object>::equal_to>; -using VirtualContextsSet = - FunctionalSet<VirtualContext, std::equal_to<VirtualContext>>; -using MapsSet = FunctionalSet<Handle<Map>, Handle<Map>::equal_to>; -using VirtualClosuresSet = - FunctionalSet<VirtualClosure, std::equal_to<VirtualClosure>>; -using VirtualBoundFunctionsSet = - FunctionalSet<VirtualBoundFunction, std::equal_to<VirtualBoundFunction>>; - -struct HintsImpl; -class JSHeapBroker; - -class Hints { - public: - Hints() = default; // Empty. - static Hints SingleConstant(Handle<Object> constant, Zone* zone); - static Hints SingleMap(Handle<Map> map, Zone* zone); - - // For inspection only. 
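The FunctionalSet defined above is a persistent set over a shared cons-list: Add does a linear membership scan and then prepends, Union keeps the larger list as the shared tail, and the quadratic Includes is acceptable because hint sets are capped (kMaxHintsSize is 50, see below). A standalone sketch of the same structure, with shared_ptr standing in for zone allocation:

#include <cstddef>
#include <memory>
#include <utility>

template <typename T>
class PersistentSet {
 public:
  // Linear membership scan, then an O(1) prepend; the existing list is
  // shared with older copies and never mutated.
  void Add(const T& elem) {
    for (auto n = head_; n != nullptr; n = n->next) {
      if (n->value == elem) return;
    }
    head_ = std::make_shared<Node>(Node{elem, head_});
  }

  // Keeps the larger list as the shared tail and re-adds the smaller side,
  // like FunctionalSet::Union above (sizes here are recomputed; the real
  // FunctionalList caches its size).
  void Union(PersistentSet other) {
    if (Size() < other.Size()) std::swap(*this, other);
    for (auto n = other.head_; n != nullptr; n = n->next) Add(n->value);
  }

  size_t Size() const {
    size_t size = 0;
    for (auto n = head_; n != nullptr; n = n->next) ++size;
    return size;
  }

 private:
  struct Node {
    T value;
    std::shared_ptr<Node> next;
  };
  std::shared_ptr<Node> head_;
};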
- ConstantsSet constants() const; - MapsSet maps() const; - VirtualClosuresSet virtual_closures() const; - VirtualContextsSet virtual_contexts() const; - VirtualBoundFunctionsSet virtual_bound_functions() const; - - bool IsEmpty() const; - bool operator==(Hints const& other) const; - bool operator!=(Hints const& other) const; - -#ifdef ENABLE_SLOW_DCHECKS - bool Includes(Hints const& other) const; -#endif - - Hints Copy(Zone* zone) const; // Shallow. - Hints CopyToParentZone(Zone* zone, JSHeapBroker* broker) const; // Deep. - - // As an optimization, empty hints can be represented as {impl_} being - // {nullptr}, i.e., as not having allocated a {HintsImpl} object. As a - // consequence, some operations need to force allocation prior to doing their - // job. In particular, backpropagation from a child serialization - // can only work if the hints were already allocated in the parent zone. - bool IsAllocated() const { return impl_ != nullptr; } - void EnsureShareable(Zone* zone) { EnsureAllocated(zone, false); } - - // Make {this} an alias of {other}. - void Reset(Hints* other, Zone* zone); - - void Merge(Hints const& other, Zone* zone, JSHeapBroker* broker); - - // Destructive updates: if the hints are shared by several registers, - // then the following updates will be seen by all of them: - void AddConstant(Handle<Object> constant, Zone* zone, JSHeapBroker* broker); - void AddMap(Handle<Map> map, Zone* zone, JSHeapBroker* broker, - bool check_zone_equality = true); - void AddVirtualClosure(VirtualClosure const& virtual_closure, Zone* zone, - JSHeapBroker* broker); - void AddVirtualContext(VirtualContext const& virtual_context, Zone* zone, - JSHeapBroker* broker); - void AddVirtualBoundFunction(VirtualBoundFunction const& bound_function, - Zone* zone, JSHeapBroker* broker); - void Add(Hints const& other, Zone* zone, JSHeapBroker* broker); - - private: - friend std::ostream& operator<<(std::ostream&, const Hints& hints); - HintsImpl* impl_ = nullptr; - - void EnsureAllocated(Zone* zone, bool check_zone_equality = true); - - // Helper for Add and Merge. 
- bool Union(Hints const& other); - - static const size_t kMaxHintsSize = 50; - static_assert(kMaxHintsSize >= 1, "must allow for at least one hint"); -}; - -using HintsVector = ZoneVector<Hints>; - -} // namespace compiler -} // namespace internal -} // namespace v8 - -#endif // V8_COMPILER_SERIALIZER_HINTS_H_ diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc index 381bf2a75a..529f1cc7bb 100644 --- a/deps/v8/src/compiler/typer.cc +++ b/deps/v8/src/compiler/typer.cc @@ -1510,10 +1510,6 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) { return Type::NonInternal(); } JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction(); - if (!function.serialized()) { - TRACE_BROKER_MISSING(t->broker(), "data for function " << function); - return Type::NonInternal(); - } if (!function.shared().HasBuiltinId()) { return Type::NonInternal(); } diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc index 39f54763ba..a1f9b93dce 100644 --- a/deps/v8/src/compiler/types.cc +++ b/deps/v8/src/compiler/types.cc @@ -258,7 +258,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case JS_PROMISE_TYPE: #if V8_ENABLE_WEBASSEMBLY case WASM_ARRAY_TYPE: - case WASM_EXCEPTION_OBJECT_TYPE: + case WASM_TAG_OBJECT_TYPE: case WASM_GLOBAL_OBJECT_TYPE: case WASM_INSTANCE_OBJECT_TYPE: case WASM_MEMORY_OBJECT_TYPE: diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc index 5010a221a9..f91c21fd1d 100644 --- a/deps/v8/src/compiler/wasm-compiler.cc +++ b/deps/v8/src/compiler/wasm-compiler.cc @@ -244,6 +244,21 @@ class WasmGraphAssembler : public GraphAssembler { // Rule of thumb: if access to a given field in an object is required in // at least two places, put a helper function here. + Node* Allocate(int size) { + AllowLargeObjects allow_large = size < kMaxRegularHeapObjectSize + ? 
AllowLargeObjects::kFalse + : AllowLargeObjects::kTrue; + return Allocate(Int32Constant(size), allow_large); + } + + Node* Allocate(Node* size, + AllowLargeObjects allow_large = AllowLargeObjects::kTrue) { + return AddNode( + graph()->NewNode(simplified_.AllocateRaw( + Type::Any(), AllocationType::kYoung, allow_large), + size, effect(), control())); + } + Node* LoadFromObject(MachineType type, Node* base, Node* offset) { return AddNode(graph()->NewNode( simplified_.LoadFromObject(ObjectAccess(type, kNoWriteBarrier)), base, @@ -2349,12 +2364,11 @@ Node* WasmGraphBuilder::MemoryGrow(Node* input) { return diamond_result; } -Node* WasmGraphBuilder::Throw(uint32_t exception_index, - const wasm::WasmException* exception, +Node* WasmGraphBuilder::Throw(uint32_t tag_index, const wasm::WasmTag* tag, const base::Vector<Node*> values, wasm::WasmCodePosition position) { needs_stack_check_ = true; - uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception); + uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(tag); Node* values_array = gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAllocateFixedArray, @@ -2362,7 +2376,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index, SetSourcePosition(values_array, position); uint32_t index = 0; - const wasm::WasmExceptionSig* sig = exception->sig; + const wasm::WasmTagSig* sig = tag->sig; MachineOperatorBuilder* m = mcgraph()->machine(); for (size_t i = 0; i < sig->parameter_count(); ++i) { Node* value = values[i]; @@ -2414,7 +2428,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index, } DCHECK_EQ(encoded_size, index); - Node* exception_tag = LoadExceptionTagFromTable(exception_index); + Node* exception_tag = LoadTagFromTable(tag_index); Node* throw_call = gasm_->CallRuntimeStub(wasm::WasmCode::kWasmThrow, exception_tag, values_array); @@ -2471,11 +2485,10 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag, return gasm_->WordEqual(caught_tag, expected_tag); } -Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) { - Node* exceptions_table = - LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer()); - Node* tag = - gasm_->LoadFixedArrayElementPtr(exceptions_table, exception_index); +Node* WasmGraphBuilder::LoadTagFromTable(uint32_t tag_index) { + Node* tags_table = + LOAD_INSTANCE_FIELD(TagsTable, MachineType::TaggedPointer()); + Node* tag = gasm_->LoadFixedArrayElementPtr(tags_table, tag_index); return tag; } @@ -2487,14 +2500,14 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) { } Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj, - const wasm::WasmException* exception, + const wasm::WasmTag* tag, base::Vector<Node*> values) { Node* values_array = gasm_->CallBuiltin( Builtin::kWasmGetOwnProperty, Operator::kEliminatable, except_obj, LOAD_ROOT(wasm_exception_values_symbol, wasm_exception_values_symbol), LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer())); uint32_t index = 0; - const wasm::WasmExceptionSig* sig = exception->sig; + const wasm::WasmTagSig* sig = tag->sig; DCHECK_EQ(sig->parameter_count(), values.size()); for (size_t i = 0; i < sig->parameter_count(); ++i) { Node* value; @@ -2544,7 +2557,7 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj, } values[i] = value; } - DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception)); + DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(tag)); return values_array; } @@ -5560,8 +5573,13 @@ Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index, const wasm::StructType* 
@@ -5560,8 +5573,13 @@ Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
                                          const wasm::StructType* type,
                                          Node* rtt,
                                          base::Vector<Node*> fields) {
-  Node* s = gasm_->CallBuiltin(Builtin::kWasmAllocateStructWithRtt,
-                               Operator::kEliminatable, rtt);
+  int size = WasmStruct::Size(type);
+  Node* s = gasm_->Allocate(size);
+  gasm_->StoreMap(s, TNode<Map>::UncheckedCast(rtt));
+  gasm_->StoreToObject(
+      ObjectAccess(MachineType::TaggedPointer(), kNoWriteBarrier), s,
+      wasm::ObjectAccess::ToTagged(JSReceiver::kPropertiesOrHashOffset),
+      LOAD_ROOT(EmptyFixedArray, empty_fixed_array));
   for (uint32_t i = 0; i < type->field_count(); i++) {
     gasm_->StoreStructField(s, type, i, fields[i]);
   }
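Note on the StructNewWithRtt hunk above: struct allocation is now inlined instead of calling the Builtin::kWasmAllocateStructWithRtt stub. The sequence is raw allocation, then the map word, then the properties-or-hash word, then the fields, and the write order matters because the object must look scannable to the GC before any further allocation can happen. A self-contained analogue with malloc standing in for the young-generation bump allocator; HeapStub, RawAllocate, and the field layout are hypothetical:

    #include <cstddef>
    #include <cstdlib>

    // Hypothetical object layout: header words first, then payload fields.
    struct HeapStub {
      void* map;                 // set via StoreMap in the real code
      void* properties_or_hash;  // kPropertiesOrHashOffset, empty fixed array
      long fields[2];            // struct payload
    };

    // Stand-in for the graph assembler's raw allocation.
    void* RawAllocate(std::size_t size) { return std::malloc(size); }

    HeapStub* NewStruct(void* map, void* empty_fixed_array) {
      HeapStub* s = static_cast<HeapStub*>(RawAllocate(sizeof(HeapStub)));
      // Header first: until the map and properties words are written, the
      // object is not in a GC-scannable state, so no allocation may
      // intervene before these stores complete.
      s->map = map;
      s->properties_or_hash = empty_fixed_array;
      // Field initialization follows, as the StoreStructField loop does.
      s->fields[0] = s->fields[1] = 0;
      return s;
    }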
@@ -5600,6 +5618,9 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
       gasm_->CallBuiltin(stub, Operator::kEliminatable, rtt, length,
                          Int32Constant(element_type.element_size_bytes()));
   if (initial_value != nullptr) {
+    // TODO(manoskouk): If the loop is ever removed here, we have to update
+    // ArrayNewWithRtt() in graph-builder-interface.cc to not mark the current
+    // loop as non-innermost.
     auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
     auto done = gasm_->MakeLabel();
     Node* start_offset =
@@ -7897,6 +7918,8 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
   std::vector<WasmLoopInfo> loop_infos;
 
+  wasm::WasmFeatures unused_detected_features;
+  if (!detected) detected = &unused_detected_features;
   if (!BuildGraphForWasmFunction(env, func_body, func_index, detected, mcgraph,
                                  &loop_infos, node_origins, source_positions)) {
     return wasm::WasmCompilationResult{};
   }
@@ -7921,8 +7944,13 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
       func_body, env->module, func_index, &loop_infos);
 
   if (counters) {
-    counters->wasm_compile_function_peak_memory_bytes()->AddSample(
-        static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
+    int zone_bytes =
+        static_cast<int>(mcgraph->graph()->zone()->allocation_size());
+    counters->wasm_compile_function_peak_memory_bytes()->AddSample(zone_bytes);
+    if (func_body.end - func_body.start >= 100 * KB) {
+      counters->wasm_compile_huge_function_peak_memory_bytes()->AddSample(
+          zone_bytes);
+    }
   }
   auto result = info.ReleaseWasmCompilationResult();
   CHECK_NOT_NULL(result);  // Compilation expected to succeed.
@@ -8173,7 +8201,7 @@ AssemblerOptions WasmAssemblerOptions() {
   AssemblerOptions options;
   // Relocation info required to serialize {WasmCode} for proper functions.
   options.record_reloc_info_for_serialization = true;
-  options.enable_root_array_delta_access = false;
+  options.enable_root_relative_access = false;
   return options;
 }
@@ -8181,7 +8209,7 @@ AssemblerOptions WasmStubAssemblerOptions() {
   AssemblerOptions options;
   // Relocation info not necessary because stubs are not serialized.
   options.record_reloc_info_for_serialization = false;
-  options.enable_root_array_delta_access = false;
+  options.enable_root_relative_access = false;
   return options;
 }
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index e1993fbf42..71e3111c8c 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -266,15 +266,14 @@ class WasmGraphBuilder {
   Node* Unop(wasm::WasmOpcode opcode, Node* input,
              wasm::WasmCodePosition position = wasm::kNoCodePosition);
   Node* MemoryGrow(Node* input);
-  Node* Throw(uint32_t exception_index, const wasm::WasmException* exception,
+  Node* Throw(uint32_t tag_index, const wasm::WasmTag* tag,
               const base::Vector<Node*> values,
               wasm::WasmCodePosition position);
   Node* Rethrow(Node* except_obj);
   Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
-  Node* LoadExceptionTagFromTable(uint32_t exception_index);
+  Node* LoadTagFromTable(uint32_t tag_index);
   Node* GetExceptionTag(Node* except_obj);
-  Node* GetExceptionValues(Node* except_obj,
-                           const wasm::WasmException* exception,
+  Node* GetExceptionValues(Node* except_obj, const wasm::WasmTag* tag,
                            base::Vector<Node*> values_out);
   bool IsPhiWithMerge(Node* phi, Node* merge);
   bool ThrowsException(Node* node, Node** if_success, Node** if_exception);
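Note on the renamed WasmGraphBuilder entry points in the header above: together they form the catch-lowering surface. A handler loads the expected tag from the instance's tag table, compares it against the caught one, and unpacks the payload on a match. A hedged sketch of how a catch clause drives them; these are V8-internal types, so this compiles only inside the tree, and LowerCatchSketch plus the elided branch plumbing are hypothetical:

    // Sketch only: lowering `catch (tag_index)` against the API above.
    void LowerCatchSketch(compiler::WasmGraphBuilder* builder,
                          compiler::Node* except_obj, uint32_t tag_index,
                          const wasm::WasmTag* tag,
                          base::Vector<compiler::Node*> values_out) {
      compiler::Node* caught_tag = builder->GetExceptionTag(except_obj);
      compiler::Node* expected_tag = builder->LoadTagFromTable(tag_index);
      compiler::Node* match =
          builder->ExceptionTagEqual(caught_tag, expected_tag);
      // Real code branches on `match` and rethrows on the false edge; on
      // the true edge the payload is decoded back into SSA values.
      (void)match;
      builder->GetExceptionValues(except_obj, tag, values_out);
    }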