// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_CODE_ASSEMBLER_H_
#define V8_COMPILER_CODE_ASSEMBLER_H_

#include <initializer_list>
#include <map>
#include <memory>
#include <sstream>

// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "include/cppgc/source-location.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
#include "src/codegen/tnode.h"
#include "src/heap/heap.h"
#include "src/objects/arguments.h"
#include "src/objects/data-handler.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-proxy.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
#include "src/objects/object-type.h"
#include "src/objects/objects.h"
#include "src/objects/oddball.h"
#include "src/objects/smi.h"
#include "src/objects/tagged-index.h"
#include "src/runtime/runtime.h"
#include "src/utils/allocation.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {

// Forward declarations.
class AsmWasmData;
class AsyncGeneratorRequest;
struct AssemblerOptions;
class BigInt;
class CallInterfaceDescriptor;
class Callable;
class Factory;
class InterpreterData;
class Isolate;
class JSAsyncFunctionObject;
class JSAsyncGeneratorObject;
class JSCollator;
class JSCollection;
class JSDateTimeFormat;
class JSDisplayNames;
class JSListFormat;
class JSLocale;
class JSNumberFormat;
class JSPluralRules;
class JSRegExpStringIterator;
class JSRelativeTimeFormat;
class JSSegmentIterator;
class JSSegmenter;
class JSSegments;
class JSV8BreakIterator;
class JSWeakCollection;
class JSFinalizationRegistry;
class JSWeakMap;
class JSWeakRef;
class JSWeakSet;
class ProfileDataFromFile;
class PromiseCapability;
class PromiseFulfillReactionJobTask;
class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
class Zone;

#define MAKE_FORWARD_DECLARATION(Name) class Name;
TORQUE_DEFINED_CLASS_LIST(MAKE_FORWARD_DECLARATION)
#undef MAKE_FORWARD_DECLARATION

template <typename T>
class Signature;

enum class CheckBounds { kAlways, kDebugOnly };
inline bool NeedsBoundsCheck(CheckBounds check_bounds) {
  switch (check_bounds) {
    case CheckBounds::kAlways:
      return true;
    case CheckBounds::kDebugOnly:
      return DEBUG_BOOL;
  }
}

enum class StoreToObjectWriteBarrier { kNone, kMap, kFull };

class AccessCheckNeeded;
class BigIntBase;
class BigIntWrapper;
class ClassBoilerplate;
class BooleanWrapper;
class CompilationCacheTable;
class Constructor;
class Filler;
class FunctionTemplateRareData;
class HeapNumber;
class InternalizedString;
class JSArgumentsObject;
class JSArrayBufferView;
class JSContextExtensionObject;
class JSError;
class JSSloppyArgumentsObject;
class MapCache;
class NativeContext;
class NumberWrapper;
class ScriptWrapper;
class SloppyArgumentsElements;
class StringWrapper;
class SymbolWrapper;
class Undetectable;
class UniqueName;
class WasmCapiFunctionData;
class WasmTagObject;
class WasmExceptionPackage;
class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
class WasmIndirectFunctionTable;
class WasmJSFunctionData;
class WasmMemoryObject;
class WasmModuleObject;
class WasmTableObject;

template <class T>
struct
ObjectTypeOf {}; #define OBJECT_TYPE_CASE(Name) \ template <> \ struct ObjectTypeOf { \ static const ObjectType value = ObjectType::k##Name; \ }; #define OBJECT_TYPE_STRUCT_CASE(NAME, Name, name) \ template <> \ struct ObjectTypeOf { \ static const ObjectType value = ObjectType::k##Name; \ }; #define OBJECT_TYPE_TEMPLATE_CASE(Name) \ template \ struct ObjectTypeOf> { \ static const ObjectType value = ObjectType::k##Name; \ }; OBJECT_TYPE_CASE(Object) OBJECT_TYPE_CASE(Smi) OBJECT_TYPE_CASE(TaggedIndex) OBJECT_TYPE_CASE(HeapObject) OBJECT_TYPE_LIST(OBJECT_TYPE_CASE) HEAP_OBJECT_ORDINARY_TYPE_LIST(OBJECT_TYPE_CASE) STRUCT_LIST(OBJECT_TYPE_STRUCT_CASE) HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE) #undef OBJECT_TYPE_CASE #undef OBJECT_TYPE_STRUCT_CASE #undef OBJECT_TYPE_TEMPLATE_CASE #if defined(V8_HOST_ARCH_32_BIT) #define BINT_IS_SMI using BInt = Smi; using AtomicInt64 = PairT; using AtomicUint64 = PairT; #elif defined(V8_HOST_ARCH_64_BIT) #define BINT_IS_INTPTR using BInt = IntPtrT; using AtomicInt64 = IntPtrT; using AtomicUint64 = UintPtrT; #else #error Unknown architecture. #endif namespace compiler { class CallDescriptor; class CodeAssemblerLabel; class CodeAssemblerVariable; template class TypedCodeAssemblerVariable; class CodeAssemblerState; class JSGraph; class Node; class RawMachineAssembler; class RawMachineLabel; class SourcePositionTable; using CodeAssemblerVariableList = ZoneVector; using CodeAssemblerCallback = std::function; template class CodeAssemblerParameterizedLabel; // This macro alias allows to use PairT as a macro argument. #define PAIR_TYPE(T1, T2) PairT #define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ V(Float32Equal, BoolT, Float32T, Float32T) \ V(Float32LessThan, BoolT, Float32T, Float32T) \ V(Float32LessThanOrEqual, BoolT, Float32T, Float32T) \ V(Float32GreaterThan, BoolT, Float32T, Float32T) \ V(Float32GreaterThanOrEqual, BoolT, Float32T, Float32T) \ V(Float64Equal, BoolT, Float64T, Float64T) \ V(Float64NotEqual, BoolT, Float64T, Float64T) \ V(Float64LessThan, BoolT, Float64T, Float64T) \ V(Float64LessThanOrEqual, BoolT, Float64T, Float64T) \ V(Float64GreaterThan, BoolT, Float64T, Float64T) \ V(Float64GreaterThanOrEqual, BoolT, Float64T, Float64T) \ /* Use Word32Equal if you need Int32Equal */ \ V(Int32GreaterThan, BoolT, Word32T, Word32T) \ V(Int32GreaterThanOrEqual, BoolT, Word32T, Word32T) \ V(Int32LessThan, BoolT, Word32T, Word32T) \ V(Int32LessThanOrEqual, BoolT, Word32T, Word32T) \ /* Use WordEqual if you need IntPtrEqual */ \ V(IntPtrLessThan, BoolT, WordT, WordT) \ V(IntPtrLessThanOrEqual, BoolT, WordT, WordT) \ V(IntPtrGreaterThan, BoolT, WordT, WordT) \ V(IntPtrGreaterThanOrEqual, BoolT, WordT, WordT) \ /* Use Word32Equal if you need Uint32Equal */ \ V(Uint32LessThan, BoolT, Word32T, Word32T) \ V(Uint32LessThanOrEqual, BoolT, Word32T, Word32T) \ V(Uint32GreaterThan, BoolT, Word32T, Word32T) \ V(Uint32GreaterThanOrEqual, BoolT, Word32T, Word32T) \ /* Use WordEqual if you need UintPtrEqual */ \ V(UintPtrLessThan, BoolT, WordT, WordT) \ V(UintPtrLessThanOrEqual, BoolT, WordT, WordT) \ V(UintPtrGreaterThan, BoolT, WordT, WordT) \ V(UintPtrGreaterThanOrEqual, BoolT, WordT, WordT) #define CODE_ASSEMBLER_BINARY_OP_LIST(V) \ CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \ V(Float64Add, Float64T, Float64T, Float64T) \ V(Float64Sub, Float64T, Float64T, Float64T) \ V(Float64Mul, Float64T, Float64T, Float64T) \ V(Float64Div, Float64T, Float64T, Float64T) \ V(Float64Mod, Float64T, Float64T, Float64T) \ V(Float64Atan2, Float64T, Float64T, Float64T) \ 
V(Float64Pow, Float64T, Float64T, Float64T) \ V(Float64Max, Float64T, Float64T, Float64T) \ V(Float64Min, Float64T, Float64T, Float64T) \ V(Float64InsertLowWord32, Float64T, Float64T, Word32T) \ V(Float64InsertHighWord32, Float64T, Float64T, Word32T) \ V(I8x16Eq, I8x16T, I8x16T, I8x16T) \ V(IntPtrAdd, WordT, WordT, WordT) \ V(IntPtrSub, WordT, WordT, WordT) \ V(IntPtrMul, WordT, WordT, WordT) \ V(IntPtrDiv, IntPtrT, IntPtrT, IntPtrT) \ V(IntPtrAddWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT, IntPtrT) \ V(IntPtrSubWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT, IntPtrT) \ V(Int32Add, Word32T, Word32T, Word32T) \ V(Int32AddWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \ V(Int32Sub, Word32T, Word32T, Word32T) \ V(Int32SubWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \ V(Int32Mul, Word32T, Word32T, Word32T) \ V(Int32MulWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \ V(Int32Div, Int32T, Int32T, Int32T) \ V(Int32Mod, Int32T, Int32T, Int32T) \ V(Int64Add, Word64T, Word64T, Word64T) \ V(Int64Sub, Word64T, Word64T, Word64T) \ V(Int64SubWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T, Int64T) \ V(Int64Mul, Word64T, Word64T, Word64T) \ V(Int64Div, Int64T, Int64T, Int64T) \ V(Int64Mod, Int64T, Int64T, Int64T) \ V(WordOr, WordT, WordT, WordT) \ V(WordAnd, WordT, WordT, WordT) \ V(WordXor, WordT, WordT, WordT) \ V(WordRor, WordT, WordT, IntegralT) \ V(WordShl, WordT, WordT, IntegralT) \ V(WordShr, WordT, WordT, IntegralT) \ V(WordSar, WordT, WordT, IntegralT) \ V(WordSarShiftOutZeros, WordT, WordT, IntegralT) \ V(Word32Or, Word32T, Word32T, Word32T) \ V(Word32And, Word32T, Word32T, Word32T) \ V(Word32Xor, Word32T, Word32T, Word32T) \ V(Word32Ror, Word32T, Word32T, Word32T) \ V(Word32Shl, Word32T, Word32T, Word32T) \ V(Word32Shr, Word32T, Word32T, Word32T) \ V(Word32Sar, Word32T, Word32T, Word32T) \ V(Word32SarShiftOutZeros, Word32T, Word32T, Word32T) \ V(Word64And, Word64T, Word64T, Word64T) \ V(Word64Or, Word64T, Word64T, Word64T) \ V(Word64Xor, Word64T, Word64T, Word64T) \ V(Word64Shl, Word64T, Word64T, Word64T) \ V(Word64Shr, Word64T, Word64T, Word64T) \ V(Word64Sar, Word64T, Word64T, Word64T) TNode Float64Add(TNode a, TNode b); #define CODE_ASSEMBLER_UNARY_OP_LIST(V) \ V(Float64Abs, Float64T, Float64T) \ V(Float64Acos, Float64T, Float64T) \ V(Float64Acosh, Float64T, Float64T) \ V(Float64Asin, Float64T, Float64T) \ V(Float64Asinh, Float64T, Float64T) \ V(Float64Atan, Float64T, Float64T) \ V(Float64Atanh, Float64T, Float64T) \ V(Float64Cos, Float64T, Float64T) \ V(Float64Cosh, Float64T, Float64T) \ V(Float64Exp, Float64T, Float64T) \ V(Float64Expm1, Float64T, Float64T) \ V(Float64Log, Float64T, Float64T) \ V(Float64Log1p, Float64T, Float64T) \ V(Float64Log2, Float64T, Float64T) \ V(Float64Log10, Float64T, Float64T) \ V(Float64Cbrt, Float64T, Float64T) \ V(Float64Neg, Float64T, Float64T) \ V(Float64Sin, Float64T, Float64T) \ V(Float64Sinh, Float64T, Float64T) \ V(Float64Sqrt, Float64T, Float64T) \ V(Float64Tan, Float64T, Float64T) \ V(Float64Tanh, Float64T, Float64T) \ V(Float64ExtractLowWord32, Uint32T, Float64T) \ V(Float64ExtractHighWord32, Uint32T, Float64T) \ V(BitcastTaggedToWord, IntPtrT, Object) \ V(BitcastTaggedToWordForTagAndSmiBits, IntPtrT, AnyTaggedT) \ V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \ V(BitcastWordToTagged, Object, WordT) \ V(BitcastWordToTaggedSigned, Smi, WordT) \ V(TruncateFloat64ToFloat32, Float32T, Float64T) \ V(TruncateFloat64ToWord32, Uint32T, Float64T) \ V(TruncateInt64ToInt32, Int32T, Int64T) \ V(ChangeFloat32ToFloat64, 
Float64T, Float32T)                             \
  V(ChangeFloat64ToUint32, Uint32T, Float64T)                  \
  V(ChangeFloat64ToUint64, Uint64T, Float64T)                  \
  V(ChangeInt32ToFloat64, Float64T, Int32T)                    \
  V(ChangeInt32ToInt64, Int64T, Int32T)                        \
  V(ChangeUint32ToFloat64, Float64T, Word32T)                  \
  V(ChangeUint32ToUint64, Uint64T, Word32T)                    \
  V(BitcastInt32ToFloat32, Float32T, Word32T)                  \
  V(BitcastFloat32ToInt32, Uint32T, Float32T)                  \
  V(RoundFloat64ToInt32, Int32T, Float64T)                     \
  V(RoundInt32ToFloat32, Float32T, Int32T)                     \
  V(Float64SilenceNaN, Float64T, Float64T)                     \
  V(Float64RoundDown, Float64T, Float64T)                      \
  V(Float64RoundUp, Float64T, Float64T)                        \
  V(Float64RoundTiesEven, Float64T, Float64T)                  \
  V(Float64RoundTruncate, Float64T, Float64T)                  \
  V(Word32Clz, Int32T, Word32T)                                \
  V(Word64Clz, Int64T, Word64T)                                \
  V(Word32Ctz, Int32T, Word32T)                                \
  V(Word64Ctz, Int64T, Word64T)                                \
  V(Word32Popcnt, Int32T, Word32T)                             \
  V(Word64Popcnt, Int64T, Word64T)                             \
  V(Word32BitwiseNot, Word32T, Word32T)                        \
  V(WordNot, WordT, WordT)                                     \
  V(Word64Not, Word64T, Word64T)                               \
  V(I8x16BitMask, Int32T, I8x16T)                              \
  V(I8x16Splat, I8x16T, Int32T)                                \
  V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T)    \
  V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T)    \
  V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
  V(Word32BinaryNot, BoolT, Word32T)                           \
  V(StackPointerGreaterThan, BoolT, WordT)

// A "public" interface used by components outside of the compiler directory
// to create code objects with TurboFan's backend. This class is mostly a thin
// shim around the RawMachineAssembler, and its primary job is to ensure that
// the innards of the RawMachineAssembler and other compiler implementation
// details don't leak outside of the compiler directory.
//
// V8 components that need to generate low-level code using this interface
// should include this header--and this header only--from the compiler
// directory (this is actually enforced). Since all interesting data
// structures are forward declared, it's not possible for clients to peek
// inside the compiler internals.
//
// In addition to providing isolation between TurboFan and code generation
// clients, CodeAssembler also provides an abstraction for creating variables
// and enhanced Label functionality to merge variable values along paths where
// they have differing values, including loops.
//
// The CodeAssembler itself is stateless (and instances are expected to be
// temporary-scoped and short-lived); all its state is encapsulated into a
// CodeAssemblerState instance.
class V8_EXPORT_PRIVATE CodeAssembler {
 public:
  explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
  ~CodeAssembler();

  CodeAssembler(const CodeAssembler&) = delete;
  CodeAssembler& operator=(const CodeAssembler&) = delete;

  static Handle<Code> GenerateCode(CodeAssemblerState* state,
                                   const AssemblerOptions& options,
                                   const ProfileDataFromFile* profile_data);

  bool Is64() const;
  bool Is32() const;
  bool IsFloat64RoundUpSupported() const;
  bool IsFloat64RoundDownSupported() const;
  bool IsFloat64RoundTiesEvenSupported() const;
  bool IsFloat64RoundTruncateSupported() const;
  bool IsInt32AbsWithOverflowSupported() const;
  bool IsInt64AbsWithOverflowSupported() const;
  bool IsIntPtrAbsWithOverflowSupported() const;
  bool IsWord32PopcntSupported() const;
  bool IsWord64PopcntSupported() const;
  bool IsWord32CtzSupported() const;
  bool IsWord64CtzSupported() const;

  // Shortened aliases for use in CodeAssembler subclasses.
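  // A minimal usage sketch (illustrative only, not part of the original
  // header; |limit| stands for a hypothetical TNode<IntPtrT>). Subclasses
  // typically combine the aliases below with variables and labels like this:
  //
  //   TVariable<IntPtrT> var_index(IntPtrConstant(0), this);
  //   Label loop(this, &var_index), done(this);
  //   Goto(&loop);
  //   Bind(&loop);
  //   {
  //     TNode<IntPtrT> index = var_index.value();
  //     GotoIf(IntPtrGreaterThanOrEqual(index, limit), &done);
  //     var_index = IntPtrAdd(index, IntPtrConstant(1));
  //     Goto(&loop);
  //   }
  //   Bind(&done);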
using Label = CodeAssemblerLabel; template using TVariable = TypedCodeAssemblerVariable; using VariableList = CodeAssemblerVariableList; // =========================================================================== // Base Assembler // =========================================================================== template class CheckedNode { public: #ifdef DEBUG CheckedNode(Node* node, CodeAssembler* code_assembler, const char* location) : node_(node), code_assembler_(code_assembler), location_(location) {} #else CheckedNode(compiler::Node* node, CodeAssembler*, const char*) : node_(node) {} #endif template operator TNode() { static_assert( !std::is_same::value, "Can't cast to MaybeObject, use explicit conversion functions. "); static_assert(types_have_common_values::value, "Incompatible types: this cast can never succeed."); static_assert(std::is_convertible, TNode>::value, "Coercion to untagged values cannot be " "checked."); static_assert( !FromTyped || !std::is_convertible, TNode>::value, "Unnecessary CAST: types are convertible."); #ifdef DEBUG if (FLAG_debug_code) { if (std::is_same::value) { code_assembler_->GenerateCheckMaybeObjectIsObject( TNode::UncheckedCast(node_), location_); } TNode function = code_assembler_->ExternalConstant( ExternalReference::check_object_type()); code_assembler_->CallCFunction( function, MachineType::AnyTagged(), std::make_pair(MachineType::AnyTagged(), node_), std::make_pair(MachineType::TaggedSigned(), code_assembler_->SmiConstant( static_cast(ObjectTypeOf::value))), std::make_pair(MachineType::AnyTagged(), code_assembler_->StringConstant(location_))); } #endif return TNode::UncheckedCast(node_); } Node* node() const { return node_; } private: Node* node_; #ifdef DEBUG CodeAssembler* code_assembler_; const char* location_; #endif }; template TNode UncheckedCast(Node* value) { return TNode::UncheckedCast(value); } template TNode UncheckedCast(TNode value) { static_assert(types_have_common_values::value, "Incompatible types: this cast can never succeed."); return TNode::UncheckedCast(value); } // ReinterpretCast(v) has the power to cast even when the type of v is // unrelated to T. Use with care. template TNode ReinterpretCast(Node* value) { return TNode::UncheckedCast(value); } CheckedNode Cast(Node* value, const char* location = "") { return {value, this, location}; } template CheckedNode Cast(TNode value, const char* location = "") { return {value, this, location}; } #ifdef DEBUG #define STRINGIFY(x) #x #define TO_STRING_LITERAL(x) STRINGIFY(x) #define CAST(x) \ Cast(x, "CAST(" #x ") at " __FILE__ ":" TO_STRING_LITERAL(__LINE__)) #define TORQUE_CAST(x) \ ca_.Cast(x, "CAST(" #x ") at " __FILE__ ":" TO_STRING_LITERAL(__LINE__)) #else #define CAST(x) Cast(x) #define TORQUE_CAST(x) ca_.Cast(x) #endif #ifdef DEBUG void GenerateCheckMaybeObjectIsObject(TNode node, const char* location); #endif // Constants. 
TNode Int32Constant(int32_t value); TNode Int64Constant(int64_t value); TNode Uint64Constant(uint64_t value) { return Unsigned(Int64Constant(bit_cast(value))); } TNode IntPtrConstant(intptr_t value); TNode Uint32Constant(uint32_t value) { return Unsigned(Int32Constant(bit_cast(value))); } TNode UintPtrConstant(uintptr_t value) { return Unsigned(IntPtrConstant(bit_cast(value))); } TNode TaggedIndexConstant(intptr_t value); TNode PointerConstant(void* value) { return ReinterpretCast(IntPtrConstant(bit_cast(value))); } TNode NumberConstant(double value); TNode SmiConstant(Smi value); TNode SmiConstant(int value); template ::value>::type> TNode SmiConstant(E value) { STATIC_ASSERT(sizeof(E) <= sizeof(int)); return SmiConstant(static_cast(value)); } TNode UntypedHeapConstant(Handle object); template TNode HeapConstant(Handle object) { return UncheckedCast(UntypedHeapConstant(object)); } TNode StringConstant(const char* str); TNode BooleanConstant(bool value); TNode ExternalConstant(ExternalReference address); TNode Float32Constant(double value); TNode Float64Constant(double value); TNode Int32TrueConstant() { return ReinterpretCast(Int32Constant(1)); } TNode Int32FalseConstant() { return ReinterpretCast(Int32Constant(0)); } TNode BoolConstant(bool value) { return value ? Int32TrueConstant() : Int32FalseConstant(); } bool IsMapOffsetConstant(Node* node); bool TryToInt32Constant(TNode node, int32_t* out_value); bool TryToInt64Constant(TNode node, int64_t* out_value); bool TryToIntPtrConstant(TNode node, intptr_t* out_value); bool TryToIntPtrConstant(TNode tnode, intptr_t* out_value); bool TryToSmiConstant(TNode node, Smi* out_value); bool TryToSmiConstant(TNode node, Smi* out_value); bool IsUndefinedConstant(TNode node); bool IsNullConstant(TNode node); TNode Signed(TNode x) { return UncheckedCast(x); } TNode Signed(TNode x) { return UncheckedCast(x); } TNode Signed(TNode x) { return UncheckedCast(x); } TNode Unsigned(TNode x) { return UncheckedCast(x); } TNode Unsigned(TNode x) { return UncheckedCast(x); } TNode Unsigned(TNode x) { return UncheckedCast(x); } static constexpr int kTargetParameterIndex = -1; template TNode Parameter( int value, cppgc::SourceLocation loc = cppgc::SourceLocation::Current()) { static_assert( std::is_convertible, TNode>::value, "Parameter is only for tagged types. Use UncheckedParameter instead."); std::stringstream message; message << "Parameter " << value; if (loc.FileName()) { message << " at " << loc.FileName() << ":" << loc.Line(); } size_t buf_size = message.str().size() + 1; char* message_dup = zone()->NewArray(buf_size); snprintf(message_dup, buf_size, "%s", message.str().c_str()); return Cast(UntypedParameter(value), message_dup); } template TNode UncheckedParameter(int value) { return UncheckedCast(UntypedParameter(value)); } Node* UntypedParameter(int value); TNode GetJSContextParameter(); void Return(TNode value); void Return(TNode value1, TNode value2); void Return(TNode value1, TNode value2, TNode value3); void Return(TNode value); void Return(TNode value); void Return(TNode value); void Return(TNode value); void Return(TNode value); void Return(TNode value1, TNode value2); void PopAndReturn(Node* pop, Node* value); void ReturnIf(TNode condition, TNode value); void AbortCSAAssert(Node* message); void DebugBreak(); void Unreachable(); void Comment(const char* msg) { if (!FLAG_code_comments) return; Comment(std::string(msg)); } void Comment(std::string msg); template void Comment(Args&&... 
args) { if (!FLAG_code_comments) return; std::ostringstream s; USE((s << std::forward(args))...); Comment(s.str()); } void StaticAssert(TNode value, const char* source = "unknown position"); // The following methods refer to source positions in CSA or Torque code // compiled during mksnapshot, not JS compiled at runtime. void SetSourcePosition(const char* file, int line); void PushSourcePosition(); void PopSourcePosition(); class V8_NODISCARD SourcePositionScope { public: explicit SourcePositionScope(CodeAssembler* ca) : ca_(ca) { ca->PushSourcePosition(); } ~SourcePositionScope() { ca_->PopSourcePosition(); } private: CodeAssembler* ca_; }; const std::vector& GetMacroSourcePositionStack() const; void Bind(Label* label); #if DEBUG void Bind(Label* label, AssemblerDebugInfo debug_info); #endif // DEBUG void Goto(Label* label); void GotoIf(TNode condition, Label* true_label); void GotoIfNot(TNode condition, Label* false_label); void Branch(TNode condition, Label* true_label, Label* false_label); template TNode Uninitialized() { return {}; } template void Bind(CodeAssemblerParameterizedLabel* label, TNode*... phis) { Bind(label->plain_label()); label->CreatePhis(phis...); } template void Branch(TNode condition, CodeAssemblerParameterizedLabel* if_true, CodeAssemblerParameterizedLabel* if_false, Args... args) { if_true->AddInputs(args...); if_false->AddInputs(args...); Branch(condition, if_true->plain_label(), if_false->plain_label()); } template void Branch(TNode condition, CodeAssemblerParameterizedLabel* if_true, std::vector args_true, CodeAssemblerParameterizedLabel* if_false, std::vector args_false) { if_true->AddInputsVector(std::move(args_true)); if_false->AddInputsVector(std::move(args_false)); Branch(condition, if_true->plain_label(), if_false->plain_label()); } template void Goto(CodeAssemblerParameterizedLabel* label, Args... args) { label->AddInputs(args...); Goto(label->plain_label()); } void Branch(TNode condition, const std::function& true_body, const std::function& false_body); void Branch(TNode condition, Label* true_label, const std::function& false_body); void Branch(TNode condition, const std::function& true_body, Label* false_label); void Switch(Node* index, Label* default_label, const int32_t* case_values, Label** case_labels, size_t case_count); // Access to the frame pointer TNode LoadFramePointer(); TNode LoadParentFramePointer(); // Poison |value| on speculative paths. TNode TaggedPoisonOnSpeculation(TNode value); TNode WordPoisonOnSpeculation(TNode value); // Load raw memory location. 
Node* Load(MachineType type, Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); template TNode Load(MachineType type, TNode> base) { DCHECK( IsSubtype(type.representation(), MachineRepresentationOf::value)); return UncheckedCast(Load(type, static_cast(base))); } Node* Load(MachineType type, Node* base, Node* offset, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); template TNode Load(Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { return UncheckedCast( Load(MachineTypeOf::value, base, needs_poisoning)); } template TNode Load(Node* base, TNode offset, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { return UncheckedCast( Load(MachineTypeOf::value, base, offset, needs_poisoning)); } template TNode AtomicLoad(TNode base, TNode offset) { return UncheckedCast( AtomicLoad(MachineTypeOf::value, base, offset)); } template TNode AtomicLoad64(TNode base, TNode offset); // Load uncompressed tagged value from (most likely off JS heap) memory // location. TNode LoadFullTagged( Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); TNode LoadFullTagged( Node* base, TNode offset, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); Node* LoadFromObject(MachineType type, TNode object, TNode offset); #ifdef V8_MAP_PACKING Node* PackMapWord(Node* value); #endif // Load a value from the root array. // If map packing is enabled, LoadRoot for a root map returns the unpacked map // word (i.e., the map). Use LoadRootMapWord to obtain the packed map word // instead. TNode LoadRoot(RootIndex root_index); TNode LoadRootMapWord(RootIndex root_index); template TNode UnalignedLoad(TNode base, TNode offset) { MachineType mt = MachineTypeOf::value; return UncheckedCast(UnalignedLoad(mt, base, offset)); } // Store value to raw memory location. void Store(Node* base, Node* value); void Store(Node* base, Node* offset, Node* value); void StoreEphemeronKey(Node* base, Node* offset, Node* value); void StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value); void StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset, Node* value); void UnsafeStoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value); void UnsafeStoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset, Node* value); // Stores uncompressed tagged value to (most likely off JS heap) memory // location without write barrier. void StoreFullTaggedNoWriteBarrier(TNode base, TNode tagged_value); void StoreFullTaggedNoWriteBarrier(TNode base, TNode offset, TNode tagged_value); // Optimized memory operations that map to Turbofan simplified nodes. TNode OptimizedAllocate(TNode size, AllocationType allocation, AllowLargeObjects allow_large_objects); void StoreToObject(MachineRepresentation rep, TNode object, TNode offset, Node* value, StoreToObjectWriteBarrier write_barrier); void OptimizedStoreField(MachineRepresentation rep, TNode object, int offset, Node* value); void OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation rep, TNode object, int offset, Node* value); void OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation rep, TNode object, int offset, Node* value); void OptimizedStoreMap(TNode object, TNode); void AtomicStore(MachineRepresentation rep, TNode base, TNode offset, TNode value); // {value_high} is used for 64-bit stores on 32-bit platforms, must be // nullptr in other cases. 
void AtomicStore64(TNode base, TNode offset, TNode value, TNode value_high); TNode AtomicAdd(MachineType type, TNode base, TNode offset, TNode value); template TNode AtomicAdd64(TNode base, TNode offset, TNode value, TNode value_high); TNode AtomicSub(MachineType type, TNode base, TNode offset, TNode value); template TNode AtomicSub64(TNode base, TNode offset, TNode value, TNode value_high); TNode AtomicAnd(MachineType type, TNode base, TNode offset, TNode value); template TNode AtomicAnd64(TNode base, TNode offset, TNode value, TNode value_high); TNode AtomicOr(MachineType type, TNode base, TNode offset, TNode value); template TNode AtomicOr64(TNode base, TNode offset, TNode value, TNode value_high); TNode AtomicXor(MachineType type, TNode base, TNode offset, TNode value); template TNode AtomicXor64(TNode base, TNode offset, TNode value, TNode value_high); // Exchange value at raw memory location TNode AtomicExchange(MachineType type, TNode base, TNode offset, TNode value); template TNode AtomicExchange64(TNode base, TNode offset, TNode value, TNode value_high); // Compare and Exchange value at raw memory location TNode AtomicCompareExchange(MachineType type, TNode base, TNode offset, TNode old_value, TNode new_value); template TNode AtomicCompareExchange64(TNode base, TNode offset, TNode old_value, TNode new_value, TNode old_value_high, TNode new_value_high); // Store a value to the root array. void StoreRoot(RootIndex root_index, TNode value); // Basic arithmetic operations. #define DECLARE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \ TNode name(TNode a, TNode b); CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP) #undef DECLARE_CODE_ASSEMBLER_BINARY_OP TNode WordShr(TNode left, TNode right) { return Unsigned(WordShr(static_cast>(left), right)); } TNode WordSar(TNode left, TNode right) { return Signed(WordSar(static_cast>(left), right)); } TNode WordShl(TNode left, TNode right) { return Signed(WordShl(static_cast>(left), right)); } TNode WordShl(TNode left, TNode right) { return Unsigned(WordShl(static_cast>(left), right)); } TNode Word32Shl(TNode left, TNode right) { return Signed(Word32Shl(static_cast>(left), right)); } TNode Word32Shl(TNode left, TNode right) { return Unsigned(Word32Shl(static_cast>(left), right)); } TNode Word32Shr(TNode left, TNode right) { return Unsigned(Word32Shr(static_cast>(left), right)); } TNode Word32Sar(TNode left, TNode right) { return Signed(Word32Sar(static_cast>(left), right)); } TNode Word64Shl(TNode left, TNode right) { return Signed(Word64Shl(static_cast>(left), right)); } TNode Word64Shl(TNode left, TNode right) { return Unsigned(Word64Shl(static_cast>(left), right)); } TNode Word64Shr(TNode left, TNode right) { return Unsigned(Word64Shr(static_cast>(left), right)); } TNode Word64Sar(TNode left, TNode right) { return Signed(Word64Sar(static_cast>(left), right)); } TNode Word64And(TNode left, TNode right) { return Signed(Word64And(static_cast>(left), right)); } TNode Word64And(TNode left, TNode right) { return Unsigned(Word64And(static_cast>(left), right)); } TNode Word64Xor(TNode left, TNode right) { return Signed(Word64Xor(static_cast>(left), right)); } TNode Word64Xor(TNode left, TNode right) { return Unsigned(Word64Xor(static_cast>(left), right)); } TNode Word64Not(TNode value) { return Signed(Word64Not(static_cast>(value))); } TNode Word64Not(TNode value) { return Unsigned(Word64Not(static_cast>(value))); } TNode WordAnd(TNode left, TNode right) { return Signed(WordAnd(static_cast>(left), static_cast>(right))); } 
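  // Illustrative note (not part of the original header): the signed and
  // unsigned overloads in this block only widen their operands to the generic
  // WordT/Word32T/Word64T operation and then re-attach the signedness of the
  // inputs via Signed()/Unsigned(). For a hypothetical TNode<UintPtrT> hash,
  //
  //   TNode<UintPtrT> masked = WordAnd(hash, UintPtrConstant(0xFFu));
  //
  // therefore stays unsigned without an explicit cast at the call site.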
TNode WordAnd(TNode left, TNode right) { return Unsigned(WordAnd(static_cast>(left), static_cast>(right))); } TNode Word32And(TNode left, TNode right) { return Signed(Word32And(static_cast>(left), static_cast>(right))); } TNode Word32And(TNode left, TNode right) { return Unsigned(Word32And(static_cast>(left), static_cast>(right))); } TNode WordOr(TNode left, TNode right) { return Signed(WordOr(static_cast>(left), static_cast>(right))); } TNode Word32Or(TNode left, TNode right) { return Signed(Word32Or(static_cast>(left), static_cast>(right))); } TNode Word32Or(TNode left, TNode right) { return Unsigned(Word32Or(static_cast>(left), static_cast>(right))); } TNode IntPtrEqual(TNode left, TNode right); TNode WordEqual(TNode left, TNode right); TNode WordNotEqual(TNode left, TNode right); TNode Word32Equal(TNode left, TNode right); TNode Word32NotEqual(TNode left, TNode right); TNode Word64Equal(TNode left, TNode right); TNode Word64NotEqual(TNode left, TNode right); TNode WordNot(TNode a) { return Signed(WordNot(static_cast>(a))); } TNode Word32Or(TNode left, TNode right) { return UncheckedCast(Word32Or(static_cast>(left), static_cast>(right))); } TNode Word32And(TNode left, TNode right) { return UncheckedCast(Word32And(static_cast>(left), static_cast>(right))); } TNode Int32Add(TNode left, TNode right) { return Signed(Int32Add(static_cast>(left), static_cast>(right))); } TNode Uint32Add(TNode left, TNode right) { return Unsigned(Int32Add(static_cast>(left), static_cast>(right))); } TNode Uint32Sub(TNode left, TNode right) { return Unsigned(Int32Sub(static_cast>(left), static_cast>(right))); } TNode Int32Sub(TNode left, TNode right) { return Signed(Int32Sub(static_cast>(left), static_cast>(right))); } TNode Int32Mul(TNode left, TNode right) { return Signed(Int32Mul(static_cast>(left), static_cast>(right))); } TNode Int64Add(TNode left, TNode right) { return Signed(Int64Add(static_cast>(left), right)); } TNode Uint64Add(TNode left, TNode right) { return Unsigned(Int64Add(static_cast>(left), right)); } TNode Int64Sub(TNode left, TNode right) { return Signed(Int64Sub(static_cast>(left), right)); } TNode Uint64Sub(TNode left, TNode right) { return Unsigned(Int64Sub(static_cast>(left), right)); } TNode Int64Mul(TNode left, TNode right) { return Signed(Int64Mul(static_cast>(left), right)); } TNode Uint64Mul(TNode left, TNode right) { return Unsigned(Int64Mul(static_cast>(left), right)); } TNode IntPtrAdd(TNode left, TNode right) { return Signed(IntPtrAdd(static_cast>(left), static_cast>(right))); } TNode IntPtrSub(TNode left, TNode right) { return Signed(IntPtrSub(static_cast>(left), static_cast>(right))); } TNode IntPtrMul(TNode left, TNode right) { return Signed(IntPtrMul(static_cast>(left), static_cast>(right))); } TNode UintPtrAdd(TNode left, TNode right) { return Unsigned(IntPtrAdd(static_cast>(left), static_cast>(right))); } TNode UintPtrSub(TNode left, TNode right) { return Unsigned(IntPtrSub(static_cast>(left), static_cast>(right))); } TNode RawPtrAdd(TNode left, TNode right) { return ReinterpretCast(IntPtrAdd(left, right)); } TNode RawPtrSub(TNode left, TNode right) { return ReinterpretCast(IntPtrSub(left, right)); } TNode RawPtrSub(TNode left, TNode right) { return Signed(IntPtrSub(static_cast>(left), static_cast>(right))); } TNode WordShl(TNode value, int shift); TNode WordShr(TNode value, int shift); TNode WordSar(TNode value, int shift); TNode WordShr(TNode value, int shift) { return UncheckedCast(WordShr(TNode(value), shift)); } TNode WordSar(TNode value, int shift) { return 
UncheckedCast(WordSar(TNode(value), shift)); } TNode Word32Shr(TNode value, int shift); TNode Word32Sar(TNode value, int shift); // Unary #define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \ TNode name(TNode a); CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP) #undef DECLARE_CODE_ASSEMBLER_UNARY_OP template TNode BitcastTaggedToWord(TNode node) { static_assert(sizeof(Dummy) < 0, "Should use BitcastTaggedToWordForTagAndSmiBits instead."); } // Changes a double to an inptr_t for pointer arithmetic outside of Smi range. // Assumes that the double can be exactly represented as an int. TNode ChangeFloat64ToIntPtr(TNode value); TNode ChangeFloat64ToUintPtr(TNode value); // Same in the opposite direction. TNode ChangeUintPtrToFloat64(TNode value); // Changes an intptr_t to a double, e.g. for storing an element index // outside Smi range in a HeapNumber. Lossless on 32-bit, // rounds on 64-bit (which doesn't affect valid element indices). TNode RoundIntPtrToFloat64(Node* value); // No-op on 32-bit, otherwise zero extend. TNode ChangeUint32ToWord(TNode value); // No-op on 32-bit, otherwise sign extend. TNode ChangeInt32ToIntPtr(TNode value); // Truncates a float to a 32-bit integer. If the float is outside of 32-bit // range, make sure that overflow detection is easy. In particular, return // int_min instead of int_max on arm platforms by using parameter // kSetOverflowToMin. TNode TruncateFloat32ToInt32(TNode value); // Projections template TNode>::type> Projection(TNode> value) { return UncheckedCast< typename std::tuple_element>::type>( Projection(index, value)); } // Calls template TNode CallRuntime(Runtime::FunctionId function, TNode context, TArgs... args) { return UncheckedCast(CallRuntimeImpl( function, context, {implicit_cast>(args)...})); } template void TailCallRuntime(Runtime::FunctionId function, TNode context, TArgs... args) { int argc = static_cast(sizeof...(args)); TNode arity = Int32Constant(argc); return TailCallRuntimeImpl(function, arity, context, {implicit_cast>(args)...}); } template void TailCallRuntime(Runtime::FunctionId function, TNode arity, TNode context, TArgs... args) { return TailCallRuntimeImpl(function, arity, context, {implicit_cast>(args)...}); } // // If context passed to CallStub is nullptr, it won't be passed to the stub. // template TNode CallStub(Callable const& callable, TNode context, TArgs... args) { TNode target = HeapConstant(callable.code()); return CallStub(callable.descriptor(), target, context, args...); } template TNode CallStub(const CallInterfaceDescriptor& descriptor, TNode target, TNode context, TArgs... args) { return UncheckedCast(CallStubR(StubCallMode::kCallCodeObject, descriptor, target, context, args...)); } template TNode CallBuiltinPointer(const CallInterfaceDescriptor& descriptor, TNode target, TNode context, TArgs... args) { return UncheckedCast(CallStubR(StubCallMode::kCallBuiltinPointer, descriptor, target, context, args...)); } template void TailCallStub(Callable const& callable, TNode context, TArgs... args) { TNode target = HeapConstant(callable.code()); TailCallStub(callable.descriptor(), target, context, args...); } template void TailCallStub(const CallInterfaceDescriptor& descriptor, TNode target, TNode context, TArgs... args) { TailCallStubImpl(descriptor, target, context, {args...}); } template void TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor, TNode target, TArgs... 
args);

  template <class... TArgs>
  void TailCallStubThenBytecodeDispatch(
      const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
      TArgs... args) {
    TailCallStubThenBytecodeDispatchImpl(descriptor, target, context,
                                         {args...});
  }

  // Tail calls to the given code object with JSCall linkage. The JS arguments
  // (including receiver) are supposed to be already on the stack.
  // This is a building block for implementing trampoline stubs that are
  // installed instead of code objects with JSCall linkage.
  // Note that no arguments adaptation is going on here - all the JavaScript
  // arguments are left on the stack unmodified. Therefore, this tail call can
  // only be used after arguments adaptation has been performed already.
  void TailCallJSCode(TNode<Code> code, TNode<Context> context,
                      TNode<JSFunction> function, TNode<Object> new_target,
                      TNode<Int32T> arg_count);

  template <class... TArgs>
  TNode<Object> CallJS(Callable const& callable, Node* context, Node* function,
                       Node* receiver, TArgs... args) {
    int argc = static_cast<int>(sizeof...(args));
    TNode<Int32T> arity = Int32Constant(argc);
    TNode<Code> target = HeapConstant(callable.code());
    return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
                               CAST(function), {}, arity,
                               {receiver, args...}));
  }

  template <class... TArgs>
  Node* ConstructJSWithTarget(Callable const& callable, Node* context,
                              Node* function, Node* new_target,
                              TArgs... args) {
    int argc = static_cast<int>(sizeof...(args));
    TNode<Int32T> arity = Int32Constant(argc);
    TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
    TNode<Code> target = HeapConstant(callable.code());
    return CallJSStubImpl(callable.descriptor(), target, CAST(context),
                          CAST(function), CAST(new_target), arity,
                          {receiver, args...});
  }

  template <class... TArgs>
  Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
                    TArgs... args) {
    return ConstructJSWithTarget(callable, context, new_target, new_target,
                                 args...);
  }

  Node* CallCFunctionN(Signature<MachineType>* signature, int input_count,
                       Node* const* inputs);

  // Type representing C function argument with type info.
  using CFunctionArg = std::pair<MachineType, Node*>;

  // Call to a C function.
  template <class... CArgs>
  Node* CallCFunction(Node* function, base::Optional<MachineType> return_type,
                      CArgs... cargs) {
    static_assert(v8::internal::conjunction<
                      std::is_convertible<CArgs, CFunctionArg>...>::value,
                  "invalid argument types");
    return CallCFunction(function, return_type, {cargs...});
  }

  // Call to a C function without a function descriptor on AIX.
  template <class... CArgs>
  Node* CallCFunctionWithoutFunctionDescriptor(Node* function,
                                               MachineType return_type,
                                               CArgs... cargs) {
    static_assert(v8::internal::conjunction<
                      std::is_convertible<CArgs, CFunctionArg>...>::value,
                  "invalid argument types");
    return CallCFunctionWithoutFunctionDescriptor(function, return_type,
                                                  {cargs...});
  }

  // Call to a C function, while saving/restoring caller registers.
  template <class... CArgs>
  Node* CallCFunctionWithCallerSavedRegisters(Node* function,
                                              MachineType return_type,
                                              SaveFPRegsMode mode,
                                              CArgs... cargs) {
    static_assert(v8::internal::conjunction<
                      std::is_convertible<CArgs, CFunctionArg>...>::value,
                  "invalid argument types");
    return CallCFunctionWithCallerSavedRegisters(function, return_type, mode,
                                                 {cargs...});
  }

  // Helpers which delegate to RawMachineAssembler.
Factory* factory() const; Isolate* isolate() const; Zone* zone() const; CodeAssemblerState* state() { return state_; } void BreakOnNode(int node_id); bool UnalignedLoadSupported(MachineRepresentation rep) const; bool UnalignedStoreSupported(MachineRepresentation rep) const; bool IsExceptionHandlerActive() const; protected: void RegisterCallGenerationCallbacks( const CodeAssemblerCallback& call_prologue, const CodeAssemblerCallback& call_epilogue); void UnregisterCallGenerationCallbacks(); bool Word32ShiftIsSafe() const; PoisoningMitigationLevel poisoning_level() const; bool IsJSFunctionCall() const; private: void HandleException(Node* result); Node* CallCFunction(Node* function, base::Optional return_type, std::initializer_list args); Node* CallCFunctionWithoutFunctionDescriptor( Node* function, MachineType return_type, std::initializer_list args); Node* CallCFunctionWithCallerSavedRegisters( Node* function, MachineType return_type, SaveFPRegsMode mode, std::initializer_list args); Node* CallRuntimeImpl(Runtime::FunctionId function, TNode context, std::initializer_list> args); void TailCallRuntimeImpl(Runtime::FunctionId function, TNode arity, TNode context, std::initializer_list> args); void TailCallStubImpl(const CallInterfaceDescriptor& descriptor, TNode target, TNode context, std::initializer_list args); void TailCallStubThenBytecodeDispatchImpl( const CallInterfaceDescriptor& descriptor, Node* target, Node* context, std::initializer_list args); template Node* CallStubR(StubCallMode call_mode, const CallInterfaceDescriptor& descriptor, TNode target, TNode context, TArgs... args) { return CallStubRImpl(call_mode, descriptor, target, context, {args...}); } Node* CallStubRImpl(StubCallMode call_mode, const CallInterfaceDescriptor& descriptor, TNode target, TNode context, std::initializer_list args); Node* CallJSStubImpl(const CallInterfaceDescriptor& descriptor, TNode target, TNode context, TNode function, base::Optional> new_target, TNode arity, std::initializer_list args); Node* CallStubN(StubCallMode call_mode, const CallInterfaceDescriptor& descriptor, int input_count, Node* const* inputs); Node* AtomicLoad(MachineType type, TNode base, TNode offset); Node* UnalignedLoad(MachineType type, TNode base, TNode offset); // These two don't have definitions and are here only for catching use cases // where the cast is not necessary. TNode Signed(TNode x); TNode Unsigned(TNode x); Node* Projection(int index, Node* value); RawMachineAssembler* raw_assembler() const; JSGraph* jsgraph() const; // Calls respective callback registered in the state. void CallPrologue(); void CallEpilogue(); CodeAssemblerState* state_; }; // TODO(solanes, v8:6949): this class should be merged into // TypedCodeAssemblerVariable. It's required to be separate for // CodeAssemblerVariableLists. 
class V8_EXPORT_PRIVATE CodeAssemblerVariable { public: CodeAssemblerVariable(const CodeAssemblerVariable&) = delete; CodeAssemblerVariable& operator=(const CodeAssemblerVariable&) = delete; Node* value() const; MachineRepresentation rep() const; bool IsBound() const; protected: explicit CodeAssemblerVariable(CodeAssembler* assembler, MachineRepresentation rep); CodeAssemblerVariable(CodeAssembler* assembler, MachineRepresentation rep, Node* initial_value); #if DEBUG CodeAssemblerVariable(CodeAssembler* assembler, AssemblerDebugInfo debug_info, MachineRepresentation rep); CodeAssemblerVariable(CodeAssembler* assembler, AssemblerDebugInfo debug_info, MachineRepresentation rep, Node* initial_value); #endif // DEBUG ~CodeAssemblerVariable(); void Bind(Node* value); private: class Impl; friend class CodeAssemblerLabel; friend class CodeAssemblerState; friend std::ostream& operator<<(std::ostream&, const Impl&); friend std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable&); struct ImplComparator { bool operator()(const CodeAssemblerVariable::Impl* a, const CodeAssemblerVariable::Impl* b) const; }; Impl* impl_; CodeAssemblerState* state_; }; std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable&); std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable::Impl&); template class TypedCodeAssemblerVariable : public CodeAssemblerVariable { public: TypedCodeAssemblerVariable(TNode initial_value, CodeAssembler* assembler) : CodeAssemblerVariable(assembler, PhiMachineRepresentationOf, initial_value) {} explicit TypedCodeAssemblerVariable(CodeAssembler* assembler) : CodeAssemblerVariable(assembler, PhiMachineRepresentationOf) {} #if DEBUG TypedCodeAssemblerVariable(AssemblerDebugInfo debug_info, CodeAssembler* assembler) : CodeAssemblerVariable(assembler, debug_info, PhiMachineRepresentationOf) {} TypedCodeAssemblerVariable(AssemblerDebugInfo debug_info, TNode initial_value, CodeAssembler* assembler) : CodeAssemblerVariable(assembler, debug_info, PhiMachineRepresentationOf, initial_value) {} #endif // DEBUG TNode value() const { return TNode::UncheckedCast(CodeAssemblerVariable::value()); } void operator=(TNode value) { Bind(value); } void operator=(const TypedCodeAssemblerVariable& variable) { Bind(variable.value()); } private: using CodeAssemblerVariable::Bind; }; class V8_EXPORT_PRIVATE CodeAssemblerLabel { public: enum Type { kDeferred, kNonDeferred }; explicit CodeAssemblerLabel( CodeAssembler* assembler, CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred) : CodeAssemblerLabel(assembler, 0, nullptr, type) {} CodeAssemblerLabel( CodeAssembler* assembler, const CodeAssemblerVariableList& merged_variables, CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred) : CodeAssemblerLabel(assembler, merged_variables.size(), &(merged_variables[0]), type) {} CodeAssemblerLabel( CodeAssembler* assembler, size_t count, CodeAssemblerVariable* const* vars, CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred); CodeAssemblerLabel( CodeAssembler* assembler, std::initializer_list vars, CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred) : CodeAssemblerLabel(assembler, vars.size(), vars.begin(), type) {} CodeAssemblerLabel( CodeAssembler* assembler, CodeAssemblerVariable* merged_variable, CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred) : CodeAssemblerLabel(assembler, 1, &merged_variable, type) {} ~CodeAssemblerLabel(); // Cannot be copied because the destructor explicitly call the destructor of // the underlying 
{RawMachineLabel}, hence only one pointer can point to it. CodeAssemblerLabel(const CodeAssemblerLabel&) = delete; CodeAssemblerLabel& operator=(const CodeAssemblerLabel&) = delete; inline bool is_bound() const { return bound_; } inline bool is_used() const { return merge_count_ != 0; } private: friend class CodeAssembler; void Bind(); #if DEBUG void Bind(AssemblerDebugInfo debug_info); #endif // DEBUG void UpdateVariablesAfterBind(); void MergeVariables(); bool bound_; size_t merge_count_; CodeAssemblerState* state_; RawMachineLabel* label_; // Map of variables that need to be merged to their phi nodes (or placeholders // for those phis). std::map variable_phis_; // Map of variables to the list of value nodes that have been added from each // merge path in their order of merging. std::map, CodeAssemblerVariable::ImplComparator> variable_merges_; }; class CodeAssemblerParameterizedLabelBase { public: bool is_used() const { return plain_label_.is_used(); } explicit CodeAssemblerParameterizedLabelBase(CodeAssembler* assembler, size_t arity, CodeAssemblerLabel::Type type) : state_(assembler->state()), phi_inputs_(arity), plain_label_(assembler, type) {} protected: CodeAssemblerLabel* plain_label() { return &plain_label_; } void AddInputs(std::vector inputs); Node* CreatePhi(MachineRepresentation rep, const std::vector& inputs); const std::vector& CreatePhis( std::vector representations); private: CodeAssemblerState* state_; std::vector> phi_inputs_; std::vector phi_nodes_; CodeAssemblerLabel plain_label_; }; template class CodeAssemblerParameterizedLabel : public CodeAssemblerParameterizedLabelBase { public: static constexpr size_t kArity = sizeof...(Types); explicit CodeAssemblerParameterizedLabel(CodeAssembler* assembler, CodeAssemblerLabel::Type type) : CodeAssemblerParameterizedLabelBase(assembler, kArity, type) {} private: friend class CodeAssembler; void AddInputsVector(std::vector inputs) { CodeAssemblerParameterizedLabelBase::AddInputs(std::move(inputs)); } void AddInputs(TNode... inputs) { CodeAssemblerParameterizedLabelBase::AddInputs( std::vector{inputs...}); } void CreatePhis(TNode*... results) { const std::vector& phi_nodes = CodeAssemblerParameterizedLabelBase::CreatePhis( {PhiMachineRepresentationOf...}); auto it = phi_nodes.begin(); USE(it); ITERATE_PACK(AssignPhi(results, *(it++))); } template static void AssignPhi(TNode* result, Node* phi) { if (phi != nullptr) *result = TNode::UncheckedCast(phi); } }; using CodeAssemblerExceptionHandlerLabel = CodeAssemblerParameterizedLabel; class V8_EXPORT_PRIVATE CodeAssemblerState { public: // Create with CallStub linkage. // |result_size| specifies the number of results returned by the stub. // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor. CodeAssemblerState(Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor, CodeKind kind, const char* name, PoisoningMitigationLevel poisoning_level, Builtin builtin = Builtin::kNoBuiltinId); // Create with JSCall linkage. 
  CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
                     CodeKind kind, const char* name,
                     PoisoningMitigationLevel poisoning_level,
                     Builtin builtin = Builtin::kNoBuiltinId);

  ~CodeAssemblerState();

  CodeAssemblerState(const CodeAssemblerState&) = delete;
  CodeAssemblerState& operator=(const CodeAssemblerState&) = delete;

  const char* name() const { return name_; }
  int parameter_count() const;

#if DEBUG
  void PrintCurrentBlock(std::ostream& os);
#endif  // DEBUG
  bool InsideBlock();
  void SetInitialDebugInformation(const char* msg, const char* file, int line);

 private:
  friend class CodeAssembler;
  friend class CodeAssemblerLabel;
  friend class CodeAssemblerVariable;
  friend class CodeAssemblerTester;
  friend class CodeAssemblerParameterizedLabelBase;
  friend class ScopedExceptionHandler;

  CodeAssemblerState(Isolate* isolate, Zone* zone,
                     CallDescriptor* call_descriptor, CodeKind kind,
                     const char* name,
                     PoisoningMitigationLevel poisoning_level,
                     Builtin builtin);

  void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label);
  void PopExceptionHandler();

  std::unique_ptr<RawMachineAssembler> raw_assembler_;
  CodeKind kind_;
  const char* name_;
  Builtin builtin_;
  bool code_generated_;
  ZoneSet<CodeAssemblerVariable::Impl*, CodeAssemblerVariable::ImplComparator>
      variables_;
  CodeAssemblerCallback call_prologue_;
  CodeAssemblerCallback call_epilogue_;
  std::vector<CodeAssemblerExceptionHandlerLabel*> exception_handler_labels_;
  using VariableId = uint32_t;
  VariableId next_variable_id_ = 0;
  JSGraph* jsgraph_;

  // Only used by CodeStubAssembler builtins.
  std::vector macro_call_stack_;

  VariableId NextVariableId() { return next_variable_id_++; }
};

class V8_EXPORT_PRIVATE V8_NODISCARD ScopedExceptionHandler {
 public:
  ScopedExceptionHandler(CodeAssembler* assembler,
                         CodeAssemblerExceptionHandlerLabel* label);

  // Use this constructor for compatibility/ports of old CSA code only. New
  // code should use the CodeAssemblerExceptionHandlerLabel version.
  ScopedExceptionHandler(CodeAssembler* assembler, CodeAssemblerLabel* label,
                         TypedCodeAssemblerVariable<Object>* exception);

  ~ScopedExceptionHandler();

 private:
  bool has_handler_;
  CodeAssembler* assembler_;
  CodeAssemblerLabel* compatibility_label_;
  std::unique_ptr<CodeAssemblerExceptionHandlerLabel> label_;
  TypedCodeAssemblerVariable<Object>* exception_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_CODE_ASSEMBLER_H_