author     Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/ftl
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
download   WebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/JavaScriptCore/ftl')
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h | 32
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAbbreviations.h | 313
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp | 176
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAbstractHeap.h | 144
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp | 119
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h | 179
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp | 41
-rw-r--r--  Source/JavaScriptCore/ftl/FTLAvailableRecovery.h | 75
-rw-r--r--  Source/JavaScriptCore/ftl/FTLCapabilities.cpp | 348
-rw-r--r--  Source/JavaScriptCore/ftl/FTLCapabilities.h | 8
-rw-r--r--  Source/JavaScriptCore/ftl/FTLCommonValues.cpp | 62
-rw-r--r--  Source/JavaScriptCore/ftl/FTLCommonValues.h | 77
-rw-r--r--  Source/JavaScriptCore/ftl/FTLCompile.cpp | 458
-rw-r--r--  Source/JavaScriptCore/ftl/FTLCompile.h | 13
-rw-r--r--  Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.cpp | 63
-rw-r--r--  Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.h | 61
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp | 76
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExceptionTarget.h | 64
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitArgument.cpp | 2
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitArgument.h | 24
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp | 2
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h | 8
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp | 51
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitPropertyValue.h (renamed from Source/JavaScriptCore/ftl/FTLValueFormat.h) | 63
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.cpp (renamed from Source/JavaScriptCore/ftl/FTLIntrinsicRepository.cpp) | 67
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h | 69
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitValue.cpp | 75
-rw-r--r--  Source/JavaScriptCore/ftl/FTLExitValue.h | 129
-rw-r--r--  Source/JavaScriptCore/ftl/FTLFail.cpp | 6
-rw-r--r--  Source/JavaScriptCore/ftl/FTLFail.h | 8
-rw-r--r--  Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp | 6
-rw-r--r--  Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h | 10
-rw-r--r--  Source/JavaScriptCore/ftl/FTLFormattedValue.h | 37
-rw-r--r--  Source/JavaScriptCore/ftl/FTLGeneratedFunction.h | 7
-rw-r--r--  Source/JavaScriptCore/ftl/FTLIntrinsicRepository.h | 135
-rw-r--r--  Source/JavaScriptCore/ftl/FTLJITCode.cpp | 96
-rw-r--r--  Source/JavaScriptCore/ftl/FTLJITCode.h | 72
-rw-r--r--  Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp | 88
-rw-r--r--  Source/JavaScriptCore/ftl/FTLJITFinalizer.h | 41
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp | 79
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLazySlowPath.h | 96
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h | 52
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLink.cpp | 229
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLink.h | 8
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLocation.cpp | 195
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLocation.h | 213
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp | 13970
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h (renamed from Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.h) | 10
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp | 4515
-rw-r--r--  Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h | 8
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSREntry.cpp | 28
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSREntry.h | 8
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSRExit.cpp | 83
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSRExit.h | 206
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp | 554
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h | 8
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp | 62
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOSRExitHandle.h | 63
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOperations.cpp | 535
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOperations.h | 51
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOutput.cpp | 800
-rw-r--r--  Source/JavaScriptCore/ftl/FTLOutput.h | 688
-rw-r--r--  Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp | 121
-rw-r--r--  Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h | 102
-rw-r--r--  Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp | 51
-rw-r--r--  Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h | 46
-rw-r--r--  Source/JavaScriptCore/ftl/FTLSaveRestore.cpp | 149
-rw-r--r--  Source/JavaScriptCore/ftl/FTLSaveRestore.h | 55
-rw-r--r--  Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp | 148
-rw-r--r--  Source/JavaScriptCore/ftl/FTLSlowPathCall.h | 124
-rw-r--r--  Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp (renamed from Source/JavaScriptCore/ftl/FTLOSRExitCompilationInfo.h) | 26
-rw-r--r--  Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h | 132
-rw-r--r--  Source/JavaScriptCore/ftl/FTLStackmapArgumentList.h (renamed from Source/JavaScriptCore/ftl/FTLExitArgumentList.h) | 14
-rw-r--r--  Source/JavaScriptCore/ftl/FTLState.cpp | 32
-rw-r--r--  Source/JavaScriptCore/ftl/FTLState.h | 55
-rw-r--r--  Source/JavaScriptCore/ftl/FTLSwitchCase.h | 22
-rw-r--r--  Source/JavaScriptCore/ftl/FTLThunks.cpp | 125
-rw-r--r--  Source/JavaScriptCore/ftl/FTLThunks.h | 17
-rw-r--r--  Source/JavaScriptCore/ftl/FTLTypedPointer.h | 18
-rw-r--r--  Source/JavaScriptCore/ftl/FTLValueFormat.cpp | 123
-rw-r--r--  Source/JavaScriptCore/ftl/FTLValueFromBlock.h | 10
-rw-r--r--  Source/JavaScriptCore/ftl/FTLValueRange.cpp | 41
-rw-r--r--  Source/JavaScriptCore/ftl/FTLValueRange.h (renamed from Source/JavaScriptCore/ftl/FTLExitThunkGenerator.h) | 42
-rw-r--r--  Source/JavaScriptCore/ftl/FTLWeight.h (renamed from Source/JavaScriptCore/ftl/FTLExitThunkGenerator.cpp) | 66
-rw-r--r--  Source/JavaScriptCore/ftl/FTLWeightedTarget.h | 89
85 files changed, 20345 insertions, 7029 deletions
diff --git a/Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h b/Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h
index 06a68cd67..7a64d4d4f 100644
--- a/Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h
+++ b/Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,32 +23,24 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLAbbreviatedTypes_h
-#define FTLAbbreviatedTypes_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "LLVMAPI.h"
+#include "DFGCommon.h"
+
+namespace JSC { namespace B3 {
+class BasicBlock;
+class Value;
+enum Type : int8_t;
+} }
namespace JSC { namespace FTL {
-typedef LLVMAtomicOrdering LAtomicOrdering;
-typedef LLVMBasicBlockRef LBasicBlock;
-typedef LLVMBuilderRef LBuilder;
-typedef LLVMCallConv LCallConv;
-typedef LLVMContextRef LContext;
-typedef LLVMIntPredicate LIntPredicate;
-typedef LLVMLinkage LLinkage;
-typedef LLVMModuleRef LModule;
-typedef LLVMRealPredicate LRealPredicate;
-typedef LLVMTypeRef LType;
-typedef LLVMValueRef LValue;
+typedef B3::BasicBlock* LBasicBlock;
+typedef B3::Type LType;
+typedef B3::Value* LValue;
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLAbbreviatedTypes_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLAbbreviations.h b/Source/JavaScriptCore/ftl/FTLAbbreviations.h
deleted file mode 100644
index 02e0c04c3..000000000
--- a/Source/JavaScriptCore/ftl/FTLAbbreviations.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef FTLAbbreviations_h
-#define FTLAbbreviations_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(FTL_JIT)
-
-#include "FTLAbbreviatedTypes.h"
-#include "FTLSwitchCase.h"
-#include "FTLValueFromBlock.h"
-#include "LLVMAPI.h"
-#include <cstring>
-
-namespace JSC { namespace FTL {
-
-// This file contains short-form calls into the LLVM C API. It is meant to
-// save typing and make the lowering code clearer. If we ever call an LLVM C API
-// function more than once in the FTL lowering code, we should add a shortcut for
-// it here.
-
-#if USE(JSVALUE32_64)
-#error "The FTL backend assumes that pointers are 64-bit."
-#endif
-
-static inline LType voidType(LContext context) { return llvm->VoidTypeInContext(context); }
-static inline LType int1Type(LContext context) { return llvm->Int1TypeInContext(context); }
-static inline LType int8Type(LContext context) { return llvm->Int8TypeInContext(context); }
-static inline LType int16Type(LContext context) { return llvm->Int16TypeInContext(context); }
-static inline LType int32Type(LContext context) { return llvm->Int32TypeInContext(context); }
-static inline LType int64Type(LContext context) { return llvm->Int64TypeInContext(context); }
-static inline LType intPtrType(LContext context) { return llvm->Int64TypeInContext(context); }
-static inline LType floatType(LContext context) { return llvm->FloatTypeInContext(context); }
-static inline LType doubleType(LContext context) { return llvm->DoubleTypeInContext(context); }
-
-static inline LType pointerType(LType type) { return llvm->PointerType(type, 0); }
-static inline LType vectorType(LType type, unsigned count) { return llvm->VectorType(type, count); }
-
-enum PackingMode { NotPacked, Packed };
-static inline LType structType(LContext context, LType* elementTypes, unsigned elementCount, PackingMode packing = NotPacked)
-{
- return llvm->StructTypeInContext(context, elementTypes, elementCount, packing == Packed);
-}
-static inline LType structType(LContext context, PackingMode packing = NotPacked)
-{
- return structType(context, 0, 0, packing);
-}
-static inline LType structType(LContext context, LType element1, PackingMode packing = NotPacked)
-{
- return structType(context, &element1, 1, packing);
-}
-static inline LType structType(LContext context, LType element1, LType element2, PackingMode packing = NotPacked)
-{
- LType elements[] = { element1, element2 };
- return structType(context, elements, 2, packing);
-}
-
-enum Variadicity { NotVariadic, Variadic };
-static inline LType functionType(LType returnType, const LType* paramTypes, unsigned paramCount, Variadicity variadicity)
-{
- return llvm->FunctionType(returnType, const_cast<LType*>(paramTypes), paramCount, variadicity == Variadic);
-}
-template<typename VectorType>
-inline LType functionType(LType returnType, const VectorType& vector, Variadicity variadicity = NotVariadic)
-{
- return functionType(returnType, vector.begin(), vector.size(), variadicity);
-}
-static inline LType functionType(LType returnType, Variadicity variadicity = NotVariadic)
-{
- return functionType(returnType, 0, 0, variadicity);
-}
-static inline LType functionType(LType returnType, LType param1, Variadicity variadicity = NotVariadic)
-{
- return functionType(returnType, &param1, 1, variadicity);
-}
-static inline LType functionType(LType returnType, LType param1, LType param2, Variadicity variadicity = NotVariadic)
-{
- LType paramTypes[] = { param1, param2 };
- return functionType(returnType, paramTypes, 2, variadicity);
-}
-static inline LType functionType(LType returnType, LType param1, LType param2, LType param3, Variadicity variadicity = NotVariadic)
-{
- LType paramTypes[] = { param1, param2, param3 };
- return functionType(returnType, paramTypes, 3, variadicity);
-}
-static inline LType functionType(LType returnType, LType param1, LType param2, LType param3, LType param4, Variadicity variadicity = NotVariadic)
-{
- LType paramTypes[] = { param1, param2, param3, param4 };
- return functionType(returnType, paramTypes, 4, variadicity);
-}
-
-static inline LType typeOf(LValue value) { return llvm->TypeOf(value); }
-
-static inline unsigned mdKindID(LContext context, const char* string) { return llvm->GetMDKindIDInContext(context, string, std::strlen(string)); }
-static inline LValue mdString(LContext context, const char* string, unsigned length) { return llvm->MDStringInContext(context, string, length); }
-static inline LValue mdString(LContext context, const char* string) { return mdString(context, string, std::strlen(string)); }
-static inline LValue mdNode(LContext context, LValue* args, unsigned numArgs) { return llvm->MDNodeInContext(context, args, numArgs); }
-static inline LValue mdNode(LContext context) { return mdNode(context, 0, 0); }
-static inline LValue mdNode(LContext context, LValue arg1) { return mdNode(context, &arg1, 1); }
-static inline LValue mdNode(LContext context, LValue arg1, LValue arg2)
-{
- LValue args[] = { arg1, arg2 };
- return mdNode(context, args, 2);
-}
-
-static inline void setMetadata(LValue instruction, unsigned kind, LValue metadata) { llvm->SetMetadata(instruction, kind, metadata); }
-
-static inline LValue addFunction(LModule module, const char* name, LType type) { return llvm->AddFunction(module, name, type); }
-static inline void setLinkage(LValue global, LLinkage linkage) { llvm->SetLinkage(global, linkage); }
-static inline void setFunctionCallingConv(LValue function, LCallConv convention) { llvm->SetFunctionCallConv(function, convention); }
-
-static inline LValue addExternFunction(LModule module, const char* name, LType type)
-{
- LValue result = addFunction(module, name, type);
- setLinkage(result, LLVMExternalLinkage);
- return result;
-}
-
-static inline LValue getParam(LValue function, unsigned index) { return llvm->GetParam(function, index); }
-static inline LValue getUndef(LType type) { return llvm->GetUndef(type); }
-
-enum BitExtension { ZeroExtend, SignExtend };
-static inline LValue constInt(LType type, unsigned long long value, BitExtension extension = ZeroExtend) { return llvm->ConstInt(type, value, extension == SignExtend); }
-static inline LValue constReal(LType type, double value) { return llvm->ConstReal(type, value); }
-static inline LValue constIntToPtr(LValue value, LType type) { return llvm->ConstIntToPtr(value, type); }
-static inline LValue constNull(LType type) { return llvm->ConstNull(type); }
-static inline LValue constBitCast(LValue value, LType type) { return llvm->ConstBitCast(value, type); }
-
-static inline LBasicBlock appendBasicBlock(LContext context, LValue function, const char* name = "") { return llvm->AppendBasicBlockInContext(context, function, name); }
-static inline LBasicBlock insertBasicBlock(LContext context, LBasicBlock beforeBasicBlock, const char* name = "") { return llvm->InsertBasicBlockInContext(context, beforeBasicBlock, name); }
-
-static inline LValue buildPhi(LBuilder builder, LType type) { return llvm->BuildPhi(builder, type, ""); }
-static inline void addIncoming(LValue phi, const LValue* values, const LBasicBlock* blocks, unsigned numPredecessors)
-{
- llvm->AddIncoming(phi, const_cast<LValue*>(values), const_cast<LBasicBlock*>(blocks), numPredecessors);
-}
-static inline void addIncoming(LValue phi, ValueFromBlock value1)
-{
- LValue value = value1.value();
- LBasicBlock block = value1.block();
- addIncoming(phi, &value, &block, 1);
-}
-static inline void addIncoming(LValue phi, ValueFromBlock value1, ValueFromBlock value2)
-{
- LValue values[] = { value1.value(), value2.value() };
- LBasicBlock blocks[] = { value1.block(), value2.block() };
- addIncoming(phi, values, blocks, 2);
-}
-static inline LValue buildPhi(LBuilder builder, LType type, ValueFromBlock value1)
-{
- LValue result = buildPhi(builder, type);
- addIncoming(result, value1);
- return result;
-}
-static inline LValue buildPhi(
- LBuilder builder, LType type, ValueFromBlock value1, ValueFromBlock value2)
-{
- LValue result = buildPhi(builder, type);
- addIncoming(result, value1, value2);
- return result;
-}
-
-static inline LValue buildAlloca(LBuilder builder, LType type) { return llvm->BuildAlloca(builder, type, ""); }
-static inline LValue buildAdd(LBuilder builder, LValue left, LValue right) { return llvm->BuildAdd(builder, left, right, ""); }
-static inline LValue buildSub(LBuilder builder, LValue left, LValue right) { return llvm->BuildSub(builder, left, right, ""); }
-static inline LValue buildMul(LBuilder builder, LValue left, LValue right) { return llvm->BuildMul(builder, left, right, ""); }
-static inline LValue buildDiv(LBuilder builder, LValue left, LValue right) { return llvm->BuildSDiv(builder, left, right, ""); }
-static inline LValue buildRem(LBuilder builder, LValue left, LValue right) { return llvm->BuildSRem(builder, left, right, ""); }
-static inline LValue buildNeg(LBuilder builder, LValue value) { return llvm->BuildNeg(builder, value, ""); }
-static inline LValue buildFAdd(LBuilder builder, LValue left, LValue right) { return llvm->BuildFAdd(builder, left, right, ""); }
-static inline LValue buildFSub(LBuilder builder, LValue left, LValue right) { return llvm->BuildFSub(builder, left, right, ""); }
-static inline LValue buildFMul(LBuilder builder, LValue left, LValue right) { return llvm->BuildFMul(builder, left, right, ""); }
-static inline LValue buildFDiv(LBuilder builder, LValue left, LValue right) { return llvm->BuildFDiv(builder, left, right, ""); }
-static inline LValue buildFRem(LBuilder builder, LValue left, LValue right) { return llvm->BuildFRem(builder, left, right, ""); }
-static inline LValue buildFNeg(LBuilder builder, LValue value) { return llvm->BuildFNeg(builder, value, ""); }
-static inline LValue buildAnd(LBuilder builder, LValue left, LValue right) { return llvm->BuildAnd(builder, left, right, ""); }
-static inline LValue buildOr(LBuilder builder, LValue left, LValue right) { return llvm->BuildOr(builder, left, right, ""); }
-static inline LValue buildXor(LBuilder builder, LValue left, LValue right) { return llvm->BuildXor(builder, left, right, ""); }
-static inline LValue buildShl(LBuilder builder, LValue left, LValue right) { return llvm->BuildShl(builder, left, right, ""); }
-static inline LValue buildAShr(LBuilder builder, LValue left, LValue right) { return llvm->BuildAShr(builder, left, right, ""); }
-static inline LValue buildLShr(LBuilder builder, LValue left, LValue right) { return llvm->BuildLShr(builder, left, right, ""); }
-static inline LValue buildNot(LBuilder builder, LValue value) { return llvm->BuildNot(builder, value, ""); }
-static inline LValue buildLoad(LBuilder builder, LValue pointer) { return llvm->BuildLoad(builder, pointer, ""); }
-static inline LValue buildStore(LBuilder builder, LValue value, LValue pointer) { return llvm->BuildStore(builder, value, pointer); }
-static inline LValue buildSExt(LBuilder builder, LValue value, LType type) { return llvm->BuildSExt(builder, value, type, ""); }
-static inline LValue buildZExt(LBuilder builder, LValue value, LType type) { return llvm->BuildZExt(builder, value, type, ""); }
-static inline LValue buildFPToSI(LBuilder builder, LValue value, LType type) { return llvm->BuildFPToSI(builder, value, type, ""); }
-static inline LValue buildFPToUI(LBuilder builder, LValue value, LType type) { return llvm->BuildFPToUI(builder, value, type, ""); }
-static inline LValue buildSIToFP(LBuilder builder, LValue value, LType type) { return llvm->BuildSIToFP(builder, value, type, ""); }
-static inline LValue buildUIToFP(LBuilder builder, LValue value, LType type) { return llvm->BuildUIToFP(builder, value, type, ""); }
-static inline LValue buildIntCast(LBuilder builder, LValue value, LType type) { return llvm->BuildIntCast(builder, value, type, ""); }
-static inline LValue buildFPCast(LBuilder builder, LValue value, LType type) { return llvm->BuildFPCast(builder, value, type, ""); }
-static inline LValue buildIntToPtr(LBuilder builder, LValue value, LType type) { return llvm->BuildIntToPtr(builder, value, type, ""); }
-static inline LValue buildPtrToInt(LBuilder builder, LValue value, LType type) { return llvm->BuildPtrToInt(builder, value, type, ""); }
-static inline LValue buildBitCast(LBuilder builder, LValue value, LType type) { return llvm->BuildBitCast(builder, value, type, ""); }
-static inline LValue buildICmp(LBuilder builder, LIntPredicate cond, LValue left, LValue right) { return llvm->BuildICmp(builder, cond, left, right, ""); }
-static inline LValue buildFCmp(LBuilder builder, LRealPredicate cond, LValue left, LValue right) { return llvm->BuildFCmp(builder, cond, left, right, ""); }
-static inline LValue buildInsertElement(LBuilder builder, LValue vector, LValue element, LValue index) { return llvm->BuildInsertElement(builder, vector, element, index, ""); }
-
-enum SynchronizationScope { SingleThread, CrossThread };
-static inline LValue buildFence(LBuilder builder, LAtomicOrdering ordering, SynchronizationScope scope = CrossThread)
-{
- return llvm->BuildFence(builder, ordering, scope == SingleThread, "");
-}
-
-static inline LValue buildCall(LBuilder builder, LValue function, const LValue* args, unsigned numArgs)
-{
- return llvm->BuildCall(builder, function, const_cast<LValue*>(args), numArgs, "");
-}
-template<typename VectorType>
-inline LValue buildCall(LBuilder builder, LValue function, const VectorType& vector)
-{
- return buildCall(builder, function, vector.begin(), vector.size());
-}
-static inline LValue buildCall(LBuilder builder, LValue function)
-{
- return buildCall(builder, function, 0, 0);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1)
-{
- return buildCall(builder, function, &arg1, 1);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1, LValue arg2)
-{
- LValue args[] = { arg1, arg2 };
- return buildCall(builder, function, args, 2);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1, LValue arg2, LValue arg3)
-{
- LValue args[] = { arg1, arg2, arg3 };
- return buildCall(builder, function, args, 3);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4)
-{
- LValue args[] = { arg1, arg2, arg3, arg4 };
- return buildCall(builder, function, args, 4);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5)
-{
- LValue args[] = { arg1, arg2, arg3, arg4, arg5 };
- return buildCall(builder, function, args, 5);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5, LValue arg6)
-{
- LValue args[] = { arg1, arg2, arg3, arg4, arg5, arg6 };
- return buildCall(builder, function, args, 6);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5, LValue arg6, LValue arg7)
-{
- LValue args[] = { arg1, arg2, arg3, arg4, arg5, arg6, arg7 };
- return buildCall(builder, function, args, 7);
-}
-static inline LValue buildCall(LBuilder builder, LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5, LValue arg6, LValue arg7, LValue arg8)
-{
- LValue args[] = { arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 };
- return buildCall(builder, function, args, 8);
-}
-static inline void setInstructionCallingConvention(LValue instruction, LCallConv callingConvention) { llvm->SetInstructionCallConv(instruction, callingConvention); }
-static inline LValue buildExtractValue(LBuilder builder, LValue aggVal, unsigned index) { return llvm->BuildExtractValue(builder, aggVal, index, ""); }
-static inline LValue buildSelect(LBuilder builder, LValue condition, LValue taken, LValue notTaken) { return llvm->BuildSelect(builder, condition, taken, notTaken, ""); }
-static inline LValue buildBr(LBuilder builder, LBasicBlock destination) { return llvm->BuildBr(builder, destination); }
-static inline LValue buildCondBr(LBuilder builder, LValue condition, LBasicBlock taken, LBasicBlock notTaken) { return llvm->BuildCondBr(builder, condition, taken, notTaken); }
-static inline LValue buildSwitch(LBuilder builder, LValue value, LBasicBlock fallThrough, unsigned numCases) { return llvm->BuildSwitch(builder, value, fallThrough, numCases); }
-static inline void addCase(LValue switchInst, LValue value, LBasicBlock target) { llvm->AddCase(switchInst, value, target); }
-template<typename VectorType>
-static inline LValue buildSwitch(LBuilder builder, LValue value, const VectorType& cases, LBasicBlock fallThrough)
-{
- LValue result = buildSwitch(builder, value, fallThrough, cases.size());
- for (unsigned i = 0; i < cases.size(); ++i)
- addCase(result, cases[i].value(), cases[i].target());
- return result;
-}
-static inline LValue buildRet(LBuilder builder, LValue value) { return llvm->BuildRet(builder, value); }
-static inline LValue buildUnreachable(LBuilder builder) { return llvm->BuildUnreachable(builder); }
-
-static inline void dumpModule(LModule module) { llvm->DumpModule(module); }
-static inline void verifyModule(LModule module)
-{
- char* error = 0;
- llvm->VerifyModule(module, LLVMAbortProcessAction, &error);
- llvm->DisposeMessage(error);
-}
-
-} } // namespace JSC::FTL
-
-#endif // ENABLE(FTL_JIT)
-
-#endif // FTLAbbreviations_h
-
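The helpers deleted above were thin typing-saving wrappers over the LLVM C API. With B3 there is no builder object to wrap; lowering code calls the FTL Output interface directly, as the FTLAbstractHeap.cpp hunks below do with out.add(), out.mul(), out.constIntPtr() and out.addPtr(). Below is a minimal sketch of the element-addressing pattern in that style; the helper function itself is hypothetical, only the Output calls and the LValue/TypedPointer types are taken from this patch.

// Sketch only, not part of the patch. Mirrors the shape of
// IndexedAbstractHeap::baseIndex() as rewritten in FTLAbstractHeap.cpp below.
static TypedPointer elementPointer(Output& out, const AbstractHeap& heap, LValue base, LValue index, size_t elementSize, ptrdiff_t offset)
{
    // Scale the index by the element size, add it to the base pointer,
    // then fold in the constant byte offset.
    LValue scaled = out.mul(index, out.constIntPtr(elementSize));
    return TypedPointer(heap, out.addPtr(out.add(base, scaled), offset));
}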
diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp b/Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp
index eadd5af97..90211fd94 100644
--- a/Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp
+++ b/Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,51 +28,114 @@
#if ENABLE(FTL_JIT)
-#include "FTLAbbreviations.h"
+#include "DFGCommon.h"
+#include "FTLAbbreviatedTypes.h"
#include "FTLAbstractHeapRepository.h"
#include "FTLOutput.h"
#include "FTLTypedPointer.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include "Options.h"
namespace JSC { namespace FTL {
-LValue AbstractHeap::tbaaMetadataSlow(const AbstractHeapRepository& repository) const
+using namespace B3;
+
+AbstractHeap::AbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset)
+ : m_offset(offset)
+ , m_heapName(heapName)
{
- m_tbaaMetadata = mdNode(
- repository.m_context,
- mdString(repository.m_context, m_heapName),
- m_parent->tbaaMetadata(repository));
- return m_tbaaMetadata;
+ changeParent(parent);
}
-void AbstractHeap::decorateInstruction(LValue instruction, const AbstractHeapRepository& repository) const
+void AbstractHeap::changeParent(AbstractHeap* parent)
{
- if (!Options::useFTLTBAA())
+ if (m_parent) {
+ bool result = m_parent->m_children.removeFirst(this);
+ RELEASE_ASSERT(result);
+ }
+
+ m_parent = parent;
+
+ if (parent) {
+ ASSERT(!m_parent->m_children.contains(this));
+ m_parent->m_children.append(this);
+ }
+}
+
+void AbstractHeap::compute(unsigned begin)
+{
+ // This recursively computes the ranges of the tree. This solves the following constraints
+ // in linear time:
+ //
+ // - A node's end is greater than its begin.
+ // - A node's begin is greater than or equal to its parent's begin.
+ // - A node's end is less than or equal to its parent's end.
+ // - The ranges are as small as possible.
+ //
+ // It's OK to recurse because we keep the depth of our abstract heap hierarchy fairly sane.
+ // I think that it gets 4 deep at most.
+
+ if (m_children.isEmpty()) {
+ // Must special-case leaves so that they use just one slot on the number line.
+ m_range = HeapRange(begin);
return;
- setMetadata(instruction, repository.m_tbaaKind, tbaaMetadata(repository));
+ }
+
+ unsigned current = begin;
+ for (AbstractHeap* child : m_children) {
+ child->compute(current);
+ current = child->range().end();
+ }
+
+ m_range = HeapRange(begin, current);
+}
+
+void AbstractHeap::shallowDump(PrintStream& out) const
+{
+ out.print(heapName(), "(", m_offset, ")");
+ if (m_range)
+ out.print("<", m_range, ">");
+}
+
+void AbstractHeap::dump(PrintStream& out) const
+{
+ shallowDump(out);
+ if (m_parent)
+ out.print("->", *m_parent);
}
-IndexedAbstractHeap::IndexedAbstractHeap(LContext context, AbstractHeap* parent, const char* heapName, size_t elementSize)
+void AbstractHeap::deepDump(PrintStream& out, unsigned indent) const
+{
+ auto printIndent = [&] () {
+ for (unsigned i = indent; i--;)
+ out.print(" ");
+ };
+
+ printIndent();
+ shallowDump(out);
+
+ if (m_children.isEmpty()) {
+ out.print("\n");
+ return;
+ }
+
+ out.print(":\n");
+ for (AbstractHeap* child : m_children)
+ child->deepDump(out, indent + 1);
+}
+
+void AbstractHeap::badRangeError() const
+{
+ dataLog("Heap does not have range: ", *this, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+IndexedAbstractHeap::IndexedAbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset, size_t elementSize)
: m_heapForAnyIndex(parent, heapName)
, m_heapNameLength(strlen(heapName))
+ , m_offset(offset)
, m_elementSize(elementSize)
- , m_scaleTerm(0)
- , m_canShift(false)
-{
- // See if there is a common shift amount we could use instead of multiplying. Don't
- // try too hard. This is just a speculative optimization to reduce load on LLVM.
- for (unsigned i = 0; i < 4; ++i) {
- if ((1 << i) == m_elementSize) {
- if (i)
- m_scaleTerm = constInt(intPtrType(context), i, ZeroExtend);
- m_canShift = true;
- break;
- }
- }
-
- if (!m_canShift)
- m_scaleTerm = constInt(intPtrType(context), m_elementSize, ZeroExtend);
+{
}
IndexedAbstractHeap::~IndexedAbstractHeap()
@@ -83,36 +146,29 @@ TypedPointer IndexedAbstractHeap::baseIndex(Output& out, LValue base, LValue ind
{
if (indexAsConstant.isInt32())
return out.address(base, at(indexAsConstant.asInt32()), offset);
+
+ LValue result = out.add(base, out.mul(index, out.constIntPtr(m_elementSize)));
- LValue result;
- if (m_canShift) {
- if (!m_scaleTerm)
- result = out.add(base, index);
- else
- result = out.add(base, out.shl(index, m_scaleTerm));
- } else
- result = out.add(base, out.mul(index, m_scaleTerm));
-
- return TypedPointer(atAnyIndex(), out.addPtr(result, offset));
+ return TypedPointer(atAnyIndex(), out.addPtr(result, m_offset + offset));
}
-const AbstractField& IndexedAbstractHeap::atSlow(ptrdiff_t index)
+const AbstractHeap& IndexedAbstractHeap::atSlow(ptrdiff_t index)
{
ASSERT(static_cast<size_t>(index) >= m_smallIndices.size());
if (UNLIKELY(!m_largeIndices))
- m_largeIndices = adoptPtr(new MapType());
+ m_largeIndices = std::make_unique<MapType>();
- std::unique_ptr<AbstractField>& field = m_largeIndices->add(index, nullptr).iterator->value;
+ std::unique_ptr<AbstractHeap>& field = m_largeIndices->add(index, nullptr).iterator->value;
if (!field) {
- field = std::make_unique<AbstractField>();
+ field = std::make_unique<AbstractHeap>();
initialize(*field, index);
}
return *field;
}
-void IndexedAbstractHeap::initialize(AbstractField& field, ptrdiff_t signedIndex)
+void IndexedAbstractHeap::initialize(AbstractHeap& field, ptrdiff_t signedIndex)
{
// Build up a name of the form:
//
@@ -131,7 +187,10 @@ void IndexedAbstractHeap::initialize(AbstractField& field, ptrdiff_t signedIndex
//
// Blah_neg_A
//
- // This is important because LLVM uses the string to distinguish the types.
+ // This naming convention comes from our previous use of LLVM. It's not clear that we need
+ // it anymore, though it is sort of nifty. Basically, B3 doesn't need string names for
+ // abstract heaps, but the fact that we have a reasonably efficient way to always name the
+ // heaps will probably come in handy for debugging.
static const char* negSplit = "_neg_";
static const char* posSplit = "_";
@@ -168,16 +227,20 @@ void IndexedAbstractHeap::initialize(AbstractField& field, ptrdiff_t signedIndex
accumulator >>= 4;
}
- field.initialize(&m_heapForAnyIndex, characters, signedIndex * m_elementSize);
+ field.initialize(&m_heapForAnyIndex, characters, m_offset + signedIndex * m_elementSize);
return;
}
RELEASE_ASSERT_NOT_REACHED();
}
-NumberedAbstractHeap::NumberedAbstractHeap(
- LContext context, AbstractHeap* heap, const char* heapName)
- : m_indexedHeap(context, heap, heapName, 1)
+void IndexedAbstractHeap::dump(PrintStream& out) const
+{
+ out.print("Indexed:", atAnyIndex());
+}
+
+NumberedAbstractHeap::NumberedAbstractHeap(AbstractHeap* heap, const char* heapName)
+ : m_indexedHeap(heap, heapName, 0, 1)
{
}
@@ -185,9 +248,13 @@ NumberedAbstractHeap::~NumberedAbstractHeap()
{
}
-AbsoluteAbstractHeap::AbsoluteAbstractHeap(
- LContext context, AbstractHeap* heap, const char* heapName)
- : m_indexedHeap(context, heap, heapName, 1)
+void NumberedAbstractHeap::dump(PrintStream& out) const
+{
+ out.print("Numbered: ", atAnyNumber());
+}
+
+AbsoluteAbstractHeap::AbsoluteAbstractHeap(AbstractHeap* heap, const char* heapName)
+ : m_indexedHeap(heap, heapName, 0, 1)
{
}
@@ -195,6 +262,11 @@ AbsoluteAbstractHeap::~AbsoluteAbstractHeap()
{
}
+void AbsoluteAbstractHeap::dump(PrintStream& out) const
+{
+ out.print("Absolute:", atAnyAddress());
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
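AbstractHeap::compute() above lays the heap hierarchy out on a number line: each leaf takes one slot and each parent's range spans exactly its children, which is what lets B3 decide heap disjointness with simple range comparisons. The following self-contained illustration of that numbering uses a simplified stand-in node type, not the JSC classes; the tree shape loosely follows the JSCell header heaps set up in FTLAbstractHeapRepository.cpp below.

#include <cstdio>
#include <initializer_list>
#include <vector>

// Simplified stand-in for AbstractHeap/HeapRange, only to show how compute()
// assigns ranges: leaves occupy one slot, parents cover all of their children.
struct Node {
    const char* name;
    std::vector<Node*> children;
    unsigned begin { 0 };
    unsigned end { 0 };

    void compute(unsigned start = 0)
    {
        if (children.empty()) {
            begin = start;
            end = start + 1; // a leaf uses exactly one slot on the number line
            return;
        }
        unsigned current = start;
        for (Node* child : children) {
            child->compute(current);
            current = child->end;
        }
        begin = start;
        end = current;
    }
};

int main()
{
    Node structureID { "JSCell_structureID" };
    Node typeInfoType { "JSCell_typeInfoType" };
    Node cellState { "JSCell_cellState" };
    Node usefulBytes { "JSCell_usefulBytes", { &typeInfoType, &cellState } };
    Node header { "JSCell_header", { &structureID, &usefulBytes } };

    header.compute();
    for (Node* node : { &header, &structureID, &usefulBytes, &typeInfoType, &cellState })
        std::printf("%-20s [%u, %u)\n", node->name, node->begin, node->end);
    return 0;
}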
diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeap.h b/Source/JavaScriptCore/ftl/FTLAbstractHeap.h
index e92a2fc2c..ae883a620 100644
--- a/Source/JavaScriptCore/ftl/FTLAbstractHeap.h
+++ b/Source/JavaScriptCore/ftl/FTLAbstractHeap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,29 +23,22 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLAbstractHeap_h
-#define FTLAbstractHeap_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "FTLAbbreviations.h"
+#include "B3HeapRange.h"
+#include "FTLAbbreviatedTypes.h"
#include "JSCJSValue.h"
#include <array>
#include <wtf/FastMalloc.h>
#include <wtf/HashMap.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
#include <wtf/Vector.h>
#include <wtf/text/CString.h>
namespace JSC { namespace FTL {
-// The FTL JIT tries to aid LLVM's TBAA. The FTL's notion of how this
-// happens is the AbstractHeap. AbstractHeaps are a simple type system
-// with sub-typing.
-
class AbstractHeapRepository;
class Output;
class TypedPointer;
@@ -54,132 +47,120 @@ class AbstractHeap {
WTF_MAKE_NONCOPYABLE(AbstractHeap); WTF_MAKE_FAST_ALLOCATED;
public:
AbstractHeap()
- : m_parent(0)
- , m_heapName(0)
- , m_tbaaMetadata(0)
- {
- }
-
- AbstractHeap(AbstractHeap* parent, const char* heapName)
- : m_parent(parent)
- , m_heapName(heapName)
- , m_tbaaMetadata(0)
{
}
+ AbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset = 0);
+
bool isInitialized() const { return !!m_heapName; }
- void initialize(AbstractHeap* parent, const char* heapName)
+ void initialize(AbstractHeap* parent, const char* heapName, ptrdiff_t offset = 0)
{
- m_parent = parent;
+ changeParent(parent);
m_heapName = heapName;
+ m_offset = offset;
}
+
+ void changeParent(AbstractHeap* parent);
AbstractHeap* parent() const
{
ASSERT(isInitialized());
return m_parent;
}
+
+ const Vector<AbstractHeap*>& children() const;
const char* heapName() const
{
ASSERT(isInitialized());
return m_heapName;
}
-
- LValue tbaaMetadata(const AbstractHeapRepository& repository) const
- {
- ASSERT(isInitialized());
- if (LIKELY(!!m_tbaaMetadata))
- return m_tbaaMetadata;
- return tbaaMetadataSlow(repository);
- }
-
- void decorateInstruction(LValue instruction, const AbstractHeapRepository&) const;
-private:
- friend class AbstractHeapRepository;
-
- LValue tbaaMetadataSlow(const AbstractHeapRepository&) const;
-
- AbstractHeap* m_parent;
- const char* m_heapName;
- mutable LValue m_tbaaMetadata;
-};
-
-// Think of "AbstractField" as being an "AbstractHeapWithOffset". I would have named
-// it the latter except that I don't like typing that much.
-class AbstractField : public AbstractHeap {
-public:
- AbstractField()
- {
- }
-
- AbstractField(AbstractHeap* parent, const char* heapName, ptrdiff_t offset)
- : AbstractHeap(parent, heapName)
- , m_offset(offset)
- {
- }
-
- void initialize(AbstractHeap* parent, const char* heapName, ptrdiff_t offset)
+ B3::HeapRange range() const
{
- AbstractHeap::initialize(parent, heapName);
- m_offset = offset;
+        // This will not have a valid value until after all lowering is done. To associate an
+        // AbstractHeap with a B3::Value*, use AbstractHeapRepository::decorateXXX().
+ if (!m_range)
+ badRangeError();
+
+ return m_range;
}
-
+
+ // WARNING: Not all abstract heaps have a meaningful offset.
ptrdiff_t offset() const
{
ASSERT(isInitialized());
return m_offset;
}
-
+
+ void compute(unsigned begin = 0);
+
+ // Print information about just this heap.
+ void shallowDump(PrintStream&) const;
+
+ // Print information about this heap and its ancestors. This is the default.
+ void dump(PrintStream&) const;
+
+    // Print information about this heap and its descendants. This is a multi-line dump.
+ void deepDump(PrintStream&, unsigned indent = 0) const;
+
private:
- ptrdiff_t m_offset;
+ friend class AbstractHeapRepository;
+
+ NO_RETURN_DUE_TO_CRASH void badRangeError() const;
+
+ AbstractHeap* m_parent { nullptr };
+ Vector<AbstractHeap*> m_children;
+ intptr_t m_offset { 0 };
+ B3::HeapRange m_range;
+ const char* m_heapName { nullptr };
};
class IndexedAbstractHeap {
public:
- IndexedAbstractHeap(LContext, AbstractHeap* parent, const char* heapName, size_t elementSize);
+ IndexedAbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset, size_t elementSize);
~IndexedAbstractHeap();
const AbstractHeap& atAnyIndex() const { return m_heapForAnyIndex; }
- const AbstractField& at(ptrdiff_t index)
+ const AbstractHeap& at(ptrdiff_t index)
{
if (static_cast<size_t>(index) < m_smallIndices.size())
return returnInitialized(m_smallIndices[index], index);
return atSlow(index);
}
- const AbstractField& operator[](ptrdiff_t index) { return at(index); }
+ const AbstractHeap& operator[](ptrdiff_t index) { return at(index); }
TypedPointer baseIndex(Output& out, LValue base, LValue index, JSValue indexAsConstant = JSValue(), ptrdiff_t offset = 0);
+ void dump(PrintStream&) const;
+
private:
- const AbstractField& returnInitialized(AbstractField& field, ptrdiff_t index)
+ const AbstractHeap& returnInitialized(AbstractHeap& field, ptrdiff_t index)
{
if (UNLIKELY(!field.isInitialized()))
initialize(field, index);
return field;
}
- const AbstractField& atSlow(ptrdiff_t index);
- void initialize(AbstractField& field, ptrdiff_t index);
+ const AbstractHeap& atSlow(ptrdiff_t index);
+ void initialize(AbstractHeap& field, ptrdiff_t index);
AbstractHeap m_heapForAnyIndex;
size_t m_heapNameLength;
+ ptrdiff_t m_offset;
size_t m_elementSize;
- LValue m_scaleTerm;
- bool m_canShift;
- std::array<AbstractField, 16> m_smallIndices;
+ std::array<AbstractHeap, 16> m_smallIndices;
struct WithoutZeroOrOneHashTraits : WTF::GenericHashTraits<ptrdiff_t> {
static void constructDeletedValue(ptrdiff_t& slot) { slot = 1; }
static bool isDeletedValue(ptrdiff_t value) { return value == 1; }
};
- typedef HashMap<ptrdiff_t, std::unique_ptr<AbstractField>, WTF::IntHash<ptrdiff_t>, WithoutZeroOrOneHashTraits> MapType;
+ typedef HashMap<ptrdiff_t, std::unique_ptr<AbstractHeap>, WTF::IntHash<ptrdiff_t>, WithoutZeroOrOneHashTraits> MapType;
- OwnPtr<MapType> m_largeIndices;
+ std::unique_ptr<MapType> m_largeIndices;
Vector<CString, 16> m_largeIndexNames;
};
@@ -190,7 +171,7 @@ private:
class NumberedAbstractHeap {
public:
- NumberedAbstractHeap(LContext, AbstractHeap* parent, const char* heapName);
+ NumberedAbstractHeap(AbstractHeap* parent, const char* heapName);
~NumberedAbstractHeap();
const AbstractHeap& atAnyNumber() const { return m_indexedHeap.atAnyIndex(); }
@@ -198,6 +179,8 @@ public:
const AbstractHeap& at(unsigned number) { return m_indexedHeap.at(number); }
const AbstractHeap& operator[](unsigned number) { return at(number); }
+ void dump(PrintStream&) const;
+
private:
// We use the fact that the indexed heap already has a superset of the
@@ -207,17 +190,19 @@ private:
class AbsoluteAbstractHeap {
public:
- AbsoluteAbstractHeap(LContext, AbstractHeap* parent, const char* heapName);
+ AbsoluteAbstractHeap(AbstractHeap* parent, const char* heapName);
~AbsoluteAbstractHeap();
const AbstractHeap& atAnyAddress() const { return m_indexedHeap.atAnyIndex(); }
- const AbstractHeap& at(void* address)
+ const AbstractHeap& at(const void* address)
{
return m_indexedHeap.at(bitwise_cast<ptrdiff_t>(address));
}
- const AbstractHeap& operator[](void* address) { return at(address); }
+ const AbstractHeap& operator[](const void* address) { return at(address); }
+
+ void dump(PrintStream&) const;
private:
// The trick here is that the indexed heap is "indexed" by a pointer-width
@@ -229,6 +214,3 @@ private:
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLAbstractHeap_h
-
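IndexedAbstractHeap::at() above is a two-level cache: indices below 16 are served from m_smallIndices, while larger (or negative) ones go through a lazily created HashMap whose WithoutZeroOrOneHashTraits reserve keys 0 and 1 as the empty and deleted sentinels, which is safe because those indices always hit the small array. A standalone sketch of the same pattern follows, with std::unordered_map standing in for WTF::HashMap and a squared value standing in for the lazily built AbstractHeap.

#include <array>
#include <cstdio>
#include <memory>
#include <unordered_map>

// Two-level lazy cache in the shape of IndexedAbstractHeap: a fixed small-index
// array on the fast path, a lazily allocated map for everything else.
class LazySquares {
public:
    long at(ptrdiff_t index)
    {
        if (static_cast<size_t>(index) < m_smallIndices.size()) {
            Entry& entry = m_smallIndices[index];
            if (!entry.initialized)
                initialize(entry, index);
            return entry.value;
        }
        return atSlow(index);
    }

private:
    struct Entry {
        bool initialized { false };
        long value { 0 };
    };

    long atSlow(ptrdiff_t index)
    {
        if (!m_largeIndices)
            m_largeIndices = std::make_unique<std::unordered_map<ptrdiff_t, Entry>>();
        Entry& entry = (*m_largeIndices)[index];
        if (!entry.initialized)
            initialize(entry, index);
        return entry.value;
    }

    void initialize(Entry& entry, ptrdiff_t index)
    {
        // Stand-in for IndexedAbstractHeap::initialize(), which builds and names a heap.
        entry.value = static_cast<long>(index) * static_cast<long>(index);
        entry.initialized = true;
    }

    std::array<Entry, 16> m_smallIndices {};
    std::unique_ptr<std::unordered_map<ptrdiff_t, Entry>> m_largeIndices;
};

int main()
{
    LazySquares squares;
    std::printf("%ld %ld %ld\n", squares.at(3), squares.at(3), squares.at(1000));
    return 0;
}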
diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp
index d7f83c960..0b48c4b19 100644
--- a/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp
+++ b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,14 +28,29 @@
#if ENABLE(FTL_JIT)
+#include "B3CCallValue.h"
+#include "B3FenceValue.h"
+#include "B3MemoryValue.h"
+#include "B3PatchpointValue.h"
+#include "B3ValueInlines.h"
+#include "DirectArguments.h"
+#include "FTLState.h"
+#include "GetterSetter.h"
+#include "JSPropertyNameEnumerator.h"
#include "JSScope.h"
-#include "JSVariableObject.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "RegExpConstructor.h"
+#include "RegExpObject.h"
+#include "ScopedArguments.h"
+#include "ScopedArgumentsTable.h"
+#include "ShadowChicken.h"
namespace JSC { namespace FTL {
-AbstractHeapRepository::AbstractHeapRepository(LContext context)
- : root(0, "jscRoot")
+using namespace B3;
+
+AbstractHeapRepository::AbstractHeapRepository()
+ : root(nullptr, "jscRoot")
#define ABSTRACT_HEAP_INITIALIZATION(name) , name(&root, #name)
FOR_EACH_ABSTRACT_HEAP(ABSTRACT_HEAP_INITIALIZATION)
@@ -45,25 +60,31 @@ AbstractHeapRepository::AbstractHeapRepository(LContext context)
FOR_EACH_ABSTRACT_FIELD(ABSTRACT_FIELD_INITIALIZATION)
#undef ABSTRACT_FIELD_INITIALIZATION
- , JSCell_freeListNext(JSCell_structure)
+ , JSCell_freeListNext(JSCell_header)
-#define INDEXED_ABSTRACT_HEAP_INITIALIZATION(name, size) , name(context, &root, #name, size)
+#define INDEXED_ABSTRACT_HEAP_INITIALIZATION(name, offset, size) , name(&root, #name, offset, size)
FOR_EACH_INDEXED_ABSTRACT_HEAP(INDEXED_ABSTRACT_HEAP_INITIALIZATION)
#undef INDEXED_ABSTRACT_HEAP_INITIALIZATION
-#define NUMBERED_ABSTRACT_HEAP_INITIALIZATION(name) , name(context, &root, #name)
+#define NUMBERED_ABSTRACT_HEAP_INITIALIZATION(name) , name(&root, #name)
FOR_EACH_NUMBERED_ABSTRACT_HEAP(NUMBERED_ABSTRACT_HEAP_INITIALIZATION)
#undef NUMBERED_ABSTRACT_HEAP_INITIALIZATION
- , absolute(context, &root, "absolute")
- , m_context(context)
- , m_tbaaKind(mdKindID(m_context, "tbaa"))
+ , absolute(&root, "absolute")
{
- root.m_tbaaMetadata = mdNode(m_context, mdString(m_context, root.m_heapName));
-
- RELEASE_ASSERT(m_tbaaKind);
- RELEASE_ASSERT(root.m_tbaaMetadata);
-
+ // Make sure that our explicit assumptions about the StructureIDBlob match reality.
+ RELEASE_ASSERT(!(JSCell_indexingTypeAndMisc.offset() & (sizeof(int32_t) - 1)));
+ RELEASE_ASSERT(JSCell_indexingTypeAndMisc.offset() + 1 == JSCell_typeInfoType.offset());
+ RELEASE_ASSERT(JSCell_indexingTypeAndMisc.offset() + 2 == JSCell_typeInfoFlags.offset());
+ RELEASE_ASSERT(JSCell_indexingTypeAndMisc.offset() + 3 == JSCell_cellState.offset());
+
+ JSCell_structureID.changeParent(&JSCell_header);
+ JSCell_usefulBytes.changeParent(&JSCell_header);
+ JSCell_indexingTypeAndMisc.changeParent(&JSCell_usefulBytes);
+ JSCell_typeInfoType.changeParent(&JSCell_usefulBytes);
+ JSCell_typeInfoFlags.changeParent(&JSCell_usefulBytes);
+ JSCell_cellState.changeParent(&JSCell_usefulBytes);
+
RELEASE_ASSERT(!JSCell_freeListNext.offset());
}
@@ -71,6 +92,72 @@ AbstractHeapRepository::~AbstractHeapRepository()
{
}
+void AbstractHeapRepository::decorateMemory(const AbstractHeap* heap, Value* value)
+{
+ m_heapForMemory.append(HeapForValue(heap, value));
+}
+
+void AbstractHeapRepository::decorateCCallRead(const AbstractHeap* heap, Value* value)
+{
+ m_heapForCCallRead.append(HeapForValue(heap, value));
+}
+
+void AbstractHeapRepository::decorateCCallWrite(const AbstractHeap* heap, Value* value)
+{
+ m_heapForCCallWrite.append(HeapForValue(heap, value));
+}
+
+void AbstractHeapRepository::decoratePatchpointRead(const AbstractHeap* heap, Value* value)
+{
+ m_heapForPatchpointRead.append(HeapForValue(heap, value));
+}
+
+void AbstractHeapRepository::decoratePatchpointWrite(const AbstractHeap* heap, Value* value)
+{
+ m_heapForPatchpointWrite.append(HeapForValue(heap, value));
+}
+
+void AbstractHeapRepository::decorateFenceRead(const AbstractHeap* heap, Value* value)
+{
+ m_heapForFenceRead.append(HeapForValue(heap, value));
+}
+
+void AbstractHeapRepository::decorateFenceWrite(const AbstractHeap* heap, Value* value)
+{
+ m_heapForFenceWrite.append(HeapForValue(heap, value));
+}
+
+void AbstractHeapRepository::computeRangesAndDecorateInstructions()
+{
+ root.compute();
+
+ if (verboseCompilationEnabled()) {
+ dataLog("Abstract Heap Repository:\n");
+ root.deepDump(WTF::dataFile());
+ }
+
+ auto rangeFor = [&] (const AbstractHeap* heap) -> HeapRange {
+ if (heap)
+ return heap->range();
+ return HeapRange();
+ };
+
+ for (HeapForValue entry : m_heapForMemory)
+ entry.value->as<MemoryValue>()->setRange(rangeFor(entry.heap));
+ for (HeapForValue entry : m_heapForCCallRead)
+ entry.value->as<CCallValue>()->effects.reads = rangeFor(entry.heap);
+ for (HeapForValue entry : m_heapForCCallWrite)
+ entry.value->as<CCallValue>()->effects.writes = rangeFor(entry.heap);
+ for (HeapForValue entry : m_heapForPatchpointRead)
+ entry.value->as<PatchpointValue>()->effects.reads = rangeFor(entry.heap);
+ for (HeapForValue entry : m_heapForPatchpointWrite)
+ entry.value->as<PatchpointValue>()->effects.writes = rangeFor(entry.heap);
+ for (HeapForValue entry : m_heapForFenceRead)
+ entry.value->as<FenceValue>()->read = rangeFor(entry.heap);
+ for (HeapForValue entry : m_heapForFenceWrite)
+ entry.value->as<FenceValue>()->write = rangeFor(entry.heap);
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
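The decorate*() methods and computeRangesAndDecorateInstructions() added above replace the old TBAA metadata with a two-phase protocol: while lowering runs, each B3 value is merely associated with an AbstractHeap; once all heaps exist, the hierarchy is numbered in one pass and the resulting HeapRanges are written back onto the recorded values. A hedged usage sketch follows; the function and the particular heaps chosen are illustrative, but the repository calls are the ones introduced in this file.

// Sketch only; in the real FTL these calls are made from the lowering code.
void decorateAndFinalize(AbstractHeapRepository& heaps, B3::MemoryValue* load, B3::CCallValue* call)
{
    // Phase 1, during lowering: record which abstract heap each value touches.
    // Ranges do not exist yet; only the association is stored.
    heaps.decorateMemory(&heaps.JSObject_butterfly, load);
    heaps.decorateCCallRead(&heaps.root, call);
    heaps.decorateCCallWrite(&heaps.root, call);

    // Phase 2, after lowering: number the hierarchy and push HeapRanges onto
    // the recorded values (MemoryValue::setRange(), CCallValue::effects, ...).
    heaps.computeRangesAndDecorateInstructions();
}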
diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h
index f811c8be5..4268179c3 100644
--- a/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h
+++ b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,55 +23,122 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLAbstractHeapRepository_h
-#define FTLAbstractHeapRepository_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
+#include "B3Value.h"
+#include "DFGArrayMode.h"
#include "FTLAbstractHeap.h"
+#include "HasOwnPropertyCache.h"
#include "IndexingType.h"
+#include "JSFixedArray.h"
+#include "JSMap.h"
+#include "JSSet.h"
+#include "Symbol.h"
namespace JSC { namespace FTL {
#define FOR_EACH_ABSTRACT_HEAP(macro) \
- macro(length) \
- macro(typedArrayProperties) \
- macro(WriteBarrierBuffer_bufferContents)
+ macro(typedArrayProperties)
#define FOR_EACH_ABSTRACT_FIELD(macro) \
+ macro(ArrayBuffer_data, ArrayBuffer::offsetOfData()) \
+ macro(Butterfly_arrayBuffer, Butterfly::offsetOfArrayBuffer()) \
macro(Butterfly_publicLength, Butterfly::offsetOfPublicLength()) \
macro(Butterfly_vectorLength, Butterfly::offsetOfVectorLength()) \
macro(CallFrame_callerFrame, CallFrame::callerFrameOffset()) \
+ macro(DirectArguments_callee, DirectArguments::offsetOfCallee()) \
+ macro(DirectArguments_length, DirectArguments::offsetOfLength()) \
+ macro(DirectArguments_minCapacity, DirectArguments::offsetOfMinCapacity()) \
+ macro(DirectArguments_mappedArguments, DirectArguments::offsetOfMappedArguments()) \
+ macro(DirectArguments_modifiedArgumentsDescriptor, DirectArguments::offsetOfModifiedArgumentsDescriptor()) \
+ macro(GetterSetter_getter, GetterSetter::offsetOfGetter()) \
+ macro(GetterSetter_setter, GetterSetter::offsetOfSetter()) \
macro(JSArrayBufferView_length, JSArrayBufferView::offsetOfLength()) \
macro(JSArrayBufferView_mode, JSArrayBufferView::offsetOfMode()) \
macro(JSArrayBufferView_vector, JSArrayBufferView::offsetOfVector()) \
- macro(JSCell_structure, JSCell::structureOffset()) \
+ macro(JSCell_cellState, JSCell::cellStateOffset()) \
+ macro(JSCell_header, 0) \
+ macro(JSCell_indexingTypeAndMisc, JSCell::indexingTypeAndMiscOffset()) \
+ macro(JSCell_structureID, JSCell::structureIDOffset()) \
+ macro(JSCell_typeInfoFlags, JSCell::typeInfoFlagsOffset()) \
+ macro(JSCell_typeInfoType, JSCell::typeInfoTypeOffset()) \
+ macro(JSCell_usefulBytes, JSCell::indexingTypeAndMiscOffset()) \
+ macro(JSFunction_executable, JSFunction::offsetOfExecutable()) \
+ macro(JSFunction_scope, JSFunction::offsetOfScopeChain()) \
+ macro(JSFunction_rareData, JSFunction::offsetOfRareData()) \
macro(JSObject_butterfly, JSObject::butterflyOffset()) \
+ macro(JSPropertyNameEnumerator_cachedInlineCapacity, JSPropertyNameEnumerator::cachedInlineCapacityOffset()) \
+ macro(JSPropertyNameEnumerator_cachedPropertyNamesVector, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()) \
+ macro(JSPropertyNameEnumerator_cachedStructureID, JSPropertyNameEnumerator::cachedStructureIDOffset()) \
+ macro(JSPropertyNameEnumerator_endGenericPropertyIndex, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()) \
+ macro(JSPropertyNameEnumerator_endStructurePropertyIndex, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()) \
+ macro(JSPropertyNameEnumerator_indexLength, JSPropertyNameEnumerator::indexedLengthOffset()) \
macro(JSScope_next, JSScope::offsetOfNext()) \
+ macro(JSString_flags, JSString::offsetOfFlags()) \
macro(JSString_length, JSString::offsetOfLength()) \
macro(JSString_value, JSString::offsetOfValue()) \
- macro(JSVariableObject_registers, JSVariableObject::offsetOfRegisters()) \
- macro(MarkedAllocator_freeListHead, MarkedAllocator::offsetOfFreeListHead()) \
- macro(MarkedBlock_markBits, MarkedBlock::offsetOfMarks()) \
+ macro(JSSymbolTableObject_symbolTable, JSSymbolTableObject::offsetOfSymbolTable()) \
+ macro(JSWrapperObject_internalValue, JSWrapperObject::internalValueOffset()) \
+ macro(RegExpConstructor_cachedResult_lastRegExp, RegExpConstructor::offsetOfCachedResult() + RegExpCachedResult::offsetOfLastRegExp()) \
+ macro(RegExpConstructor_cachedResult_lastInput, RegExpConstructor::offsetOfCachedResult() + RegExpCachedResult::offsetOfLastInput()) \
+ macro(RegExpConstructor_cachedResult_result_start, RegExpConstructor::offsetOfCachedResult() + RegExpCachedResult::offsetOfResult() + OBJECT_OFFSETOF(MatchResult, start)) \
+ macro(RegExpConstructor_cachedResult_result_end, RegExpConstructor::offsetOfCachedResult() + RegExpCachedResult::offsetOfResult() + OBJECT_OFFSETOF(MatchResult, end)) \
+ macro(RegExpConstructor_cachedResult_reified, RegExpConstructor::offsetOfCachedResult() + RegExpCachedResult::offsetOfReified()) \
+ macro(RegExpObject_lastIndex, RegExpObject::offsetOfLastIndex()) \
+ macro(RegExpObject_lastIndexIsWritable, RegExpObject::offsetOfLastIndexIsWritable()) \
+ macro(ShadowChicken_Packet_callee, OBJECT_OFFSETOF(ShadowChicken::Packet, callee)) \
+ macro(ShadowChicken_Packet_frame, OBJECT_OFFSETOF(ShadowChicken::Packet, frame)) \
+ macro(ShadowChicken_Packet_callerFrame, OBJECT_OFFSETOF(ShadowChicken::Packet, callerFrame)) \
+ macro(ShadowChicken_Packet_thisValue, OBJECT_OFFSETOF(ShadowChicken::Packet, thisValue)) \
+ macro(ShadowChicken_Packet_scope, OBJECT_OFFSETOF(ShadowChicken::Packet, scope)) \
+ macro(ShadowChicken_Packet_codeBlock, OBJECT_OFFSETOF(ShadowChicken::Packet, codeBlock)) \
+ macro(ShadowChicken_Packet_callSiteIndex, OBJECT_OFFSETOF(ShadowChicken::Packet, callSiteIndex)) \
+ macro(ScopedArguments_overrodeThings, ScopedArguments::offsetOfOverrodeThings()) \
+ macro(ScopedArguments_scope, ScopedArguments::offsetOfScope()) \
+ macro(ScopedArguments_table, ScopedArguments::offsetOfTable()) \
+ macro(ScopedArguments_totalLength, ScopedArguments::offsetOfTotalLength()) \
+ macro(ScopedArgumentsTable_arguments, ScopedArgumentsTable::offsetOfArguments()) \
+ macro(ScopedArgumentsTable_length, ScopedArgumentsTable::offsetOfLength()) \
macro(StringImpl_data, StringImpl::dataOffset()) \
macro(StringImpl_hashAndFlags, StringImpl::flagsOffset()) \
+ macro(StringImpl_length, StringImpl::lengthMemoryOffset()) \
macro(Structure_classInfo, Structure::classInfoOffset()) \
macro(Structure_globalObject, Structure::globalObjectOffset()) \
- macro(Structure_indexingType, Structure::indexingTypeOffset()) \
- macro(Structure_typeInfoFlags, Structure::typeInfoFlagsOffset()) \
- macro(Structure_typeInfoType, Structure::typeInfoTypeOffset())
+ macro(Structure_prototype, Structure::prototypeOffset()) \
+ macro(Structure_structureID, Structure::structureIDOffset()) \
+ macro(Structure_inlineCapacity, Structure::inlineCapacityOffset()) \
+ macro(Structure_indexingTypeIncludingHistory, Structure::indexingTypeIncludingHistoryOffset()) \
+ macro(JSMap_hashMapImpl, JSMap::offsetOfHashMapImpl()) \
+ macro(JSSet_hashMapImpl, JSSet::offsetOfHashMapImpl()) \
+ macro(HashMapImpl_capacity, HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfCapacity()) \
+ macro(HashMapImpl_buffer, HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfBuffer()) \
+ macro(HashMapBucket_value, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfValue()) \
+ macro(HashMapBucket_key, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey()) \
+ macro(Symbol_symbolImpl, Symbol::offsetOfSymbolImpl()) \
+ macro(JSFixedArray_size, JSFixedArray::offsetOfSize()) \
#define FOR_EACH_INDEXED_ABSTRACT_HEAP(macro) \
- macro(characters8, sizeof(LChar)) \
- macro(characters16, sizeof(UChar)) \
- macro(indexedInt32Properties, sizeof(EncodedJSValue)) \
- macro(indexedDoubleProperties, sizeof(double)) \
- macro(indexedContiguousProperties, sizeof(EncodedJSValue)) \
- macro(indexedArrayStorageProperties, sizeof(EncodedJSValue)) \
- macro(singleCharacterStrings, sizeof(JSString*)) \
- macro(variables, sizeof(Register))
+ macro(DirectArguments_storage, DirectArguments::storageOffset(), sizeof(EncodedJSValue)) \
+ macro(JSEnvironmentRecord_variables, JSEnvironmentRecord::offsetOfVariables(), sizeof(EncodedJSValue)) \
+ macro(JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, 0, sizeof(WriteBarrier<JSString>)) \
+ macro(JSRopeString_fibers, JSRopeString::offsetOfFibers(), sizeof(WriteBarrier<JSString>)) \
+ macro(ScopedArguments_overflowStorage, ScopedArguments::overflowStorageOffset(), sizeof(EncodedJSValue)) \
+ macro(Subspace_allocatorForSizeStep, Subspace::offsetOfAllocatorForSizeStep(), sizeof(MarkedAllocator*)) \
+ macro(WriteBarrierBuffer_bufferContents, 0, sizeof(JSCell*)) \
+ macro(characters8, 0, sizeof(LChar)) \
+ macro(characters16, 0, sizeof(UChar)) \
+ macro(indexedInt32Properties, 0, sizeof(EncodedJSValue)) \
+ macro(indexedDoubleProperties, 0, sizeof(double)) \
+ macro(indexedContiguousProperties, 0, sizeof(EncodedJSValue)) \
+ macro(indexedArrayStorageProperties, 0, sizeof(EncodedJSValue)) \
+ macro(scopedArgumentsTableArguments, 0, sizeof(int32_t)) \
+ macro(singleCharacterStrings, 0, sizeof(JSString*)) \
+ macro(structureTable, 0, sizeof(Structure*)) \
+ macro(variables, 0, sizeof(Register)) \
+ macro(HasOwnPropertyCache, 0, sizeof(HasOwnPropertyCache::Entry)) \
+ macro(JSFixedArray_buffer, JSFixedArray::offsetOfData(), sizeof(EncodedJSValue)) \
#define FOR_EACH_NUMBERED_ABSTRACT_HEAP(macro) \
macro(properties)
@@ -82,7 +149,7 @@ namespace JSC { namespace FTL {
class AbstractHeapRepository {
WTF_MAKE_NONCOPYABLE(AbstractHeapRepository);
public:
- AbstractHeapRepository(LContext);
+ AbstractHeapRepository();
~AbstractHeapRepository();
AbstractHeap root;
@@ -91,13 +158,13 @@ public:
FOR_EACH_ABSTRACT_HEAP(ABSTRACT_HEAP_DECLARATION)
#undef ABSTRACT_HEAP_DECLARATION
-#define ABSTRACT_FIELD_DECLARATION(name, offset) AbstractField name;
+#define ABSTRACT_FIELD_DECLARATION(name, offset) AbstractHeap name;
FOR_EACH_ABSTRACT_FIELD(ABSTRACT_FIELD_DECLARATION)
#undef ABSTRACT_FIELD_DECLARATION
- AbstractField& JSCell_freeListNext;
+ AbstractHeap& JSCell_freeListNext;
-#define INDEXED_ABSTRACT_HEAP_DECLARATION(name, size) IndexedAbstractHeap name;
+#define INDEXED_ABSTRACT_HEAP_DECLARATION(name, offset, size) IndexedAbstractHeap name;
FOR_EACH_INDEXED_ABSTRACT_HEAP(INDEXED_ABSTRACT_HEAP_DECLARATION)
#undef INDEXED_ABSTRACT_HEAP_DECLARATION
@@ -131,17 +198,61 @@ public:
return 0;
}
}
+
+ IndexedAbstractHeap& forArrayType(DFG::Array::Type type)
+ {
+ switch (type) {
+ case DFG::Array::Int32:
+ return indexedInt32Properties;
+ case DFG::Array::Double:
+ return indexedDoubleProperties;
+ case DFG::Array::Contiguous:
+ return indexedContiguousProperties;
+ case DFG::Array::ArrayStorage:
+ case DFG::Array::SlowPutArrayStorage:
+ return indexedArrayStorageProperties;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return indexedInt32Properties;
+ }
+ }
+
+ void decorateMemory(const AbstractHeap*, B3::Value*);
+ void decorateCCallRead(const AbstractHeap*, B3::Value*);
+ void decorateCCallWrite(const AbstractHeap*, B3::Value*);
+ void decoratePatchpointRead(const AbstractHeap*, B3::Value*);
+ void decoratePatchpointWrite(const AbstractHeap*, B3::Value*);
+ void decorateFenceRead(const AbstractHeap*, B3::Value*);
+ void decorateFenceWrite(const AbstractHeap*, B3::Value*);
+
+ void computeRangesAndDecorateInstructions();
private:
- friend class AbstractHeap;
-
- LContext m_context;
- unsigned m_tbaaKind;
+
+ struct HeapForValue {
+ HeapForValue()
+ {
+ }
+
+ HeapForValue(const AbstractHeap* heap, B3::Value* value)
+ : heap(heap)
+ , value(value)
+ {
+ }
+
+ const AbstractHeap* heap { nullptr };
+ B3::Value* value { nullptr };
+ };
+
+ Vector<HeapForValue> m_heapForMemory;
+ Vector<HeapForValue> m_heapForCCallRead;
+ Vector<HeapForValue> m_heapForCCallWrite;
+ Vector<HeapForValue> m_heapForPatchpointRead;
+ Vector<HeapForValue> m_heapForPatchpointWrite;
+ Vector<HeapForValue> m_heapForFenceRead;
+ Vector<HeapForValue> m_heapForFenceWrite;
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLAbstractHeapRepository_h
-
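The field and indexed-heap lists above are consumed as X-macros: the header defines a per-entry declaration macro, expands the list through it, and undefines it again, so every FOR_EACH_ABSTRACT_FIELD entry becomes an AbstractHeap member and every FOR_EACH_INDEXED_ABSTRACT_HEAP entry becomes an IndexedAbstractHeap member. A minimal, self-contained sketch of that pattern follows; the field names and the AbstractHeapStub type are invented for illustration and are not the real JSC types.

    #include <cstddef>
    #include <cstdio>

    // Stub standing in for FTL::AbstractHeap; only what the sketch needs.
    struct AbstractHeapStub {
        const char* name;
        std::size_t offset;
    };

    // Invented entries; the real list names JSC object fields and their offsets.
    #define FOR_EACH_EXAMPLE_FIELD(macro) \
        macro(exampleFieldA, 0) \
        macro(exampleFieldB, 8)

    struct RepositoryStub {
        // Mirrors "#define ABSTRACT_FIELD_DECLARATION(name, offset) AbstractHeap name;"
        // followed by FOR_EACH_ABSTRACT_FIELD(ABSTRACT_FIELD_DECLARATION) in the header.
    #define EXAMPLE_FIELD_DECLARATION(name, offset) AbstractHeapStub name { #name, offset };
        FOR_EACH_EXAMPLE_FIELD(EXAMPLE_FIELD_DECLARATION)
    #undef EXAMPLE_FIELD_DECLARATION
    };

    int main()
    {
        RepositoryStub repository;
        std::printf("%s at offset %zu\n", repository.exampleFieldA.name, repository.exampleFieldA.offset);
        std::printf("%s at offset %zu\n", repository.exampleFieldB.name, repository.exampleFieldB.offset);
        return 0;
    }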
diff --git a/Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp
new file mode 100644
index 000000000..6c00fa4d9
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLAvailableRecovery.h"
+
+#if ENABLE(FTL_JIT)
+
+namespace JSC { namespace FTL {
+
+void AvailableRecovery::dump(PrintStream& out) const
+{
+ out.print(node(), " => ", opcode(), ", ", RawPointer(m_left), ", ", RawPointer(m_right), ", ", m_format);
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLAvailableRecovery.h b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.h
new file mode 100644
index 000000000..1f36c3b87
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "DFGNode.h"
+#include "DataFormat.h"
+#include "FTLAbbreviatedTypes.h"
+#include "FTLRecoveryOpcode.h"
+
+namespace JSC { namespace FTL {
+
+class AvailableRecovery {
+public:
+ AvailableRecovery()
+ : m_node(0)
+ , m_format(DataFormatNone)
+ , m_opcode(AddRecovery)
+ , m_left(0)
+ , m_right(0)
+ {
+ }
+
+ AvailableRecovery(DFG::Node* node, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format)
+ : m_node(node)
+ , m_format(format)
+ , m_opcode(opcode)
+ , m_left(left)
+ , m_right(right)
+ {
+ }
+
+ DFG::Node* node() const { return m_node; }
+ DataFormat format() const { return m_format; }
+ RecoveryOpcode opcode() const { return m_opcode; }
+ LValue left() const { return m_left; }
+ LValue right() const { return m_right; }
+
+ void dump(PrintStream&) const;
+
+private:
+ DFG::Node* m_node;
+ DataFormat m_format;
+ RecoveryOpcode m_opcode;
+ LValue m_left;
+ LValue m_right;
+};
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
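AvailableRecovery is a plain record: a DFG node, a recovery opcode, two already-available operands, and a data format, so that a value can be recomputed on demand rather than kept live. A rough, self-contained analogue of that idea is sketched below; the Sub opcode, the plain int operands, and the recover() helper are invented for illustration (the header above only shows AddRecovery and LValue operands).

    #include <cstdio>

    // Invented analogue of FTL::RecoveryOpcode.
    enum class RecoveryOpcodeStub { Add, Sub };

    // Analogue of AvailableRecovery with plain ints in place of LValue operands.
    struct AvailableRecoveryStub {
        RecoveryOpcodeStub opcode;
        int left;
        int right;

        // The recovery recomputes the value from its two available operands.
        int recover() const
        {
            return opcode == RecoveryOpcodeStub::Add ? left + right : left - right;
        }
    };

    int main()
    {
        AvailableRecoveryStub recovery { RecoveryOpcodeStub::Add, 40, 2 };
        std::printf("recovered value: %d\n", recovery.recover());
        return 0;
    }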
diff --git a/Source/JavaScriptCore/ftl/FTLCapabilities.cpp b/Source/JavaScriptCore/ftl/FTLCapabilities.cpp
index 86a28cf8a..81d566583 100644
--- a/Source/JavaScriptCore/ftl/FTLCapabilities.cpp
+++ b/Source/JavaScriptCore/ftl/FTLCapabilities.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +32,11 @@ namespace JSC { namespace FTL {
using namespace DFG;
+static bool verboseCapabilities()
+{
+ return verboseCompilationEnabled() || Options::verboseFTLFailure();
+}
+
inline CapabilityLevel canCompile(Node* node)
{
// NOTE: If we ever have phantom arguments, we can compile them but we cannot
@@ -39,11 +44,15 @@ inline CapabilityLevel canCompile(Node* node)
switch (node->op()) {
case JSConstant:
- case WeakJSConstant:
+ case LazyJSConstant:
case GetLocal:
case SetLocal:
+ case PutStack:
+ case KillStack:
+ case GetStack:
case MovHint:
case ZombieHint:
+ case ExitOK:
case Phantom:
case Flush:
case PhantomLocal:
@@ -56,20 +65,28 @@ inline CapabilityLevel canCompile(Node* node)
case BitLShift:
case BitURShift:
case CheckStructure:
- case StructureTransitionWatchpoint:
+ case DoubleAsInt32:
case ArrayifyToStructure:
case PutStructure:
- case PhantomPutStructure:
case GetButterfly:
case NewObject:
case NewArray:
+ case NewArrayWithSpread:
+ case Spread:
case NewArrayBuffer:
+ case NewTypedArray:
case GetByOffset:
+ case GetGetterSetterByOffset:
+ case GetGetter:
+ case GetSetter:
case PutByOffset:
case GetGlobalVar:
- case PutGlobalVar:
+ case GetGlobalLexicalVariable:
+ case PutGlobalVariable:
case ValueAdd:
+ case StrCat:
case ArithAdd:
+ case ArithClz32:
case ArithSub:
case ArithMul:
case ArithDiv:
@@ -77,61 +94,213 @@ inline CapabilityLevel canCompile(Node* node)
case ArithMin:
case ArithMax:
case ArithAbs:
+ case ArithSin:
+ case ArithCos:
+ case ArithTan:
+ case ArithPow:
+ case ArithRandom:
+ case ArithRound:
+ case ArithFloor:
+ case ArithCeil:
+ case ArithTrunc:
+ case ArithSqrt:
+ case ArithLog:
+ case ArithFRound:
case ArithNegate:
case UInt32ToNumber:
- case Int32ToDouble:
- case CompareEqConstant:
- case CompareStrictEqConstant:
case Jump:
case ForceOSRExit:
case Phi:
case Upsilon:
case ExtractOSREntryLocal:
case LoopHint:
- case Call:
- case Construct:
- case GetMyScope:
case SkipScope:
- case GetClosureRegisters:
+ case GetGlobalObject:
+ case CreateActivation:
+ case NewFunction:
+ case NewGeneratorFunction:
+ case NewAsyncFunction:
case GetClosureVar:
case PutClosureVar:
- case Int52ToValue:
+ case CreateDirectArguments:
+ case CreateScopedArguments:
+ case CreateClonedArguments:
+ case GetFromArguments:
+ case PutToArguments:
+ case GetArgument:
case InvalidationPoint:
case StringCharAt:
- case CheckFunction:
+ case CheckCell:
+ case CheckBadCell:
+ case CheckNotEmpty:
+ case CheckStringIdent:
+ case CheckWatchdogTimer:
case StringCharCodeAt:
+ case StringFromCharCode:
case AllocatePropertyStorage:
- case FunctionReentryWatchpoint:
- case TypedArrayWatchpoint:
- case VariableWatchpoint:
+ case ReallocatePropertyStorage:
+ case NukeStructureAndSetButterfly:
+ case GetTypedArrayByteOffset:
case NotifyWrite:
case StoreBarrier:
- case ConditionalStoreBarrier:
- case StoreBarrierWithNullCheck:
+ case FencedStoreBarrier:
+ case Call:
+ case DirectCall:
+ case TailCall:
+ case DirectTailCall:
+ case TailCallInlinedCaller:
+ case DirectTailCallInlinedCaller:
+ case Construct:
+ case DirectConstruct:
+ case CallVarargs:
+ case CallEval:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
+ case ConstructVarargs:
+ case CallForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
+ case ConstructForwardVarargs:
+ case LoadVarargs:
case ValueToInt32:
case Branch:
case LogicalNot:
case CheckInBounds:
case ConstantStoragePointer:
case Check:
- // These are OK.
- break;
+ case CountExecution:
+ case GetExecutable:
+ case GetScope:
+ case GetCallee:
+ case GetArgumentCountIncludingThis:
+ case ToNumber:
+ case ToString:
+ case CallObjectConstructor:
+ case CallStringConstructor:
+ case MakeRope:
+ case NewArrayWithSize:
+ case TryGetById:
case GetById:
- case PutById:
- if (node->child1().useKind() == CellUse)
- break;
- return CannotCompile;
+ case GetByIdFlush:
+ case GetByIdWithThis:
+ case ToThis:
+ case MultiGetByOffset:
+ case MultiPutByOffset:
+ case ToPrimitive:
+ case Throw:
+ case ThrowStaticError:
+ case Unreachable:
+ case In:
+ case HasOwnProperty:
+ case IsCellWithType:
+ case MapHash:
+ case GetMapBucket:
+ case LoadFromJSMapBucket:
+ case IsNonEmptyMapBucket:
+ case IsEmpty:
+ case IsUndefined:
+ case IsBoolean:
+ case IsNumber:
+ case IsObject:
+ case IsObjectOrNull:
+ case IsFunction:
+ case IsTypedArrayView:
+ case CheckTypeInfoFlags:
+ case OverridesHasInstance:
+ case InstanceOf:
+ case InstanceOfCustom:
+ case DoubleRep:
+ case ValueRep:
+ case Int52Rep:
+ case DoubleConstant:
+ case Int52Constant:
+ case BooleanToNumber:
+ case HasGenericProperty:
+ case HasStructureProperty:
+ case GetDirectPname:
+ case GetEnumerableLength:
case GetIndexedPropertyStorage:
- if (node->arrayMode().type() == Array::String)
- break;
- if (isTypedView(node->arrayMode().typedArrayType()))
- break;
- return CannotCompile;
+ case GetPropertyEnumerator:
+ case GetEnumeratorStructurePname:
+ case GetEnumeratorGenericPname:
+ case ToIndexString:
+ case BottomValue:
+ case PhantomNewObject:
+ case PhantomNewFunction:
+ case PhantomNewGeneratorFunction:
+ case PhantomNewAsyncFunction:
+ case PhantomCreateActivation:
+ case PutHint:
+ case CheckStructureImmediate:
+ case MaterializeNewObject:
+ case MaterializeCreateActivation:
+ case PhantomDirectArguments:
+ case PhantomCreateRest:
+ case PhantomSpread:
+ case PhantomNewArrayWithSpread:
+ case PhantomClonedArguments:
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValOutOfBounds:
+ case ForwardVarargs:
+ case Switch:
+ case TypeOf:
+ case PutById:
+ case PutByIdDirect:
+ case PutByIdFlush:
+ case PutByIdWithThis:
+ case PutGetterById:
+ case PutSetterById:
+ case PutGetterSetterById:
+ case PutGetterByVal:
+ case PutSetterByVal:
+ case CreateRest:
+ case GetRestLength:
+ case RegExpExec:
+ case RegExpTest:
+ case NewRegexp:
+ case StringReplace:
+ case StringReplaceRegExp:
+ case GetRegExpObjectLastIndex:
+ case SetRegExpObjectLastIndex:
+ case RecordRegExpCachedResult:
+ case SetFunctionName:
+ case LogShadowChickenPrologue:
+ case LogShadowChickenTail:
+ case ResolveScope:
+ case GetDynamicVar:
+ case PutDynamicVar:
+ case CompareEq:
+ case CompareEqPtr:
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareStrictEq:
+ case DefineDataProperty:
+ case DefineAccessorProperty:
+ case ToLowerCase:
+ case NumberToStringWithRadix:
+ case CheckDOM:
+ case CallDOM:
+ case CallDOMGetter:
+ case ArraySlice:
+ case ParseInt:
+ // These are OK.
+ break;
+
+ case Identity:
+ // No backend handles this because it will be optimized out. But we may check
+ // for capabilities before optimization. It would be a deep error to remove this
+ // case because it would prevent us from catching bugs where the FTL backend
+ // pipeline failed to optimize out an Identity.
+ break;
case CheckArray:
switch (node->arrayMode().type()) {
case Array::Int32:
case Array::Double:
case Array::Contiguous:
+ case Array::DirectArguments:
+ case Array::ScopedArguments:
break;
default:
if (isTypedView(node->arrayMode().typedArrayType()))
@@ -141,17 +310,31 @@ inline CapabilityLevel canCompile(Node* node)
break;
case GetArrayLength:
switch (node->arrayMode().type()) {
+ case Array::Undecided:
case Array::Int32:
case Array::Double:
case Array::Contiguous:
case Array::String:
+ case Array::DirectArguments:
+ case Array::ScopedArguments:
break;
default:
- if (isTypedView(node->arrayMode().typedArrayType()))
+ if (node->arrayMode().isSomeTypedArrayView())
break;
return CannotCompile;
}
break;
+ case HasIndexedProperty:
+ switch (node->arrayMode().type()) {
+ case Array::ForceExit:
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ break;
+ default:
+ return CannotCompile;
+ }
+ break;
case GetByVal:
switch (node->arrayMode().type()) {
case Array::ForceExit:
@@ -160,6 +343,9 @@ inline CapabilityLevel canCompile(Node* node)
case Array::Int32:
case Array::Double:
case Array::Contiguous:
+ case Array::Undecided:
+ case Array::DirectArguments:
+ case Array::ScopedArguments:
break;
default:
if (isTypedView(node->arrayMode().typedArrayType()))
@@ -167,6 +353,8 @@ inline CapabilityLevel canCompile(Node* node)
return CannotCompile;
}
break;
+ case GetByValWithThis:
+ break;
case PutByVal:
case PutByValAlias:
case PutByValDirect:
@@ -183,45 +371,14 @@ inline CapabilityLevel canCompile(Node* node)
return CannotCompile;
}
break;
- case CompareEq:
- if (node->isBinaryUseKind(Int32Use))
- break;
- if (node->isBinaryUseKind(MachineIntUse))
- break;
- if (node->isBinaryUseKind(NumberUse))
- break;
- if (node->isBinaryUseKind(ObjectUse))
- break;
- if (node->isBinaryUseKind(UntypedUse))
- break;
- return CannotCompile;
- case CompareStrictEq:
- if (node->isBinaryUseKind(Int32Use))
- break;
- if (node->isBinaryUseKind(MachineIntUse))
- break;
- if (node->isBinaryUseKind(NumberUse))
- break;
- if (node->isBinaryUseKind(ObjectUse))
- break;
- return CannotCompile;
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- if (node->isBinaryUseKind(Int32Use))
- break;
- if (node->isBinaryUseKind(MachineIntUse))
- break;
- if (node->isBinaryUseKind(NumberUse))
- break;
- if (node->isBinaryUseKind(UntypedUse))
- break;
- return CannotCompile;
- case Switch:
- switch (node->switchData()->kind) {
- case SwitchImm:
- case SwitchChar:
+ case PutByValWithThis:
+ break;
+ case ArrayPush:
+ case ArrayPop:
+ switch (node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Contiguous:
+ case Array::Double:
break;
default:
return CannotCompile;
@@ -236,9 +393,21 @@ inline CapabilityLevel canCompile(Node* node)
CapabilityLevel canCompile(Graph& graph)
{
+ if (graph.m_codeBlock->instructionCount() > Options::maximumFTLCandidateInstructionCount()) {
+ if (verboseCapabilities())
+ dataLog("FTL rejecting ", *graph.m_codeBlock, " because it's too big.\n");
+ return CannotCompile;
+ }
+
if (graph.m_codeBlock->codeType() != FunctionCode) {
- if (verboseCompilationEnabled())
- dataLog("FTL rejecting code block that doesn't belong to a function.\n");
+ if (verboseCapabilities())
+ dataLog("FTL rejecting ", *graph.m_codeBlock, " because it doesn't belong to a function.\n");
+ return CannotCompile;
+ }
+
+ if (UNLIKELY(graph.m_codeBlock->ownerScriptExecutable()->neverFTLOptimize())) {
+ if (verboseCapabilities())
+ dataLog("FTL rejecting ", *graph.m_codeBlock, " because it is marked as never FTL compile.\n");
return CannotCompile;
}
@@ -264,23 +433,46 @@ CapabilityLevel canCompile(Graph& graph)
case UntypedUse:
case Int32Use:
case KnownInt32Use:
- case MachineIntUse:
+ case Int52RepUse:
case NumberUse:
- case KnownNumberUse:
case RealNumberUse:
+ case DoubleRepUse:
+ case DoubleRepRealUse:
case BooleanUse:
+ case KnownBooleanUse:
case CellUse:
case KnownCellUse:
+ case CellOrOtherUse:
case ObjectUse:
+ case ArrayUse:
+ case FunctionUse:
case ObjectOrOtherUse:
case StringUse:
+ case StringOrOtherUse:
+ case KnownStringUse:
+ case KnownPrimitiveUse:
+ case StringObjectUse:
+ case StringOrStringObjectUse:
+ case SymbolUse:
+ case MapObjectUse:
+ case SetObjectUse:
case FinalObjectUse:
+ case RegExpObjectUse:
+ case ProxyObjectUse:
+ case DerivedArrayUse:
+ case NotCellUse:
+ case OtherUse:
+ case MiscUse:
+ case StringIdentUse:
+ case NotStringVarUse:
+ case AnyIntUse:
+ case DoubleRepAnyIntUse:
// These are OK.
break;
default:
// Don't know how to handle anything else.
- if (verboseCompilationEnabled()) {
- dataLog("FTL rejecting node because of bad use kind: ", edge.useKind(), " in node:\n");
+ if (verboseCapabilities()) {
+ dataLog("FTL rejecting node in ", *graph.m_codeBlock, " because of bad use kind: ", edge.useKind(), " in node:\n");
graph.dump(WTF::dataFile(), " ", node);
}
return CannotCompile;
@@ -289,8 +481,8 @@ CapabilityLevel canCompile(Graph& graph)
switch (canCompile(node)) {
case CannotCompile:
- if (verboseCompilationEnabled()) {
- dataLog("FTL rejecting node:\n");
+ if (verboseCapabilities()) {
+ dataLog("FTL rejecting node in ", *graph.m_codeBlock, ":\n");
graph.dump(WTF::dataFile(), " ", node);
}
return CannotCompile;
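canCompile(Node*) above is one large switch over node opcodes: supported opcodes fall through to "These are OK", array-flavored opcodes additionally consult the array mode, and the default returns CannotCompile; canCompile(Graph&) then rejects whole code blocks (too big, not function code, marked never-FTL) before walking every node and edge. A stripped-down sketch of that shape, with an invented opcode enum rather than the real DFG node types:

    #include <cstdio>

    // Invented stand-ins; the real code switches over DFG::NodeType and use kinds.
    enum class OpcodeStub { Constant, Add, GetByVal, Unsupported };
    enum class CapabilityLevelStub { CannotCompile, CanCompile };

    static CapabilityLevelStub canCompileNode(OpcodeStub op, bool arrayModeIsSimple)
    {
        switch (op) {
        case OpcodeStub::Constant:
        case OpcodeStub::Add:
            // These are OK.
            return CapabilityLevelStub::CanCompile;
        case OpcodeStub::GetByVal:
            // Array-flavored opcodes also consult the array mode, as above.
            return arrayModeIsSimple ? CapabilityLevelStub::CanCompile : CapabilityLevelStub::CannotCompile;
        default:
            // Anything unrecognized rejects the whole compilation.
            return CapabilityLevelStub::CannotCompile;
        }
    }

    int main()
    {
        std::printf("Add compiles: %d\n", canCompileNode(OpcodeStub::Add, true) == CapabilityLevelStub::CanCompile);
        std::printf("GetByVal with exotic array mode compiles: %d\n",
            canCompileNode(OpcodeStub::GetByVal, false) == CapabilityLevelStub::CanCompile);
        return 0;
    }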
diff --git a/Source/JavaScriptCore/ftl/FTLCapabilities.h b/Source/JavaScriptCore/ftl/FTLCapabilities.h
index 75a426df1..ab3a62b9f 100644
--- a/Source/JavaScriptCore/ftl/FTLCapabilities.h
+++ b/Source/JavaScriptCore/ftl/FTLCapabilities.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLCapabilities_h
-#define FTLCapabilities_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -45,6 +42,3 @@ CapabilityLevel canCompile(DFG::Graph&);
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLCapabilities_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLCommonValues.cpp b/Source/JavaScriptCore/ftl/FTLCommonValues.cpp
index 653bb7ddd..5b4925cff 100644
--- a/Source/JavaScriptCore/ftl/FTLCommonValues.cpp
+++ b/Source/JavaScriptCore/ftl/FTLCommonValues.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,44 +26,38 @@
#include "config.h"
#include "FTLCommonValues.h"
+#include "B3BasicBlockInlines.h"
+#include "B3Const32Value.h"
+#include "B3Const64Value.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstPtrValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3Type.h"
+#include "B3ValueInlines.h"
+
#if ENABLE(FTL_JIT)
namespace JSC { namespace FTL {
-CommonValues::CommonValues(LContext context)
- : voidType(FTL::voidType(context))
- , boolean(int1Type(context))
- , int8(int8Type(context))
- , int16(int16Type(context))
- , int32(int32Type(context))
- , int64(int64Type(context))
- , intPtr(intPtrType(context))
- , floatType(FTL::floatType(context))
- , doubleType(FTL::doubleType(context))
- , ref8(pointerType(int8))
- , ref16(pointerType(int16))
- , ref32(pointerType(int32))
- , ref64(pointerType(int64))
- , refPtr(pointerType(intPtr))
- , refFloat(pointerType(floatType))
- , refDouble(pointerType(doubleType))
- , booleanTrue(constInt(boolean, true, ZeroExtend))
- , booleanFalse(constInt(boolean, false, ZeroExtend))
- , int8Zero(constInt(int8, 0, SignExtend))
- , int32Zero(constInt(int32, 0, SignExtend))
- , int32One(constInt(int32, 1, SignExtend))
- , int64Zero(constInt(int64, 0, SignExtend))
- , intPtrZero(constInt(intPtr, 0, SignExtend))
- , intPtrOne(constInt(intPtr, 1, SignExtend))
- , intPtrTwo(constInt(intPtr, 2, SignExtend))
- , intPtrThree(constInt(intPtr, 3, SignExtend))
- , intPtrFour(constInt(intPtr, 4, SignExtend))
- , intPtrEight(constInt(intPtr, 8, SignExtend))
- , intPtrPtr(constInt(intPtr, sizeof(void*), SignExtend))
- , doubleZero(constReal(doubleType, 0))
- , m_context(context)
- , m_module(0)
+using namespace B3;
+
+CommonValues::CommonValues()
+{
+}
+
+void CommonValues::initializeConstants(B3::Procedure& proc, B3::BasicBlock* block)
{
+ int32Zero = block->appendNew<Const32Value>(proc, Origin(), 0);
+ int32One = block->appendNew<Const32Value>(proc, Origin(), 1);
+ booleanTrue = int32One;
+ booleanFalse = int32Zero;
+ int64Zero = block->appendNew<Const64Value>(proc, Origin(), 0);
+ intPtrZero = block->appendNew<ConstPtrValue>(proc, Origin(), 0);
+ intPtrOne = block->appendNew<ConstPtrValue>(proc, Origin(), 1);
+ intPtrTwo = block->appendNew<ConstPtrValue>(proc, Origin(), 2);
+ intPtrThree = block->appendNew<ConstPtrValue>(proc, Origin(), 3);
+ intPtrEight = block->appendNew<ConstPtrValue>(proc, Origin(), 8);
+ doubleZero = block->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
}
} } // namespace JSC::FTL
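The change above moves CommonValues from eagerly built LLVM constants in the constructor to B3 constant values appended once, via initializeConstants(), into a designated block; later lowering then reuses the same LValue pointers. A self-contained sketch of that materialize-once pattern, using stub Value and BasicBlock types rather than the real B3 API:

    #include <cstdio>
    #include <memory>
    #include <vector>

    // Stubs standing in for B3::Value and B3::BasicBlock; not the real B3 API.
    struct ValueStub { long constant; };

    struct BlockStub {
        std::vector<std::unique_ptr<ValueStub>> values;

        ValueStub* appendConstant(long constant)
        {
            values.push_back(std::make_unique<ValueStub>(ValueStub { constant }));
            return values.back().get();
        }
    };

    // Analogue of CommonValues: default-constructed null, filled in once per procedure.
    struct CommonValuesStub {
        ValueStub* int32Zero { nullptr };
        ValueStub* int32One { nullptr };

        void initializeConstants(BlockStub& block)
        {
            int32Zero = block.appendConstant(0);
            int32One = block.appendConstant(1);
        }
    };

    int main()
    {
        BlockStub entryBlock;
        CommonValuesStub commonValues;
        commonValues.initializeConstants(entryBlock);
        // Later users share the same node, so each constant is emitted exactly once.
        std::printf("same node reused: %d\n", commonValues.int32One == entryBlock.values[1].get());
        return 0;
    }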
diff --git a/Source/JavaScriptCore/ftl/FTLCommonValues.h b/Source/JavaScriptCore/ftl/FTLCommonValues.h
index 848090210..c1d6d5a55 100644
--- a/Source/JavaScriptCore/ftl/FTLCommonValues.h
+++ b/Source/JavaScriptCore/ftl/FTLCommonValues.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,64 +23,47 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLCommonValues_h
-#define FTLCommonValues_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "FTLAbbreviations.h"
+#include "FTLAbbreviatedTypes.h"
+#include "FTLValueRange.h"
+
+namespace JSC {
+
+namespace B3 {
+class BasicBlock;
+class Procedure;
+}
-namespace JSC { namespace FTL {
+namespace FTL {
class CommonValues {
public:
- CommonValues(LContext context);
+ CommonValues();
+
+ void initializeConstants(B3::Procedure&, B3::BasicBlock*);
- void initialize(LModule module)
- {
- m_module = module;
- }
+ LValue booleanTrue { nullptr };
+ LValue booleanFalse { nullptr };
+ LValue int32Zero { nullptr };
+ LValue int32One { nullptr };
+ LValue int64Zero { nullptr };
+ LValue intPtrZero { nullptr };
+ LValue intPtrOne { nullptr };
+ LValue intPtrTwo { nullptr };
+ LValue intPtrThree { nullptr };
+ LValue intPtrEight { nullptr };
+ LValue doubleZero { nullptr };
- const LType voidType;
- const LType boolean;
- const LType int8;
- const LType int16;
- const LType int32;
- const LType int64;
- const LType intPtr;
- const LType floatType;
- const LType doubleType;
- const LType ref8;
- const LType ref16;
- const LType ref32;
- const LType ref64;
- const LType refPtr;
- const LType refFloat;
- const LType refDouble;
- const LValue booleanTrue;
- const LValue booleanFalse;
- const LValue int8Zero;
- const LValue int32Zero;
- const LValue int32One;
- const LValue int64Zero;
- const LValue intPtrZero;
- const LValue intPtrOne;
- const LValue intPtrTwo;
- const LValue intPtrThree;
- const LValue intPtrFour;
- const LValue intPtrEight;
- const LValue intPtrPtr;
- const LValue doubleZero;
+ const unsigned rangeKind { 0 };
+ const unsigned profKind { 0 };
+ const LValue branchWeights { nullptr };
- LContext const m_context;
- LModule m_module;
+ const ValueRange nonNegativeInt32;
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLCommonValues_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLCompile.cpp b/Source/JavaScriptCore/ftl/FTLCompile.cpp
index 6c01f4fa5..e85e28778 100644
--- a/Source/JavaScriptCore/ftl/FTLCompile.cpp
+++ b/Source/JavaScriptCore/ftl/FTLCompile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,409 +28,129 @@
#if ENABLE(FTL_JIT)
+#include "AirCode.h"
+#include "B3Generate.h"
+#include "B3ProcedureInlines.h"
+#include "B3StackSlot.h"
#include "CodeBlockWithJITType.h"
#include "CCallHelpers.h"
#include "DFGCommon.h"
+#include "DFGGraphSafepoint.h"
+#include "DFGOperations.h"
#include "DataView.h"
#include "Disassembler.h"
-#include "FTLExitThunkGenerator.h"
-#include "FTLInlineCacheSize.h"
#include "FTLJITCode.h"
#include "FTLThunks.h"
-#include "JITStubs.h"
-#include "LLVMAPI.h"
+#include "JITSubGenerator.h"
+#include "JSCInlines.h"
#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
+#include "PCToCodeOriginMap.h"
+#include "ScratchRegisterAllocator.h"
namespace JSC { namespace FTL {
using namespace DFG;
-static uint8_t* mmAllocateCodeSection(
- void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
+void compile(State& state, Safepoint::Result& safepointResult)
{
-
- State& state = *static_cast<State*>(opaqueState);
-
- RELEASE_ASSERT(alignment <= jitAllocationGranule);
-
- RefPtr<ExecutableMemoryHandle> result =
- state.graph.m_vm.executableAllocator.allocate(
- state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationMustSucceed);
-
- state.jitCode->addHandle(result);
- state.codeSectionNames.append(sectionName);
-
- return static_cast<uint8_t*>(result->start());
-}
-
-static uint8_t* mmAllocateDataSection(
- void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
- const char* sectionName, LLVMBool isReadOnly)
-{
- UNUSED_PARAM(sectionID);
- UNUSED_PARAM(isReadOnly);
-
- State& state = *static_cast<State*>(opaqueState);
-
- RELEASE_ASSERT(alignment <= sizeof(LSectionWord));
-
- RefCountedArray<LSectionWord> section(
- (size + sizeof(LSectionWord) - 1) / sizeof(LSectionWord));
-
- if (!strcmp(sectionName, "__llvm_stackmaps"))
- state.stackmapsSection = section;
- else {
- state.jitCode->addDataSection(section);
- state.dataSectionNames.append(sectionName);
- }
-
- return bitwise_cast<uint8_t*>(section.data());
-}
+ Graph& graph = state.graph;
+ CodeBlock* codeBlock = graph.m_codeBlock;
+ VM& vm = graph.m_vm;
-static LLVMBool mmApplyPermissions(void*, char**)
-{
- return false;
-}
-
-static void mmDestroy(void*)
-{
-}
+ {
+ GraphSafepoint safepoint(state.graph, safepointResult);
-static void dumpDataSection(RefCountedArray<LSectionWord> section, const char* prefix)
-{
- for (unsigned j = 0; j < section.size(); ++j) {
- char buf[32];
- snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(section.data() + j)));
- dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<long long>(section[j]));
+ B3::prepareForGeneration(*state.proc);
}
-}
-template<typename DescriptorType>
-void generateICFastPath(
- State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
- StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
-{
- VM& vm = state.graph.m_vm;
-
- StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
- if (iter == recordMap.end()) {
- // It was optimized out.
+ if (safepointResult.didGetCancelled())
return;
- }
+ RELEASE_ASSERT(!state.graph.m_vm.heap.collectorBelievesThatTheWorldIsStopped());
- StackMaps::Record& record = iter->value;
-
- CCallHelpers fastPathJIT(&vm, codeBlock);
- ic.m_generator.generateFastPath(fastPathJIT);
-
- char* startOfIC =
- bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
-
- LinkBuffer linkBuffer(vm, &fastPathJIT, startOfIC, sizeOfIC);
- // Note: we could handle the !isValid() case. We just don't appear to have a
- // reason to do so, yet.
- RELEASE_ASSERT(linkBuffer.isValid());
-
- MacroAssembler::AssemblerType_T::fillNops(
- startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());
+ if (state.allocationFailed)
+ return;
- state.finalizer->sideCodeLinkBuffer->link(
- ic.m_slowPathDone, CodeLocationLabel(startOfIC + sizeOfIC));
-
- linkBuffer.link(
- ic.m_generator.slowPathJump(),
- state.finalizer->sideCodeLinkBuffer->locationOf(ic.m_generator.slowPathBegin()));
-
- ic.m_generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
-}
-
-static void fixFunctionBasedOnStackMaps(
- State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
- StackMaps::RecordMap& recordMap)
-{
- VM& vm = state.graph.m_vm;
- StackMaps stackmaps = jitCode->stackmaps;
-
- ExitThunkGenerator exitThunkGenerator(state);
- exitThunkGenerator.emitThunks();
- if (exitThunkGenerator.didThings()) {
- OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(
- vm, &exitThunkGenerator, codeBlock, JITCompilationMustSucceed));
-
- ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
-
- for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
- OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
- OSRExit& exit = jitCode->osrExit[i];
-
- if (Options::verboseCompilation())
- dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");
-
- StackMaps::RecordMap::iterator iter = recordMap.find(exit.m_stackmapID);
- if (iter == recordMap.end()) {
- // It was optimized out.
- continue;
- }
-
- info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
-
- exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
- }
-
- state.finalizer->exitThunksLinkBuffer = linkBuffer.release();
+ std::unique_ptr<RegisterAtOffsetList> registerOffsets =
+ std::make_unique<RegisterAtOffsetList>(state.proc->calleeSaveRegisters());
+ if (shouldDumpDisassembly()) {
+ dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
+ dataLog(" ", *registerOffsets, "\n");
}
-
- if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty()) {
- CCallHelpers slowPathJIT(&vm, codeBlock);
-
- for (unsigned i = state.getByIds.size(); i--;) {
- GetByIdDescriptor& getById = state.getByIds[i];
-
- if (Options::verboseCompilation())
- dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
-
- StackMaps::RecordMap::iterator iter = recordMap.find(getById.stackmapID());
- if (iter == recordMap.end()) {
- // It was optimized out.
- continue;
- }
-
- StackMaps::Record& record = iter->value;
-
- // FIXME: LLVM should tell us which registers are live.
- RegisterSet usedRegisters = RegisterSet::allRegisters();
-
- GPRReg result = record.locations[0].directGPR();
- GPRReg callFrameRegister = record.locations[1].directGPR();
- GPRReg base = record.locations[2].directGPR();
-
- JITGetByIdGenerator gen(
- codeBlock, getById.codeOrigin(), usedRegisters, callFrameRegister,
- JSValueRegs(base), JSValueRegs(result), false);
-
- MacroAssembler::Label begin = slowPathJIT.label();
-
- MacroAssembler::Call call = callOperation(
- state, usedRegisters, slowPathJIT, operationGetByIdOptimize, result,
- callFrameRegister, gen.stubInfo(), base, getById.uid());
-
- gen.reportSlowPathCall(begin, call);
-
- getById.m_slowPathDone = slowPathJIT.jump();
- getById.m_generator = gen;
- }
-
- for (unsigned i = state.putByIds.size(); i--;) {
- PutByIdDescriptor& putById = state.putByIds[i];
-
- if (Options::verboseCompilation())
- dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
-
- StackMaps::RecordMap::iterator iter = recordMap.find(putById.stackmapID());
- if (iter == recordMap.end()) {
- // It was optimized out.
- continue;
- }
-
- StackMaps::Record& record = iter->value;
-
- // FIXME: LLVM should tell us which registers are live.
- RegisterSet usedRegisters = RegisterSet::allRegisters();
-
- GPRReg callFrameRegister = record.locations[0].directGPR();
- GPRReg base = record.locations[1].directGPR();
- GPRReg value = record.locations[2].directGPR();
-
- JITPutByIdGenerator gen(
- codeBlock, putById.codeOrigin(), usedRegisters, callFrameRegister,
- JSValueRegs(base), JSValueRegs(value), MacroAssembler::scratchRegister,
- false, putById.ecmaMode(), putById.putKind());
-
- MacroAssembler::Label begin = slowPathJIT.label();
-
- MacroAssembler::Call call = callOperation(
- state, usedRegisters, slowPathJIT, gen.slowPathFunction(), callFrameRegister,
- gen.stubInfo(), value, base, putById.uid());
-
- gen.reportSlowPathCall(begin, call);
-
- putById.m_slowPathDone = slowPathJIT.jump();
- putById.m_generator = gen;
- }
-
- state.finalizer->sideCodeLinkBuffer = adoptPtr(
- new LinkBuffer(vm, &slowPathJIT, codeBlock, JITCompilationMustSucceed));
-
- for (unsigned i = state.getByIds.size(); i--;) {
- generateICFastPath(
- state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
- sizeOfGetById());
- }
- for (unsigned i = state.putByIds.size(); i--;) {
- generateICFastPath(
- state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
- sizeOfPutById());
- }
+ state.graph.m_codeBlock->setCalleeSaveRegisters(WTFMove(registerOffsets));
+ ASSERT(!(state.proc->frameSize() % sizeof(EncodedJSValue)));
+ state.jitCode->common.frameRegisterCount = state.proc->frameSize() / sizeof(EncodedJSValue);
+
+ int localsOffset =
+ state.capturedValue->offsetFromFP() / sizeof(EncodedJSValue) + graph.m_nextMachineLocal;
+ if (shouldDumpDisassembly()) {
+ dataLog(
+ "localsOffset = ", localsOffset, " for stack slot: ",
+ pointerDump(state.capturedValue), " at ", RawPointer(state.capturedValue), "\n");
}
- RepatchBuffer repatchBuffer(codeBlock);
-
- for (unsigned exitIndex = jitCode->osrExit.size(); exitIndex--;) {
- OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
- OSRExit& exit = jitCode->osrExit[exitIndex];
- StackMaps::RecordMap::iterator iter = recordMap.find(exit.m_stackmapID);
- if (iter == recordMap.end()) {
- // This could happen if LLVM optimizes out an OSR exit.
- continue;
- }
+ for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
+ InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
- StackMaps::Record& record = iter->value;
+ if (inlineCallFrame->argumentCountRegister.isValid())
+ inlineCallFrame->argumentCountRegister += localsOffset;
- CodeLocationLabel source = CodeLocationLabel(
- bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
-
- if (info.m_isInvalidationPoint) {
- jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
- continue;
+ for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
+ inlineCallFrame->arguments[argument] =
+ inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
}
- repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
- }
-}
+ if (inlineCallFrame->isClosureCall) {
+ inlineCallFrame->calleeRecovery =
+ inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
+ }
-void compile(State& state)
-{
- char* error = 0;
-
- LLVMMCJITCompilerOptions options;
- llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
- options.OptLevel = Options::llvmBackendOptimizationLevel();
- options.NoFramePointerElim = true;
- if (Options::useLLVMSmallCodeModel())
- options.CodeModel = LLVMCodeModelSmall;
- options.EnableFastISel = Options::enableLLVMFastISel();
- options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
- &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
-
- LLVMExecutionEngineRef engine;
-
- if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
- dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
- CRASH();
}
- LLVMPassManagerRef functionPasses = 0;
- LLVMPassManagerRef modulePasses;
-
- if (Options::llvmSimpleOpt()) {
- modulePasses = llvm->CreatePassManager();
- llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
- llvm->AddPromoteMemoryToRegisterPass(modulePasses);
- llvm->AddConstantPropagationPass(modulePasses);
- llvm->AddInstructionCombiningPass(modulePasses);
- llvm->AddBasicAliasAnalysisPass(modulePasses);
- llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
- llvm->AddGVNPass(modulePasses);
- llvm->AddCFGSimplificationPass(modulePasses);
- llvm->RunPassManager(modulePasses, state.module);
- } else {
- LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
- llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
- llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
-
- functionPasses = llvm->CreateFunctionPassManagerForModule(state.module);
- modulePasses = llvm->CreatePassManager();
-
- llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
-
- llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
- llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
-
- llvm->PassManagerBuilderDispose(passBuilder);
-
- llvm->InitializeFunctionPassManager(functionPasses);
- for (LValue function = llvm->GetFirstFunction(state.module); function; function = llvm->GetNextFunction(function))
- llvm->RunFunctionPassManager(functionPasses, function);
- llvm->FinalizeFunctionPassManager(functionPasses);
-
- llvm->RunPassManager(modulePasses, state.module);
+ // Note that the scope register could be invalid here if the original code had CallEval but it
+ // got killed. That's because it takes the CallEval to cause the scope register to be kept alive
+ // unless the debugger is also enabled.
+ if (graph.needsScopeRegister() && codeBlock->scopeRegister().isValid())
+ codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
+
+ for (OSRExitDescriptor& descriptor : state.jitCode->osrExitDescriptors) {
+ for (unsigned i = descriptor.m_values.size(); i--;)
+ descriptor.m_values[i] = descriptor.m_values[i].withLocalsOffset(localsOffset);
+ for (ExitTimeObjectMaterialization* materialization : descriptor.m_materializations)
+ materialization->accountForLocalsOffset(localsOffset);
}
- if (DFG::shouldShowDisassembly() || DFG::verboseCompilationEnabled())
- state.dumpState("after optimization");
-
- // FIXME: Need to add support for the case where JIT memory allocation failed.
- // https://bugs.webkit.org/show_bug.cgi?id=113620
- state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
- if (functionPasses)
- llvm->DisposePassManager(functionPasses);
- llvm->DisposePassManager(modulePasses);
- llvm->DisposeExecutionEngine(engine);
-
- if (shouldShowDisassembly()) {
- for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
- ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
- dataLog(
- "Generated LLVM code for ",
- CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::DFGJIT),
- " #", i, ", ", state.codeSectionNames[i], ":\n");
- disassemble(
- MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
- " ", WTF::dataFile(), LLVMSubset);
- }
-
- for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
- const RefCountedArray<LSectionWord>& section = state.jitCode->dataSections()[i];
- dataLog(
- "Generated LLVM data section for ",
- CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::DFGJIT),
- " #", i, ", ", state.dataSectionNames[i], ":\n");
- dumpDataSection(section, " ");
- }
- }
-
- if (state.stackmapsSection.size()) {
- if (shouldShowDisassembly()) {
- dataLog(
- "Generated LLVM stackmaps section for ",
- CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::DFGJIT), ":\n");
- dataLog(" Raw data:\n");
- dumpDataSection(state.stackmapsSection, " ");
- }
-
- RefPtr<DataView> stackmapsData = DataView::create(
- ArrayBuffer::create(state.stackmapsSection.data(), state.stackmapsSection.byteSize()));
- state.jitCode->stackmaps.parse(stackmapsData.get());
-
- if (shouldShowDisassembly()) {
- dataLog(" Structured data:\n");
- state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), " ");
- }
-
- StackMaps::RecordMap recordMap = state.jitCode->stackmaps.getRecordMap();
- fixFunctionBasedOnStackMaps(
- state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
- recordMap);
-
- if (shouldShowDisassembly()) {
- for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
- if (state.codeSectionNames[i] != "__text")
- continue;
-
- ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
- dataLog(
- "Generated LLVM code after stackmap-based fix-up for ",
- CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::DFGJIT),
- " #", i, ", ", state.codeSectionNames[i], ":\n");
- disassemble(
- MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
- " ", WTF::dataFile(), LLVMSubset);
- }
- }
+ // We will add exception handlers while generating.
+ codeBlock->clearExceptionHandlers();
+
+ CCallHelpers jit(&vm, codeBlock);
+ B3::generate(*state.proc, jit);
+
+ // Emit the exception handler.
+ *state.exceptionHandler = jit.label();
+ jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+ jit.move(MacroAssembler::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ CCallHelpers::Call call = jit.call();
+ jit.jumpToExceptionHandler();
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(call, FunctionPtr(lookupExceptionHandler));
+ });
+
+ state.finalizer->b3CodeLinkBuffer = std::make_unique<LinkBuffer>(
+ vm, jit, codeBlock, JITCompilationCanFail);
+ if (state.finalizer->b3CodeLinkBuffer->didFailToAllocate()) {
+ state.allocationFailed = true;
+ return;
}
- state.module = 0; // We no longer own the module.
+ B3::PCToOriginMap originMap = state.proc->releasePCToOriginMap();
+ if (vm.shouldBuilderPCToCodeOriginMapping())
+ codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(PCToCodeOriginMapBuilder(vm, WTFMove(originMap)), *state.finalizer->b3CodeLinkBuffer));
+
+ state.generatedFunction = bitwise_cast<GeneratedFunction>(
+ state.finalizer->b3CodeLinkBuffer->entrypoint().executableAddress());
+ state.jitCode->initializeB3Byproducts(state.proc->releaseByproducts());
}
} } // namespace JSC::FTL
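The rewritten compile() above replaces the LLVM MCJIT pipeline with a straight-line B3 flow: prepare the procedure for generation under a graph safepoint (so the plan can be cancelled), bail out early on cancellation or allocation failure, generate machine code through CCallHelpers, append the shared exception handler, and link with JITCompilationCanFail so an allocation failure is reported rather than fatal. The control-flow sketch below captures only that ordering; every stage is a stub, not the real JSC machinery.

    #include <cstdio>

    // Stubbed-out stages; returning false models cancellation or allocation failure.
    static bool prepareForGenerationUnderSafepoint() { return true; }
    static bool generateMachineCode() { return true; }
    static bool linkAllowingFailure() { return true; }

    // Mirrors the shape of FTL::compile() above: bail out early at each stage,
    // emit the shared exception handler between code generation and linking.
    static bool compileSketch()
    {
        if (!prepareForGenerationUnderSafepoint())
            return false; // plan cancelled at a safepoint
        if (!generateMachineCode())
            return false;
        std::puts("append shared exception handler, then link");
        return linkAllowingFailure(); // JITCompilationCanFail: report failure, don't crash
    }

    int main()
    {
        std::printf("compiled: %s\n", compileSketch() ? "yes" : "no");
        return 0;
    }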
diff --git a/Source/JavaScriptCore/ftl/FTLCompile.h b/Source/JavaScriptCore/ftl/FTLCompile.h
index c963643ed..c3dd83ae7 100644
--- a/Source/JavaScriptCore/ftl/FTLCompile.h
+++ b/Source/JavaScriptCore/ftl/FTLCompile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,22 +23,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLCompile_h
-#define FTLCompile_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
+#include "DFGSafepoint.h"
#include "FTLState.h"
namespace JSC { namespace FTL {
-void compile(State&);
+void compile(State&, DFG::Safepoint::Result&);
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLCompile_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.cpp b/Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.cpp
new file mode 100644
index 000000000..106e2e561
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLDOMJITPatchpointParams.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "AllowMacroScratchRegisterUsage.h"
+#include "FTLSlowPathCall.h"
+#include "FTLState.h"
+
+namespace JSC { namespace FTL {
+
+template<typename OperationType, typename ResultType, typename Arguments, size_t... ArgumentsIndex>
+static void dispatch(CCallHelpers& jit, FTL::State* state, const B3::StackmapGenerationParams& params, DFG::Node* node, Box<CCallHelpers::JumpList> exceptions, CCallHelpers::JumpList from, OperationType operation, ResultType result, Arguments arguments, std::index_sequence<ArgumentsIndex...>)
+{
+ CCallHelpers::Label done = jit.label();
+ params.addLatePath([=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ from.link(&jit);
+ callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), operation, extractResult(result), std::get<ArgumentsIndex>(arguments)...);
+ jit.jump().linkTo(done, &jit);
+ });
+}
+
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) \
+ void DOMJITPatchpointParams::addSlowPathCallImpl(CCallHelpers::JumpList from, CCallHelpers& jit, OperationType operation, ResultType result, std::tuple<__VA_ARGS__> args) \
+ { \
+ dispatch(jit, &m_state, m_params, m_node, m_exceptions, from, operation, result, args, std::make_index_sequence<std::tuple_size<decltype(args)>::value>()); \
+ } \
+
+DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+
+} }
+
+#endif
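The dispatch() template above relies on the standard index-sequence trick: the slow-path arguments arrive packed in a std::tuple, and expanding std::get<ArgumentsIndex>(arguments)... over a std::index_sequence unpacks them into the callOperation call. A self-contained sketch of just that C++ technique, independent of the JSC types:

    #include <cstddef>
    #include <cstdio>
    #include <tuple>
    #include <utility>

    // Same unpacking trick as dispatch() above: expand a tuple of arguments
    // into a call by indexing it with an index sequence.
    template<typename Function, typename Tuple, std::size_t... Index>
    void applyImpl(Function function, const Tuple& arguments, std::index_sequence<Index...>)
    {
        function(std::get<Index>(arguments)...);
    }

    template<typename Function, typename... Arguments>
    void apply(Function function, const std::tuple<Arguments...>& arguments)
    {
        applyImpl(function, arguments, std::make_index_sequence<sizeof...(Arguments)>());
    }

    int main()
    {
        apply([](int a, double b) { std::printf("%d %f\n", a, b); }, std::make_tuple(1, 2.5));
        return 0;
    }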
diff --git a/Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.h b/Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.h
new file mode 100644
index 000000000..3387c9086
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLDOMJITPatchpointParams.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "B3StackmapGenerationParams.h"
+#include "DOMJITPatchpointParams.h"
+
+namespace JSC { namespace FTL {
+
+class State;
+
+class DOMJITPatchpointParams : public DOMJIT::PatchpointParams {
+public:
+ DOMJITPatchpointParams(State& state, const B3::StackmapGenerationParams& params, DFG::Node* node, Box<CCallHelpers::JumpList> exceptions, Vector<DOMJIT::Value>&& regs, Vector<GPRReg>&& gpScratch, Vector<FPRReg>&& fpScratch)
+ : DOMJIT::PatchpointParams(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch))
+ , m_state(state)
+ , m_params(params)
+ , m_node(node)
+ , m_exceptions(exceptions)
+ {
+ }
+
+private:
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) void addSlowPathCallImpl(CCallHelpers::JumpList, CCallHelpers&, OperationType, ResultType, std::tuple<__VA_ARGS__> args) override;
+ DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+
+ State& m_state;
+ const B3::StackmapGenerationParams& m_params;
+ DFG::Node* m_node;
+ Box<CCallHelpers::JumpList> m_exceptions;
+};
+
+} }
+
+#endif
diff --git a/Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp b/Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp
new file mode 100644
index 000000000..023e119bf
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLExceptionTarget.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "FTLState.h"
+
+namespace JSC { namespace FTL {
+
+ExceptionTarget::~ExceptionTarget()
+{
+}
+
+CodeLocationLabel ExceptionTarget::label(LinkBuffer& linkBuffer)
+{
+ if (m_isDefaultHandler)
+ return linkBuffer.locationOf(*m_defaultHandler);
+ return linkBuffer.locationOf(m_handle->label);
+}
+
+Box<CCallHelpers::JumpList> ExceptionTarget::jumps(CCallHelpers& jit)
+{
+ Box<CCallHelpers::JumpList> result = Box<CCallHelpers::JumpList>::create();
+ if (m_isDefaultHandler) {
+ Box<CCallHelpers::Label> defaultHandler = m_defaultHandler;
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(*result, linkBuffer.locationOf(*defaultHandler));
+ });
+ } else {
+ RefPtr<OSRExitHandle> handle = m_handle;
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(*result, linkBuffer.locationOf(handle->label));
+ });
+ }
+ return result;
+}
+
+ExceptionTarget::ExceptionTarget(
+ bool isDefaultHandler, Box<CCallHelpers::Label> defaultHandler, RefPtr<OSRExitHandle> handle)
+ : m_isDefaultHandler(isDefaultHandler)
+ , m_defaultHandler(defaultHandler)
+ , m_handle(handle)
+{
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
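ExceptionTarget::jumps() above hands back a Box<JumpList> immediately and defers the actual binding to a link task registered with addLinkTask(): jumps collected now are linked to the handler label only once the LinkBuffer exists. The sketch below shows that deferred-linking shape with std::function callbacks and stub types in place of the real CCallHelpers and LinkBuffer.

    #include <cstdio>
    #include <functional>
    #include <memory>
    #include <vector>

    // Stand-ins for CCallHelpers jumps and LinkBuffer; not the real JSC classes.
    struct JumpListStub { std::vector<int> jumpOffsets; };
    struct LinkBufferStub { int handlerLocation { 0x40 }; };

    struct AssemblerStub {
        std::vector<std::function<void(LinkBufferStub&)>> linkTasks;

        // Analogue of ExceptionTarget::jumps(): return a shared jump list now and
        // register a task that binds it to the handler once the link buffer exists.
        std::shared_ptr<JumpListStub> jumpsToHandler()
        {
            auto jumps = std::make_shared<JumpListStub>();
            linkTasks.push_back([jumps] (LinkBufferStub& linkBuffer) {
                for (int offset : jumps->jumpOffsets)
                    std::printf("link jump at %d -> handler at %d\n", offset, linkBuffer.handlerLocation);
            });
            return jumps;
        }
    };

    int main()
    {
        AssemblerStub jit;
        auto jumps = jit.jumpsToHandler();
        jumps->jumpOffsets.push_back(8); // emitted later, before linking
        LinkBufferStub linkBuffer;
        for (auto& task : jit.linkTasks)
            task(linkBuffer);
        return 0;
    }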
diff --git a/Source/JavaScriptCore/ftl/FTLExceptionTarget.h b/Source/JavaScriptCore/ftl/FTLExceptionTarget.h
new file mode 100644
index 000000000..495377433
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLExceptionTarget.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "DFGCommon.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "CCallHelpers.h"
+#include "FTLOSRExitHandle.h"
+#include <wtf/Box.h>
+#include <wtf/ThreadSafeRefCounted.h>
+
+namespace JSC { namespace FTL {
+
+class State;
+
+class ExceptionTarget : public ThreadSafeRefCounted<ExceptionTarget> {
+public:
+ ~ExceptionTarget();
+
+ // It's OK to call this during linking, but not any sooner.
+ CodeLocationLabel label(LinkBuffer&);
+
+ // Or, you can get a JumpList at any time. Anything you add to this JumpList will be linked to
+ // the target's label.
+ Box<CCallHelpers::JumpList> jumps(CCallHelpers&);
+
+private:
+ friend class PatchpointExceptionHandle;
+
+ ExceptionTarget(bool isDefaultHandler, Box<CCallHelpers::Label>, RefPtr<OSRExitHandle>);
+
+ bool m_isDefaultHandler;
+ Box<CCallHelpers::Label> m_defaultHandler;
+ RefPtr<OSRExitHandle> m_handle;
+};
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
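
Editor's note (sketch, not part of the patch): the API above is two-phase. jumps() may be called while code is still being generated, whereas label() is only meaningful once a LinkBuffer exists. A minimal, hypothetical client inside namespace JSC::FTL might look like this; the names "jit" and "target" are assumptions for the example.

    // Hedged sketch: emit a check now, resolve it against the ExceptionTarget at link time.
    void emitThrowingCheck(CCallHelpers& jit, RefPtr<ExceptionTarget> target)
    {
        // Everything appended to this JumpList gets linked to the target's label by the
        // link tasks that jumps() registers on the CCallHelpers.
        Box<CCallHelpers::JumpList> exceptionJumps = target->jumps(jit);
        exceptionJumps->append(jit.jump()); // stand-in for a real exception check
    }
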
diff --git a/Source/JavaScriptCore/ftl/FTLExitArgument.cpp b/Source/JavaScriptCore/ftl/FTLExitArgument.cpp
index cbe0de7fd..381b3d3a4 100644
--- a/Source/JavaScriptCore/ftl/FTLExitArgument.cpp
+++ b/Source/JavaScriptCore/ftl/FTLExitArgument.cpp
@@ -32,7 +32,7 @@ namespace JSC { namespace FTL {
void ExitArgument::dump(PrintStream& out) const
{
- out.print("arg", argument(), " as ", format());
+ out.print("#", argument(), " as ", format());
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLExitArgument.h b/Source/JavaScriptCore/ftl/FTLExitArgument.h
index ae292c69e..833b3b1eb 100644
--- a/Source/JavaScriptCore/ftl/FTLExitArgument.h
+++ b/Source/JavaScriptCore/ftl/FTLExitArgument.h
@@ -23,20 +23,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLExitArgument_h
-#define FTLExitArgument_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "FTLValueFormat.h"
+#include "DataFormat.h"
#include <wtf/PrintStream.h>
namespace JSC { namespace FTL {
struct ExitArgumentRepresentation {
- ValueFormat format;
+ DataFormat format;
unsigned argument;
};
@@ -44,10 +41,10 @@ class ExitArgument {
public:
ExitArgument()
{
- m_representation.format = InvalidValueFormat;
+ m_representation.format = DataFormatNone;
}
- ExitArgument(ValueFormat format, unsigned argument)
+ ExitArgument(DataFormat format, unsigned argument)
{
m_representation.format = format;
m_representation.argument = argument;
@@ -58,9 +55,9 @@ public:
m_representation = representation;
}
- bool operator!() const { return m_representation.format == InvalidValueFormat; }
+ bool operator!() const { return m_representation.format == DataFormatNone; }
- ValueFormat format() const
+ DataFormat format() const
{
ASSERT(*this);
return m_representation.format;
@@ -72,11 +69,11 @@ public:
return m_representation.argument;
}
- ExitArgument withFormat(ValueFormat format)
+ ExitArgument withFormat(DataFormat format)
{
return ExitArgument(format, argument());
}
-
+
ExitArgumentRepresentation representation() const { return m_representation; }
void dump(PrintStream&) const;
@@ -88,6 +85,3 @@ private:
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLExitArgument_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp
index 775abe18f..074e7a69e 100644
--- a/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp
+++ b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp
@@ -32,7 +32,7 @@ namespace JSC { namespace FTL {
void ExitArgumentForOperand::dump(PrintStream& out) const
{
- out.print(m_exitArgument, " for r", m_operand);
+ out.print(m_exitArgument, " for ", m_operand);
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h
index a5ec737c0..2cad043dd 100644
--- a/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h
+++ b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLExitArgumentForOperand_h
-#define FTLExitArgumentForOperand_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -69,6 +66,3 @@ inline bool lesserArgumentIndex(
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLExitArgumentForOperand_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp
new file mode 100644
index 000000000..d57db747a
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLExitPropertyValue.h"
+
+#if ENABLE(FTL_JIT)
+
+namespace JSC { namespace FTL {
+
+ExitPropertyValue ExitPropertyValue::withLocalsOffset(int offset) const
+{
+ return ExitPropertyValue(m_location, m_value.withLocalsOffset(offset));
+}
+
+void ExitPropertyValue::dump(PrintStream& out) const
+{
+ out.print(m_location, " => ", m_value);
+}
+
+void ExitPropertyValue::validateReferences(const TrackedReferences& trackedReferences) const
+{
+ m_value.validateReferences(trackedReferences);
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLValueFormat.h b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.h
index b031f0de2..228827ab4 100644
--- a/Source/JavaScriptCore/ftl/FTLValueFormat.h
+++ b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,47 +23,48 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLValueFormat_h
-#define FTLValueFormat_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "GPRInfo.h"
-#include <wtf/PrintStream.h>
+#include "DFGPromotedHeapLocation.h"
+#include "FTLExitValue.h"
namespace JSC {
-class AssemblyHelpers;
+class TrackedReferences;
namespace FTL {
-// Note that this is awkwardly similar to DataFormat in other parts of JSC, except that
-// unlike DataFormat and like ValueRecovery, it distinguishes between UInt32 and Int32.
-
-enum ValueFormat {
- InvalidValueFormat,
- ValueFormatInt32,
- ValueFormatInt52,
- ValueFormatStrictInt52,
- ValueFormatBoolean,
- ValueFormatJSValue,
- ValueFormatDouble
+class ExitPropertyValue {
+public:
+ ExitPropertyValue()
+ {
+ }
+
+ ExitPropertyValue(DFG::PromotedLocationDescriptor location, const ExitValue& value)
+ : m_location(location)
+ , m_value(value)
+ {
+ ASSERT(!!location == !!value);
+ }
+
+ bool operator!() const { return !m_location; }
+
+ DFG::PromotedLocationDescriptor location() const { return m_location; }
+ const ExitValue& value() const { return m_value; }
+
+ ExitPropertyValue withLocalsOffset(int offset) const;
+
+ void dump(PrintStream& out) const;
+
+ void validateReferences(const TrackedReferences&) const;
+
+private:
+ DFG::PromotedLocationDescriptor m_location;
+ ExitValue m_value;
};
-void reboxAccordingToFormat(
- ValueFormat, AssemblyHelpers&, GPRReg value, GPRReg scratch1, GPRReg scratch2);
-
} } // namespace JSC::FTL
-namespace WTF {
-
-void printInternal(PrintStream&, JSC::FTL::ValueFormat);
-
-} // namespace WTF
-
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLValueFormat_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLIntrinsicRepository.cpp b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.cpp
index dc7d57931..858d1babc 100644
--- a/Source/JavaScriptCore/ftl/FTLIntrinsicRepository.cpp
+++ b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,40 +24,57 @@
*/
#include "config.h"
-#include "FTLIntrinsicRepository.h"
+#include "FTLExitTimeObjectMaterialization.h"
#if ENABLE(FTL_JIT)
+#include "DFGGraph.h"
+
namespace JSC { namespace FTL {
-IntrinsicRepository::IntrinsicRepository(LContext context)
- : CommonValues(context)
-#define INTRINSIC_INITIALIZATION(ourName, llvmName, type) , m_##ourName(0)
- FOR_EACH_FTL_INTRINSIC(INTRINSIC_INITIALIZATION)
-#undef INTRINSIC_INITIALIZATION
-#define FUNCTION_TYPE_INITIALIZATION(typeName, type) , m_##typeName(0)
- FOR_EACH_FUNCTION_TYPE(FUNCTION_TYPE_INITIALIZATION)
-#undef FUNCTION_TYPE_INITIALIZATION
+using namespace JSC::DFG;
+
+ExitTimeObjectMaterialization::ExitTimeObjectMaterialization(NodeType type, CodeOrigin codeOrigin)
+ : m_type(type)
+ , m_origin(codeOrigin)
{
}
-#define INTRINSIC_GETTER_SLOW_DEFINITION(ourName, llvmName, type) \
- LValue IntrinsicRepository::ourName##IntrinsicSlow() \
- { \
- m_##ourName = addExternFunction(m_module, llvmName, type); \
- return m_##ourName; \
- }
-FOR_EACH_FTL_INTRINSIC(INTRINSIC_GETTER_SLOW_DEFINITION)
-#undef INTRINSIC_GETTER
+ExitTimeObjectMaterialization::~ExitTimeObjectMaterialization()
+{
+}
-#define FUNCTION_TYPE_GETTER_SLOW_DEFINITION(typeName, type) \
- LType IntrinsicRepository::typeName##Slow() \
- { \
- m_##typeName = type; \
- return m_##typeName; \
+void ExitTimeObjectMaterialization::add(
+ PromotedLocationDescriptor location, const ExitValue& value)
+{
+ m_properties.append(ExitPropertyValue(location, value));
+}
+
+ExitValue ExitTimeObjectMaterialization::get(PromotedLocationDescriptor location) const
+{
+ for (ExitPropertyValue value : m_properties) {
+ if (value.location() == location)
+ return value.value();
}
-FOR_EACH_FUNCTION_TYPE(FUNCTION_TYPE_GETTER_SLOW_DEFINITION)
-#undef FUNCTION_TYPE_GETTER_SLOW_DEFINITION
+ return ExitValue();
+}
+
+void ExitTimeObjectMaterialization::accountForLocalsOffset(int offset)
+{
+ for (ExitPropertyValue& property : m_properties)
+ property = property.withLocalsOffset(offset);
+}
+
+void ExitTimeObjectMaterialization::dump(PrintStream& out) const
+{
+ out.print(RawPointer(this), ":", Graph::opName(m_type), "(", listDump(m_properties), ")");
+}
+
+void ExitTimeObjectMaterialization::validateReferences(const TrackedReferences& trackedReferences) const
+{
+ for (ExitPropertyValue value : m_properties)
+ value.validateReferences(trackedReferences);
+}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h
new file mode 100644
index 000000000..549af1142
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "DFGNodeType.h"
+#include "FTLExitPropertyValue.h"
+#include "FTLExitValue.h"
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+namespace FTL {
+
+class ExitTimeObjectMaterialization {
+ WTF_MAKE_NONCOPYABLE(ExitTimeObjectMaterialization)
+public:
+ ExitTimeObjectMaterialization(DFG::NodeType, CodeOrigin);
+ ~ExitTimeObjectMaterialization();
+
+ void add(DFG::PromotedLocationDescriptor, const ExitValue&);
+
+ DFG::NodeType type() const { return m_type; }
+ CodeOrigin origin() const { return m_origin; }
+
+ ExitValue get(DFG::PromotedLocationDescriptor) const;
+ const Vector<ExitPropertyValue>& properties() const { return m_properties; }
+
+ void accountForLocalsOffset(int offset);
+
+ void dump(PrintStream& out) const;
+
+ void validateReferences(const TrackedReferences&) const;
+
+private:
+ DFG::NodeType m_type;
+ CodeOrigin m_origin;
+ Vector<ExitPropertyValue> m_properties;
+};
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
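
Editor's note (sketch, not part of the patch): how the materialization record above is meant to be used. The code origin, location and value below are placeholders, and ExitValue::constant() comes from the unchanged part of FTLExitValue.h.

    // Record the exit-time state of an object the DFG sank (e.g. a PhantomNewObject).
    ExitTimeObjectMaterialization materialization(DFG::PhantomNewObject, codeOrigin);
    materialization.add(structureLocation, ExitValue::constant(structureValue));

    // get() walks the recorded properties; it returns a default ExitValue when the
    // requested location was never added.
    ExitValue recovered = materialization.get(structureLocation);
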
diff --git a/Source/JavaScriptCore/ftl/FTLExitValue.cpp b/Source/JavaScriptCore/ftl/FTLExitValue.cpp
index a987c60ce..7004b9bdb 100644
--- a/Source/JavaScriptCore/ftl/FTLExitValue.cpp
+++ b/Source/JavaScriptCore/ftl/FTLExitValue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +28,61 @@
#if ENABLE(FTL_JIT)
-#include "Operations.h"
+#include "FTLExitTimeObjectMaterialization.h"
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
namespace JSC { namespace FTL {
+ExitValue ExitValue::materializeNewObject(ExitTimeObjectMaterialization* data)
+{
+ ExitValue result;
+ result.m_kind = ExitValueMaterializeNewObject;
+ result.u.newObjectMaterializationData = data;
+ return result;
+}
+
+ExitValue ExitValue::withLocalsOffset(int offset) const
+{
+ if (!isInJSStackSomehow())
+ return *this;
+ if (!virtualRegister().isLocal())
+ return *this;
+ return withVirtualRegister(virtualRegister() + offset);
+}
+
+DataFormat ExitValue::dataFormat() const
+{
+ switch (kind()) {
+ case InvalidExitValue:
+ RELEASE_ASSERT_NOT_REACHED();
+ return DataFormatNone;
+
+ case ExitValueDead:
+ case ExitValueConstant:
+ case ExitValueInJSStack:
+ case ExitValueMaterializeNewObject:
+ return DataFormatJS;
+
+ case ExitValueArgument:
+ return exitArgument().format();
+
+ case ExitValueInJSStackAsInt32:
+ return DataFormatInt32;
+
+ case ExitValueInJSStackAsInt52:
+ return DataFormatInt52;
+
+ case ExitValueInJSStackAsDouble:
+ return DataFormatDouble;
+
+ case ExitValueRecovery:
+ return recoveryFormat();
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
void ExitValue::dumpInContext(PrintStream& out, DumpContext* context) const
{
switch (kind()) {
@@ -48,16 +99,22 @@ void ExitValue::dumpInContext(PrintStream& out, DumpContext* context) const
out.print("Constant(", inContext(constant(), context), ")");
return;
case ExitValueInJSStack:
- out.print("InJSStack:r", virtualRegister());
+ out.print("InJSStack:", virtualRegister());
return;
case ExitValueInJSStackAsInt32:
- out.print("InJSStackAsInt32:r", virtualRegister());
+ out.print("InJSStackAsInt32:", virtualRegister());
return;
case ExitValueInJSStackAsInt52:
- out.print("InJSStackAsInt52:r", virtualRegister());
+ out.print("InJSStackAsInt52:", virtualRegister());
return;
case ExitValueInJSStackAsDouble:
- out.print("InJSStackAsDouble:r", virtualRegister());
+ out.print("InJSStackAsDouble:", virtualRegister());
+ return;
+ case ExitValueRecovery:
+ out.print("Recovery(", recoveryOpcode(), ", arg", leftRecoveryArgument(), ", arg", rightRecoveryArgument(), ", ", recoveryFormat(), ")");
+ return;
+ case ExitValueMaterializeNewObject:
+ out.print("Materialize(", WTF::RawPointer(objectMaterialization()), ")");
return;
}
@@ -69,6 +126,12 @@ void ExitValue::dump(PrintStream& out) const
dumpInContext(out, 0);
}
+void ExitValue::validateReferences(const TrackedReferences& trackedReferences) const
+{
+ if (isConstant())
+ trackedReferences.check(constant());
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLExitValue.h b/Source/JavaScriptCore/ftl/FTLExitValue.h
index 7aaaedd8d..c9946b2e7 100644
--- a/Source/JavaScriptCore/ftl/FTLExitValue.h
+++ b/Source/JavaScriptCore/ftl/FTLExitValue.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,19 +23,21 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLExitValue_h
-#define FTLExitValue_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
#include "FTLExitArgument.h"
+#include "FTLRecoveryOpcode.h"
#include "JSCJSValue.h"
#include "VirtualRegister.h"
#include <wtf/PrintStream.h>
-namespace JSC { namespace FTL {
+namespace JSC {
+
+class TrackedReferences;
+
+namespace FTL {
// This is like ValueRecovery, but respects the way that the FTL does OSR
// exit: the live non-constant non-flushed values are passed as arguments
@@ -51,9 +53,13 @@ enum ExitValueKind {
ExitValueInJSStack,
ExitValueInJSStackAsInt32,
ExitValueInJSStackAsInt52,
- ExitValueInJSStackAsDouble
+ ExitValueInJSStackAsDouble,
+ ExitValueRecovery,
+ ExitValueMaterializeNewObject
};
+class ExitTimeObjectMaterialization;
+
class ExitValue {
public:
ExitValue()
@@ -118,6 +124,19 @@ public:
return result;
}
+ static ExitValue recovery(RecoveryOpcode opcode, unsigned leftArgument, unsigned rightArgument, DataFormat format)
+ {
+ ExitValue result;
+ result.m_kind = ExitValueRecovery;
+ result.u.recovery.opcode = opcode;
+ result.u.recovery.leftArgument = leftArgument;
+ result.u.recovery.rightArgument = rightArgument;
+ result.u.recovery.format = format;
+ return result;
+ }
+
+ static ExitValue materializeNewObject(ExitTimeObjectMaterialization*);
+
ExitValueKind kind() const { return m_kind; }
bool isDead() const { return kind() == ExitValueDead; }
@@ -135,6 +154,9 @@ public:
}
bool isConstant() const { return kind() == ExitValueConstant; }
bool isArgument() const { return kind() == ExitValueArgument; }
+ bool isRecovery() const { return kind() == ExitValueRecovery; }
+ bool isObjectMaterialization() const { return kind() == ExitValueMaterializeNewObject; }
+ bool hasIndexInStackmapLocations() const { return isArgument() || isRecovery(); }
ExitArgument exitArgument() const
{
@@ -142,6 +164,42 @@ public:
return ExitArgument(u.argument);
}
+ unsigned leftRecoveryArgument() const
+ {
+ ASSERT(isRecovery());
+ return u.recovery.leftArgument;
+ }
+
+ unsigned rightRecoveryArgument() const
+ {
+ ASSERT(isRecovery());
+ return u.recovery.rightArgument;
+ }
+
+ void adjustStackmapLocationsIndexByOffset(unsigned offset)
+ {
+ ASSERT(hasIndexInStackmapLocations());
+ if (isArgument())
+ u.argument.argument += offset;
+ else {
+ ASSERT(isRecovery());
+ u.recovery.rightArgument += offset;
+ u.recovery.leftArgument += offset;
+ }
+ }
+
+ DataFormat recoveryFormat() const
+ {
+ ASSERT(isRecovery());
+ return static_cast<DataFormat>(u.recovery.format);
+ }
+
+ RecoveryOpcode recoveryOpcode() const
+ {
+ ASSERT(isRecovery());
+ return static_cast<RecoveryOpcode>(u.recovery.opcode);
+ }
+
JSValue constant() const
{
ASSERT(isConstant());
@@ -153,54 +211,51 @@ public:
ASSERT(isInJSStackSomehow());
return VirtualRegister(u.virtualRegister);
}
+
+ ExitTimeObjectMaterialization* objectMaterialization() const
+ {
+ ASSERT(isObjectMaterialization());
+ return u.newObjectMaterializationData;
+ }
+ ExitValue withVirtualRegister(VirtualRegister virtualRegister) const
+ {
+ ASSERT(isInJSStackSomehow());
+ ExitValue result;
+ result.m_kind = m_kind;
+ result.u.virtualRegister = virtualRegister.offset();
+ return result;
+ }
+
+ ExitValue withLocalsOffset(int offset) const;
+
// If it's in the JSStack somehow, this will tell you what format it's in, in a manner
// that is compatible with exitArgument().format(). If it's a constant or it's dead, it
// will claim to be a JSValue. If it's an argument then it will tell you the argument's
// format.
- ValueFormat valueFormat() const
- {
- switch (kind()) {
- case InvalidExitValue:
- RELEASE_ASSERT_NOT_REACHED();
- return InvalidValueFormat;
-
- case ExitValueDead:
- case ExitValueConstant:
- case ExitValueInJSStack:
- return ValueFormatJSValue;
-
- case ExitValueArgument:
- return exitArgument().format();
-
- case ExitValueInJSStackAsInt32:
- return ValueFormatInt32;
-
- case ExitValueInJSStackAsInt52:
- return ValueFormatInt52;
-
- case ExitValueInJSStackAsDouble:
- return ValueFormatDouble;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return InvalidValueFormat;
- }
+ DataFormat dataFormat() const;
void dump(PrintStream&) const;
void dumpInContext(PrintStream&, DumpContext*) const;
+ void validateReferences(const TrackedReferences&) const;
+
private:
ExitValueKind m_kind;
union {
ExitArgumentRepresentation argument;
EncodedJSValue constant;
int virtualRegister;
+ struct {
+ uint16_t leftArgument;
+ uint16_t rightArgument;
+ uint16_t opcode;
+ uint16_t format;
+ } recovery;
+ ExitTimeObjectMaterialization* newObjectMaterializationData;
} u;
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLExitValue_h
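
Editor's note (sketch, not part of the patch): the two newly added kinds, matching the dataFormat() switch in FTLExitValue.cpp above. "materialization" is a placeholder for an ExitTimeObjectMaterialization owned elsewhere, and AddRecovery is assumed to be one of the opcodes from FTLRecoveryOpcode.h.

    // A recovery recombines two stackmap arguments at exit time; the format travels with it.
    ExitValue recovered = ExitValue::recovery(AddRecovery, 0, 1, DataFormatInt32);
    ASSERT(recovered.dataFormat() == DataFormatInt32);

    // A materialization defers object allocation to exit time and reads back as a JSValue.
    ExitValue object = ExitValue::materializeNewObject(materialization);
    ASSERT(object.dataFormat() == DataFormatJS);
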
diff --git a/Source/JavaScriptCore/ftl/FTLFail.cpp b/Source/JavaScriptCore/ftl/FTLFail.cpp
index 1345c6fa1..5c6426a54 100644
--- a/Source/JavaScriptCore/ftl/FTLFail.cpp
+++ b/Source/JavaScriptCore/ftl/FTLFail.cpp
@@ -30,7 +30,6 @@
#include "DFGFailedFinalizer.h"
#include "FTLJITCode.h"
-#include "LLVMAPI.h"
namespace JSC { namespace FTL {
@@ -38,10 +37,7 @@ using namespace DFG;
void fail(State& state)
{
- state.graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(state.graph.m_plan));
-
- if (state.module)
- llvm->DisposeModule(state.module);
+ state.graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(state.graph.m_plan);
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLFail.h b/Source/JavaScriptCore/ftl/FTLFail.h
index 1d6432cf3..35188a437 100644
--- a/Source/JavaScriptCore/ftl/FTLFail.h
+++ b/Source/JavaScriptCore/ftl/FTLFail.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLFail_h
-#define FTLFail_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -39,6 +36,3 @@ void fail(State&);
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLFail_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp
index 708bedc38..3080dc345 100644
--- a/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp
+++ b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp
@@ -31,6 +31,8 @@
namespace JSC { namespace FTL {
ForOSREntryJITCode::ForOSREntryJITCode()
+ : m_bytecodeIndex(UINT_MAX)
+ , m_entryFailureCount(0)
{
}
@@ -43,9 +45,9 @@ ForOSREntryJITCode* ForOSREntryJITCode::ftlForOSREntry()
return this;
}
-void ForOSREntryJITCode::initializeEntryBuffer(VM& vm, unsigned numCalleeRegisters)
+void ForOSREntryJITCode::initializeEntryBuffer(VM& vm, unsigned numCalleeLocals)
{
- m_entryBuffer = vm.scratchBufferForSize(numCalleeRegisters * sizeof(EncodedJSValue));
+ m_entryBuffer = vm.scratchBufferForSize(numCalleeLocals * sizeof(EncodedJSValue));
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h
index 8d863a129..06db3dedb 100644
--- a/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h
+++ b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLForOSREntryJITCode_h
-#define FTLForOSREntryJITCode_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -48,7 +45,7 @@ public:
ForOSREntryJITCode();
~ForOSREntryJITCode();
- void initializeEntryBuffer(VM&, unsigned numCalleeRegisters);
+ void initializeEntryBuffer(VM&, unsigned numCalleeLocals);
ScratchBuffer* entryBuffer() const { return m_entryBuffer; }
void setBytecodeIndex(unsigned value) { m_bytecodeIndex = value; }
@@ -68,6 +65,3 @@ private:
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLForOSREntryJITCode_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLFormattedValue.h b/Source/JavaScriptCore/ftl/FTLFormattedValue.h
index b7ab3616c..ee0a0701f 100644
--- a/Source/JavaScriptCore/ftl/FTLFormattedValue.h
+++ b/Source/JavaScriptCore/ftl/FTLFormattedValue.h
@@ -23,34 +23,28 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLFormattedValue_h
-#define FTLFormattedValue_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "FTLAbbreviations.h"
-#include "FTLValueFormat.h"
+#include "DataFormat.h"
+#include "FTLAbbreviatedTypes.h"
namespace JSC { namespace FTL {
// This class is mostly used for OSR; it's a way of specifying how a value is formatted
// in cases where it wouldn't have been obvious from looking at other indicators (like
-// the type of the LLVMValueRef or the type of the DFG::Node). Typically this arises
-// because LLVMValueRef doesn't give us the granularity we need to begin with, and we
-// use this in situations where there is no good way to say what node the value came
-// from.
+// the type of the B3::Value* or the type of the DFG::Node).
class FormattedValue {
public:
FormattedValue()
- : m_format(InvalidValueFormat)
+ : m_format(DataFormatNone)
, m_value(0)
{
}
- FormattedValue(ValueFormat format, LValue value)
+ FormattedValue(DataFormat format, LValue value)
: m_format(format)
, m_value(value)
{
@@ -58,27 +52,24 @@ public:
bool operator!() const
{
- ASSERT((m_format == InvalidValueFormat) == !m_value);
- return m_format == InvalidValueFormat;
+ ASSERT((m_format == DataFormatNone) == !m_value);
+ return m_format == DataFormatNone;
}
- ValueFormat format() const { return m_format; }
+ DataFormat format() const { return m_format; }
LValue value() const { return m_value; }
private:
- ValueFormat m_format;
+ DataFormat m_format;
LValue m_value;
};
static inline FormattedValue noValue() { return FormattedValue(); }
-static inline FormattedValue int32Value(LValue value) { return FormattedValue(ValueFormatInt32, value); }
-static inline FormattedValue booleanValue(LValue value) { return FormattedValue(ValueFormatBoolean, value); }
-static inline FormattedValue jsValueValue(LValue value) { return FormattedValue(ValueFormatJSValue, value); }
-static inline FormattedValue doubleValue(LValue value) { return FormattedValue(ValueFormatDouble, value); }
+static inline FormattedValue int32Value(LValue value) { return FormattedValue(DataFormatInt32, value); }
+static inline FormattedValue booleanValue(LValue value) { return FormattedValue(DataFormatBoolean, value); }
+static inline FormattedValue jsValueValue(LValue value) { return FormattedValue(DataFormatJS, value); }
+static inline FormattedValue doubleValue(LValue value) { return FormattedValue(DataFormatDouble, value); }
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLFormattedValue_h
-
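
Editor's note (sketch, not part of the patch): the helpers above simply pair an LValue with the DataFormat that the OSR exit machinery should assume for it. "someInt32" stands for any B3 value produced by the lowering.

    FormattedValue formatted = int32Value(someInt32);
    ASSERT(formatted.format() == DataFormatInt32);
    ASSERT(!noValue()); // the default-constructed FormattedValue tests false
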
diff --git a/Source/JavaScriptCore/ftl/FTLGeneratedFunction.h b/Source/JavaScriptCore/ftl/FTLGeneratedFunction.h
index 8226d0ac3..f6fba2891 100644
--- a/Source/JavaScriptCore/ftl/FTLGeneratedFunction.h
+++ b/Source/JavaScriptCore/ftl/FTLGeneratedFunction.h
@@ -23,8 +23,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLGeneratedFunction_h
-#define FTLGeneratedFunction_h
+#pragma once
+
+#if ENABLE(FTL_JIT)
#include "CallFrame.h"
@@ -34,4 +35,4 @@ typedef EncodedJSValue (*GeneratedFunction)(ExecState*);
} } // namespace JSC::FTL
-#endif // FTLGeneratedFunction_h
+#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLIntrinsicRepository.h b/Source/JavaScriptCore/ftl/FTLIntrinsicRepository.h
deleted file mode 100644
index 7f560dd95..000000000
--- a/Source/JavaScriptCore/ftl/FTLIntrinsicRepository.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef FTLIntrinsicRepository_h
-#define FTLIntrinsicRepository_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(FTL_JIT)
-
-#include "DFGOperations.h"
-#include "FTLAbbreviations.h"
-#include "FTLCommonValues.h"
-
-namespace JSC { namespace FTL {
-
-#define FOR_EACH_FTL_INTRINSIC(macro) \
- macro(addWithOverflow32, "llvm.sadd.with.overflow.i32", functionType(structType(m_context, int32, boolean), int32, int32)) \
- macro(addWithOverflow64, "llvm.sadd.with.overflow.i64", functionType(structType(m_context, int64, boolean), int64, int64)) \
- macro(doubleAbs, "llvm.fabs.f64", functionType(doubleType, doubleType)) \
- macro(mulWithOverflow32, "llvm.smul.with.overflow.i32", functionType(structType(m_context, int32, boolean), int32, int32)) \
- macro(mulWithOverflow64, "llvm.smul.with.overflow.i64", functionType(structType(m_context, int64, boolean), int64, int64)) \
- macro(patchpointInt64, "llvm.experimental.patchpoint.i64", functionType(int64, int32, int32, ref8, int32, Variadic)) \
- macro(patchpointVoid, "llvm.experimental.patchpoint.void", functionType(voidType, int32, int32, ref8, int32, Variadic)) \
- macro(stackmap, "llvm.experimental.stackmap", functionType(voidType, int32, int32, Variadic)) \
- macro(subWithOverflow32, "llvm.ssub.with.overflow.i32", functionType(structType(m_context, int32, boolean), int32, int32)) \
- macro(subWithOverflow64, "llvm.ssub.with.overflow.i64", functionType(structType(m_context, int64, boolean), int64, int64)) \
- macro(trap, "llvm.trap", functionType(voidType)) \
- macro(x86SSE2CvtTSD2SI, "llvm.x86.sse2.cvttsd2si", functionType(int32, vectorType(doubleType, 2)))
-
-#define FOR_EACH_FUNCTION_TYPE(macro) \
- macro(C_JITOperation_ESt, functionType(intPtr, intPtr, intPtr)) \
- macro(I_JITOperation_EJss, functionType(intPtr, intPtr, intPtr)) \
- macro(J_JITOperation_E, functionType(int64, intPtr)) \
- macro(J_JITOperation_EAZ, functionType(int64, intPtr, intPtr, int32)) \
- macro(J_JITOperation_EJJ, functionType(int64, intPtr, int64, int64)) \
- macro(J_JITOperation_EJssZ, functionType(int64, intPtr, intPtr, int32)) \
- macro(J_JITOperation_ESsiJI, functionType(int64, intPtr, intPtr, int64, intPtr)) \
- macro(Jss_JITOperation_EZ, functionType(intPtr, intPtr, int32)) \
- macro(P_JITOperation_E, functionType(intPtr, intPtr)) \
- macro(P_JITOperation_EC, functionType(intPtr, intPtr, intPtr)) \
- macro(P_JITOperation_EO, functionType(intPtr, intPtr, intPtr)) \
- macro(P_JITOperation_ESt, functionType(intPtr, intPtr, intPtr)) \
- macro(P_JITOperation_EStPS, functionType(intPtr, intPtr, intPtr, intPtr, intPtr)) \
- macro(P_JITOperation_EStSS, functionType(intPtr, intPtr, intPtr, intPtr, intPtr)) \
- macro(P_JITOperation_EStZ, functionType(intPtr, intPtr, intPtr, int32)) \
- macro(S_JITOperation_EJ, functionType(intPtr, intPtr, int64)) \
- macro(S_JITOperation_EJJ, functionType(intPtr, intPtr, int64, int64)) \
- macro(V_JITOperation_EJJJ, functionType(voidType, intPtr, int64, int64, int64)) \
- macro(V_JITOperation_EOZD, functionType(voidType, intPtr, intPtr, int32, doubleType)) \
- macro(V_JITOperation_EOZJ, functionType(voidType, intPtr, intPtr, int32, int64)) \
- macro(V_JITOperation_EC, functionType(voidType, intPtr, intPtr)) \
- macro(V_JITOperation_EVws, functionType(voidType, intPtr, intPtr)) \
- macro(Z_JITOperation_D, functionType(int32, doubleType))
-
-class IntrinsicRepository : public CommonValues {
-public:
- IntrinsicRepository(LContext);
-
-#define INTRINSIC_GETTER(ourName, llvmName, type) \
- LValue ourName##Intrinsic() { \
- if (!m_##ourName) \
- return ourName##IntrinsicSlow(); \
- return m_##ourName; \
- }
- FOR_EACH_FTL_INTRINSIC(INTRINSIC_GETTER)
-#undef INTRINSIC_GETTER
-
-#define FUNCTION_TYPE_GETTER(typeName, type) \
- LType typeName() \
- { \
- if (!m_##typeName) \
- return typeName##Slow(); \
- return m_##typeName; \
- }
- FOR_EACH_FUNCTION_TYPE(FUNCTION_TYPE_GETTER)
-#undef FUNCTION_TYPE_GETTER
-
-#define FUNCTION_TYPE_RESOLVER(typeName, type) \
- LType operationType(JSC::typeName) \
- { \
- return typeName(); \
- }
- FOR_EACH_FUNCTION_TYPE(FUNCTION_TYPE_RESOLVER)
-#undef FUNCTION_TYPE_RESOLVER
-
-private:
-#define INTRINSIC_GETTER_SLOW_DECLARATION(ourName, llvmName, type) \
- LValue ourName##IntrinsicSlow();
- FOR_EACH_FTL_INTRINSIC(INTRINSIC_GETTER_SLOW_DECLARATION)
-#undef INTRINSIC_GETTER
-
-#define INTRINSIC_FIELD_DECLARATION(ourName, llvmName, type) LValue m_##ourName;
- FOR_EACH_FTL_INTRINSIC(INTRINSIC_FIELD_DECLARATION)
-#undef INTRINSIC_FIELD_DECLARATION
-
-#define FUNCTION_TYPE_GETTER_SLOW_DECLARATION(typeName, type) \
- LType typeName##Slow();
- FOR_EACH_FUNCTION_TYPE(FUNCTION_TYPE_GETTER_SLOW_DECLARATION)
-#undef FUNCTION_TYPE_GETTER_SLOW_DECLARATION
-
-#define FUNCTION_TYPE_FIELD_DECLARATION(typeName, type) \
- LType m_##typeName;
- FOR_EACH_FUNCTION_TYPE(FUNCTION_TYPE_FIELD_DECLARATION)
-#undef FUNCTION_TYPE_FIELD_DECLARATION
-};
-
-} } // namespace JSC::FTL
-
-#endif // ENABLE(FTL_JIT)
-
-#endif // FTLIntrinsicRepository_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLJITCode.cpp b/Source/JavaScriptCore/ftl/FTLJITCode.cpp
index cdc7de0c3..1cdb50957 100644
--- a/Source/JavaScriptCore/ftl/FTLJITCode.cpp
+++ b/Source/JavaScriptCore/ftl/FTLJITCode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +28,12 @@
#if ENABLE(FTL_JIT)
+#include "FTLState.h"
+
namespace JSC { namespace FTL {
+using namespace B3;
+
JITCode::JITCode()
: JSC::JITCode(FTLJIT)
{
@@ -37,44 +41,54 @@ JITCode::JITCode()
JITCode::~JITCode()
{
+ if (FTL::shouldDumpDisassembly()) {
+ dataLog("Destroying FTL JIT code at ");
+ CommaPrinter comma;
+ dataLog(comma, m_b3Code);
+ dataLog(comma, m_arityCheckEntrypoint);
+ dataLog("\n");
+ }
}
-void JITCode::initializeExitThunks(CodeRef exitThunks)
+void JITCode::initializeB3Code(CodeRef b3Code)
{
- m_exitThunks = exitThunks;
+ m_b3Code = b3Code;
}
-void JITCode::addHandle(PassRefPtr<ExecutableMemoryHandle> handle)
+void JITCode::initializeB3Byproducts(std::unique_ptr<OpaqueByproducts> byproducts)
{
- m_handles.append(handle);
+ m_b3Byproducts = WTFMove(byproducts);
}
-void JITCode::addDataSection(RefCountedArray<LSectionWord> dataSection)
+void JITCode::initializeAddressForCall(CodePtr address)
{
- m_dataSections.append(dataSection);
+ m_addressForCall = address;
}
-void JITCode::initializeCode(CodeRef entrypoint)
+void JITCode::initializeArityCheckEntrypoint(CodeRef entrypoint)
{
- m_entrypoint = entrypoint;
+ m_arityCheckEntrypoint = entrypoint;
}
-JITCode::CodePtr JITCode::addressForCall()
+JITCode::CodePtr JITCode::addressForCall(ArityCheckMode arityCheck)
{
- RELEASE_ASSERT(m_entrypoint);
- return m_entrypoint.code();
+ switch (arityCheck) {
+ case ArityCheckNotRequired:
+ return m_addressForCall;
+ case MustCheckArity:
+ return m_arityCheckEntrypoint.code();
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return CodePtr();
}
void* JITCode::executableAddressAtOffset(size_t offset)
{
- RELEASE_ASSERT(m_entrypoint);
- return reinterpret_cast<char*>(m_entrypoint.code().executableAddress()) + offset;
+ return reinterpret_cast<char*>(m_addressForCall.executableAddress()) + offset;
}
void* JITCode::dataAddressAtOffset(size_t)
{
- RELEASE_ASSERT(m_entrypoint);
-
// We can't patch FTL code, yet. Even if we did, it's not clear that we would do so
// through this API.
RELEASE_ASSERT_NOT_REACHED();
@@ -83,8 +97,6 @@ void* JITCode::dataAddressAtOffset(size_t)
unsigned JITCode::offsetOf(void*)
{
- RELEASE_ASSERT(m_entrypoint);
-
// We currently don't have visibility into the FTL code.
RELEASE_ASSERT_NOT_REACHED();
return 0;
@@ -92,8 +104,6 @@ unsigned JITCode::offsetOf(void*)
size_t JITCode::size()
{
- RELEASE_ASSERT(m_entrypoint);
-
// We don't know the size of FTL code, yet. Make a wild guess. This is mostly used for
// GC load estimates.
return 1000;
@@ -101,18 +111,11 @@ size_t JITCode::size()
bool JITCode::contains(void*)
{
- RELEASE_ASSERT(m_entrypoint);
-
// We have no idea what addresses the FTL code contains, yet.
RELEASE_ASSERT_NOT_REACHED();
return false;
}
-JITCode::CodePtr JITCode::exitThunks()
-{
- return m_exitThunks.code();
-}
-
JITCode* JITCode::ftl()
{
return this;
@@ -123,6 +126,45 @@ DFG::CommonData* JITCode::dfgCommon()
return &common;
}
+void JITCode::validateReferences(const TrackedReferences& trackedReferences)
+{
+ common.validateReferences(trackedReferences);
+
+ for (OSRExit& exit : osrExit)
+ exit.m_descriptor->validateReferences(trackedReferences);
+}
+
+RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex callSiteIndex)
+{
+ for (OSRExit& exit : osrExit) {
+ if (exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) {
+ RELEASE_ASSERT(exit.isExceptionHandler());
+ RELEASE_ASSERT(exit.isGenericUnwindHandler());
+ return ValueRep::usedRegisters(exit.m_valueReps);
+ }
+ }
+ return RegisterSet();
+}
+
+std::optional<CodeOrigin> JITCode::findPC(CodeBlock* codeBlock, void* pc)
+{
+ for (OSRExit& exit : osrExit) {
+ if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) {
+ if (handle->start() <= pc && pc < handle->end())
+ return std::optional<CodeOrigin>(exit.m_codeOriginForExitProfile);
+ }
+ }
+
+ for (std::unique_ptr<LazySlowPath>& lazySlowPath : lazySlowPaths) {
+ if (ExecutableMemoryHandle* handle = lazySlowPath->stub().executableMemory()) {
+ if (handle->start() <= pc && pc < handle->end())
+ return std::optional<CodeOrigin>(codeBlock->codeOrigin(lazySlowPath->callSiteIndex()));
+ }
+ }
+
+ return std::nullopt;
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLJITCode.h b/Source/JavaScriptCore/ftl/FTLJITCode.h
index 3e7213983..2c2809e97 100644
--- a/Source/JavaScriptCore/ftl/FTLJITCode.h
+++ b/Source/JavaScriptCore/ftl/FTLJITCode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,63 +23,63 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLJITCode_h
-#define FTLJITCode_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
+#include "B3OpaqueByproducts.h"
#include "DFGCommonData.h"
+#include "FTLLazySlowPath.h"
#include "FTLOSRExit.h"
-#include "FTLStackMaps.h"
#include "JITCode.h"
-#include "LLVMAPI.h"
-#include <wtf/RefCountedArray.h>
-namespace JSC { namespace FTL {
+namespace JSC {
+
+class TrackedReferences;
-typedef int64_t LSectionWord; // We refer to LLVM data sections using LSectionWord*, just to be clear about our intended alignment restrictions.
+namespace FTL {
class JITCode : public JSC::JITCode {
public:
JITCode();
~JITCode();
+
+ CodePtr addressForCall(ArityCheckMode) override;
+ void* executableAddressAtOffset(size_t offset) override;
+ void* dataAddressAtOffset(size_t offset) override;
+ unsigned offsetOf(void* pointerIntoCode) override;
+ size_t size() override;
+ bool contains(void*) override;
+
+ void initializeB3Code(CodeRef);
+ void initializeB3Byproducts(std::unique_ptr<B3::OpaqueByproducts>);
+ void initializeAddressForCall(CodePtr);
+ void initializeArityCheckEntrypoint(CodeRef);
- CodePtr addressForCall();
- void* executableAddressAtOffset(size_t offset);
- void* dataAddressAtOffset(size_t offset);
- unsigned offsetOf(void* pointerIntoCode);
- size_t size();
- bool contains(void*);
-
- void initializeExitThunks(CodeRef);
- void addHandle(PassRefPtr<ExecutableMemoryHandle>);
- void addDataSection(RefCountedArray<LSectionWord>);
- void initializeCode(CodeRef entrypoint);
-
- const Vector<RefPtr<ExecutableMemoryHandle>>& handles() const { return m_handles; }
- const Vector<RefCountedArray<LSectionWord>>& dataSections() const { return m_dataSections; }
-
- CodePtr exitThunks();
+ void validateReferences(const TrackedReferences&) override;
+
+ RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex) override;
+
+ std::optional<CodeOrigin> findPC(CodeBlock*, void* pc) override;
+
+ CodeRef b3Code() const { return m_b3Code; }
- JITCode* ftl();
- DFG::CommonData* dfgCommon();
+ JITCode* ftl() override;
+ DFG::CommonData* dfgCommon() override;
+ static ptrdiff_t commonDataOffset() { return OBJECT_OFFSETOF(JITCode, common); }
DFG::CommonData common;
SegmentedVector<OSRExit, 8> osrExit;
- StackMaps stackmaps;
+ SegmentedVector<OSRExitDescriptor, 8> osrExitDescriptors;
+ Vector<std::unique_ptr<LazySlowPath>> lazySlowPaths;
private:
- Vector<RefCountedArray<LSectionWord>> m_dataSections;
- Vector<RefPtr<ExecutableMemoryHandle>> m_handles;
- CodeRef m_entrypoint;
- CodeRef m_exitThunks;
+ CodePtr m_addressForCall;
+ CodeRef m_b3Code;
+ std::unique_ptr<B3::OpaqueByproducts> m_b3Byproducts;
+ CodeRef m_arityCheckEntrypoint;
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLJITCode_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
index a8a99b63a..00ea651e1 100644
--- a/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
+++ b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,10 @@
#include "CodeBlockWithJITType.h"
#include "DFGPlan.h"
+#include "FTLState.h"
#include "FTLThunks.h"
+#include "JSCInlines.h"
+#include "ProfilerDatabase.h"
namespace JSC { namespace FTL {
@@ -45,6 +48,19 @@ JITFinalizer::~JITFinalizer()
{
}
+size_t JITFinalizer::codeSize()
+{
+ size_t result = 0;
+
+ if (b3CodeLinkBuffer)
+ result += b3CodeLinkBuffer->size();
+
+ if (entrypointLinkBuffer)
+ result += entrypointLinkBuffer->size();
+
+ return result;
+}
+
bool JITFinalizer::finalize()
{
RELEASE_ASSERT_NOT_REACHED();
@@ -53,64 +69,22 @@ bool JITFinalizer::finalize()
bool JITFinalizer::finalizeFunction()
{
- for (unsigned i = jitCode->handles().size(); i--;) {
- MacroAssembler::cacheFlush(
- jitCode->handles()[i]->start(), jitCode->handles()[i]->sizeInBytes());
- }
-
- if (exitThunksLinkBuffer) {
- StackMaps::RecordMap recordMap = jitCode->stackmaps.getRecordMap();
-
- for (unsigned i = 0; i < osrExit.size(); ++i) {
- OSRExitCompilationInfo& info = osrExit[i];
- OSRExit& exit = jitCode->osrExit[i];
- StackMaps::RecordMap::iterator iter = recordMap.find(exit.m_stackmapID);
- if (iter == recordMap.end()) {
- // It's OK, it was optimized out.
- continue;
- }
-
- exitThunksLinkBuffer->link(
- info.m_thunkJump,
- CodeLocationLabel(
- m_plan.vm.ftlThunks->getOSRExitGenerationThunk(
- m_plan.vm, Location::forStackmaps(
- &jitCode->stackmaps, iter->value.locations[0])).code()));
- }
-
- jitCode->initializeExitThunks(
- FINALIZE_DFG_CODE(
- *exitThunksLinkBuffer,
- ("FTL exit thunks for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data())));
- } // else this function had no OSR exits, so no exit thunks.
-
- if (sideCodeLinkBuffer) {
- // Side code is for special slow paths that we generate ourselves, like for inline
- // caches.
-
- for (unsigned i = slowPathCalls.size(); i--;) {
- SlowPathCall& call = slowPathCalls[i];
- sideCodeLinkBuffer->link(
- call.call(),
- CodeLocationLabel(m_plan.vm.ftlThunks->getSlowPathCallThunk(m_plan.vm, call.key()).code()));
- }
-
- jitCode->addHandle(FINALIZE_DFG_CODE(
- *sideCodeLinkBuffer,
- ("FTL side code for %s",
- toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data()))
- .executableMemory());
- }
+ bool dumpDisassembly = shouldDumpDisassembly() || Options::asyncDisassembly();
- MacroAssemblerCodePtr withArityCheck;
- if (arityCheck.isSet())
- withArityCheck = entrypointLinkBuffer->locationOf(arityCheck);
- jitCode->initializeCode(
- FINALIZE_DFG_CODE(
- *entrypointLinkBuffer,
- ("FTL entrypoint thunk for %s with LLVM generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data(), function)));
+ jitCode->initializeB3Code(
+ FINALIZE_CODE_IF(
+ dumpDisassembly, *b3CodeLinkBuffer,
+ ("FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data())));
+
+ jitCode->initializeArityCheckEntrypoint(
+ FINALIZE_CODE_IF(
+ dumpDisassembly, *entrypointLinkBuffer,
+ ("FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data(), function)));
- m_plan.codeBlock->setJITCode(jitCode, withArityCheck);
+ m_plan.codeBlock->setJITCode(*jitCode);
+
+ if (m_plan.compilation)
+ m_plan.vm->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock, *m_plan.compilation);
return true;
}
diff --git a/Source/JavaScriptCore/ftl/FTLJITFinalizer.h b/Source/JavaScriptCore/ftl/FTLJITFinalizer.h
index a7ecd9486..630c3b44e 100644
--- a/Source/JavaScriptCore/ftl/FTLJITFinalizer.h
+++ b/Source/JavaScriptCore/ftl/FTLJITFinalizer.h
@@ -23,38 +23,46 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLJITFinalizer_h
-#define FTLJITFinalizer_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
#include "DFGFinalizer.h"
#include "FTLGeneratedFunction.h"
#include "FTLJITCode.h"
-#include "FTLOSRExitCompilationInfo.h"
#include "FTLSlowPathCall.h"
-#include "LLVMAPI.h"
#include "LinkBuffer.h"
#include "MacroAssembler.h"
namespace JSC { namespace FTL {
+class OutOfLineCodeInfo {
+public:
+ OutOfLineCodeInfo(std::unique_ptr<LinkBuffer> linkBuffer, const char* codeDescription)
+ : m_linkBuffer(WTFMove(linkBuffer))
+ , m_codeDescription(codeDescription)
+ {
+ }
+
+ std::unique_ptr<LinkBuffer> m_linkBuffer;
+ const char* m_codeDescription;
+};
+
class JITFinalizer : public DFG::Finalizer {
public:
JITFinalizer(DFG::Plan&);
virtual ~JITFinalizer();
-
- bool finalize();
- bool finalizeFunction();
- OwnPtr<LinkBuffer> exitThunksLinkBuffer;
- OwnPtr<LinkBuffer> entrypointLinkBuffer;
- OwnPtr<LinkBuffer> sideCodeLinkBuffer;
- Vector<SlowPathCall> slowPathCalls; // Calls inside the side code.
- Vector<OSRExitCompilationInfo> osrExit;
- MacroAssembler::Label arityCheck;
+ size_t codeSize() override;
+ bool finalize() override;
+ bool finalizeFunction() override;
+
+ std::unique_ptr<LinkBuffer> b3CodeLinkBuffer;
+
+ // Eventually, we can get rid of this with B3.
+ std::unique_ptr<LinkBuffer> entrypointLinkBuffer;
+
+ Vector<CCallHelpers::Jump> lazySlowPathGeneratorJumps;
GeneratedFunction function;
RefPtr<JITCode> jitCode;
};
@@ -62,6 +70,3 @@ public:
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLJITFinalizer_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp b/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp
new file mode 100644
index 000000000..9bdc06da8
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLLazySlowPath.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "FTLSlowPathCall.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace FTL {
+
+LazySlowPath::LazySlowPath(
+ CodeLocationJump patchableJump, CodeLocationLabel done,
+ CodeLocationLabel exceptionTarget,
+ const RegisterSet& usedRegisters, CallSiteIndex callSiteIndex, RefPtr<Generator> generator
+ )
+ : m_patchableJump(patchableJump)
+ , m_done(done)
+ , m_exceptionTarget(exceptionTarget)
+ , m_usedRegisters(usedRegisters)
+ , m_callSiteIndex(callSiteIndex)
+ , m_generator(generator)
+{
+}
+
+LazySlowPath::~LazySlowPath()
+{
+}
+
+void LazySlowPath::generate(CodeBlock* codeBlock)
+{
+ RELEASE_ASSERT(!m_stub);
+
+ VM& vm = *codeBlock->vm();
+
+ CCallHelpers jit(&vm, codeBlock);
+ GenerationParams params;
+ CCallHelpers::JumpList exceptionJumps;
+ params.exceptionJumps = m_exceptionTarget ? &exceptionJumps : nullptr;
+ params.lazySlowPath = this;
+
+ m_generator->run(jit, params);
+
+ LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationMustSucceed);
+ linkBuffer.link(params.doneJumps, m_done);
+ if (m_exceptionTarget)
+ linkBuffer.link(exceptionJumps, m_exceptionTarget);
+ m_stub = FINALIZE_CODE_FOR(codeBlock, linkBuffer, ("Lazy slow path call stub"));
+
+ MacroAssembler::repatchJump(m_patchableJump, CodeLocationLabel(m_stub.code()));
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLLazySlowPath.h b/Source/JavaScriptCore/ftl/FTLLazySlowPath.h
new file mode 100644
index 000000000..83b5baf0a
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLLazySlowPath.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+#include "CodeLocation.h"
+#include "GPRInfo.h"
+#include "MacroAssemblerCodeRef.h"
+#include "RegisterSet.h"
+#include "ScratchRegisterAllocator.h"
+#include <wtf/SharedTask.h>
+
+namespace JSC { namespace FTL {
+
+// A LazySlowPath is an object that represents a piece of code that is part of FTL generated code
+// that will be generated lazily. It holds all of the important information needed to generate that
+// code, such as where to link jumps to and which registers are in use. It also has a reference to a
+// SharedTask that will do the actual code generation. That SharedTask may have additional data, like
+// which registers hold the inputs or outputs.
+class LazySlowPath {
+ WTF_MAKE_NONCOPYABLE(LazySlowPath);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ struct GenerationParams {
+ // Extra parameters to the GeneratorFunction are made into fields of this struct, so that if
+ // we add new parameters, we don't have to change all of the users.
+ CCallHelpers::JumpList doneJumps;
+ CCallHelpers::JumpList* exceptionJumps;
+ LazySlowPath* lazySlowPath;
+ };
+
+ typedef void GeneratorFunction(CCallHelpers&, GenerationParams&);
+ typedef SharedTask<GeneratorFunction> Generator;
+
+ template<typename Functor>
+ static RefPtr<Generator> createGenerator(const Functor& functor)
+ {
+ return createSharedTask<GeneratorFunction>(functor);
+ }
+
+ LazySlowPath(
+ CodeLocationJump patchableJump, CodeLocationLabel done,
+ CodeLocationLabel exceptionTarget, const RegisterSet& usedRegisters,
+ CallSiteIndex, RefPtr<Generator>
+ );
+
+ ~LazySlowPath();
+
+ CodeLocationJump patchableJump() const { return m_patchableJump; }
+ CodeLocationLabel done() const { return m_done; }
+ const RegisterSet& usedRegisters() const { return m_usedRegisters; }
+ CallSiteIndex callSiteIndex() const { return m_callSiteIndex; }
+
+ void generate(CodeBlock*);
+
+ MacroAssemblerCodeRef stub() const { return m_stub; }
+
+private:
+ CodeLocationJump m_patchableJump;
+ CodeLocationLabel m_done;
+ CodeLocationLabel m_exceptionTarget;
+ RegisterSet m_usedRegisters;
+ CallSiteIndex m_callSiteIndex;
+ MacroAssemblerCodeRef m_stub;
+ RefPtr<Generator> m_generator;
+};
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
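
Editor's note (sketch, not part of the patch): the shape of a generator handed to a LazySlowPath. The body only runs the first time the patchable jump is taken; generate() then links doneJumps back to the fast path and repatches the jump to point at the new stub.

    RefPtr<LazySlowPath::Generator> generator = LazySlowPath::createGenerator(
        [] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
            // ... emit the actual slow-path code here ...
            params.doneJumps.append(jit.jump()); // return to the fast path when done
        });
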
diff --git a/Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h b/Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h
new file mode 100644
index 000000000..29214ac39
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "CodeLocation.h"
+#include "FTLLazySlowPath.h"
+#include "FTLSlowPathCall.h"
+#include "FTLThunks.h"
+#include "GPRInfo.h"
+#include "MacroAssemblerCodeRef.h"
+#include "RegisterSet.h"
+
+namespace JSC { namespace FTL {
+
+template<typename ResultType, typename... ArgumentTypes>
+RefPtr<LazySlowPath::Generator> createLazyCallGenerator(
+ FunctionPtr function, ResultType result, ArgumentTypes... arguments)
+{
+ return LazySlowPath::createGenerator(
+ [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
+ callOperation(
+ params.lazySlowPath->usedRegisters(), jit, params.lazySlowPath->callSiteIndex(),
+ params.exceptionJumps, function, result, arguments...);
+ params.doneJumps.append(jit.jump());
+ });
+}
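+
+// An illustrative sketch of how this is typically wired up (the operation and registers here are
+// hypothetical): the returned generator emits a call to the given function using the register
+// state recorded in the LazySlowPath, and then jumps back to the main path.
+//
+//     RefPtr<LazySlowPath::Generator> generator = createLazyCallGenerator(
+//         FunctionPtr(someOperation), resultGPR, CCallHelpers::TrustedImmPtr(someArgument));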
+
+} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLLink.cpp b/Source/JavaScriptCore/ftl/FTLLink.cpp
index 85050d151..d11b2a9b6 100644
--- a/Source/JavaScriptCore/ftl/FTLLink.cpp
+++ b/Source/JavaScriptCore/ftl/FTLLink.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,129 +29,149 @@
#if ENABLE(FTL_JIT)
#include "CCallHelpers.h"
-#include "CallFrameInlines.h"
#include "CodeBlockWithJITType.h"
#include "DFGCommon.h"
#include "FTLJITCode.h"
#include "JITOperations.h"
-#include "JITStubs.h"
-#include "LLVMAPI.h"
#include "LinkBuffer.h"
+#include "JSCInlines.h"
+#include "ProfilerCompilation.h"
#include "VirtualRegister.h"
namespace JSC { namespace FTL {
using namespace DFG;
-static void compileEntry(CCallHelpers& jit)
-{
- jit.preserveReturnAddressAfterCall(GPRInfo::regT2);
- jit.emitPutReturnPCToCallFrameHeader(GPRInfo::regT2);
- jit.emitPutImmediateToCallFrameHeader(jit.codeBlock(), JSStack::CodeBlock);
-}
-
void link(State& state)
{
- CodeBlock* codeBlock = state.graph.m_codeBlock;
+ Graph& graph = state.graph;
+ CodeBlock* codeBlock = graph.m_codeBlock;
+ VM& vm = graph.m_vm;
- // LLVM will create its own jump tables as needed.
+ // B3 will create its own jump tables as needed.
codeBlock->clearSwitchJumpTables();
+
+ state.jitCode->common.requiredRegisterCountForExit = graph.requiredRegisterCountForExit();
- state.jitCode->common.frameRegisterCount = state.graph.frameRegisterCount();
- state.jitCode->common.requiredRegisterCountForExit = state.graph.requiredRegisterCountForExit();
-
- if (!state.graph.m_inlineCallFrames->isEmpty())
- state.jitCode->common.inlineCallFrames = std::move(state.graph.m_inlineCallFrames);
+ if (!graph.m_plan.inlineCallFrames->isEmpty())
+ state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames;
+ graph.registerFrozenValues();
+
// Create the entrypoint. Note that we use this entrypoint totally differently
// depending on whether we're doing OSR entry or not.
- // FIXME: Except for OSR entry, this is a total kludge - LLVM should just use our
- // calling convention.
- // https://bugs.webkit.org/show_bug.cgi?id=113621
- CCallHelpers jit(&state.graph.m_vm, codeBlock);
+ CCallHelpers jit(&vm, codeBlock);
- OwnPtr<LinkBuffer> linkBuffer;
- CCallHelpers::Label arityCheck;
- switch (state.graph.m_plan.mode) {
- case FTLMode: {
- compileEntry(jit);
+ std::unique_ptr<LinkBuffer> linkBuffer;
+
+ CCallHelpers::Address frame = CCallHelpers::Address(
+ CCallHelpers::stackPointerRegister, -static_cast<int32_t>(AssemblyHelpers::prologueStackPointerDelta()));
- // This part is only necessary for functions. We currently only compile functions.
+ if (Profiler::Compilation* compilation = graph.compilation()) {
+ compilation->addDescription(
+ Profiler::OriginStack(),
+ toCString("Generated FTL JIT code for ", CodeBlockWithJITType(codeBlock, JITCode::FTLJIT), ", instruction count = ", graph.m_codeBlock->instructionCount(), ":\n"));
- CCallHelpers::Label fromArityCheck = jit.label();
+ graph.ensureDominators();
+ graph.ensureNaturalLoops();
- // Plant a check that sufficient space is available in the JSStack.
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
- jit.addPtr(
- CCallHelpers::TrustedImm32(virtualRegisterForLocal(state.jitCode->common.requiredRegisterCountForExit).offset() * sizeof(Register)),
- GPRInfo::callFrameRegister, GPRInfo::regT1);
- CCallHelpers::Jump stackCheck = jit.branchPtr(
- CCallHelpers::Above,
- CCallHelpers::AbsoluteAddress(state.graph.m_vm.addressOfJSStackLimit()),
- GPRInfo::regT1);
- CCallHelpers::Label fromStackCheck = jit.label();
+ const char* prefix = " ";
- jit.setupArgumentsExecState();
- jit.move(
- CCallHelpers::TrustedImmPtr(reinterpret_cast<void*>(state.generatedFunction)),
- GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
- jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::regT1);
- jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
- jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
- jit.ret();
+ DumpContext dumpContext;
+ StringPrintStream out;
+ Node* lastNode = 0;
+ for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) {
+ BasicBlock* block = graph.block(blockIndex);
+ if (!block)
+ continue;
+
+ graph.dumpBlockHeader(out, prefix, block, Graph::DumpLivePhisOnly, &dumpContext);
+ compilation->addDescription(Profiler::OriginStack(), out.toCString());
+ out.reset();
+
+ for (size_t nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
+ Node* node = block->at(nodeIndex);
+
+ Profiler::OriginStack stack;
+
+ if (node->origin.semantic.isSet()) {
+ stack = Profiler::OriginStack(
+ *vm.m_perBytecodeProfiler, codeBlock, node->origin.semantic);
+ }
+
+ if (graph.dumpCodeOrigin(out, prefix, lastNode, node, &dumpContext)) {
+ compilation->addDescription(stack, out.toCString());
+ out.reset();
+ }
+
+ graph.dump(out, prefix, node, &dumpContext);
+ compilation->addDescription(stack, out.toCString());
+ out.reset();
+
+ if (node->origin.semantic.isSet())
+ lastNode = node;
+ }
+ }
- stackCheck.link(&jit);
- jit.move(CCallHelpers::TrustedImmPtr(codeBlock), GPRInfo::argumentGPR1);
- jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- jit.store32(
- CCallHelpers::TrustedImm32(CallFrame::Location::encodeAsBytecodeOffset(0)),
- CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
- jit.storePtr(GPRInfo::callFrameRegister, &state.graph.m_vm.topCallFrame);
- CCallHelpers::Call callStackCheck = jit.call();
-#if !ASSERT_DISABLED
- // FIXME: need to make this call register with exception handling somehow. This is
- // part of a bigger problem: FTL should be able to handle exceptions.
- // https://bugs.webkit.org/show_bug.cgi?id=113622
- // Until then, use a JIT ASSERT.
- jit.load64(state.graph.m_vm.addressOfException(), GPRInfo::regT0);
- jit.jitAssertIsNull(GPRInfo::regT0);
-#endif
- jit.jump(fromStackCheck);
+ dumpContext.dump(out, prefix);
+ compilation->addDescription(Profiler::OriginStack(), out.toCString());
+ out.reset();
+
+ out.print(" Disassembly:\n");
+ out.print(" <not implemented yet>\n");
+ compilation->addDescription(Profiler::OriginStack(), out.toCString());
+ out.reset();
- arityCheck = jit.label();
- compileEntry(jit);
+ state.jitCode->common.compilation = compilation;
+ }
+
+ switch (graph.m_plan.mode) {
+ case FTLMode: {
+ CCallHelpers::JumpList mainPathJumps;
+
jit.load32(
- CCallHelpers::payloadFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)),
+ frame.withOffset(sizeof(Register) * CallFrameSlot::argumentCount),
GPRInfo::regT1);
- jit.branch32(
+ mainPathJumps.append(jit.branch32(
CCallHelpers::AboveOrEqual, GPRInfo::regT1,
- CCallHelpers::TrustedImm32(codeBlock->numParameters()))
- .linkTo(fromArityCheck, &jit);
+ CCallHelpers::TrustedImm32(codeBlock->numParameters())));
+ jit.emitFunctionPrologue();
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- jit.store32(
- CCallHelpers::TrustedImm32(CallFrame::Location::encodeAsBytecodeOffset(0)),
- CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
- jit.storePtr(GPRInfo::callFrameRegister, &state.graph.m_vm.topCallFrame);
+ jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
CCallHelpers::Call callArityCheck = jit.call();
-#if !ASSERT_DISABLED
- // FIXME: need to make this call register with exception handling somehow. This is
- // part of a bigger problem: FTL should be able to handle exceptions.
- // https://bugs.webkit.org/show_bug.cgi?id=113622
- // Until then, use a JIT ASSERT.
- jit.load64(state.graph.m_vm.addressOfException(), GPRInfo::regT1);
- jit.jitAssertIsNull(GPRInfo::regT1);
-#endif
- if (GPRInfo::returnValueGPR != GPRInfo::regT0)
- jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
- jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT0).linkTo(fromArityCheck, &jit);
+
+ auto noException = jit.branch32(CCallHelpers::GreaterThanOrEqual, GPRInfo::returnValueGPR, CCallHelpers::TrustedImm32(0));
+ jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+ jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ CCallHelpers::Call callLookupExceptionHandlerFromCallerFrame = jit.call();
+ jit.jumpToExceptionHandler();
+ noException.link(&jit);
+
+ if (!ASSERT_DISABLED) {
+ jit.load64(vm.addressOfException(), GPRInfo::regT1);
+ jit.jitAssertIsNull(GPRInfo::regT1);
+ }
+
+ jit.move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
+ jit.emitFunctionEpilogue();
+ mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::argumentGPR0));
+ jit.emitFunctionPrologue();
CCallHelpers::Call callArityFixup = jit.call();
- jit.jump(fromArityCheck);
-
- linkBuffer = adoptPtr(new LinkBuffer(state.graph.m_vm, &jit, codeBlock, JITCompilationMustSucceed));
- linkBuffer->link(callStackCheck, operationStackCheck);
+ jit.emitFunctionEpilogue();
+ mainPathJumps.append(jit.jump());
+
+ linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ state.allocationFailed = true;
+ return;
+ }
linkBuffer->link(callArityCheck, codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
- linkBuffer->link(callArityFixup, FunctionPtr((state.graph.m_vm.getCTIStub(arityFixup)).code().executableAddress()));
+ linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, lookupExceptionHandlerFromCallerFrame);
+ linkBuffer->link(callArityFixup, FunctionPtr((vm.getCTIStub(arityFixupGenerator)).code().executableAddress()));
+ linkBuffer->link(mainPathJumps, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));
+
+ state.jitCode->initializeAddressForCall(MacroAssemblerCodePtr(bitwise_cast<void*>(state.generatedFunction)));
break;
}
@@ -159,19 +179,19 @@ void link(State& state)
// We jump to here straight from DFG code, after having boxed up all of the
// values into the scratch buffer. Everything should be good to go - at this
// point we've even done the stack check. Basically we just have to make the
- // call to the LLVM-generated code.
- jit.setupArgumentsExecState();
- jit.move(
- CCallHelpers::TrustedImmPtr(reinterpret_cast<void*>(state.generatedFunction)),
- GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
- jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::regT1);
- jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
- jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
- jit.ret();
+ // call to the B3-generated code.
+ CCallHelpers::Label start = jit.label();
+ jit.emitFunctionEpilogue();
+ CCallHelpers::Jump mainPathJump = jit.jump();
- linkBuffer = adoptPtr(new LinkBuffer(
- state.graph.m_vm, &jit, codeBlock, JITCompilationMustSucceed));
+ linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail);
+ if (linkBuffer->didFailToAllocate()) {
+ state.allocationFailed = true;
+ return;
+ }
+ linkBuffer->link(mainPathJump, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));
+
+ state.jitCode->initializeAddressForCall(linkBuffer->locationOf(start));
break;
}
@@ -180,9 +200,8 @@ void link(State& state)
break;
}
- state.finalizer->entrypointLinkBuffer = linkBuffer.release();
+ state.finalizer->entrypointLinkBuffer = WTFMove(linkBuffer);
state.finalizer->function = state.generatedFunction;
- state.finalizer->arityCheck = arityCheck;
state.finalizer->jitCode = state.jitCode;
}
diff --git a/Source/JavaScriptCore/ftl/FTLLink.h b/Source/JavaScriptCore/ftl/FTLLink.h
index d49e169fc..d3805737c 100644
--- a/Source/JavaScriptCore/ftl/FTLLink.h
+++ b/Source/JavaScriptCore/ftl/FTLLink.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLLink_h
-#define FTLLink_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -39,6 +36,3 @@ void link(State&);
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLLink_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLLocation.cpp b/Source/JavaScriptCore/ftl/FTLLocation.cpp
new file mode 100644
index 000000000..ac8f23ef8
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLLocation.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLLocation.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "B3ValueRep.h"
+#include "FTLSaveRestore.h"
+#include "RegisterSet.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/DataLog.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace FTL {
+
+using namespace B3;
+
+Location Location::forValueRep(const ValueRep& rep)
+{
+ switch (rep.kind()) {
+ case ValueRep::Register:
+ return forRegister(rep.reg(), 0);
+ case ValueRep::Stack:
+ return forIndirect(GPRInfo::callFrameRegister, rep.offsetFromFP());
+ case ValueRep::Constant:
+ return forConstant(rep.value());
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return Location();
+ }
+}
+
+void Location::dump(PrintStream& out) const
+{
+ out.print("(", kind());
+ if (hasReg())
+ out.print(", ", reg());
+ if (hasOffset())
+ out.print(", ", offset());
+ if (hasAddend())
+ out.print(", ", addend());
+ if (hasConstant())
+ out.print(", ", constant());
+ out.print(")");
+}
+
+bool Location::involvesGPR() const
+{
+ return isGPR() || kind() == Indirect;
+}
+
+bool Location::isGPR() const
+{
+ return kind() == Register && reg().isGPR();
+}
+
+GPRReg Location::gpr() const
+{
+ return reg().gpr();
+}
+
+bool Location::isFPR() const
+{
+ return kind() == Register && reg().isFPR();
+}
+
+FPRReg Location::fpr() const
+{
+ return reg().fpr();
+}
+
+void Location::restoreInto(MacroAssembler& jit, char* savedRegisters, GPRReg result, unsigned numFramesToPop) const
+{
+ if (involvesGPR() && RegisterSet::stackRegisters().get(gpr())) {
+ // Make the result GPR contain the appropriate stack register.
+ if (numFramesToPop) {
+ jit.move(MacroAssembler::framePointerRegister, result);
+
+ for (unsigned i = numFramesToPop - 1; i--;)
+ jit.loadPtr(result, result);
+
+ if (gpr() == MacroAssembler::framePointerRegister)
+ jit.loadPtr(result, result);
+ else
+ jit.addPtr(MacroAssembler::TrustedImmPtr(sizeof(void*) * 2), result);
+ } else
+ jit.move(gpr(), result);
+ }
+
+ if (isGPR()) {
+ if (RegisterSet::stackRegisters().get(gpr())) {
+ // Already restored into result.
+ } else
+ jit.load64(savedRegisters + offsetOfGPR(gpr()), result);
+
+ if (addend())
+ jit.add64(MacroAssembler::TrustedImm32(addend()), result);
+ return;
+ }
+
+ if (isFPR()) {
+ jit.load64(savedRegisters + offsetOfFPR(fpr()), result);
+ ASSERT(!addend());
+ return;
+ }
+
+ switch (kind()) {
+ case Register:
+ // B3 used some register that we don't know about!
+ dataLog("Unrecognized location: ", *this, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+
+ case Indirect:
+ if (RegisterSet::stackRegisters().get(gpr())) {
+ // The stack register is already recovered into result.
+ jit.load64(MacroAssembler::Address(result, offset()), result);
+ return;
+ }
+
+ jit.load64(savedRegisters + offsetOfGPR(gpr()), result);
+ jit.load64(MacroAssembler::Address(result, offset()), result);
+ return;
+
+ case Constant:
+ jit.move(MacroAssembler::TrustedImm64(constant()), result);
+ return;
+
+ case Unprocessed:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+GPRReg Location::directGPR() const
+{
+ RELEASE_ASSERT(!addend());
+ return gpr();
+}
+
+} } // namespace JSC::FTL
+
+namespace WTF {
+
+using namespace JSC::FTL;
+
+void printInternal(PrintStream& out, JSC::FTL::Location::Kind kind)
+{
+ switch (kind) {
+ case Location::Unprocessed:
+ out.print("Unprocessed");
+ return;
+ case Location::Register:
+ out.print("Register");
+ return;
+ case Location::Indirect:
+ out.print("Indirect");
+ return;
+ case Location::Constant:
+ out.print("Constant");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLLocation.h b/Source/JavaScriptCore/ftl/FTLLocation.h
new file mode 100644
index 000000000..46772b9f5
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLLocation.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "DFGCommon.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "Reg.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+namespace B3 {
+class ValueRep;
+} // namespace B3
+
+namespace FTL {
+
+class Location {
+public:
+ enum Kind {
+ Unprocessed,
+ Register,
+ Indirect,
+ Constant
+ };
+
+ Location()
+ : m_kind(Unprocessed)
+ {
+ u.constant = 0;
+ }
+
+ Location(WTF::HashTableDeletedValueType)
+ : m_kind(Unprocessed)
+ {
+ u.constant = 1;
+ }
+
+ static Location forRegister(Reg reg, int32_t addend)
+ {
+ Location result;
+ result.m_kind = Register;
+ result.u.variable.regIndex = reg.index();
+ result.u.variable.offset = addend;
+ return result;
+ }
+
+ static Location forIndirect(Reg reg, int32_t offset)
+ {
+ Location result;
+ result.m_kind = Indirect;
+ result.u.variable.regIndex = reg.index();
+ result.u.variable.offset = offset;
+ return result;
+ }
+
+ static Location forConstant(int64_t constant)
+ {
+ Location result;
+ result.m_kind = Constant;
+ result.u.constant = constant;
+ return result;
+ }
+
+ static Location forValueRep(const B3::ValueRep&);
+
+ Kind kind() const { return m_kind; }
+
+ bool hasReg() const { return kind() == Register || kind() == Indirect; }
+ Reg reg() const
+ {
+ ASSERT(hasReg());
+ return Reg::fromIndex(u.variable.regIndex);
+ }
+
+ bool hasOffset() const { return kind() == Indirect; }
+ int32_t offset() const
+ {
+ ASSERT(hasOffset());
+ return u.variable.offset;
+ }
+
+ bool hasAddend() const { return kind() == Register; }
+ int32_t addend() const
+ {
+ ASSERT(hasAddend());
+ return u.variable.offset;
+ }
+
+ bool hasConstant() const { return kind() == Constant; }
+ int64_t constant() const
+ {
+ ASSERT(hasConstant());
+ return u.constant;
+ }
+
+ explicit operator bool() const { return kind() != Unprocessed || u.variable.offset; }
+
+ bool operator!() const { return !static_cast<bool>(*this); }
+
+ bool isHashTableDeletedValue() const { return kind() == Unprocessed && u.variable.offset; }
+
+ bool operator==(const Location& other) const
+ {
+ return m_kind == other.m_kind
+ && u.constant == other.u.constant;
+ }
+
+ unsigned hash() const
+ {
+ unsigned result = m_kind;
+
+ switch (kind()) {
+ case Unprocessed:
+ result ^= u.variable.offset;
+ break;
+
+ case Register:
+ result ^= u.variable.regIndex;
+ break;
+
+ case Indirect:
+ result ^= u.variable.regIndex;
+ result ^= u.variable.offset;
+ break;
+
+ case Constant:
+ result ^= WTF::IntHash<int64_t>::hash(u.constant);
+ break;
+ }
+
+ return WTF::IntHash<unsigned>::hash(result);
+ }
+
+ void dump(PrintStream&) const;
+
+ bool isGPR() const;
+ bool involvesGPR() const;
+ GPRReg gpr() const;
+ GPRReg directGPR() const; // Get the GPR and assert that there is no addend.
+
+ bool isFPR() const;
+ FPRReg fpr() const;
+
+ // Assuming that all registers are saved to the savedRegisters buffer according
+ // to FTLSaveRestore convention, this loads the value into the given register.
+ // The code that this generates isn't exactly super fast. This assumes that FP
+ // and SP contain the same values that they would have contained in the original
+ // frame, or that you've done one or more canonically formed calls (i.e. can
+ // restore the FP by following the call frame linked list numFramesToPop times,
+ // and SP can be recovered by popping FP numFramesToPop-1 times and adding 16).
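+    // Roughly speaking (see FTLLocation.cpp for the exact sequence): stack registers (FP/SP) are
+    // recovered by walking the call frame linked list numFramesToPop times, other registers are
+    // reloaded from the savedRegisters buffer, and Constant locations simply materialize the value.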
+ void restoreInto(MacroAssembler&, char* savedRegisters, GPRReg result, unsigned numFramesToPop = 0) const;
+
+private:
+ Kind m_kind;
+ union {
+ int64_t constant;
+ struct {
+ unsigned regIndex;
+ int32_t offset;
+ } variable;
+ } u;
+};
+
+struct LocationHash {
+ static unsigned hash(const Location& key) { return key.hash(); }
+ static bool equal(const Location& a, const Location& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::FTL
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::FTL::Location::Kind);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::FTL::Location> {
+ typedef JSC::FTL::LocationHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::FTL::Location> : SimpleClassHashTraits<JSC::FTL::Location> { };
+
+} // namespace WTF
+
+#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
new file mode 100644
index 000000000..399c85127
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
@@ -0,0 +1,13970 @@
+/*
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLLowerDFGToB3.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "AirGenerationContext.h"
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3CheckValue.h"
+#include "B3FenceValue.h"
+#include "B3PatchpointValue.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3ValueInlines.h"
+#include "CallFrameShuffler.h"
+#include "CodeBlockWithJITType.h"
+#include "DFGAbstractInterpreterInlines.h"
+#include "DFGCapabilities.h"
+#include "DFGDominators.h"
+#include "DFGInPlaceAbstractState.h"
+#include "DFGOSRAvailabilityAnalysisPhase.h"
+#include "DFGOSRExitFuzz.h"
+#include "DOMJITPatchpoint.h"
+#include "DirectArguments.h"
+#include "FTLAbstractHeapRepository.h"
+#include "FTLAvailableRecovery.h"
+#include "FTLDOMJITPatchpointParams.h"
+#include "FTLExceptionTarget.h"
+#include "FTLForOSREntryJITCode.h"
+#include "FTLFormattedValue.h"
+#include "FTLLazySlowPathCall.h"
+#include "FTLLoweredNodeValue.h"
+#include "FTLOperations.h"
+#include "FTLOutput.h"
+#include "FTLPatchpointExceptionHandle.h"
+#include "FTLThunks.h"
+#include "FTLWeightedTarget.h"
+#include "JITAddGenerator.h"
+#include "JITBitAndGenerator.h"
+#include "JITBitOrGenerator.h"
+#include "JITBitXorGenerator.h"
+#include "JITDivGenerator.h"
+#include "JITInlineCacheGenerator.h"
+#include "JITLeftShiftGenerator.h"
+#include "JITMathIC.h"
+#include "JITMulGenerator.h"
+#include "JITRightShiftGenerator.h"
+#include "JITSubGenerator.h"
+#include "JSCInlines.h"
+#include "JSGeneratorFunction.h"
+#include "JSLexicalEnvironment.h"
+#include "JSMap.h"
+#include "OperandsInlines.h"
+#include "ScopedArguments.h"
+#include "ScopedArgumentsTable.h"
+#include "ScratchRegisterAllocator.h"
+#include "SetupVarargsFrame.h"
+#include "ShadowChicken.h"
+#include "StructureStubInfo.h"
+#include "VirtualRegister.h"
+#include "Watchdog.h"
+#include <atomic>
+#if !OS(WINDOWS)
+#include <dlfcn.h>
+#endif
+#include <unordered_set>
+#include <wtf/Box.h>
+#include <wtf/ProcessID.h>
+
+namespace JSC { namespace FTL {
+
+using namespace B3;
+using namespace DFG;
+
+namespace {
+
+std::atomic<int> compileCounter;
+
+#if !ASSERT_DISABLED
+NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
+ CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
+{
+ dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
+ if (nodeIndex != UINT_MAX)
+ dataLog(", node @", nodeIndex);
+ dataLog(".\n");
+ CRASH();
+}
+#endif
+
+// Using this instead of typeCheck() helps to reduce the load on B3, by creating
+// significantly less dead code.
+#define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
+ FormattedValue _ftc_lowValue = (lowValue); \
+ Edge _ftc_highValue = (highValue); \
+ SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
+ if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
+ break; \
+ typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
+ } while (false)
+
+#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
+ FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
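+
+// For example, the RealNumberUse case of compileDoubleRep() below uses this to OSR-exit when a
+// value speculated to be a real number turns out to be neither a double nor an int32:
+//
+//     FTL_TYPE_CHECK(
+//         jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
+//         isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));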
+
+class LowerDFGToB3 {
+ WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
+public:
+ LowerDFGToB3(State& state)
+ : m_graph(state.graph)
+ , m_ftlState(state)
+ , m_out(state)
+ , m_proc(*state.proc)
+ , m_availabilityCalculator(m_graph)
+ , m_state(state.graph)
+ , m_interpreter(state.graph, m_state)
+ {
+ }
+
+ void lower()
+ {
+ State* state = &m_ftlState;
+
+ CString name;
+ if (verboseCompilationEnabled()) {
+ name = toCString(
+ "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
+ "_", codeBlock()->hash());
+ } else
+ name = "jsBody";
+
+ m_graph.ensureDominators();
+
+ if (verboseCompilationEnabled())
+ dataLog("Function ready, beginning lowering.\n");
+
+ m_out.initialize(m_heaps);
+
+ // We use prologue frequency for all of the initialization code.
+ m_out.setFrequency(1);
+
+ m_prologue = m_out.newBlock();
+ m_handleExceptions = m_out.newBlock();
+
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
+ m_highBlock = m_graph.block(blockIndex);
+ if (!m_highBlock)
+ continue;
+ m_out.setFrequency(m_highBlock->executionCount);
+ m_blocks.add(m_highBlock, m_out.newBlock());
+ }
+
+        // Back to prologue frequency for any blocks that get sneakily created in the initialization code.
+ m_out.setFrequency(1);
+
+ m_out.appendTo(m_prologue, m_handleExceptions);
+ m_out.initializeConstants(m_proc, m_prologue);
+ createPhiVariables();
+
+ size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
+ B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
+ m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
+ state->capturedValue = capturedBase->slot();
+
+ auto preOrder = m_graph.blocksInPreOrder();
+
+ m_callFrame = m_out.framePointer();
+ m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
+ m_tagMask = m_out.constInt64(TagMask);
+
+ // Make sure that B3 knows that we really care about the mask registers. This forces the
+ // constants to be materialized in registers.
+ m_proc.addFastConstant(m_tagTypeNumber->key());
+ m_proc.addFastConstant(m_tagMask->key());
+
+ // We don't want the CodeBlock to have a weak pointer to itself because
+ // that would cause it to always get collected.
+ m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
+
+ // Stack Overflow Check.
+ unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
+ MacroAssembler::AbsoluteAddress addressOfStackLimit(vm().addressOfSoftStackLimit());
+ PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
+ CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
+ stackOverflowHandler->appendSomeRegister(m_callFrame);
+ stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
+ stackOverflowHandler->numGPScratchRegisters = 1;
+ stackOverflowHandler->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ GPRReg fp = params[0].gpr();
+ GPRReg scratch = params.gpScratch(0);
+
+ unsigned ftlFrameSize = params.proc().frameSize();
+
+ jit.addPtr(MacroAssembler::TrustedImm32(-std::max(exitFrameSize, ftlFrameSize)), fp, scratch);
+ MacroAssembler::Jump stackOverflow = jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch);
+
+ params.addLatePath([=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ stackOverflow.link(&jit);
+ jit.store32(
+ MacroAssembler::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+ jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
+ CCallHelpers::Call throwCall = jit.call();
+
+ jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
+ jit.jumpToExceptionHandler();
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(throwCall, FunctionPtr(operationThrowStackOverflowError));
+ linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
+ });
+ });
+ });
+
+ LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
+ // Check Arguments.
+ availabilityMap().clear();
+ availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
+ for (unsigned i = codeBlock()->numParameters(); i--;) {
+ availabilityMap().m_locals.argument(i) =
+ Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
+ }
+ m_node = nullptr;
+ m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
+ for (unsigned i = codeBlock()->numParameters(); i--;) {
+ Node* node = m_graph.m_arguments[i];
+ VirtualRegister operand = virtualRegisterForArgument(i);
+
+ LValue jsValue = m_out.load64(addressFor(operand));
+
+ if (node) {
+ DFG_ASSERT(m_graph, node, operand == node->stackAccessData()->machineLocal);
+
+ // This is a hack, but it's an effective one. It allows us to do CSE on the
+ // primordial load of arguments. This assumes that the GetLocal that got put in
+ // place of the original SetArgument doesn't have any effects before it. This
+ // should hold true.
+ m_loadedArgumentValues.add(node, jsValue);
+ }
+
+ switch (m_graph.m_argumentFormats[i]) {
+ case FlushedInt32:
+ speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue));
+ break;
+ case FlushedBoolean:
+ speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue));
+ break;
+ case FlushedCell:
+ speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue));
+ break;
+ case FlushedJSValue:
+ break;
+ default:
+ DFG_CRASH(m_graph, node, "Bad flush format for argument");
+ break;
+ }
+ }
+ m_out.jump(firstDFGBasicBlock);
+
+ m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
+ Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
+ m_out.patchpoint(Void)->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ CCallHelpers::Jump jump = jit.jump();
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler));
+ });
+ });
+ m_out.unreachable();
+
+ for (DFG::BasicBlock* block : preOrder)
+ compileBlock(block);
+
+ // Make sure everything is decorated. This does a bunch of deferred decorating. This has
+ // to happen last because our abstract heaps are generated lazily. They have to be
+ // generated lazily because we have an infiniten number of numbered, indexed, and
+        // generated lazily because we have an infinite number of numbered, indexed, and
+ m_heaps.computeRangesAndDecorateInstructions();
+
+ // We create all Phi's up front, but we may then decide not to compile the basic block
+ // that would have contained one of them. So this creates orphans, which triggers B3
+ // validation failures. Calling this fixes the issue.
+ //
+ // Note that you should avoid the temptation to make this call conditional upon
+ // validation being enabled. B3 makes no guarantees of any kind of correctness when
+ // dealing with IR that would have failed validation. For example, it would be valid to
+ // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
+ // if any orphans were around. We might even have such phases already.
+ m_proc.deleteOrphans();
+
+ // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
+ m_out.applyBlockOrder();
+ }
+
+private:
+
+ void createPhiVariables()
+ {
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ DFG::BasicBlock* block = m_graph.block(blockIndex);
+ if (!block)
+ continue;
+ for (unsigned nodeIndex = block->size(); nodeIndex--;) {
+ Node* node = block->at(nodeIndex);
+ if (node->op() != DFG::Phi)
+ continue;
+ LType type;
+ switch (node->flags() & NodeResultMask) {
+ case NodeResultDouble:
+ type = Double;
+ break;
+ case NodeResultInt32:
+ type = Int32;
+ break;
+ case NodeResultInt52:
+ type = Int64;
+ break;
+ case NodeResultBoolean:
+ type = Int32;
+ break;
+ case NodeResultJS:
+ type = Int64;
+ break;
+ default:
+ DFG_CRASH(m_graph, node, "Bad Phi node result type");
+ break;
+ }
+ m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
+ }
+ }
+ }
+
+ void compileBlock(DFG::BasicBlock* block)
+ {
+ if (!block)
+ return;
+
+ if (verboseCompilationEnabled())
+ dataLog("Compiling block ", *block, "\n");
+
+ m_highBlock = block;
+
+ // Make sure that any blocks created while lowering code in the high block have the frequency of
+ // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
+ // something roughly approximate for things like register allocation.
+ m_out.setFrequency(m_highBlock->executionCount);
+
+ LBasicBlock lowBlock = m_blocks.get(m_highBlock);
+
+ m_nextHighBlock = 0;
+ for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
+ m_nextHighBlock = m_graph.block(nextBlockIndex);
+ if (m_nextHighBlock)
+ break;
+ }
+ m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
+
+ // All of this effort to find the next block gives us the ability to keep the
+ // generated IR in roughly program order. This ought not affect the performance
+ // of the generated code (since we expect B3 to reorder things) but it will
+ // make IR dumps easier to read.
+ m_out.appendTo(lowBlock, m_nextLowBlock);
+
+ if (Options::ftlCrashes())
+ m_out.trap();
+
+ if (!m_highBlock->cfaHasVisited) {
+ if (verboseCompilationEnabled())
+ dataLog("Bailing because CFA didn't reach.\n");
+ crash(m_highBlock, nullptr);
+ return;
+ }
+
+ m_availabilityCalculator.beginBlock(m_highBlock);
+
+ m_state.reset();
+ m_state.beginBasicBlock(m_highBlock);
+
+ for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
+ if (!compileNode(m_nodeIndex))
+ break;
+ }
+ }
+
+ void safelyInvalidateAfterTermination()
+ {
+ if (verboseCompilationEnabled())
+ dataLog("Bailing.\n");
+ crash();
+
+ // Invalidate dominated blocks. Under normal circumstances we would expect
+ // them to be invalidated already. But you can have the CFA become more
+ // precise over time because the structures of objects change on the main
+ // thread. Failing to do this would result in weird crashes due to a value
+ // being used but not defined. Race conditions FTW!
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+ DFG::BasicBlock* target = m_graph.block(blockIndex);
+ if (!target)
+ continue;
+ if (m_graph.m_dominators->dominates(m_highBlock, target)) {
+ if (verboseCompilationEnabled())
+ dataLog("Block ", *target, " will bail also.\n");
+ target->cfaHasVisited = false;
+ }
+ }
+ }
+
+ bool compileNode(unsigned nodeIndex)
+ {
+ if (!m_state.isValid()) {
+ safelyInvalidateAfterTermination();
+ return false;
+ }
+
+ m_node = m_highBlock->at(nodeIndex);
+ m_origin = m_node->origin;
+ m_out.setOrigin(m_node);
+
+ if (verboseCompilationEnabled())
+ dataLog("Lowering ", m_node, "\n");
+
+ m_availableRecoveries.resize(0);
+
+ m_interpreter.startExecuting();
+ m_interpreter.executeKnownEdgeTypes(m_node);
+
+ switch (m_node->op()) {
+ case DFG::Upsilon:
+ compileUpsilon();
+ break;
+ case DFG::Phi:
+ compilePhi();
+ break;
+ case JSConstant:
+ break;
+ case DoubleConstant:
+ compileDoubleConstant();
+ break;
+ case Int52Constant:
+ compileInt52Constant();
+ break;
+ case LazyJSConstant:
+ compileLazyJSConstant();
+ break;
+ case DoubleRep:
+ compileDoubleRep();
+ break;
+ case DoubleAsInt32:
+ compileDoubleAsInt32();
+ break;
+ case DFG::ValueRep:
+ compileValueRep();
+ break;
+ case Int52Rep:
+ compileInt52Rep();
+ break;
+ case ValueToInt32:
+ compileValueToInt32();
+ break;
+ case BooleanToNumber:
+ compileBooleanToNumber();
+ break;
+ case ExtractOSREntryLocal:
+ compileExtractOSREntryLocal();
+ break;
+ case GetStack:
+ compileGetStack();
+ break;
+ case PutStack:
+ compilePutStack();
+ break;
+ case DFG::Check:
+ compileNoOp();
+ break;
+ case CallObjectConstructor:
+ compileCallObjectConstructor();
+ break;
+ case ToThis:
+ compileToThis();
+ break;
+ case ValueAdd:
+ compileValueAdd();
+ break;
+ case StrCat:
+ compileStrCat();
+ break;
+ case ArithAdd:
+ case ArithSub:
+ compileArithAddOrSub();
+ break;
+ case ArithClz32:
+ compileArithClz32();
+ break;
+ case ArithMul:
+ compileArithMul();
+ break;
+ case ArithDiv:
+ compileArithDiv();
+ break;
+ case ArithMod:
+ compileArithMod();
+ break;
+ case ArithMin:
+ case ArithMax:
+ compileArithMinOrMax();
+ break;
+ case ArithAbs:
+ compileArithAbs();
+ break;
+ case ArithSin:
+ compileArithSin();
+ break;
+ case ArithCos:
+ compileArithCos();
+ break;
+ case ArithTan:
+ compileArithTan();
+ break;
+ case ArithPow:
+ compileArithPow();
+ break;
+ case ArithRandom:
+ compileArithRandom();
+ break;
+ case ArithRound:
+ compileArithRound();
+ break;
+ case ArithFloor:
+ compileArithFloor();
+ break;
+ case ArithCeil:
+ compileArithCeil();
+ break;
+ case ArithTrunc:
+ compileArithTrunc();
+ break;
+ case ArithSqrt:
+ compileArithSqrt();
+ break;
+ case ArithLog:
+ compileArithLog();
+ break;
+ case ArithFRound:
+ compileArithFRound();
+ break;
+ case ArithNegate:
+ compileArithNegate();
+ break;
+ case DFG::BitAnd:
+ compileBitAnd();
+ break;
+ case DFG::BitOr:
+ compileBitOr();
+ break;
+ case DFG::BitXor:
+ compileBitXor();
+ break;
+ case BitRShift:
+ compileBitRShift();
+ break;
+ case BitLShift:
+ compileBitLShift();
+ break;
+ case BitURShift:
+ compileBitURShift();
+ break;
+ case UInt32ToNumber:
+ compileUInt32ToNumber();
+ break;
+ case CheckStructure:
+ compileCheckStructure();
+ break;
+ case CheckCell:
+ compileCheckCell();
+ break;
+ case CheckNotEmpty:
+ compileCheckNotEmpty();
+ break;
+ case CheckBadCell:
+ compileCheckBadCell();
+ break;
+ case CheckStringIdent:
+ compileCheckStringIdent();
+ break;
+ case GetExecutable:
+ compileGetExecutable();
+ break;
+ case ArrayifyToStructure:
+ compileArrayifyToStructure();
+ break;
+ case PutStructure:
+ compilePutStructure();
+ break;
+ case TryGetById:
+ compileGetById(AccessType::TryGet);
+ break;
+ case GetById:
+ case GetByIdFlush:
+ compileGetById(AccessType::Get);
+ break;
+ case GetByIdWithThis:
+ compileGetByIdWithThis();
+ break;
+ case In:
+ compileIn();
+ break;
+ case HasOwnProperty:
+ compileHasOwnProperty();
+ break;
+ case PutById:
+ case PutByIdDirect:
+ case PutByIdFlush:
+ compilePutById();
+ break;
+ case PutByIdWithThis:
+ compilePutByIdWithThis();
+ break;
+ case PutGetterById:
+ case PutSetterById:
+ compilePutAccessorById();
+ break;
+ case PutGetterSetterById:
+ compilePutGetterSetterById();
+ break;
+ case PutGetterByVal:
+ case PutSetterByVal:
+ compilePutAccessorByVal();
+ break;
+ case GetButterfly:
+ compileGetButterfly();
+ break;
+ case ConstantStoragePointer:
+ compileConstantStoragePointer();
+ break;
+ case GetIndexedPropertyStorage:
+ compileGetIndexedPropertyStorage();
+ break;
+ case CheckArray:
+ compileCheckArray();
+ break;
+ case GetArrayLength:
+ compileGetArrayLength();
+ break;
+ case CheckInBounds:
+ compileCheckInBounds();
+ break;
+ case GetByVal:
+ compileGetByVal();
+ break;
+ case GetMyArgumentByVal:
+ case GetMyArgumentByValOutOfBounds:
+ compileGetMyArgumentByVal();
+ break;
+ case GetByValWithThis:
+ compileGetByValWithThis();
+ break;
+ case PutByVal:
+ case PutByValAlias:
+ case PutByValDirect:
+ compilePutByVal();
+ break;
+ case PutByValWithThis:
+ compilePutByValWithThis();
+ break;
+ case DefineDataProperty:
+ compileDefineDataProperty();
+ break;
+ case DefineAccessorProperty:
+ compileDefineAccessorProperty();
+ break;
+ case ArrayPush:
+ compileArrayPush();
+ break;
+ case ArrayPop:
+ compileArrayPop();
+ break;
+ case ArraySlice:
+ compileArraySlice();
+ break;
+ case CreateActivation:
+ compileCreateActivation();
+ break;
+ case NewFunction:
+ case NewGeneratorFunction:
+ case NewAsyncFunction:
+ compileNewFunction();
+ break;
+ case CreateDirectArguments:
+ compileCreateDirectArguments();
+ break;
+ case CreateScopedArguments:
+ compileCreateScopedArguments();
+ break;
+ case CreateClonedArguments:
+ compileCreateClonedArguments();
+ break;
+ case NewObject:
+ compileNewObject();
+ break;
+ case NewArray:
+ compileNewArray();
+ break;
+ case NewArrayWithSpread:
+ compileNewArrayWithSpread();
+ break;
+ case Spread:
+ compileSpread();
+ break;
+ case NewArrayBuffer:
+ compileNewArrayBuffer();
+ break;
+ case NewArrayWithSize:
+ compileNewArrayWithSize();
+ break;
+ case NewTypedArray:
+ compileNewTypedArray();
+ break;
+ case GetTypedArrayByteOffset:
+ compileGetTypedArrayByteOffset();
+ break;
+ case AllocatePropertyStorage:
+ compileAllocatePropertyStorage();
+ break;
+ case ReallocatePropertyStorage:
+ compileReallocatePropertyStorage();
+ break;
+ case NukeStructureAndSetButterfly:
+ compileNukeStructureAndSetButterfly();
+ break;
+ case ToNumber:
+ compileToNumber();
+ break;
+ case ToString:
+ case CallStringConstructor:
+ compileToStringOrCallStringConstructor();
+ break;
+ case ToPrimitive:
+ compileToPrimitive();
+ break;
+ case MakeRope:
+ compileMakeRope();
+ break;
+ case StringCharAt:
+ compileStringCharAt();
+ break;
+ case StringCharCodeAt:
+ compileStringCharCodeAt();
+ break;
+ case StringFromCharCode:
+ compileStringFromCharCode();
+ break;
+ case GetByOffset:
+ case GetGetterSetterByOffset:
+ compileGetByOffset();
+ break;
+ case GetGetter:
+ compileGetGetter();
+ break;
+ case GetSetter:
+ compileGetSetter();
+ break;
+ case MultiGetByOffset:
+ compileMultiGetByOffset();
+ break;
+ case PutByOffset:
+ compilePutByOffset();
+ break;
+ case MultiPutByOffset:
+ compileMultiPutByOffset();
+ break;
+ case GetGlobalVar:
+ case GetGlobalLexicalVariable:
+ compileGetGlobalVariable();
+ break;
+ case PutGlobalVariable:
+ compilePutGlobalVariable();
+ break;
+ case NotifyWrite:
+ compileNotifyWrite();
+ break;
+ case GetCallee:
+ compileGetCallee();
+ break;
+ case GetArgumentCountIncludingThis:
+ compileGetArgumentCountIncludingThis();
+ break;
+ case GetScope:
+ compileGetScope();
+ break;
+ case SkipScope:
+ compileSkipScope();
+ break;
+ case GetGlobalObject:
+ compileGetGlobalObject();
+ break;
+ case GetClosureVar:
+ compileGetClosureVar();
+ break;
+ case PutClosureVar:
+ compilePutClosureVar();
+ break;
+ case GetFromArguments:
+ compileGetFromArguments();
+ break;
+ case PutToArguments:
+ compilePutToArguments();
+ break;
+ case GetArgument:
+ compileGetArgument();
+ break;
+ case CompareEq:
+ compileCompareEq();
+ break;
+ case CompareStrictEq:
+ compileCompareStrictEq();
+ break;
+ case CompareLess:
+ compileCompareLess();
+ break;
+ case CompareLessEq:
+ compileCompareLessEq();
+ break;
+ case CompareGreater:
+ compileCompareGreater();
+ break;
+ case CompareGreaterEq:
+ compileCompareGreaterEq();
+ break;
+ case CompareEqPtr:
+ compileCompareEqPtr();
+ break;
+ case LogicalNot:
+ compileLogicalNot();
+ break;
+ case Call:
+ case TailCallInlinedCaller:
+ case Construct:
+ compileCallOrConstruct();
+ break;
+ case DirectCall:
+ case DirectTailCallInlinedCaller:
+ case DirectConstruct:
+ case DirectTailCall:
+ compileDirectCallOrConstruct();
+ break;
+ case TailCall:
+ compileTailCall();
+ break;
+ case CallVarargs:
+ case CallForwardVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
+ case ConstructVarargs:
+ case ConstructForwardVarargs:
+ compileCallOrConstructVarargs();
+ break;
+ case CallEval:
+ compileCallEval();
+ break;
+ case LoadVarargs:
+ compileLoadVarargs();
+ break;
+ case ForwardVarargs:
+ compileForwardVarargs();
+ break;
+ case DFG::Jump:
+ compileJump();
+ break;
+ case DFG::Branch:
+ compileBranch();
+ break;
+ case DFG::Switch:
+ compileSwitch();
+ break;
+ case DFG::Return:
+ compileReturn();
+ break;
+ case ForceOSRExit:
+ compileForceOSRExit();
+ break;
+ case Throw:
+ case ThrowStaticError:
+ compileThrow();
+ break;
+ case InvalidationPoint:
+ compileInvalidationPoint();
+ break;
+ case IsEmpty:
+ compileIsEmpty();
+ break;
+ case IsUndefined:
+ compileIsUndefined();
+ break;
+ case IsBoolean:
+ compileIsBoolean();
+ break;
+ case IsNumber:
+ compileIsNumber();
+ break;
+ case IsCellWithType:
+ compileIsCellWithType();
+ break;
+ case MapHash:
+ compileMapHash();
+ break;
+ case GetMapBucket:
+ compileGetMapBucket();
+ break;
+ case LoadFromJSMapBucket:
+ compileLoadFromJSMapBucket();
+ break;
+ case IsNonEmptyMapBucket:
+ compileIsNonEmptyMapBucket();
+ break;
+ case IsObject:
+ compileIsObject();
+ break;
+ case IsObjectOrNull:
+ compileIsObjectOrNull();
+ break;
+ case IsFunction:
+ compileIsFunction();
+ break;
+ case IsTypedArrayView:
+ compileIsTypedArrayView();
+ break;
+ case ParseInt:
+ compileParseInt();
+ break;
+ case TypeOf:
+ compileTypeOf();
+ break;
+ case CheckTypeInfoFlags:
+ compileCheckTypeInfoFlags();
+ break;
+ case OverridesHasInstance:
+ compileOverridesHasInstance();
+ break;
+ case InstanceOf:
+ compileInstanceOf();
+ break;
+ case InstanceOfCustom:
+ compileInstanceOfCustom();
+ break;
+ case CountExecution:
+ compileCountExecution();
+ break;
+ case StoreBarrier:
+ case FencedStoreBarrier:
+ compileStoreBarrier();
+ break;
+ case HasIndexedProperty:
+ compileHasIndexedProperty();
+ break;
+ case HasGenericProperty:
+ compileHasGenericProperty();
+ break;
+ case HasStructureProperty:
+ compileHasStructureProperty();
+ break;
+ case GetDirectPname:
+ compileGetDirectPname();
+ break;
+ case GetEnumerableLength:
+ compileGetEnumerableLength();
+ break;
+ case GetPropertyEnumerator:
+ compileGetPropertyEnumerator();
+ break;
+ case GetEnumeratorStructurePname:
+ compileGetEnumeratorStructurePname();
+ break;
+ case GetEnumeratorGenericPname:
+ compileGetEnumeratorGenericPname();
+ break;
+ case ToIndexString:
+ compileToIndexString();
+ break;
+ case CheckStructureImmediate:
+ compileCheckStructureImmediate();
+ break;
+ case MaterializeNewObject:
+ compileMaterializeNewObject();
+ break;
+ case MaterializeCreateActivation:
+ compileMaterializeCreateActivation();
+ break;
+ case CheckWatchdogTimer:
+ compileCheckWatchdogTimer();
+ break;
+ case CreateRest:
+ compileCreateRest();
+ break;
+ case GetRestLength:
+ compileGetRestLength();
+ break;
+ case RegExpExec:
+ compileRegExpExec();
+ break;
+ case RegExpTest:
+ compileRegExpTest();
+ break;
+ case NewRegexp:
+ compileNewRegexp();
+ break;
+ case SetFunctionName:
+ compileSetFunctionName();
+ break;
+ case StringReplace:
+ case StringReplaceRegExp:
+ compileStringReplace();
+ break;
+ case GetRegExpObjectLastIndex:
+ compileGetRegExpObjectLastIndex();
+ break;
+ case SetRegExpObjectLastIndex:
+ compileSetRegExpObjectLastIndex();
+ break;
+ case LogShadowChickenPrologue:
+ compileLogShadowChickenPrologue();
+ break;
+ case LogShadowChickenTail:
+ compileLogShadowChickenTail();
+ break;
+ case RecordRegExpCachedResult:
+ compileRecordRegExpCachedResult();
+ break;
+ case ResolveScope:
+ compileResolveScope();
+ break;
+ case GetDynamicVar:
+ compileGetDynamicVar();
+ break;
+ case PutDynamicVar:
+ compilePutDynamicVar();
+ break;
+ case Unreachable:
+ compileUnreachable();
+ break;
+ case ToLowerCase:
+ compileToLowerCase();
+ break;
+ case NumberToStringWithRadix:
+ compileNumberToStringWithRadix();
+ break;
+ case CheckDOM:
+ compileCheckDOM();
+ break;
+ case CallDOM:
+ compileCallDOM();
+ break;
+ case CallDOMGetter:
+ compileCallDOMGetter();
+ break;
+
+ case PhantomLocal:
+ case LoopHint:
+ case MovHint:
+ case ZombieHint:
+ case ExitOK:
+ case PhantomNewObject:
+ case PhantomNewFunction:
+ case PhantomNewGeneratorFunction:
+ case PhantomNewAsyncFunction:
+ case PhantomCreateActivation:
+ case PhantomDirectArguments:
+ case PhantomCreateRest:
+ case PhantomSpread:
+ case PhantomNewArrayWithSpread:
+ case PhantomClonedArguments:
+ case PutHint:
+ case BottomValue:
+ case KillStack:
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
+ break;
+ }
+
+ if (m_node->isTerminal())
+ return false;
+
+ if (!m_state.isValid()) {
+ safelyInvalidateAfterTermination();
+ return false;
+ }
+
+ m_availabilityCalculator.executeNode(m_node);
+ m_interpreter.executeEffects(nodeIndex);
+
+ return true;
+ }
+
+ void compileUpsilon()
+ {
+ LValue upsilonValue = nullptr;
+ switch (m_node->child1().useKind()) {
+ case DoubleRepUse:
+ upsilonValue = lowDouble(m_node->child1());
+ break;
+ case Int32Use:
+ case KnownInt32Use:
+ upsilonValue = lowInt32(m_node->child1());
+ break;
+ case Int52RepUse:
+ upsilonValue = lowInt52(m_node->child1());
+ break;
+ case BooleanUse:
+ case KnownBooleanUse:
+ upsilonValue = lowBoolean(m_node->child1());
+ break;
+ case CellUse:
+ case KnownCellUse:
+ upsilonValue = lowCell(m_node->child1());
+ break;
+ case UntypedUse:
+ upsilonValue = lowJSValue(m_node->child1());
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ ValueFromBlock upsilon = m_out.anchor(upsilonValue);
+ LValue phiNode = m_phis.get(m_node->phi());
+ m_out.addIncomingToPhi(phiNode, upsilon);
+ }
+
+ void compilePhi()
+ {
+ LValue phi = m_phis.get(m_node);
+ m_out.m_block->append(phi);
+
+ switch (m_node->flags() & NodeResultMask) {
+ case NodeResultDouble:
+ setDouble(phi);
+ break;
+ case NodeResultInt32:
+ setInt32(phi);
+ break;
+ case NodeResultInt52:
+ setInt52(phi);
+ break;
+ case NodeResultBoolean:
+ setBoolean(phi);
+ break;
+ case NodeResultJS:
+ setJSValue(phi);
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
+ void compileDoubleConstant()
+ {
+ setDouble(m_out.constDouble(m_node->asNumber()));
+ }
+
+ void compileInt52Constant()
+ {
+ int64_t value = m_node->asAnyInt();
+
+ setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
+ setStrictInt52(m_out.constInt64(value));
+ }
+
+ void compileLazyJSConstant()
+ {
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ LazyJSValue value = m_node->lazyJSValue();
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ value.emit(jit, JSValueRegs(params[0].gpr()));
+ });
+ patchpoint->effects = Effects::none();
+ setJSValue(patchpoint);
+ }
+
+ void compileDoubleRep()
+ {
+ switch (m_node->child1().useKind()) {
+ case RealNumberUse: {
+ LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
+
+ LValue doubleValue = unboxDouble(value);
+
+ LBasicBlock intCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock fastResult = m_out.anchor(doubleValue);
+ m_out.branch(
+ m_out.doubleEqual(doubleValue, doubleValue),
+ usually(continuation), rarely(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
+
+ FTL_TYPE_CHECK(
+ jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
+ isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
+ ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ setDouble(m_out.phi(Double, fastResult, slowResult));
+ return;
+ }
+
+ case NotCellUse:
+ case NumberUse: {
+ bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
+
+ LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
+
+ LBasicBlock intCase = m_out.newBlock();
+ LBasicBlock doubleTesting = m_out.newBlock();
+ LBasicBlock doubleCase = m_out.newBlock();
+ LBasicBlock nonDoubleCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ isNotInt32(value, provenType(m_node->child1())),
+ unsure(doubleTesting), unsure(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
+
+ ValueFromBlock intToDouble = m_out.anchor(
+ m_out.intToDouble(unboxInt32(value)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(doubleTesting, doubleCase);
+ LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
+ m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
+
+ m_out.appendTo(doubleCase, nonDoubleCase);
+ ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
+ m_out.jump(continuation);
+
+ if (shouldConvertNonNumber) {
+ LBasicBlock undefinedCase = m_out.newBlock();
+ LBasicBlock testNullCase = m_out.newBlock();
+ LBasicBlock nullCase = m_out.newBlock();
+ LBasicBlock testBooleanTrueCase = m_out.newBlock();
+ LBasicBlock convertBooleanTrueCase = m_out.newBlock();
+ LBasicBlock convertBooleanFalseCase = m_out.newBlock();
+
+ m_out.appendTo(nonDoubleCase, undefinedCase);
+ LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
+ m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
+
+ m_out.appendTo(undefinedCase, testNullCase);
+ ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
+ m_out.jump(continuation);
+
+ m_out.appendTo(testNullCase, nullCase);
+ LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
+ m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
+
+ m_out.appendTo(nullCase, testBooleanTrueCase);
+ ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
+ m_out.jump(continuation);
+
+ m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
+ LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
+ m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
+
+ m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
+ ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
+ m_out.jump(continuation);
+
+ m_out.appendTo(convertBooleanFalseCase, continuation);
+
+ LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
+ FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCell, valueIsNotBooleanFalse);
+ ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
+ return;
+ }
+ m_out.appendTo(nonDoubleCase, continuation);
+ FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
+ m_out.unreachable();
+
+ m_out.appendTo(continuation, lastNext);
+
+ setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
+ return;
+ }
+
+ case Int52RepUse: {
+ setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ }
+ }
+
+ void compileDoubleAsInt32()
+ {
+ LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
+ setInt32(integerValue);
+ }
+
+ void compileValueRep()
+ {
+ switch (m_node->child1().useKind()) {
+ case DoubleRepUse: {
+ LValue value = lowDouble(m_node->child1());
+
+ if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
+ value = m_out.select(
+ m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
+ }
+
+ setJSValue(boxDouble(value));
+ return;
+ }
+
+ case Int52RepUse: {
+ setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ }
+ }
+
+ void compileInt52Rep()
+ {
+ switch (m_node->child1().useKind()) {
+ case Int32Use:
+ setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
+ return;
+
+ case AnyIntUse:
+ setStrictInt52(
+ jsValueToStrictInt52(
+ m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
+ return;
+
+ case DoubleRepAnyIntUse:
+ setStrictInt52(
+ doubleToStrictInt52(
+ m_node->child1(), lowDouble(m_node->child1())));
+ return;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void compileValueToInt32()
+ {
+ switch (m_node->child1().useKind()) {
+ case Int52RepUse:
+ setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
+ break;
+
+ case DoubleRepUse:
+ setInt32(doubleToInt32(lowDouble(m_node->child1())));
+ break;
+
+ case NumberUse:
+ case NotCellUse: {
+ LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
+ if (isValid(value)) {
+ setInt32(value.value());
+ break;
+ }
+
+ value = m_jsValueValues.get(m_node->child1().node());
+ if (isValid(value)) {
+ setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
+ break;
+ }
+
+ // We'll basically just get here for constants. But it's good to have this
+ // catch-all since we often add new representations into the mix.
+ setInt32(
+ numberOrNotCellToInt32(
+ m_node->child1(),
+ lowJSValue(m_node->child1(), ManualOperandSpeculation)));
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
+ void compileBooleanToNumber()
+ {
+ switch (m_node->child1().useKind()) {
+ case BooleanUse: {
+ setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
+ return;
+ }
+
+ case UntypedUse: {
+ LValue value = lowJSValue(m_node->child1());
+
+ if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
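+ // For both allowed inputs the numeric value is the low bit: int32 0/1 and the boolean
+ // encodings of false/true differ only in bit 0, so masking with 1 is enough.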
+ setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
+ return;
+ }
+
+ LBasicBlock booleanCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock notBooleanResult = m_out.anchor(value);
+ m_out.branch(
+ isBoolean(value, provenType(m_node->child1())),
+ unsure(booleanCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
+ ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
+ m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
+ return;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+ }
+
+ void compileExtractOSREntryLocal()
+ {
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
+ m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
+ setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
+ }
+
+ void compileGetStack()
+ {
+ // GetStacks arise only for captured variables and arguments. For arguments, we might have
+ // already loaded the value.
+ if (LValue value = m_loadedArgumentValues.get(m_node)) {
+ setJSValue(value);
+ return;
+ }
+
+ StackAccessData* data = m_node->stackAccessData();
+ AbstractValue& value = m_state.variables().operand(data->local);
+
+ DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
+ DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
+
+ if (isInt32Speculation(value.m_type))
+ setInt32(m_out.load32(payloadFor(data->machineLocal)));
+ else
+ setJSValue(m_out.load64(addressFor(data->machineLocal)));
+ }
+
+ void compilePutStack()
+ {
+ StackAccessData* data = m_node->stackAccessData();
+ switch (data->format) {
+ case FlushedJSValue: {
+ LValue value = lowJSValue(m_node->child1());
+ m_out.store64(value, addressFor(data->machineLocal));
+ break;
+ }
+
+ case FlushedDouble: {
+ LValue value = lowDouble(m_node->child1());
+ m_out.storeDouble(value, addressFor(data->machineLocal));
+ break;
+ }
+
+ case FlushedInt32: {
+ LValue value = lowInt32(m_node->child1());
+ m_out.store32(value, payloadFor(data->machineLocal));
+ break;
+ }
+
+ case FlushedInt52: {
+ LValue value = lowInt52(m_node->child1());
+ m_out.store64(value, addressFor(data->machineLocal));
+ break;
+ }
+
+ case FlushedCell: {
+ LValue value = lowCell(m_node->child1());
+ m_out.store64(value, addressFor(data->machineLocal));
+ break;
+ }
+
+ case FlushedBoolean: {
+ speculateBoolean(m_node->child1());
+ m_out.store64(
+ lowJSValue(m_node->child1(), ManualOperandSpeculation),
+ addressFor(data->machineLocal));
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad flush format");
+ break;
+ }
+ }
+
+ void compileNoOp()
+ {
+ DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
+ }
+
+ void compileCallObjectConstructor()
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
+ ValueFromBlock fastResult = m_out.anchor(value);
+ m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationObjectConstructor), m_callFrame, weakPointer(globalObject), value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ }
+
+ void compileToThis()
+ {
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
+ ValueFromBlock fastResult = m_out.anchor(value);
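+ // Cells that do not set the OverridesToThis type-info flag can be used as |this| as-is.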
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(OverridesToThis)),
+ usually(continuation), rarely(slowCase));
+
+ m_out.appendTo(slowCase, continuation);
+ J_JITOperation_EJ function;
+ if (m_graph.isStrictModeFor(m_node->origin.semantic))
+ function = operationToThisStrict;
+ else
+ function = operationToThis;
+ ValueFromBlock slowResult = m_out.anchor(
+ vmCall(Int64, m_out.operation(function), m_callFrame, value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ }
+
+ void compileValueAdd()
+ {
+ ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
+ JITAddIC* addIC = codeBlock()->addJITAddIC(arithProfile);
+ auto repatchingFunction = operationValueAddOptimize;
+ auto nonRepatchingFunction = operationValueAdd;
+ compileMathIC(addIC, repatchingFunction, nonRepatchingFunction);
+ }
+
+ template <typename Generator>
+ void compileMathIC(JITUnaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
+ {
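+ // Shape of a math IC: a patchpoint tries to emit the fast path inline. Its slow-path jumps
+ // are linked to a late path that calls the repatching or generic operation; if no inline
+ // path could be generated at all, the generic operation is called directly.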
+ Node* node = m_node;
+
+ LValue operand = lowJSValue(node->child1());
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(operand);
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+ RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
+ patchpoint->numGPScratchRegisters = 1;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineStart = jit.label();
+#endif
+
+ Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
+ mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
+
+ bool shouldEmitProfiling = false;
+ bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
+
+ if (generatedInline) {
+ ASSERT(!mathICGenerationState->slowPathJumps.empty());
+ auto done = jit.label();
+ params.addLatePath([=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ mathICGenerationState->slowPathJumps.link(&jit);
+ mathICGenerationState->slowPathStart = jit.label();
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathStart = jit.label();
+#endif
+
+ if (mathICGenerationState->shouldSlowPathRepatch) {
+ SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
+ repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
+ mathICGenerationState->slowPathCall = call.call();
+ } else {
+ SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
+ mathICGenerationState->slowPathCall = call.call();
+ }
+ jit.jump().linkTo(done, &jit);
+
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
+ });
+
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathEnd = jit.label();
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
+ });
+ } else {
+ callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
+ nonRepatchingFunction, params[0].gpr(), params[1].gpr());
+ }
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineEnd = jit.label();
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
+ });
+
+ setJSValue(patchpoint);
+ }
+
+ template <typename Generator>
+ void compileMathIC(JITBinaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
+ {
+ Node* node = m_node;
+
+ LValue left = lowJSValue(node->child1());
+ LValue right = lowJSValue(node->child2());
+
+ SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
+ SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(left);
+ patchpoint->appendSomeRegister(right);
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+ patchpoint->numGPScratchRegisters = 1;
+ patchpoint->numFPScratchRegisters = 2;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineStart = jit.label();
+#endif
+
+ Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
+ mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
+ JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
+ params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
+
+ bool shouldEmitProfiling = false;
+ bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
+
+ if (generatedInline) {
+ ASSERT(!mathICGenerationState->slowPathJumps.empty());
+ auto done = jit.label();
+ params.addLatePath([=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ mathICGenerationState->slowPathJumps.link(&jit);
+ mathICGenerationState->slowPathStart = jit.label();
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathStart = jit.label();
+#endif
+
+ if (mathICGenerationState->shouldSlowPathRepatch) {
+ SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
+ repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
+ mathICGenerationState->slowPathCall = call.call();
+ } else {
+ SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
+ mathICGenerationState->slowPathCall = call.call();
+ }
+ jit.jump().linkTo(done, &jit);
+
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
+ });
+
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathEnd = jit.label();
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
+ });
+ } else {
+ callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
+ nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
+ }
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineEnd = jit.label();
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
+ });
+
+ setJSValue(patchpoint);
+ }
+
+ void compileStrCat()
+ {
+ LValue result;
+ if (m_node->child3()) {
+ result = vmCall(
+ Int64, m_out.operation(operationStrCat3), m_callFrame,
+ lowJSValue(m_node->child1(), ManualOperandSpeculation),
+ lowJSValue(m_node->child2(), ManualOperandSpeculation),
+ lowJSValue(m_node->child3(), ManualOperandSpeculation));
+ } else {
+ result = vmCall(
+ Int64, m_out.operation(operationStrCat2), m_callFrame,
+ lowJSValue(m_node->child1(), ManualOperandSpeculation),
+ lowJSValue(m_node->child2(), ManualOperandSpeculation));
+ }
+ setJSValue(result);
+ }
+
+ void compileArithAddOrSub()
+ {
+ bool isSub = m_node->op() == ArithSub;
+ switch (m_node->binaryUseKind()) {
+ case Int32Use: {
+ LValue left = lowInt32(m_node->child1());
+ LValue right = lowInt32(m_node->child2());
+
+ if (!shouldCheckOverflow(m_node->arithMode())) {
+ setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
+ break;
+ }
+
+ CheckValue* result =
+ isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
+ blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
+ setInt32(result);
+ break;
+ }
+
+ case Int52RepUse: {
+ if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)
+ && !abstractValue(m_node->child2()).couldBeType(SpecInt52Only)) {
+ Int52Kind kind;
+ LValue left = lowWhicheverInt52(m_node->child1(), kind);
+ LValue right = lowInt52(m_node->child2(), kind);
+ setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
+ break;
+ }
+
+ LValue left = lowInt52(m_node->child1());
+ LValue right = lowInt52(m_node->child2());
+ CheckValue* result =
+ isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
+ blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
+ setInt52(result);
+ break;
+ }
+
+ case DoubleRepUse: {
+ LValue C1 = lowDouble(m_node->child1());
+ LValue C2 = lowDouble(m_node->child2());
+
+ setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
+ break;
+ }
+
+ case UntypedUse: {
+ if (!isSub) {
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+
+ ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
+ JITSubIC* subIC = codeBlock()->addJITSubIC(arithProfile);
+ auto repatchingFunction = operationValueSubOptimize;
+ auto nonRepatchingFunction = operationValueSub;
+ compileMathIC(subIC, repatchingFunction, nonRepatchingFunction);
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
+ void compileArithClz32()
+ {
+ if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
+ LValue operand = lowInt32(m_node->child1());
+ setInt32(m_out.ctlz32(operand));
+ return;
+ }
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
+ setInt32(result);
+ }
+
+ void compileArithMul()
+ {
+ switch (m_node->binaryUseKind()) {
+ case Int32Use: {
+ LValue left = lowInt32(m_node->child1());
+ LValue right = lowInt32(m_node->child2());
+
+ LValue result;
+
+ if (!shouldCheckOverflow(m_node->arithMode()))
+ result = m_out.mul(left, right);
+ else {
+ CheckValue* speculation = m_out.speculateMul(left, right);
+ blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
+ result = speculation;
+ }
+
+ if (shouldCheckNegativeZero(m_node->arithMode())) {
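+ // A zero product where either operand was negative really means -0, which int32
+ // cannot represent, so OSR exit in that case.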
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.notZero32(result), usually(continuation), rarely(slowCase));
+
+ LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
+ speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
+ speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ setInt32(result);
+ break;
+ }
+
+ case Int52RepUse: {
+ Int52Kind kind;
+ LValue left = lowWhicheverInt52(m_node->child1(), kind);
+ LValue right = lowInt52(m_node->child2(), opposite(kind));
+
+ CheckValue* result = m_out.speculateMul(left, right);
+ blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
+
+ if (shouldCheckNegativeZero(m_node->arithMode())) {
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.notZero64(result), usually(continuation), rarely(slowCase));
+
+ LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
+ speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
+ speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ setInt52(result);
+ break;
+ }
+
+ case DoubleRepUse: {
+ setDouble(
+ m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
+ break;
+ }
+
+ case UntypedUse: {
+ ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
+ JITMulIC* mulIC = codeBlock()->addJITMulIC(arithProfile);
+ auto repatchingFunction = operationValueMulOptimize;
+ auto nonRepatchingFunction = operationValueMul;
+ compileMathIC(mulIC, repatchingFunction, nonRepatchingFunction);
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
+ void compileArithDiv()
+ {
+ switch (m_node->binaryUseKind()) {
+ case Int32Use: {
+ LValue numerator = lowInt32(m_node->child1());
+ LValue denominator = lowInt32(m_node->child2());
+
+ if (shouldCheckNegativeZero(m_node->arithMode())) {
+ LBasicBlock zeroNumerator = m_out.newBlock();
+ LBasicBlock numeratorContinuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.isZero32(numerator),
+ rarely(zeroNumerator), usually(numeratorContinuation));
+
+ LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
+
+ speculate(
+ NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
+
+ m_out.jump(numeratorContinuation);
+
+ m_out.appendTo(numeratorContinuation, innerLastNext);
+ }
+
+ if (shouldCheckOverflow(m_node->arithMode())) {
+ LBasicBlock unsafeDenominator = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
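+ // The unsigned check (denominator + 1) > 1 rules out both unsafe denominators at once:
+ // 0 (division by zero) and -1 (INT_MIN / -1 overflows).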
+ LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
+ m_out.branch(
+ m_out.above(adjustedDenominator, m_out.int32One),
+ usually(continuation), rarely(unsafeDenominator));
+
+ LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
+ LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
+ speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
+ speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.div(numerator, denominator);
+ speculate(
+ Overflow, noValue(), 0,
+ m_out.notEqual(m_out.mul(result, denominator), numerator));
+ setInt32(result);
+ } else
+ setInt32(m_out.chillDiv(numerator, denominator));
+
+ break;
+ }
+
+ case DoubleRepUse: {
+ setDouble(m_out.doubleDiv(
+ lowDouble(m_node->child1()), lowDouble(m_node->child2())));
+ break;
+ }
+
+ case UntypedUse: {
+ emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
+ void compileArithMod()
+ {
+ switch (m_node->binaryUseKind()) {
+ case Int32Use: {
+ LValue numerator = lowInt32(m_node->child1());
+ LValue denominator = lowInt32(m_node->child2());
+
+ LValue remainder;
+ if (shouldCheckOverflow(m_node->arithMode())) {
+ LBasicBlock unsafeDenominator = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
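+ // Same trick as in ArithDiv: (denominator + 1) > 1 unsigned excludes denominator == 0
+ // and denominator == -1, the two cases the hardware divide cannot handle directly.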
+ LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
+ m_out.branch(
+ m_out.above(adjustedDenominator, m_out.int32One),
+ usually(continuation), rarely(unsafeDenominator));
+
+ LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
+ LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
+ speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
+ speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.mod(numerator, denominator);
+ remainder = result;
+ } else
+ remainder = m_out.chillMod(numerator, denominator);
+
+ if (shouldCheckNegativeZero(m_node->arithMode())) {
+ LBasicBlock negativeNumerator = m_out.newBlock();
+ LBasicBlock numeratorContinuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.lessThan(numerator, m_out.int32Zero),
+ unsure(negativeNumerator), unsure(numeratorContinuation));
+
+ LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
+
+ speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
+
+ m_out.jump(numeratorContinuation);
+
+ m_out.appendTo(numeratorContinuation, innerLastNext);
+ }
+
+ setInt32(remainder);
+ break;
+ }
+
+ case DoubleRepUse: {
+ setDouble(
+ m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
+ void compileArithMinOrMax()
+ {
+ switch (m_node->binaryUseKind()) {
+ case Int32Use: {
+ LValue left = lowInt32(m_node->child1());
+ LValue right = lowInt32(m_node->child2());
+
+ setInt32(
+ m_out.select(
+ m_node->op() == ArithMin
+ ? m_out.lessThan(left, right)
+ : m_out.lessThan(right, left),
+ left, right));
+ break;
+ }
+
+ case DoubleRepUse: {
+ LValue left = lowDouble(m_node->child1());
+ LValue right = lowDouble(m_node->child2());
+
+ LBasicBlock notLessThan = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock, 2> results;
+
+ results.append(m_out.anchor(left));
+ m_out.branch(
+ m_node->op() == ArithMin
+ ? m_out.doubleLessThan(left, right)
+ : m_out.doubleGreaterThan(left, right),
+ unsure(continuation), unsure(notLessThan));
+
+ LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
+ results.append(m_out.anchor(m_out.select(
+ m_node->op() == ArithMin
+ ? m_out.doubleGreaterThanOrEqual(left, right)
+ : m_out.doubleLessThanOrEqual(left, right),
+ right, m_out.constDouble(PNaN))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setDouble(m_out.phi(Double, results));
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
+ void compileArithAbs()
+ {
+ switch (m_node->child1().useKind()) {
+ case Int32Use: {
+ LValue value = lowInt32(m_node->child1());
+
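+ // Branchless abs: mask is 0 for non-negative values and all-ones for negative ones, so
+ // (value + mask) ^ mask negates negatives and leaves the rest unchanged. Only INT_MIN
+ // stays negative, which the overflow check below catches.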
+ LValue mask = m_out.aShr(value, m_out.constInt32(31));
+ LValue result = m_out.bitXor(mask, m_out.add(mask, value));
+
+ if (shouldCheckOverflow(m_node->arithMode()))
+ speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
+
+ setInt32(result);
+ break;
+ }
+
+ case DoubleRepUse: {
+ setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
+ break;
+ }
+
+ default: {
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
+ setDouble(result);
+ break;
+ }
+ }
+ }
+
+ void compileArithSin()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ setDouble(m_out.doubleSin(lowDouble(m_node->child1())));
+ return;
+ }
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Double, m_out.operation(operationArithSin), m_callFrame, argument);
+ setDouble(result);
+ }
+
+ void compileArithCos()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ setDouble(m_out.doubleCos(lowDouble(m_node->child1())));
+ return;
+ }
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Double, m_out.operation(operationArithCos), m_callFrame, argument);
+ setDouble(result);
+ }
+
+ void compileArithTan()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ setDouble(m_out.doubleTan(lowDouble(m_node->child1())));
+ return;
+ }
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Double, m_out.operation(operationArithTan), m_callFrame, argument);
+ setDouble(result);
+ }
+
+ void compileArithPow()
+ {
+ if (m_node->child2().useKind() == Int32Use)
+ setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
+ else {
+ LValue base = lowDouble(m_node->child1());
+ LValue exponent = lowDouble(m_node->child2());
+
+ LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
+ LBasicBlock integerExponentPowBlock = m_out.newBlock();
+ LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
+ LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
+ LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
+ LBasicBlock testExponentIsOneHalf = m_out.newBlock();
+ LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
+ LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
+ LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
+ LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
+ LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
+ LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
+ LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
+ LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
+ LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
+ LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
+ LBasicBlock powBlock = m_out.newBlock();
+ LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
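+ // Fast path: a small non-negative integer exponent takes the cheaper doublePowi() route;
+ // the unsigned belowOrEqual comparison also rejects negative exponents.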
+ LValue integerExponent = m_out.doubleToInt(exponent);
+ LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
+ LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
+ m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
+
+ LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
+ LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
+ m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
+
+ m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
+ ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
+ m_out.jump(continuation);
+
+ // If y is NaN, the result is NaN.
+ m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
+ LValue exponentIsNaN;
+ if (provenType(m_node->child2()) & SpecDoubleNaN)
+ exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
+ else
+ exponentIsNaN = m_out.booleanFalse;
+ m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));
+
+ // If abs(x) is 1 and y is +infinity, the result is NaN.
+ // If abs(x) is 1 and y is -infinity, the result is NaN.
+
+ // Test if base == 1.
+ m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
+ LValue absoluteBase = m_out.doubleAbs(base);
+ LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
+ m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));
+
+ // Test if abs(y) == Infinity.
+ m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
+ LValue absoluteExponent = m_out.doubleAbs(exponent);
+ LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
+ m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));
+
+ // If y == 0.5 or y == -0.5, handle it through SQRT.
+ // We have to be careful with -0 and -Infinity.
+
+ // Test if y == 0.5
+ m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
+ LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
+ m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));
+
+ // Handle x == -0.
+ m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
+ LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
+ ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
+ m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));
+
+ // Test if abs(x) == Infinity.
+ m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
+ LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
+ m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));
+
+ // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
+ m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
+ ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
+ m_out.jump(continuation);
+
+ // The exponent is 0.5, the base is infinite, the result is always infinite.
+ m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
+ ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
+ m_out.jump(continuation);
+
+ // Test if y == -0.5
+ m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
+ LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
+ m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));
+
+ // Handle x == -0.
+ m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
+ LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
+ m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));
+
+ m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
+ ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
+ m_out.jump(continuation);
+
+ // Test if abs(x) == Infinity.
+ m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
+ LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
+ m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));
+
+ // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
+ m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
+ LValue sqrtBase = m_out.doubleSqrt(base);
+ ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
+ m_out.jump(continuation);
+
+ // The exponent is -0.5, the base is infinite, the result is always zero.
+ m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
+ ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
+ m_out.jump(continuation);
+
+ m_out.appendTo(powBlock, nanExceptionResultIsNaN);
+ ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
+ m_out.jump(continuation);
+
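+ // If the double could be an impure NaN (one whose bit pattern would collide with the
+ // JSValue tag space when boxed), canonicalize it to the pure NaN first.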
+ m_out.appendTo(nanExceptionResultIsNaN, continuation);
+ ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
+ }
+ }
+
+ void compileArithRandom()
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ // Inlined WeakRandom::advance().
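+ // (This is the xorshift128+ generator over the two 64-bit state words m_low/m_high.)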
+ // uint64_t x = m_low;
+ void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
+ LValue low = m_out.load64(m_out.absolute(lowAddress));
+ // uint64_t y = m_high;
+ void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
+ LValue high = m_out.load64(m_out.absolute(highAddress));
+ // m_low = y;
+ m_out.store64(high, m_out.absolute(lowAddress));
+
+ // x ^= x << 23;
+ LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
+
+ // x ^= x >> 17;
+ LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
+
+ // x ^= y ^ (y >> 26);
+ LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
+
+ // m_high = x;
+ m_out.store64(phase3, m_out.absolute(highAddress));
+
+ // return x + y;
+ LValue random64 = m_out.add(phase3, high);
+
+ // Extract the low 53 bits. Integers of up to 53 bits are exactly representable as doubles.
+ LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
+
+ LValue double53Integer = m_out.intToDouble(random53);
+
+ // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
+ // In the latter case, `1.0 / (1 << 53)` is a double with mantissa = 0 and exponent field = 970, i.e. exactly 2^-53.
+ static const double scale = 1.0 / (1ULL << 53);
+
+ // Multiplying by 2^-53 does not change the mantissa of the 53bit double integer;
+ // it only lowers the exponent.
+ // (Except for 0.0, which is handled specially: its exponent simply stays 0.)
+ // This yields a random double with 53 bits of precision in [0, 1).
+ LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
+
+ setDouble(result);
+ }
+
+ void compileArithRound()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ LValue result = nullptr;
+ if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
+ LValue value = lowDouble(m_node->child1());
+ result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
+ } else {
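+ // Math.round() rounds halfway cases toward +Infinity: take ceil(value), then step back
+ // down by one when the amount the ceiling added exceeds 0.5.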
+ LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue value = lowDouble(m_node->child1());
+ LValue integerValue = m_out.doubleCeil(value);
+ ValueFromBlock integerValueResult = m_out.anchor(integerValue);
+
+ LValue realPart = m_out.doubleSub(integerValue, value);
+
+ m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
+ LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
+ ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+
+ result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
+ }
+
+ if (producesInteger(m_node->arithRoundingMode())) {
+ LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
+ setInt32(integerValue);
+ } else
+ setDouble(result);
+ return;
+ }
+
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
+ LValue argument = lowJSValue(m_node->child1());
+ setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
+ }
+
+ void compileArithFloor()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ LValue value = lowDouble(m_node->child1());
+ LValue integerValue = m_out.doubleFloor(value);
+ if (producesInteger(m_node->arithRoundingMode()))
+ setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
+ else
+ setDouble(integerValue);
+ return;
+ }
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
+ LValue argument = lowJSValue(m_node->child1());
+ setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
+ }
+
+ void compileArithCeil()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ LValue value = lowDouble(m_node->child1());
+ LValue integerValue = m_out.doubleCeil(value);
+ if (producesInteger(m_node->arithRoundingMode()))
+ setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
+ else
+ setDouble(integerValue);
+ return;
+ }
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
+ LValue argument = lowJSValue(m_node->child1());
+ setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
+ }
+
+ void compileArithTrunc()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ LValue value = lowDouble(m_node->child1());
+ LValue result = m_out.doubleTrunc(value);
+ if (producesInteger(m_node->arithRoundingMode()))
+ setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
+ else
+ setDouble(result);
+ return;
+ }
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
+ LValue argument = lowJSValue(m_node->child1());
+ setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
+ }
+
+ void compileArithSqrt()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
+ return;
+ }
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
+ setDouble(result);
+ }
+
+ void compileArithLog()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ setDouble(m_out.doubleLog(lowDouble(m_node->child1())));
+ return;
+ }
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Double, m_out.operation(operationArithLog), m_callFrame, argument);
+ setDouble(result);
+ }
+
+ void compileArithFRound()
+ {
+ if (m_node->child1().useKind() == DoubleRepUse) {
+ setDouble(m_out.fround(lowDouble(m_node->child1())));
+ return;
+ }
+ LValue argument = lowJSValue(m_node->child1());
+ LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
+ setDouble(result);
+ }
+
+ void compileArithNegate()
+ {
+ switch (m_node->child1().useKind()) {
+ case Int32Use: {
+ LValue value = lowInt32(m_node->child1());
+
+ LValue result;
+ if (!shouldCheckOverflow(m_node->arithMode()))
+ result = m_out.neg(value);
+ else if (!shouldCheckNegativeZero(m_node->arithMode())) {
+ CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
+ blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
+ result = check;
+ } else {
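+ // value & 0x7fffffff == 0 exactly when value is 0 or INT_MIN; negating 0 would yield
+ // -0 and negating INT_MIN overflows, so both OSR exit.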
+ speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
+ result = m_out.neg(value);
+ }
+
+ setInt32(result);
+ break;
+ }
+
+ case Int52RepUse: {
+ if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)) {
+ Int52Kind kind;
+ LValue value = lowWhicheverInt52(m_node->child1(), kind);
+ LValue result = m_out.neg(value);
+ if (shouldCheckNegativeZero(m_node->arithMode()))
+ speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
+ setInt52(result, kind);
+ break;
+ }
+
+ LValue value = lowInt52(m_node->child1());
+ CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
+ blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
+ if (shouldCheckNegativeZero(m_node->arithMode()))
+ speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
+ setInt52(result);
+ break;
+ }
+
+ case DoubleRepUse: {
+ setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
+ break;
+ }
+
+ default:
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
+ ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
+ JITNegIC* negIC = codeBlock()->addJITNegIC(arithProfile);
+ auto repatchingFunction = operationArithNegateOptimize;
+ auto nonRepatchingFunction = operationArithNegate;
+ compileMathIC(negIC, repatchingFunction, nonRepatchingFunction);
+ break;
+ }
+ }
+
+ void compileBitAnd()
+ {
+ if (m_node->isBinaryUseKind(UntypedUse)) {
+ emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
+ return;
+ }
+ setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+ }
+
+ void compileBitOr()
+ {
+ if (m_node->isBinaryUseKind(UntypedUse)) {
+ emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
+ return;
+ }
+ setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+ }
+
+ void compileBitXor()
+ {
+ if (m_node->isBinaryUseKind(UntypedUse)) {
+ emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
+ return;
+ }
+ setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+ }
+
+ void compileBitRShift()
+ {
+ if (m_node->isBinaryUseKind(UntypedUse)) {
+ emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
+ return;
+ }
+ setInt32(m_out.aShr(
+ lowInt32(m_node->child1()),
+ m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
+ }
+
+ void compileBitLShift()
+ {
+ if (m_node->isBinaryUseKind(UntypedUse)) {
+ emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
+ return;
+ }
+ setInt32(m_out.shl(
+ lowInt32(m_node->child1()),
+ m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
+ }
+
+ void compileBitURShift()
+ {
+ if (m_node->isBinaryUseKind(UntypedUse)) {
+ emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
+ return;
+ }
+ setInt32(m_out.lShr(
+ lowInt32(m_node->child1()),
+ m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
+ }
+
+ void compileUInt32ToNumber()
+ {
+ LValue value = lowInt32(m_node->child1());
+
+ if (doesOverflow(m_node->arithMode())) {
+ setStrictInt52(m_out.zeroExtPtr(value));
+ return;
+ }
+
+ speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
+ setInt32(value);
+ }
+
+ void compileCheckStructure()
+ {
+ ExitKind exitKind;
+ if (m_node->child1()->hasConstant())
+ exitKind = BadConstantCache;
+ else
+ exitKind = BadCache;
+
+ switch (m_node->child1().useKind()) {
+ case CellUse:
+ case KnownCellUse: {
+ LValue cell = lowCell(m_node->child1());
+
+ checkStructure(
+ m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
+ exitKind, m_node->structureSet(),
+ [&] (RegisteredStructure structure) {
+ return weakStructureID(structure);
+ });
+ return;
+ }
+
+ case CellOrOtherUse: {
+ LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
+ checkStructure(
+ m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
+ exitKind, m_node->structureSet(),
+ [&] (RegisteredStructure structure) {
+ return weakStructureID(structure);
+ });
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, continuation);
+ FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return;
+ }
+ }
+
+ void compileCheckCell()
+ {
+ LValue cell = lowCell(m_node->child1());
+
+ speculate(
+ BadCell, jsValueValue(cell), m_node->child1().node(),
+ m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
+ }
+
+ void compileCheckBadCell()
+ {
+ terminate(BadCell);
+ }
+
+ void compileCheckNotEmpty()
+ {
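+ // The empty JSValue is encoded as all-zero bits, so a 64-bit zero test catches reads of
+ // uninitialized (TDZ) bindings.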
+ speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
+ }
+
+ void compileCheckStringIdent()
+ {
+ UniquedStringImpl* uid = m_node->uidOperand();
+ LValue stringImpl = lowStringIdent(m_node->child1());
+ speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
+ }
+
+ void compileGetExecutable()
+ {
+ LValue cell = lowCell(m_node->child1());
+ speculateFunction(m_node->child1(), cell);
+ setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
+ }
+
+ void compileArrayifyToStructure()
+ {
+ LValue cell = lowCell(m_node->child1());
+ LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
+
+ LBasicBlock unexpectedStructure = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
+
+ m_out.branch(
+ m_out.notEqual(structureID, weakStructureID(m_node->structure())),
+ rarely(unexpectedStructure), usually(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
+
+ if (property) {
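+ // An index at or above MIN_SPARSE_ARRAY_INDEX would end up in sparse storage anyway, so
+ // arrayifying cannot produce the expected indexing type; OSR exit instead.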
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ speculate(
+ Uncountable, noValue(), 0,
+ m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
+ break;
+ case Array::Double:
+ vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
+ break;
+ case Array::Contiguous:
+ vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
+ break;
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage:
+ vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ break;
+ }
+
+ structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
+ speculate(
+ BadIndexingType, jsValueValue(cell), 0,
+ m_out.notEqual(structureID, weakStructureID(m_node->structure())));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void compilePutStructure()
+ {
+ m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
+
+ RegisteredStructure oldStructure = m_node->transition()->previous;
+ RegisteredStructure newStructure = m_node->transition()->next;
+ ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
+ ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
+ ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
+
+ LValue cell = lowCell(m_node->child1());
+ m_out.store32(
+ weakStructureID(newStructure),
+ cell, m_heaps.JSCell_structureID);
+ }
+
+ void compileGetById(AccessType type)
+ {
+ ASSERT(type == AccessType::Get || type == AccessType::TryGet);
+ switch (m_node->child1().useKind()) {
+ case CellUse: {
+ setJSValue(getById(lowCell(m_node->child1()), type));
+ return;
+ }
+
+ case UntypedUse: {
+ // This is pretty weird, since we duplicate the slow path both here and in the
+ // code generated by the IC. We should investigate making this less bad.
+ // https://bugs.webkit.org/show_bug.cgi?id=127830
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
+ ValueFromBlock cellResult = m_out.anchor(getById(value, type));
+ m_out.jump(continuation);
+
+ J_JITOperation_EJI getByIdFunction;
+ if (type == AccessType::Get)
+ getByIdFunction = operationGetByIdGeneric;
+ else
+ getByIdFunction = operationTryGetByIdGeneric;
+
+ m_out.appendTo(notCellCase, continuation);
+ ValueFromBlock notCellResult = m_out.anchor(vmCall(
+ Int64, m_out.operation(getByIdFunction),
+ m_callFrame, value,
+ m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, cellResult, notCellResult));
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return;
+ }
+ }
+
+ void compileGetByIdWithThis()
+ {
+ LValue base = lowJSValue(m_node->child1());
+ LValue thisValue = lowJSValue(m_node->child2());
+ LValue result = vmCall(Int64, m_out.operation(operationGetByIdWithThis), m_callFrame, base, thisValue, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
+ setJSValue(result);
+ }
+
+ void compileGetByValWithThis()
+ {
+ LValue base = lowJSValue(m_node->child1());
+ LValue thisValue = lowJSValue(m_node->child2());
+ LValue subscript = lowJSValue(m_node->child3());
+
+ LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
+ setJSValue(result);
+ }
+
+ void compilePutByIdWithThis()
+ {
+ LValue base = lowJSValue(m_node->child1());
+ LValue thisValue = lowJSValue(m_node->child2());
+ LValue value = lowJSValue(m_node->child3());
+
+ vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
+ m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
+ }
+
+ void compilePutByValWithThis()
+ {
+ LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
+ LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
+ LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
+ LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
+
+ vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
+ m_callFrame, base, thisValue, property, value);
+ }
+
+ void compileDefineDataProperty()
+ {
+ LValue base = lowCell(m_graph.varArgChild(m_node, 0));
+ LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
+ LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
+ Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
+ switch (propertyEdge.useKind()) {
+ case StringUse: {
+ LValue property = lowString(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineDataPropertyString), m_callFrame, base, property, value, attributes);
+ break;
+ }
+ case StringIdentUse: {
+ LValue property = lowStringIdent(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineDataPropertyStringIdent), m_callFrame, base, property, value, attributes);
+ break;
+ }
+ case SymbolUse: {
+ LValue property = lowSymbol(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineDataPropertySymbol), m_callFrame, base, property, value, attributes);
+ break;
+ }
+ case UntypedUse: {
+ LValue property = lowJSValue(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineDataProperty), m_callFrame, base, property, value, attributes);
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void compileDefineAccessorProperty()
+ {
+ LValue base = lowCell(m_graph.varArgChild(m_node, 0));
+ LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
+ LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
+ LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
+ Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
+ switch (propertyEdge.useKind()) {
+ case StringUse: {
+ LValue property = lowString(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineAccessorPropertyString), m_callFrame, base, property, getter, setter, attributes);
+ break;
+ }
+ case StringIdentUse: {
+ LValue property = lowStringIdent(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineAccessorPropertyStringIdent), m_callFrame, base, property, getter, setter, attributes);
+ break;
+ }
+ case SymbolUse: {
+ LValue property = lowSymbol(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineAccessorPropertySymbol), m_callFrame, base, property, getter, setter, attributes);
+ break;
+ }
+ case UntypedUse: {
+ LValue property = lowJSValue(propertyEdge);
+ vmCall(Void, m_out.operation(operationDefineAccessorProperty), m_callFrame, base, property, getter, setter, attributes);
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
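+ // Descriptive note: PutById is lowered to a patchpoint wrapping a JITPutByIdGenerator
+ // inline cache. The fast path is emitted in the generator callback; the IC's slow path is
+ // a late path that calls the generator's slow-path function and links back to the done label.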
+ void compilePutById()
+ {
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse);
+
+ Node* node = m_node;
+ LValue base = lowCell(node->child1());
+ LValue value = lowJSValue(node->child2());
+ auto uid = m_graph.identifiers()[node->identifierNumber()];
+
+ B3::PatchpointValue* patchpoint = m_out.patchpoint(Void);
+ patchpoint->appendSomeRegister(base);
+ patchpoint->appendSomeRegister(value);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
+ // https://bugs.webkit.org/show_bug.cgi?id=152848
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+
+ State* state = &m_ftlState;
+ ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();
+
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ CallSiteIndex callSiteIndex =
+ state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ // JS setter call ICs generated by the PutById IC will need this.
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ auto generator = Box<JITPutByIdGenerator>::create(
+ jit.codeBlock(), node->origin.semantic, callSiteIndex,
+ params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
+ JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
+ node->op() == PutByIdDirect ? Direct : NotDirect);
+
+ generator->generateFastPath(jit);
+ CCallHelpers::Label done = jit.label();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generator->slowPathJump().link(&jit);
+ CCallHelpers::Label slowPathBegin = jit.label();
+ CCallHelpers::Call slowPathCall = callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
+ CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
+ params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
+ jit.jump().linkTo(done, &jit);
+
+ generator->reportSlowPathCall(slowPathBegin, slowPathCall);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ generator->finalize(linkBuffer);
+ });
+ });
+ });
+ }
+
+ void compileGetButterfly()
+ {
+ setStorage(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly));
+ }
+
+ void compileConstantStoragePointer()
+ {
+ setStorage(m_out.constIntPtr(m_node->storagePointer()));
+ }
+
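+ // Descriptive note: for Array::String this resolves ropes on the slow path and returns the
+ // StringImpl character data; for typed-array modes it just loads the view's backing vector.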
+ void compileGetIndexedPropertyStorage()
+ {
+ LValue cell = lowCell(m_node->child1());
+
+ if (m_node->arrayMode().type() == Array::String) {
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
+ ValueFromBlock fastResult = m_out.anchor(fastResultValue);
+
+ m_out.branch(
+ m_out.notNull(fastResultValue), usually(continuation), rarely(slowPath));
+
+ LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
+
+ ValueFromBlock slowResult = m_out.anchor(
+ vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, cell));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
+ return;
+ }
+
+ DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()));
+ setStorage(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector));
+ }
+
+ void compileCheckArray()
+ {
+ Edge edge = m_node->child1();
+ LValue cell = lowCell(edge);
+
+ if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
+ return;
+
+ speculate(
+ BadIndexingType, jsValueValue(cell), 0,
+ m_out.logicalNot(isArrayType(cell, m_node->arrayMode())));
+ }
+
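+ // Descriptive note: non-wasteful views always report a byte offset of zero; wasteful views
+ // compute the offset as the vector pointer minus the ArrayBuffer's data pointer.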
+ void compileGetTypedArrayByteOffset()
+ {
+ LValue basePtr = lowCell(m_node->child1());
+
+ LBasicBlock simpleCase = m_out.newBlock();
+ LBasicBlock wastefulCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
+ m_out.branch(
+ m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
+ unsure(simpleCase), unsure(wastefulCase));
+
+ LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);
+
+ ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(wastefulCase, continuation);
+
+ LValue vectorPtr = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
+ LValue butterflyPtr = m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly);
+ LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
+ LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
+
+ ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
+
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+
+ setInt32(m_out.castToInt32(m_out.phi(pointerType(), simpleOut, wastefulOut)));
+ }
+
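+ // Descriptive note: loads the length field appropriate to the array mode: butterfly public
+ // length, JSString length, (Direct/Scoped)Arguments length, or the typed-array view length.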
+ void compileGetArrayLength()
+ {
+ switch (m_node->arrayMode().type()) {
+ case Array::Undecided:
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous: {
+ setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
+ return;
+ }
+
+ case Array::String: {
+ LValue string = lowCell(m_node->child1());
+ setInt32(m_out.load32NonNegative(string, m_heaps.JSString_length));
+ return;
+ }
+
+ case Array::DirectArguments: {
+ LValue arguments = lowCell(m_node->child1());
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
+ setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
+ return;
+ }
+
+ case Array::ScopedArguments: {
+ LValue arguments = lowCell(m_node->child1());
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.notZero32(m_out.load8ZeroExt32(arguments, m_heaps.ScopedArguments_overrodeThings)));
+ setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength));
+ return;
+ }
+
+ default:
+ if (m_node->arrayMode().isSomeTypedArrayView()) {
+ setInt32(
+ m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
+ return;
+ }
+
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ return;
+ }
+ }
+
+ void compileCheckInBounds()
+ {
+ speculate(
+ OutOfBounds, noValue(), 0,
+ m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+ }
+
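+ // Descriptive note: indexed load, specialized by array mode. In-bounds Int32/Contiguous/Double
+ // loads carry hole checks (or sane-chain undefined), out-of-bounds paths fall back to
+ // operationGetByValArrayInt, arguments-object paths speculate ExoticObjectMode, Generic is a
+ // vmCall, and the default case handles typed-array loads.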
+ void compileGetByVal()
+ {
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Contiguous: {
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+
+ IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
+ m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
+
+ if (m_node->arrayMode().isInBounds()) {
+ LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
+ LValue isHole = m_out.isZero64(result);
+ if (m_node->arrayMode().isSaneChain()) {
+ DFG_ASSERT(
+ m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous);
+ result = m_out.select(
+ isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
+ } else
+ speculate(LoadFromHole, noValue(), 0, isHole);
+ setJSValue(result);
+ return;
+ }
+
+ LValue base = lowCell(m_node->child1());
+
+ LBasicBlock fastCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
+ rarely(slowCase), usually(fastCase));
+
+ LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
+
+ LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
+ ValueFromBlock fastResult = m_out.anchor(fastResultValue);
+ m_out.branch(
+ m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(
+ vmCall(Int64, m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ return;
+ }
+
+ case Array::Double: {
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+
+ IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
+
+ if (m_node->arrayMode().isInBounds()) {
+ LValue result = m_out.loadDouble(
+ baseIndex(heap, storage, index, m_node->child2()));
+
+ if (!m_node->arrayMode().isSaneChain()) {
+ speculate(
+ LoadFromHole, noValue(), 0,
+ m_out.doubleNotEqualOrUnordered(result, result));
+ }
+ setDouble(result);
+ break;
+ }
+
+ LValue base = lowCell(m_node->child1());
+
+ LBasicBlock inBounds = m_out.newBlock();
+ LBasicBlock boxPath = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
+ rarely(slowCase), usually(inBounds));
+
+ LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
+ LValue doubleValue = m_out.loadDouble(
+ baseIndex(heap, storage, index, m_node->child2()));
+ m_out.branch(
+ m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
+ rarely(slowCase), usually(boxPath));
+
+ m_out.appendTo(boxPath, slowCase);
+ ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(
+ vmCall(Int64, m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ return;
+ }
+
+ case Array::Undecided: {
+ LValue index = lowInt32(m_node->child2());
+
+ speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
+ setJSValue(m_out.constInt64(ValueUndefined));
+ return;
+ }
+
+ case Array::DirectArguments: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.aboveOrEqual(
+ index,
+ m_out.load32NonNegative(base, m_heaps.DirectArguments_length)));
+
+ TypedPointer address = m_out.baseIndex(
+ m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
+ setJSValue(m_out.load64(address));
+ return;
+ }
+
+ case Array::ScopedArguments: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.aboveOrEqual(
+ index,
+ m_out.load32NonNegative(base, m_heaps.ScopedArguments_totalLength)));
+
+ LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
+ LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);
+
+ LBasicBlock namedCase = m_out.newBlock();
+ LBasicBlock overflowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));
+
+ LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);
+
+ LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
+ LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);
+
+ TypedPointer address = m_out.baseIndex(
+ m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
+ LValue scopeOffset = m_out.load32(address);
+
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));
+
+ address = m_out.baseIndex(
+ m_heaps.JSEnvironmentRecord_variables, scope, m_out.zeroExtPtr(scopeOffset));
+ ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
+ m_out.jump(continuation);
+
+ m_out.appendTo(overflowCase, continuation);
+
+ address = m_out.baseIndex(
+ m_heaps.ScopedArguments_overflowStorage, base,
+ m_out.zeroExtPtr(m_out.sub(index, namedLength)));
+ LValue overflowValue = m_out.load64(address);
+ speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
+ ValueFromBlock overflowResult = m_out.anchor(overflowValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, namedResult, overflowResult));
+ return;
+ }
+
+ case Array::Generic: {
+ setJSValue(vmCall(
+ Int64, m_out.operation(operationGetByVal), m_callFrame,
+ lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
+ return;
+ }
+
+ case Array::String: {
+ compileStringCharAt();
+ return;
+ }
+
+ default: {
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+
+ TypedArrayType type = m_node->arrayMode().typedArrayType();
+
+ if (isTypedView(type)) {
+ TypedPointer pointer = TypedPointer(
+ m_heaps.typedArrayProperties,
+ m_out.add(
+ storage,
+ m_out.shl(
+ m_out.zeroExtPtr(index),
+ m_out.constIntPtr(logElementSize(type)))));
+
+ if (isInt(type)) {
+ LValue result;
+ switch (elementSize(type)) {
+ case 1:
+ result = isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer);
+ break;
+ case 2:
+ result = isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer);
+ break;
+ case 4:
+ result = m_out.load32(pointer);
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad element size");
+ }
+
+ if (elementSize(type) < 4 || isSigned(type)) {
+ setInt32(result);
+ return;
+ }
+
+ if (m_node->shouldSpeculateInt32()) {
+ speculate(
+ Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
+ setInt32(result);
+ return;
+ }
+
+ if (m_node->shouldSpeculateAnyInt()) {
+ setStrictInt52(m_out.zeroExt(result, Int64));
+ return;
+ }
+
+ setDouble(m_out.unsignedToDouble(result));
+ return;
+ }
+
+ ASSERT(isFloat(type));
+
+ LValue result;
+ switch (type) {
+ case TypeFloat32:
+ result = m_out.floatToDouble(m_out.loadFloat(pointer));
+ break;
+ case TypeFloat64:
+ result = m_out.loadDouble(pointer);
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad typed array type");
+ }
+
+ setDouble(result);
+ return;
+ }
+
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ return;
+ } }
+ }
+
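+ // Descriptive note: reads an argument slot from the (possibly inlined) caller frame. The
+ // in-bounds limit comes from the inline call frame when known, otherwise from the argument
+ // count slot; GetMyArgumentByValOutOfBounds yields undefined instead of speculating when the
+ // index is out of range.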
+ void compileGetMyArgumentByVal()
+ {
+ InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
+
+ LValue index = lowInt32(m_node->child2());
+ if (m_node->numberOfArgumentsToSkip())
+ index = m_out.add(index, m_out.constInt32(m_node->numberOfArgumentsToSkip()));
+
+ LValue limit;
+ if (inlineCallFrame && !inlineCallFrame->isVarargs())
+ limit = m_out.constInt32(inlineCallFrame->arguments.size() - 1);
+ else {
+ VirtualRegister argumentCountRegister = AssemblyHelpers::argumentCount(inlineCallFrame);
+ limit = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
+ }
+
+ LValue isOutOfBounds = m_out.aboveOrEqual(index, limit);
+ LBasicBlock continuation = nullptr;
+ LBasicBlock lastNext = nullptr;
+ ValueFromBlock slowResult;
+ if (m_node->op() == GetMyArgumentByValOutOfBounds) {
+ LBasicBlock normalCase = m_out.newBlock();
+ continuation = m_out.newBlock();
+
+ slowResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
+ m_out.branch(isOutOfBounds, unsure(continuation), unsure(normalCase));
+
+ lastNext = m_out.appendTo(normalCase, continuation);
+ } else
+ speculate(ExoticObjectMode, noValue(), 0, isOutOfBounds);
+
+ TypedPointer base;
+ if (inlineCallFrame) {
+ if (inlineCallFrame->arguments.size() > 1)
+ base = addressFor(inlineCallFrame->arguments[1].virtualRegister());
+ } else
+ base = addressFor(virtualRegisterForArgument(1));
+
+ LValue result;
+ if (base) {
+ LValue pointer = m_out.baseIndex(
+ base.value(), m_out.zeroExt(index, pointerType()), ScaleEight);
+ result = m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer));
+ } else
+ result = m_out.constInt64(JSValue::encode(jsUndefined()));
+
+ if (m_node->op() == GetMyArgumentByValOutOfBounds) {
+ ValueFromBlock normalResult = m_out.anchor(result);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ result = m_out.phi(Int64, slowResult, normalResult);
+ }
+
+ setJSValue(result);
+ }
+
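+ // Descriptive note: indexed store. Array::Generic calls the strict/non-strict (direct)
+ // operation; Int32/Double/Contiguous store through the butterfly, going through the
+ // out-of-bounds helper unless this is PutByValAlias; typed arrays clamp/convert the value
+ // and, when out of bounds, only check that the view has not been neutered.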
+ void compilePutByVal()
+ {
+ Edge child1 = m_graph.varArgChild(m_node, 0);
+ Edge child2 = m_graph.varArgChild(m_node, 1);
+ Edge child3 = m_graph.varArgChild(m_node, 2);
+ Edge child4 = m_graph.varArgChild(m_node, 3);
+ Edge child5 = m_graph.varArgChild(m_node, 4);
+
+ switch (m_node->arrayMode().type()) {
+ case Array::Generic: {
+ V_JITOperation_EJJJ operation;
+ if (m_node->op() == PutByValDirect) {
+ if (m_graph.isStrictModeFor(m_node->origin.semantic))
+ operation = operationPutByValDirectStrict;
+ else
+ operation = operationPutByValDirectNonStrict;
+ } else {
+ if (m_graph.isStrictModeFor(m_node->origin.semantic))
+ operation = operationPutByValStrict;
+ else
+ operation = operationPutByValNonStrict;
+ }
+
+ vmCall(
+ Void, m_out.operation(operation), m_callFrame,
+ lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
+ return;
+ }
+
+ default:
+ break;
+ }
+
+ LValue base = lowCell(child1);
+ LValue index = lowInt32(child2);
+ LValue storage = lowStorage(child4);
+
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous: {
+ LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);
+
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Contiguous: {
+ LValue value = lowJSValue(child3, ManualOperandSpeculation);
+
+ if (m_node->arrayMode().type() == Array::Int32)
+ FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32Only, isNotInt32(value));
+
+ TypedPointer elementPointer = m_out.baseIndex(
+ m_node->arrayMode().type() == Array::Int32 ?
+ m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
+ storage, m_out.zeroExtPtr(index), provenValue(child2));
+
+ if (m_node->op() == PutByValAlias) {
+ m_out.store64(value, elementPointer);
+ break;
+ }
+
+ contiguousPutByValOutOfBounds(
+ codeBlock()->isStrictMode()
+ ? operationPutByValBeyondArrayBoundsStrict
+ : operationPutByValBeyondArrayBoundsNonStrict,
+ base, storage, index, value, continuation);
+
+ m_out.store64(value, elementPointer);
+ break;
+ }
+
+ case Array::Double: {
+ LValue value = lowDouble(child3);
+
+ FTL_TYPE_CHECK(
+ doubleValue(value), child3, SpecDoubleReal,
+ m_out.doubleNotEqualOrUnordered(value, value));
+
+ TypedPointer elementPointer = m_out.baseIndex(
+ m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
+ provenValue(child2));
+
+ if (m_node->op() == PutByValAlias) {
+ m_out.storeDouble(value, elementPointer);
+ break;
+ }
+
+ contiguousPutByValOutOfBounds(
+ codeBlock()->isStrictMode()
+ ? operationPutDoubleByValBeyondArrayBoundsStrict
+ : operationPutDoubleByValBeyondArrayBoundsNonStrict,
+ base, storage, index, value, continuation);
+
+ m_out.storeDouble(value, elementPointer);
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ }
+
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, outerLastNext);
+ return;
+ }
+
+ default:
+ TypedArrayType type = m_node->arrayMode().typedArrayType();
+
+ if (isTypedView(type)) {
+ TypedPointer pointer = TypedPointer(
+ m_heaps.typedArrayProperties,
+ m_out.add(
+ storage,
+ m_out.shl(
+ m_out.zeroExt(index, pointerType()),
+ m_out.constIntPtr(logElementSize(type)))));
+
+ Output::StoreType storeType;
+ LValue valueToStore;
+
+ if (isInt(type)) {
+ LValue intValue;
+ switch (child3.useKind()) {
+ case Int52RepUse:
+ case Int32Use: {
+ if (child3.useKind() == Int32Use)
+ intValue = lowInt32(child3);
+ else
+ intValue = m_out.castToInt32(lowStrictInt52(child3));
+
+ if (isClamped(type)) {
+ ASSERT(elementSize(type) == 1);
+
+ LBasicBlock atLeastZero = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock, 2> intValues;
+ intValues.append(m_out.anchor(m_out.int32Zero));
+ m_out.branch(
+ m_out.lessThan(intValue, m_out.int32Zero),
+ unsure(continuation), unsure(atLeastZero));
+
+ LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);
+
+ intValues.append(m_out.anchor(m_out.select(
+ m_out.greaterThan(intValue, m_out.constInt32(255)),
+ m_out.constInt32(255),
+ intValue)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ intValue = m_out.phi(Int32, intValues);
+ }
+ break;
+ }
+
+ case DoubleRepUse: {
+ LValue doubleValue = lowDouble(child3);
+
+ if (isClamped(type)) {
+ ASSERT(elementSize(type) == 1);
+
+ LBasicBlock atLeastZero = m_out.newBlock();
+ LBasicBlock withinRange = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock, 3> intValues;
+ intValues.append(m_out.anchor(m_out.int32Zero));
+ m_out.branch(
+ m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
+ unsure(continuation), unsure(atLeastZero));
+
+ LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
+ intValues.append(m_out.anchor(m_out.constInt32(255)));
+ m_out.branch(
+ m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
+ unsure(continuation), unsure(withinRange));
+
+ m_out.appendTo(withinRange, continuation);
+ intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ intValue = m_out.phi(Int32, intValues);
+ } else
+ intValue = doubleToInt32(doubleValue);
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ }
+
+ valueToStore = intValue;
+ switch (elementSize(type)) {
+ case 1:
+ storeType = Output::Store32As8;
+ break;
+ case 2:
+ storeType = Output::Store32As16;
+ break;
+ case 4:
+ storeType = Output::Store32;
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad element size");
+ }
+ } else /* !isInt(type) */ {
+ LValue value = lowDouble(child3);
+ switch (type) {
+ case TypeFloat32:
+ valueToStore = m_out.doubleToFloat(value);
+ storeType = Output::StoreFloat;
+ break;
+ case TypeFloat64:
+ valueToStore = value;
+ storeType = Output::StoreDouble;
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad typed array type");
+ }
+ }
+
+ if (m_node->arrayMode().isInBounds() || m_node->op() == PutByValAlias)
+ m_out.store(valueToStore, pointer, storeType);
+ else {
+ LBasicBlock isInBounds = m_out.newBlock();
+ LBasicBlock isOutOfBounds = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.aboveOrEqual(index, lowInt32(child5)),
+ unsure(isOutOfBounds), unsure(isInBounds));
+
+ LBasicBlock lastNext = m_out.appendTo(isInBounds, isOutOfBounds);
+ m_out.store(valueToStore, pointer, storeType);
+ m_out.jump(continuation);
+
+ m_out.appendTo(isOutOfBounds, continuation);
+ speculateTypedArrayIsNotNeutered(base);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ return;
+ }
+
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ break;
+ }
+ }
+
+ void compilePutAccessorById()
+ {
+ LValue base = lowCell(m_node->child1());
+ LValue accessor = lowCell(m_node->child2());
+ auto uid = m_graph.identifiers()[m_node->identifierNumber()];
+ vmCall(
+ Void,
+ m_out.operation(m_node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById),
+ m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), accessor);
+ }
+
+ void compilePutGetterSetterById()
+ {
+ LValue base = lowCell(m_node->child1());
+ LValue getter = lowJSValue(m_node->child2());
+ LValue setter = lowJSValue(m_node->child3());
+ auto uid = m_graph.identifiers()[m_node->identifierNumber()];
+ vmCall(
+ Void, m_out.operation(operationPutGetterSetter),
+ m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), getter, setter);
+ }
+
+ void compilePutAccessorByVal()
+ {
+ LValue base = lowCell(m_node->child1());
+ LValue subscript = lowJSValue(m_node->child2());
+ LValue accessor = lowCell(m_node->child3());
+ vmCall(
+ Void,
+ m_out.operation(m_node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal),
+ m_callFrame, base, subscript, m_out.constInt32(m_node->accessorAttributes()), accessor);
+ }
+
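+ // Descriptive note: fast path appends when publicLength < vectorLength, bumps publicLength,
+ // and returns the boxed new length; otherwise it calls operationArrayPush(Double).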
+ void compileArrayPush()
+ {
+ LValue base = lowCell(m_node->child1());
+ LValue storage = lowStorage(m_node->child3());
+
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Contiguous:
+ case Array::Double: {
+ LValue value;
+ Output::StoreType storeType;
+
+ if (m_node->arrayMode().type() != Array::Double) {
+ value = lowJSValue(m_node->child2(), ManualOperandSpeculation);
+ if (m_node->arrayMode().type() == Array::Int32) {
+ FTL_TYPE_CHECK(
+ jsValueValue(value), m_node->child2(), SpecInt32Only, isNotInt32(value));
+ }
+ storeType = Output::Store64;
+ } else {
+ value = lowDouble(m_node->child2());
+ FTL_TYPE_CHECK(
+ doubleValue(value), m_node->child2(), SpecDoubleReal,
+ m_out.doubleNotEqualOrUnordered(value, value));
+ storeType = Output::StoreDouble;
+ }
+
+ IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());
+
+ LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
+
+ LBasicBlock fastPath = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.aboveOrEqual(
+ prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
+ unsure(slowPath), unsure(fastPath));
+
+ LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
+ m_out.store(
+ value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), storeType);
+ LValue newLength = m_out.add(prevLength, m_out.int32One);
+ m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
+
+ ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ LValue operation;
+ if (m_node->arrayMode().type() != Array::Double)
+ operation = m_out.operation(operationArrayPush);
+ else
+ operation = m_out.operation(operationArrayPushDouble);
+ ValueFromBlock slowResult = m_out.anchor(
+ vmCall(Int64, operation, m_callFrame, value, base));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ return;
+ }
+ }
+
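+ // Descriptive note: clamps the start and end arguments the way Array.prototype.slice does,
+ // allocates a result array whose structure matches the source's indexing shape, and copies
+ // the selected range with a simple index-by-index loop.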
+ void compileArraySlice()
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ LValue sourceStorage = lowStorage(m_node->numChildren() == 3 ? m_graph.varArgChild(m_node, 2) : m_graph.varArgChild(m_node, 3));
+ LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);
+
+ LValue endBoundary;
+ if (m_node->numChildren() == 3)
+ endBoundary = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);
+ else {
+ endBoundary = lowInt32(m_graph.varArgChild(m_node, 2));
+ endBoundary = m_out.select(m_out.greaterThanOrEqual(endBoundary, m_out.constInt32(0)),
+ m_out.select(m_out.above(endBoundary, inputLength), inputLength, endBoundary),
+ m_out.select(m_out.lessThan(m_out.add(inputLength, endBoundary), m_out.constInt32(0)), m_out.constInt32(0), m_out.add(inputLength, endBoundary)));
+ }
+
+ LValue startIndex = lowInt32(m_graph.varArgChild(m_node, 1));
+ startIndex = m_out.select(m_out.greaterThanOrEqual(startIndex, m_out.constInt32(0)),
+ m_out.select(m_out.above(startIndex, inputLength), inputLength, startIndex),
+ m_out.select(m_out.lessThan(m_out.add(inputLength, startIndex), m_out.constInt32(0)), m_out.constInt32(0), m_out.add(inputLength, startIndex)));
+
+ LValue resultLength = m_out.select(m_out.below(startIndex, endBoundary),
+ m_out.sub(endBoundary, startIndex),
+ m_out.constInt32(0));
+
+ ArrayValues arrayResult;
+ {
+ LValue indexingType = m_out.load8ZeroExt32(lowCell(m_graph.varArgChild(m_node, 0)), m_heaps.JSCell_indexingTypeAndMisc);
+ indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllArrayTypesAndHistory));
+ // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
+ // to ensure the incoming array is one of the original array structures
+ // with one of the following indexing shapes: Int32, Contiguous, Double.
+ LValue structure = m_out.select(
+ m_out.equal(indexingType, m_out.constInt32(ArrayWithInt32)),
+ weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))),
+ m_out.select(m_out.equal(indexingType, m_out.constInt32(ArrayWithContiguous)),
+ weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))),
+ weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble)))));
+ arrayResult = allocateJSArray(resultLength, structure, indexingType, false, false);
+ }
+
+ LBasicBlock loop = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ resultLength = m_out.zeroExtPtr(resultLength);
+ ValueFromBlock startLoadIndex = m_out.anchor(m_out.zeroExtPtr(startIndex));
+ ValueFromBlock startStoreIndex = m_out.anchor(m_out.constIntPtr(0));
+
+ m_out.branch(
+ m_out.below(m_out.constIntPtr(0), resultLength), unsure(loop), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(loop, continuation);
+ LValue storeIndex = m_out.phi(pointerType(), startStoreIndex);
+ LValue loadIndex = m_out.phi(pointerType(), startLoadIndex);
+ LValue value = m_out.load64(m_out.baseIndex(m_heaps.root, sourceStorage, loadIndex, ScaleEight));
+ m_out.store64(value, m_out.baseIndex(m_heaps.root, arrayResult.butterfly, storeIndex, ScaleEight));
+ LValue nextStoreIndex = m_out.add(storeIndex, m_out.constIntPtr(1));
+ m_out.addIncomingToPhi(storeIndex, m_out.anchor(nextStoreIndex));
+ m_out.addIncomingToPhi(loadIndex, m_out.anchor(m_out.add(loadIndex, m_out.constIntPtr(1))));
+ m_out.branch(
+ m_out.below(nextStoreIndex, resultLength), unsure(loop), unsure(continuation));
+
+ m_out.appendTo(continuation, lastNext);
+
+ mutatorFence();
+ setJSValue(arrayResult.array);
+ }
+
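+ // Descriptive note: fast path decrements publicLength and reads the last slot (undefined for
+ // empty arrays); holes (zero / NaN) fall back to operationArrayPopAndRecoverLength.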
+ void compileArrayPop()
+ {
+ LValue base = lowCell(m_node->child1());
+ LValue storage = lowStorage(m_node->child2());
+
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous: {
+ IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());
+
+ LBasicBlock fastCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
+
+ Vector<ValueFromBlock, 3> results;
+ results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
+ m_out.branch(
+ m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));
+
+ LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
+ LValue newLength = m_out.sub(prevLength, m_out.int32One);
+ m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
+ TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
+ if (m_node->arrayMode().type() != Array::Double) {
+ LValue result = m_out.load64(pointer);
+ m_out.store64(m_out.int64Zero, pointer);
+ results.append(m_out.anchor(result));
+ m_out.branch(
+ m_out.notZero64(result), usually(continuation), rarely(slowCase));
+ } else {
+ LValue result = m_out.loadDouble(pointer);
+ m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
+ results.append(m_out.anchor(boxDouble(result)));
+ m_out.branch(
+ m_out.doubleEqual(result, result),
+ usually(continuation), rarely(slowCase));
+ }
+
+ m_out.appendTo(slowCase, continuation);
+ results.append(m_out.anchor(vmCall(
+ Int64, m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, results));
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad array type");
+ return;
+ }
+ }
+
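+ // Descriptive note: when the symbol table's singleton scope is still valid, this calls
+ // operationCreateActivationDirect; otherwise it inline-allocates the JSLexicalEnvironment,
+ // initializes every scope slot, and uses a lazy slow path on allocation failure.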
+ void compileCreateActivation()
+ {
+ LValue scope = lowCell(m_node->child1());
+ SymbolTable* table = m_node->castOperand<SymbolTable*>();
+ RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
+ JSValue initializationValue = m_node->initializationValueForActivation();
+ ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
+ if (table->singletonScope()->isStillValid()) {
+ LValue callResult = vmCall(
+ Int64,
+ m_out.operation(operationCreateActivationDirect), m_callFrame, weakStructure(structure),
+ scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue)));
+ setJSValue(callResult);
+ return;
+ }
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ LValue fastObject = allocateObject<JSLexicalEnvironment>(
+ JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);
+
+ // We don't need memory barriers since we just fast-created the activation, so the
+ // activation must be young.
+ m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
+ m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);
+
+ for (unsigned i = 0; i < table->scopeSize(); ++i) {
+ m_out.store64(
+ m_out.constInt64(JSValue::encode(initializationValue)),
+ fastObject, m_heaps.JSEnvironmentRecord_variables[i]);
+ }
+
+ mutatorFence();
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ LValue callResult = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationCreateActivationDirect, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
+ CCallHelpers::TrustedImmPtr(table),
+ CCallHelpers::TrustedImm64(JSValue::encode(initializationValue)));
+ },
+ scope);
+ ValueFromBlock slowResult = m_out.anchor(callResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
+ }
+
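+ // Descriptive note: when the executable's singleton function is still valid, this calls the
+ // matching operationNew*Function; otherwise it inline-allocates the function object with the
+ // appropriate structure and falls back to the *WithInvalidatedReallocationWatchpoint
+ // operation when allocation fails.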
+ void compileNewFunction()
+ {
+ ASSERT(m_node->op() == NewFunction || m_node->op() == NewGeneratorFunction || m_node->op() == NewAsyncFunction);
+ bool isGeneratorFunction = m_node->op() == NewGeneratorFunction;
+ bool isAsyncFunction = m_node->op() == NewAsyncFunction;
+
+ LValue scope = lowCell(m_node->child1());
+
+ FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
+ if (executable->singletonFunction()->isStillValid()) {
+ LValue callResult =
+ isGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
+ isAsyncFunction ? vmCall(Int64, m_out.operation(operationNewAsyncFunction), m_callFrame, scope, weakPointer(executable)) :
+ vmCall(Int64, m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
+ setJSValue(callResult);
+ return;
+ }
+
+ RegisteredStructure structure = m_graph.registerStructure(
+ isGeneratorFunction ? m_graph.globalObjectFor(m_node->origin.semantic)->generatorFunctionStructure() :
+ isAsyncFunction ? m_graph.globalObjectFor(m_node->origin.semantic)->asyncFunctionStructure() :
+ m_graph.globalObjectFor(m_node->origin.semantic)->functionStructure());
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ LValue fastObject =
+ isGeneratorFunction ? allocateObject<JSGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
+ allocateObject<JSFunction>(structure, m_out.intPtrZero, slowPath);
+
+ // We don't need memory barriers since we just fast-created the function, so it
+ // must be young.
+ m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
+ m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
+ m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);
+
+ mutatorFence();
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ Vector<LValue> slowPathArguments;
+ slowPathArguments.append(scope);
+ LValue callResult = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ if (isGeneratorFunction) {
+ return createLazyCallGenerator(
+ operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint,
+ locations[0].directGPR(), locations[1].directGPR(),
+ CCallHelpers::TrustedImmPtr(executable));
+ }
+ if (isAsyncFunction) {
+ return createLazyCallGenerator(
+ operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint,
+ locations[0].directGPR(), locations[1].directGPR(),
+ CCallHelpers::TrustedImmPtr(executable));
+ }
+ return createLazyCallGenerator(
+ operationNewFunctionWithInvalidatedReallocationWatchpoint,
+ locations[0].directGPR(), locations[1].directGPR(),
+ CCallHelpers::TrustedImmPtr(executable));
+ },
+ slowPathArguments);
+ ValueFromBlock slowResult = m_out.anchor(callResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
+ }
+
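+ // Descriptive note: allocates a DirectArguments object, sized statically when the argument
+ // count is known and variably otherwise, then copies the arguments off the stack (in a loop
+ // for the dynamic case) and stores the callee.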
+ void compileCreateDirectArguments()
+ {
+ // FIXME: A more effective way of dealing with the argument count and callee is to have
+ // them be explicit arguments to this node.
+ // https://bugs.webkit.org/show_bug.cgi?id=142207
+
+ RegisteredStructure structure =
+ m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure());
+
+ unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ ArgumentsLength length = getArgumentsLength();
+
+ LValue fastObject;
+ if (length.isKnown) {
+ fastObject = allocateObject<DirectArguments>(
+ DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
+ m_out.intPtrZero, slowPath);
+ } else {
+ LValue size = m_out.add(
+ m_out.shl(length.value, m_out.constInt32(3)),
+ m_out.constInt32(DirectArguments::storageOffset()));
+
+ size = m_out.select(
+ m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
+ size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));
+
+ fastObject = allocateVariableSizedObject<DirectArguments>(
+ m_out.zeroExtPtr(size), structure, m_out.intPtrZero, slowPath);
+ }
+
+ m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
+ m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
+ m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_mappedArguments);
+ m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_modifiedArgumentsDescriptor);
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ LValue callResult = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationCreateDirectArguments, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
+ CCallHelpers::TrustedImm32(minCapacity));
+ }, length.value);
+ ValueFromBlock slowResult = m_out.anchor(callResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.phi(pointerType(), fastResult, slowResult);
+
+ m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);
+
+ if (length.isKnown) {
+ VirtualRegister start = AssemblyHelpers::argumentsStart(m_node->origin.semantic);
+ for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
+ m_out.store64(
+ m_out.load64(addressFor(start + i)),
+ result, m_heaps.DirectArguments_storage[i]);
+ }
+ } else {
+ LValue stackBase = getArgumentsStart();
+
+ LBasicBlock loop = m_out.newBlock();
+ LBasicBlock end = m_out.newBlock();
+
+ ValueFromBlock originalLength;
+ if (minCapacity) {
+ LValue capacity = m_out.select(
+ m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
+ length.value,
+ m_out.constInt32(minCapacity));
+ LValue originalLengthValue = m_out.zeroExtPtr(capacity);
+ originalLength = m_out.anchor(originalLengthValue);
+ m_out.jump(loop);
+ } else {
+ LValue originalLengthValue = m_out.zeroExtPtr(length.value);
+ originalLength = m_out.anchor(originalLengthValue);
+ m_out.branch(m_out.isNull(originalLengthValue), unsure(end), unsure(loop));
+ }
+
+ lastNext = m_out.appendTo(loop, end);
+ LValue previousIndex = m_out.phi(pointerType(), originalLength);
+ LValue index = m_out.sub(previousIndex, m_out.intPtrOne);
+ m_out.store64(
+ m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)),
+ m_out.baseIndex(m_heaps.DirectArguments_storage, result, index));
+ ValueFromBlock nextIndex = m_out.anchor(index);
+ m_out.addIncomingToPhi(previousIndex, nextIndex);
+ m_out.branch(m_out.isNull(index), unsure(end), unsure(loop));
+
+ m_out.appendTo(end, lastNext);
+ }
+
+ mutatorFence();
+
+ setJSValue(result);
+ }
+
+ void compileCreateScopedArguments()
+ {
+ LValue scope = lowCell(m_node->child1());
+
+ LValue result = vmCall(
+ Int64, m_out.operation(operationCreateScopedArguments), m_callFrame,
+ weakPointer(
+ m_graph.globalObjectFor(m_node->origin.semantic)->scopedArgumentsStructure()),
+ getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope);
+
+ setJSValue(result);
+ }
+
+ void compileCreateClonedArguments()
+ {
+ LValue result = vmCall(
+ Int64, m_out.operation(operationCreateClonedArguments), m_callFrame,
+ weakPointer(
+ m_graph.globalObjectFor(m_node->origin.semantic)->clonedArgumentsStructure()),
+ getArgumentsStart(), getArgumentsLength().value, getCurrentCallee());
+
+ setJSValue(result);
+ }
+
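+ // Descriptive note: with the having-a-bad-time watchpoint intact, allocates an uninitialized
+ // contiguous array and copies the non-skipped arguments in a descending loop; otherwise this
+ // calls operationCreateRest.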
+ void compileCreateRest()
+ {
+ if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
+ LBasicBlock continuation = m_out.newBlock();
+ LValue arrayLength = lowInt32(m_node->child1());
+ LBasicBlock loopStart = m_out.newBlock();
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+ RegisteredStructure structure = m_graph.registerStructure(globalObject->restParameterStructure());
+ ArrayValues arrayValues = allocateUninitializedContiguousJSArray(arrayLength, structure);
+ LValue array = arrayValues.array;
+ LValue butterfly = arrayValues.butterfly;
+ ValueFromBlock startLength = m_out.anchor(arrayLength);
+ LValue argumentRegion = m_out.add(getArgumentsStart(), m_out.constInt64(sizeof(Register) * m_node->numberOfArgumentsToSkip()));
+ m_out.branch(m_out.equal(arrayLength, m_out.constInt32(0)),
+ unsure(continuation), unsure(loopStart));
+
+ LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
+ LValue phiOffset = m_out.phi(Int32, startLength);
+ LValue currentOffset = m_out.sub(phiOffset, m_out.int32One);
+ m_out.addIncomingToPhi(phiOffset, m_out.anchor(currentOffset));
+ LValue loadedValue = m_out.load64(m_out.baseIndex(m_heaps.variables, argumentRegion, m_out.zeroExtPtr(currentOffset)));
+ IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
+ m_out.store64(loadedValue, m_out.baseIndex(heap, butterfly, m_out.zeroExtPtr(currentOffset)));
+ m_out.branch(m_out.equal(currentOffset, m_out.constInt32(0)), unsure(continuation), unsure(loopStart));
+
+ m_out.appendTo(continuation, lastNext);
+ mutatorFence();
+ setJSValue(array);
+ return;
+ }
+
+ LValue arrayLength = lowInt32(m_node->child1());
+ LValue argumentStart = getArgumentsStart();
+ LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
+ setJSValue(vmCall(
+ Int64, m_out.operation(operationCreateRest), m_callFrame, argumentStart, numberOfArgumentsToSkip, arrayLength));
+ }
+
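+ // Descriptive note: the rest length is argumentsLength - numberOfArgumentsToSkip when the
+ // former is larger, and zero otherwise.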
+ void compileGetRestLength()
+ {
+ LBasicBlock nonZeroLength = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock zeroLengthResult = m_out.anchor(m_out.constInt32(0));
+
+ LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
+ LValue argumentsLength = getArgumentsLength().value;
+ m_out.branch(m_out.above(argumentsLength, numberOfArgumentsToSkip),
+ unsure(nonZeroLength), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(nonZeroLength, continuation);
+ ValueFromBlock nonZeroLengthResult = m_out.anchor(m_out.sub(argumentsLength, numberOfArgumentsToSkip));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setInt32(m_out.phi(Int32, zeroLengthResult, nonZeroLengthResult));
+ }
+
+ void compileNewObject()
+ {
+ setJSValue(allocateObject(m_node->structure()));
+ mutatorFence();
+ }
+
+ void compileNewArray()
+ {
+ // First speculate appropriately on all of the children. Do this unconditionally up here
+ // because some of the slow paths may otherwise forget to do it. It's sort of arguable
+ // that doing the speculations up here might be unprofitable for register allocation - so we can consider
+ // sinking this to below the allocation fast path if we find that this has a lot of
+ // register pressure.
+ for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
+ speculate(m_graph.varArgChild(m_node, operandIndex));
+
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+ RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
+ m_node->indexingType()));
+
+ if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
+ unsigned numElements = m_node->numChildren();
+
+ ArrayValues arrayValues =
+ allocateUninitializedContiguousJSArray(m_out.constInt32(numElements), structure);
+
+ for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
+ Edge edge = m_graph.varArgChild(m_node, operandIndex);
+
+ switch (m_node->indexingType()) {
+ case ALL_BLANK_INDEXING_TYPES:
+ case ALL_UNDECIDED_INDEXING_TYPES:
+ DFG_CRASH(m_graph, m_node, "Bad indexing type");
+ break;
+
+ case ALL_DOUBLE_INDEXING_TYPES:
+ m_out.storeDouble(
+ lowDouble(edge),
+ arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
+ break;
+
+ case ALL_INT32_INDEXING_TYPES:
+ case ALL_CONTIGUOUS_INDEXING_TYPES:
+ m_out.store64(
+ lowJSValue(edge, ManualOperandSpeculation),
+ arrayValues.butterfly,
+ m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
+ break;
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Corrupt indexing type");
+ break;
+ }
+ }
+
+ setJSValue(arrayValues.array);
+ mutatorFence();
+ return;
+ }
+
+ if (!m_node->numChildren()) {
+ setJSValue(vmCall(
+ Int64, m_out.operation(operationNewEmptyArray), m_callFrame,
+ weakStructure(structure)));
+ return;
+ }
+
+ size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
+ ASSERT(scratchSize);
+ ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
+
+ for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
+ Edge edge = m_graph.varArgChild(m_node, operandIndex);
+ m_out.store64(
+ lowJSValue(edge, ManualOperandSpeculation),
+ m_out.absolute(buffer + operandIndex));
+ }
+
+ m_out.storePtr(
+ m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->activeLengthPtr()));
+
+ LValue result = vmCall(
+ Int64, m_out.operation(operationNewArray), m_callFrame,
+ weakStructure(structure), m_out.constIntPtr(buffer),
+ m_out.constIntPtr(m_node->numChildren()));
+
+ m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->activeLengthPtr()));
+
+ setJSValue(result);
+ }
+
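+ // Descriptive note: with the watchpoint intact, sums the lengths of fixed elements and spread
+ // segments (with overflow speculation), allocates a contiguous array, and copies each segment
+ // in its own loop. Otherwise the children are spilled to a scratch buffer and
+ // operationNewArrayWithSpreadSlow builds the array.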
+ void compileNewArrayWithSpread()
+ {
+ if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
+ unsigned startLength = 0;
+ BitVector* bitVector = m_node->bitVector();
+ HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
+
+ for (unsigned i = 0; i < m_node->numChildren(); ++i) {
+ if (!bitVector->get(i))
+ ++startLength;
+ }
+
+ LValue length = m_out.constInt32(startLength);
+
+ for (unsigned i = 0; i < m_node->numChildren(); ++i) {
+ if (bitVector->get(i)) {
+ Edge use = m_graph.varArgChild(m_node, i);
+ CheckValue* lengthCheck = nullptr;
+ if (use->op() == PhantomSpread) {
+ RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
+ LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
+ return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
+ }).iterator->value;
+ lengthCheck = m_out.speculateAdd(length, spreadLength);
+ } else {
+ LValue fixedArray = lowCell(use);
+ lengthCheck = m_out.speculateAdd(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
+ }
+ blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
+ length = lengthCheck;
+ }
+ }
+
+ RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
+ ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
+ LValue result = arrayValues.array;
+ LValue storage = arrayValues.butterfly;
+ LValue index = m_out.constIntPtr(0);
+
+ for (unsigned i = 0; i < m_node->numChildren(); ++i) {
+ Edge use = m_graph.varArgChild(m_node, i);
+ if (bitVector->get(i)) {
+ if (use->op() == PhantomSpread) {
+ RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
+
+ LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
+ LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
+
+ LBasicBlock loopStart = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
+ ValueFromBlock arrayIndexStart = m_out.anchor(index);
+ ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);
+
+ m_out.branch(
+ m_out.isZero64(length),
+ unsure(continuation), unsure(loopStart));
+
+ LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
+
+ LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
+ LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);
+
+ LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
+ m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));
+
+ LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
+ LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
+ ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);
+
+ m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
+ m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));
+
+ m_out.branch(
+ m_out.below(nextLoadIndex, length),
+ unsure(loopStart), unsure(continuation));
+
+ m_out.appendTo(continuation, lastNext);
+ index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
+ } else {
+ LBasicBlock loopStart = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue fixedArray = lowCell(use);
+
+ ValueFromBlock fixedIndexStart = m_out.anchor(m_out.constIntPtr(0));
+ ValueFromBlock arrayIndexStart = m_out.anchor(index);
+ ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);
+
+ LValue fixedArraySize = m_out.zeroExtPtr(m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
+
+ m_out.branch(
+ m_out.isZero64(fixedArraySize),
+ unsure(continuation), unsure(loopStart));
+
+ LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
+
+ LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
+ LValue fixedArrayIndex = m_out.phi(pointerType(), fixedIndexStart);
+
+ LValue item = m_out.load64(m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, fixedArrayIndex));
+ m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));
+
+ LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
+ LValue nextFixedArrayIndex = m_out.add(fixedArrayIndex, m_out.constIntPtr(1));
+ ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);
+
+ m_out.addIncomingToPhi(fixedArrayIndex, m_out.anchor(nextFixedArrayIndex));
+ m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));
+
+ m_out.branch(
+ m_out.below(nextFixedArrayIndex, fixedArraySize),
+ unsure(loopStart), unsure(continuation));
+
+ m_out.appendTo(continuation, lastNext);
+ index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
+ }
+ } else {
+ IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
+ LValue item = lowJSValue(use);
+ m_out.store64(item, m_out.baseIndex(heap, storage, index));
+ index = m_out.add(index, m_out.constIntPtr(1));
+ }
+ }
+
+ mutatorFence();
+ setJSValue(result);
+ return;
+ }
+
+ ASSERT(m_node->numChildren());
+ size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
+ ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
+ BitVector* bitVector = m_node->bitVector();
+ for (unsigned i = 0; i < m_node->numChildren(); ++i) {
+ Edge use = m_graph.m_varArgChildren[m_node->firstChild() + i];
+ LValue value;
+ if (bitVector->get(i))
+ value = lowCell(use);
+ else
+ value = lowJSValue(use);
+ m_out.store64(value, m_out.absolute(&buffer[i]));
+ }
+
+ m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->activeLengthPtr()));
+ LValue result = vmCall(Int64, m_out.operation(operationNewArrayWithSpreadSlow), m_callFrame, m_out.constIntPtr(buffer), m_out.constInt32(m_node->numChildren()));
+ m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->activeLengthPtr()));
+
+ setJSValue(result);
+ }
+
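+ // Spread produces a JSFixedArray snapshot of its argument. For ArrayUse with an
+ // Int32, Double, or Contiguous shape we allocate the JSFixedArray inline and copy
+ // the elements, converting holes to undefined; the slow path calls
+ // operationSpreadFastArray, and non-ArrayUse arguments go through operationSpreadGeneric.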
+ void compileSpread()
+ {
+ // It would be trivial to support this, but for now, we never create
+ // IR that would necessitate this. The reason is that Spread is only
+ // consumed by NewArrayWithSpread and never anything else. Also, any
+ // Spread(PhantomCreateRest) will turn into PhantomSpread(PhantomCreateRest).
+ RELEASE_ASSERT(m_node->child1()->op() != PhantomCreateRest);
+
+ LValue argument = lowCell(m_node->child1());
+
+ LValue result;
+ if (m_node->child1().useKind() == ArrayUse) {
+ speculateArray(m_node->child1());
+
+ LBasicBlock preLoop = m_out.newBlock();
+ LBasicBlock loopSelection = m_out.newBlock();
+ LBasicBlock contiguousLoopStart = m_out.newBlock();
+ LBasicBlock doubleLoopStart = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue indexingShape = m_out.load8ZeroExt32(argument, m_heaps.JSCell_indexingTypeAndMisc);
+ indexingShape = m_out.bitAnd(indexingShape, m_out.constInt32(IndexingShapeMask));
+ LValue isOKIndexingType = m_out.belowOrEqual(
+ m_out.sub(indexingShape, m_out.constInt32(Int32Shape)),
+ m_out.constInt32(ContiguousShape - Int32Shape));
+
+ m_out.branch(isOKIndexingType, unsure(preLoop), unsure(slowPath));
+ LBasicBlock lastNext = m_out.appendTo(preLoop, loopSelection);
+
+ LValue butterfly = m_out.loadPtr(argument, m_heaps.JSObject_butterfly);
+ LValue length = m_out.load32NonNegative(butterfly, m_heaps.Butterfly_publicLength);
+ static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
+ LValue size = m_out.add(
+ m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
+ m_out.constIntPtr(JSFixedArray::offsetOfData()));
+
+ LValue fastAllocation = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowPath);
+ ValueFromBlock fastResult = m_out.anchor(fastAllocation);
+ m_out.store32(length, fastAllocation, m_heaps.JSFixedArray_size);
+
+ ValueFromBlock startIndexForContiguous = m_out.anchor(m_out.constIntPtr(0));
+ ValueFromBlock startIndexForDouble = m_out.anchor(m_out.constIntPtr(0));
+
+ m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopSelection));
+
+ m_out.appendTo(loopSelection, contiguousLoopStart);
+ m_out.branch(m_out.equal(indexingShape, m_out.constInt32(DoubleShape)),
+ unsure(doubleLoopStart), unsure(contiguousLoopStart));
+
+ {
+ m_out.appendTo(contiguousLoopStart, doubleLoopStart);
+ LValue index = m_out.phi(pointerType(), startIndexForContiguous);
+
+ TypedPointer loadSite = m_out.baseIndex(m_heaps.root, butterfly, index, ScaleEight); // We read TOP here since we can be reading either int32 or contiguous properties.
+ LValue value = m_out.load64(loadSite);
+ value = m_out.select(m_out.isZero64(value), m_out.constInt64(JSValue::encode(jsUndefined())), value);
+ m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
+
+ LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
+ m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
+
+ m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
+ unsure(contiguousLoopStart), unsure(continuation));
+ }
+
+ {
+ m_out.appendTo(doubleLoopStart, slowPath);
+ LValue index = m_out.phi(pointerType(), startIndexForDouble);
+
+ LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, butterfly, index));
+ LValue isNaN = m_out.doubleNotEqualOrUnordered(value, value);
+ LValue holeResult = m_out.constInt64(JSValue::encode(jsUndefined()));
+ LValue normalResult = boxDouble(value);
+ value = m_out.select(isNaN, holeResult, normalResult);
+ m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
+
+ LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
+ m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
+
+ m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
+ unsure(doubleLoopStart), unsure(continuation));
+ }
+
+ m_out.appendTo(slowPath, continuation);
+ ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationSpreadFastArray), m_callFrame, argument));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ result = m_out.phi(Int64, fastResult, slowResult);
+ mutatorFence();
+ } else
+ result = vmCall(Int64, m_out.operation(operationSpreadGeneric), m_callFrame, argument);
+
+ setJSValue(result);
+ }
+
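+ // NewArrayBuffer materializes an array literal from the code block's constant buffer.
+ // The fast path allocates an uninitialized contiguous array and stores the constants
+ // directly; if the global object is having a bad time or the indexing type needs
+ // ArrayStorage, we fall back to operationNewArrayBuffer.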
+ void compileNewArrayBuffer()
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+ RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
+ m_node->indexingType()));
+
+ if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
+ unsigned numElements = m_node->numConstants();
+
+ ArrayValues arrayValues =
+ allocateUninitializedContiguousJSArray(m_out.constInt32(numElements), structure);
+
+ JSValue* data = codeBlock()->constantBuffer(m_node->startConstant());
+ for (unsigned index = 0; index < m_node->numConstants(); ++index) {
+ int64_t value;
+ if (hasDouble(m_node->indexingType()))
+ value = bitwise_cast<int64_t>(data[index].asNumber());
+ else
+ value = JSValue::encode(data[index]);
+
+ m_out.store64(
+ m_out.constInt64(value),
+ arrayValues.butterfly,
+ m_heaps.forIndexingType(m_node->indexingType())->at(index));
+ }
+
+ mutatorFence();
+ setJSValue(arrayValues.array);
+ return;
+ }
+
+ setJSValue(vmCall(
+ Int64, m_out.operation(operationNewArrayBuffer), m_callFrame,
+ weakStructure(structure), m_out.constIntPtr(m_node->startConstant()),
+ m_out.constIntPtr(m_node->numConstants())));
+ }
+
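+ // NewArrayWithSize allocates an array of the requested length. The fast path uses
+ // allocateJSArray; otherwise we select an ArrayStorage structure for very large
+ // lengths and call operationNewArrayWithSize.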
+ void compileNewArrayWithSize()
+ {
+ LValue publicLength = lowInt32(m_node->child1());
+
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+ RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
+ m_node->indexingType()));
+
+ if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
+ IndexingType indexingType = m_node->indexingType();
+ setJSValue(
+ allocateJSArray(
+ publicLength, weakPointer(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType)), m_out.constInt32(indexingType)).array);
+ mutatorFence();
+ return;
+ }
+
+ LValue structureValue = m_out.select(
+ m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
+ weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))),
+ weakStructure(structure));
+ setJSValue(vmCall(Int64, m_out.operation(operationNewArrayWithSize), m_callFrame, structureValue, publicLength, m_out.intPtrZero));
+ }
+
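+ // For Int32Use we inline the allocation: round the byte size up to 8, allocate and
+ // zero the backing store from the auxiliary space, then allocate the JSArrayBufferView
+ // itself, falling back to a lazy slow path call. For UntypedUse we always call the
+ // one-argument constructor operation.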
+ void compileNewTypedArray()
+ {
+ TypedArrayType type = m_node->typedArrayType();
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ switch (m_node->child1().useKind()) {
+ case Int32Use: {
+ RegisteredStructure structure = m_graph.registerStructure(globalObject->typedArrayStructureConcurrently(type));
+
+ LValue size = lowInt32(m_node->child1());
+
+ LBasicBlock smallEnoughCase = m_out.newBlock();
+ LBasicBlock nonZeroCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock noStorage = m_out.anchor(m_out.intPtrZero);
+
+ m_out.branch(
+ m_out.above(size, m_out.constInt32(JSArrayBufferView::fastSizeLimit)),
+ rarely(slowCase), usually(smallEnoughCase));
+
+ LBasicBlock lastNext = m_out.appendTo(smallEnoughCase, nonZeroCase);
+
+ m_out.branch(m_out.notZero32(size), usually(nonZeroCase), rarely(slowCase));
+
+ m_out.appendTo(nonZeroCase, slowCase);
+
+ LValue byteSize =
+ m_out.shl(m_out.zeroExtPtr(size), m_out.constInt32(logElementSize(type)));
+ if (elementSize(type) < 8) {
+ byteSize = m_out.bitAnd(
+ m_out.add(byteSize, m_out.constIntPtr(7)),
+ m_out.constIntPtr(~static_cast<intptr_t>(7)));
+ }
+
+ LValue allocator = allocatorForSize(vm().auxiliarySpace, byteSize, slowCase);
+ LValue storage = allocateHeapCell(allocator, slowCase);
+
+ splatWords(
+ storage,
+ m_out.int32Zero,
+ m_out.castToInt32(m_out.lShr(byteSize, m_out.constIntPtr(3))),
+ m_out.int64Zero,
+ m_heaps.typedArrayProperties);
+
+ ValueFromBlock haveStorage = m_out.anchor(storage);
+
+ LValue fastResultValue =
+ allocateObject<JSArrayBufferView>(structure, m_out.intPtrZero, slowCase);
+
+ m_out.storePtr(storage, fastResultValue, m_heaps.JSArrayBufferView_vector);
+ m_out.store32(size, fastResultValue, m_heaps.JSArrayBufferView_length);
+ m_out.store32(m_out.constInt32(FastTypedArray), fastResultValue, m_heaps.JSArrayBufferView_mode);
+
+ mutatorFence();
+ ValueFromBlock fastResult = m_out.anchor(fastResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowCase, continuation);
+ LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);
+
+ LValue slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationNewTypedArrayWithSizeForType(type), locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
+ locations[2].directGPR());
+ },
+ size, storageValue);
+ ValueFromBlock slowResult = m_out.anchor(slowResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
+ return;
+ }
+
+ case UntypedUse: {
+ LValue argument = lowJSValue(m_node->child1());
+
+ LValue result = vmCall(
+ pointerType(), m_out.operation(operationNewTypedArrayWithOneArgumentForType(type)),
+ m_callFrame, weakPointer(globalObject->typedArrayStructureConcurrently(type)), argument);
+
+ setJSValue(result);
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return;
+ }
+ }
+
+ void compileAllocatePropertyStorage()
+ {
+ LValue object = lowCell(m_node->child1());
+ setStorage(allocatePropertyStorage(object, m_node->transition()->previous.get()));
+ }
+
+ void compileReallocatePropertyStorage()
+ {
+ Transition* transition = m_node->transition();
+ LValue object = lowCell(m_node->child1());
+ LValue oldStorage = lowStorage(m_node->child2());
+
+ setStorage(
+ reallocatePropertyStorage(
+ object, oldStorage, transition->previous.get(), transition->next.get()));
+ }
+
+ void compileNukeStructureAndSetButterfly()
+ {
+ nukeStructureAndSetButterfly(lowStorage(m_node->child2()), lowCell(m_node->child1()));
+ }
+
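+ // ToNumber: if the input is proven to never be a number we call the operation
+ // unconditionally; otherwise numbers pass through and only non-numbers take the call.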
+ void compileToNumber()
+ {
+ LValue value = lowJSValue(m_node->child1());
+
+ if (!(abstractValue(m_node->child1()).m_type & SpecBytecodeNumber))
+ setJSValue(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
+ else {
+ LBasicBlock notNumber = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock fastResult = m_out.anchor(value);
+ m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));
+
+ // notNumber case.
+ LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
+ // We have made several attempts to remove ToNumber, but it still exists,
+ // which means that converting non-numbers to numbers here is not rare.
+ // So instead of using the lazy slow path generator, we call the operation directly.
+ ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
+ m_out.jump(continuation);
+
+ // continuation case.
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ }
+ }
+
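+ // Handles both ToString and CallStringConstructor. StringObject cases unwrap the
+ // wrapped string directly; cell/untyped cases pass strings through and call the
+ // matching operation for everything else; numeric use kinds call the radix-10
+ // string conversion operations.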
+ void compileToStringOrCallStringConstructor()
+ {
+ switch (m_node->child1().useKind()) {
+ case StringObjectUse: {
+ LValue cell = lowCell(m_node->child1());
+ speculateStringObjectForCell(m_node->child1(), cell);
+ m_interpreter.filter(m_node->child1(), SpecStringObject);
+
+ setJSValue(m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
+ return;
+ }
+
+ case StringOrStringObjectUse: {
+ LValue cell = lowCell(m_node->child1());
+ LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
+
+ LBasicBlock notString = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock simpleResult = m_out.anchor(cell);
+ m_out.branch(
+ m_out.equal(structureID, m_out.constInt32(vm().stringStructure->id())),
+ unsure(continuation), unsure(notString));
+
+ LBasicBlock lastNext = m_out.appendTo(notString, continuation);
+ speculateStringObjectForStructureID(m_node->child1(), structureID);
+ ValueFromBlock unboxedResult = m_out.anchor(
+ m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, simpleResult, unboxedResult));
+
+ m_interpreter.filter(m_node->child1(), SpecString | SpecStringObject);
+ return;
+ }
+
+ case CellUse:
+ case NotCellUse:
+ case UntypedUse: {
+ LValue value;
+ if (m_node->child1().useKind() == CellUse)
+ value = lowCell(m_node->child1());
+ else if (m_node->child1().useKind() == NotCellUse)
+ value = lowNotCell(m_node->child1());
+ else
+ value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCell = m_out.newBlock();
+ LBasicBlock notString = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue isCellPredicate;
+ if (m_node->child1().useKind() == CellUse)
+ isCellPredicate = m_out.booleanTrue;
+ else if (m_node->child1().useKind() == NotCellUse)
+ isCellPredicate = m_out.booleanFalse;
+ else
+ isCellPredicate = this->isCell(value, provenType(m_node->child1()));
+ m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));
+
+ LBasicBlock lastNext = m_out.appendTo(isCell, notString);
+ ValueFromBlock simpleResult = m_out.anchor(value);
+ LValue isStringPredicate;
+ if (m_node->child1()->prediction() & SpecString) {
+ isStringPredicate = isString(value, provenType(m_node->child1()));
+ } else
+ isStringPredicate = m_out.booleanFalse;
+ m_out.branch(isStringPredicate, unsure(continuation), unsure(notString));
+
+ m_out.appendTo(notString, continuation);
+ LValue operation;
+ if (m_node->child1().useKind() == CellUse)
+ operation = m_out.operation(m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell);
+ else
+ operation = m_out.operation(m_node->op() == ToString ? operationToString : operationCallStringConstructor);
+ ValueFromBlock convertedResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, simpleResult, convertedResult));
+ return;
+ }
+
+ case Int32Use:
+ setJSValue(vmCall(Int64, m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(10)));
+ return;
+
+ case Int52RepUse:
+ setJSValue(vmCall(Int64, m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(10)));
+ return;
+
+ case DoubleRepUse:
+ setJSValue(vmCall(Int64, m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(10)));
+ return;
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+ }
+
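+ // ToPrimitive: non-cells and non-object cells are already primitive and pass
+ // through unchanged; only objects call operationToPrimitive.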
+ void compileToPrimitive()
+ {
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock isObjectCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock, 3> results;
+
+ results.append(m_out.anchor(value));
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase);
+ results.append(m_out.anchor(value));
+ m_out.branch(
+ isObject(value, provenType(m_node->child1())),
+ unsure(isObjectCase), unsure(continuation));
+
+ m_out.appendTo(isObjectCase, continuation);
+ results.append(m_out.anchor(vmCall(
+ Int64, m_out.operation(operationToPrimitive), m_callFrame, value)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, results));
+ }
+
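+ // MakeRope allocates a JSRopeString inline, stores two or three fibers, ands the
+ // children's flags to compute Is8Bit, and sums their lengths with an overflow
+ // speculation. The slow path calls operationMakeRope2/operationMakeRope3.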
+ void compileMakeRope()
+ {
+ LValue kids[3];
+ unsigned numKids;
+ kids[0] = lowCell(m_node->child1());
+ kids[1] = lowCell(m_node->child2());
+ if (m_node->child3()) {
+ kids[2] = lowCell(m_node->child3());
+ numKids = 3;
+ } else {
+ kids[2] = 0;
+ numKids = 2;
+ }
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ MarkedAllocator* allocator = subspaceFor<JSRopeString>(vm())->allocatorFor(sizeof(JSRopeString));
+ DFG_ASSERT(m_graph, m_node, allocator);
+
+ LValue result = allocateCell(
+ m_out.constIntPtr(allocator), vm().stringStructure.get(), slowPath);
+
+ m_out.storePtr(m_out.intPtrZero, result, m_heaps.JSString_value);
+ for (unsigned i = 0; i < numKids; ++i)
+ m_out.storePtr(kids[i], result, m_heaps.JSRopeString_fibers[i]);
+ for (unsigned i = numKids; i < JSRopeString::s_maxInternalRopeLength; ++i)
+ m_out.storePtr(m_out.intPtrZero, result, m_heaps.JSRopeString_fibers[i]);
+ LValue flags = m_out.load32(kids[0], m_heaps.JSString_flags);
+ LValue length = m_out.load32(kids[0], m_heaps.JSString_length);
+ for (unsigned i = 1; i < numKids; ++i) {
+ flags = m_out.bitAnd(flags, m_out.load32(kids[i], m_heaps.JSString_flags));
+ CheckValue* lengthCheck = m_out.speculateAdd(
+ length, m_out.load32(kids[i], m_heaps.JSString_length));
+ blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
+ length = lengthCheck;
+ }
+ m_out.store32(
+ m_out.bitAnd(m_out.constInt32(JSString::Is8Bit), flags),
+ result, m_heaps.JSString_flags);
+ m_out.store32(length, result, m_heaps.JSString_length);
+
+ mutatorFence();
+ ValueFromBlock fastResult = m_out.anchor(result);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ LValue slowResultValue;
+ switch (numKids) {
+ case 2:
+ slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(),
+ locations[2].directGPR());
+ }, kids[0], kids[1]);
+ break;
+ case 3:
+ slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(),
+ locations[2].directGPR(), locations[3].directGPR());
+ }, kids[0], kids[1], kids[2]);
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad number of children");
+ break;
+ }
+ ValueFromBlock slowResult = m_out.anchor(slowResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ }
+
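+ // StringCharAt: in-bounds 8-bit characters and 16-bit characters below 0x100 come
+ // from the single-character string table; larger 16-bit characters call
+ // operationSingleCharacterString. Out-of-bounds indices either trigger an OutOfBounds
+ // exit or, for out-of-bounds array modes, return undefined for non-negative indices
+ // when the String prototype chain is sane and otherwise call operationGetByValStringInt.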
+ void compileStringCharAt()
+ {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+
+ LBasicBlock fastPath = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(base, m_heaps.JSString_length)),
+ rarely(slowPath), usually(fastPath));
+
+ LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
+
+ LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
+
+ LBasicBlock is8Bit = m_out.newBlock();
+ LBasicBlock is16Bit = m_out.newBlock();
+ LBasicBlock bitsContinuation = m_out.newBlock();
+ LBasicBlock bigCharacter = m_out.newBlock();
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(is16Bit), unsure(is8Bit));
+
+ m_out.appendTo(is8Bit, is16Bit);
+
+ ValueFromBlock char8Bit = m_out.anchor(
+ m_out.load8ZeroExt32(m_out.baseIndex(
+ m_heaps.characters8, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2()))));
+ m_out.jump(bitsContinuation);
+
+ m_out.appendTo(is16Bit, bigCharacter);
+
+ LValue char16BitValue = m_out.load16ZeroExt32(
+ m_out.baseIndex(
+ m_heaps.characters16, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2())));
+ ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
+ m_out.branch(
+ m_out.aboveOrEqual(char16BitValue, m_out.constInt32(0x100)),
+ rarely(bigCharacter), usually(bitsContinuation));
+
+ m_out.appendTo(bigCharacter, bitsContinuation);
+
+ Vector<ValueFromBlock, 4> results;
+ results.append(m_out.anchor(vmCall(
+ Int64, m_out.operation(operationSingleCharacterString),
+ m_callFrame, char16BitValue)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(bitsContinuation, slowPath);
+
+ LValue character = m_out.phi(Int32, char8Bit, char16Bit);
+
+ LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
+
+ results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
+ m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ if (m_node->arrayMode().isInBounds()) {
+ speculate(OutOfBounds, noValue(), 0, m_out.booleanTrue);
+ results.append(m_out.anchor(m_out.intPtrZero));
+ } else {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ bool prototypeChainIsSane = false;
+ if (globalObject->stringPrototypeChainIsSane()) {
+ // FIXME: This could be captured using a Speculation mode that means
+ // "out-of-bounds loads return a trivial value", something like
+ // SaneChainOutOfBounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=144668
+
+ m_graph.watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
+ m_graph.watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
+
+ prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
+ }
+ if (prototypeChainIsSane) {
+ LBasicBlock negativeIndex = m_out.newBlock();
+
+ results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
+ m_out.branch(
+ m_out.lessThan(index, m_out.int32Zero),
+ rarely(negativeIndex), usually(continuation));
+
+ m_out.appendTo(negativeIndex, continuation);
+ }
+
+ results.append(m_out.anchor(vmCall(
+ Int64, m_out.operation(operationGetByValStringInt), m_callFrame, base, index)));
+ }
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, results));
+ }
+
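+ // StringCharCodeAt speculates that the index is in bounds and loads the 8-bit or
+ // 16-bit character code directly from the string's storage.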
+ void compileStringCharCodeAt()
+ {
+ LBasicBlock is8Bit = m_out.newBlock();
+ LBasicBlock is16Bit = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+
+ speculate(
+ Uncountable, noValue(), 0,
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(base, m_heaps.JSString_length)));
+
+ LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(is16Bit), unsure(is8Bit));
+
+ LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);
+
+ ValueFromBlock char8Bit = m_out.anchor(
+ m_out.load8ZeroExt32(m_out.baseIndex(
+ m_heaps.characters8, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2()))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(is16Bit, continuation);
+
+ ValueFromBlock char16Bit = m_out.anchor(
+ m_out.load16ZeroExt32(m_out.baseIndex(
+ m_heaps.characters16, storage, m_out.zeroExtPtr(index),
+ provenValue(m_node->child2()))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ setInt32(m_out.phi(Int32, char8Bit, char16Bit));
+ }
+
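+ // StringFromCharCode: small Int32 char codes are looked up in the VM's
+ // single-character string table; larger codes and UntypedUse arguments go through
+ // the operation calls.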
+ void compileStringFromCharCode()
+ {
+ Edge childEdge = m_node->child1();
+
+ if (childEdge.useKind() == UntypedUse) {
+ LValue result = vmCall(
+ Int64, m_out.operation(operationStringFromCharCodeUntyped), m_callFrame,
+ lowJSValue(childEdge));
+ setJSValue(result);
+ return;
+ }
+
+ DFG_ASSERT(m_graph, m_node, childEdge.useKind() == Int32Use);
+
+ LValue value = lowInt32(childEdge);
+
+ LBasicBlock smallIntCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.aboveOrEqual(value, m_out.constInt32(0xff)),
+ rarely(slowCase), usually(smallIntCase));
+
+ LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase);
+
+ LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
+ LValue fastResultValue = m_out.loadPtr(
+ m_out.baseIndex(m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(value)));
+ ValueFromBlock fastResult = m_out.anchor(fastResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowCase, continuation);
+
+ LValue slowResultValue = vmCall(
+ pointerType(), m_out.operation(operationStringFromCharCode), m_callFrame, value);
+ ValueFromBlock slowResult = m_out.anchor(slowResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ setJSValue(m_out.phi(Int64, fastResult, slowResult));
+ }
+
+ void compileGetByOffset()
+ {
+ StorageAccessData& data = m_node->storageAccessData();
+
+ setJSValue(loadProperty(
+ lowStorage(m_node->child1()), data.identifierNumber, data.offset));
+ }
+
+ void compileGetGetter()
+ {
+ setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter));
+ }
+
+ void compileGetSetter()
+ {
+ setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter));
+ }
+
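+ // MultiGetByOffset switches on the structure ID. Each case either materializes a
+ // constant or loads the property from the base or a prototype, inline or from the
+ // butterfly. The default case exits with BadCache unless the proven structure set
+ // already covers every case.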
+ void compileMultiGetByOffset()
+ {
+ LValue base = lowCell(m_node->child1());
+
+ MultiGetByOffsetData& data = m_node->multiGetByOffsetData();
+
+ if (data.cases.isEmpty()) {
+ // Protect against creating a Phi function with zero inputs. LLVM didn't like that.
+ // It's not clear if this is needed anymore.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=154382
+ terminate(BadCache);
+ return;
+ }
+
+ Vector<LBasicBlock, 2> blocks(data.cases.size());
+ for (unsigned i = data.cases.size(); i--;)
+ blocks[i] = m_out.newBlock();
+ LBasicBlock exit = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<SwitchCase, 2> cases;
+ RegisteredStructureSet baseSet;
+ for (unsigned i = data.cases.size(); i--;) {
+ MultiGetByOffsetCase getCase = data.cases[i];
+ for (unsigned j = getCase.set().size(); j--;) {
+ RegisteredStructure structure = getCase.set()[j];
+ baseSet.add(structure);
+ cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
+ }
+ }
+ m_out.switchInstruction(
+ m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
+
+ LBasicBlock lastNext = m_out.m_nextBlock;
+
+ Vector<ValueFromBlock, 2> results;
+ for (unsigned i = data.cases.size(); i--;) {
+ MultiGetByOffsetCase getCase = data.cases[i];
+ GetByOffsetMethod method = getCase.method();
+
+ m_out.appendTo(blocks[i], i + 1 < data.cases.size() ? blocks[i + 1] : exit);
+
+ LValue result;
+
+ switch (method.kind()) {
+ case GetByOffsetMethod::Invalid:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+
+ case GetByOffsetMethod::Constant:
+ result = m_out.constInt64(JSValue::encode(method.constant()->value()));
+ break;
+
+ case GetByOffsetMethod::Load:
+ case GetByOffsetMethod::LoadFromPrototype: {
+ LValue propertyBase;
+ if (method.kind() == GetByOffsetMethod::Load)
+ propertyBase = base;
+ else
+ propertyBase = weakPointer(method.prototype()->value().asCell());
+ if (!isInlineOffset(method.offset()))
+ propertyBase = m_out.loadPtr(propertyBase, m_heaps.JSObject_butterfly);
+ result = loadProperty(
+ propertyBase, data.identifierNumber, method.offset());
+ break;
+ } }
+
+ results.append(m_out.anchor(result));
+ m_out.jump(continuation);
+ }
+
+ m_out.appendTo(exit, continuation);
+ if (!m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet))
+ speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
+ m_out.unreachable();
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, results));
+ }
+
+ void compilePutByOffset()
+ {
+ StorageAccessData& data = m_node->storageAccessData();
+
+ storeProperty(
+ lowJSValue(m_node->child3()),
+ lowStorage(m_node->child1()), data.identifierNumber, data.offset);
+ }
+
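+ // MultiPutByOffset switches on the structure ID. Replace variants store into the
+ // inline or out-of-line storage; Transition variants allocate new storage if needed
+ // and then install the new structure ID. The default case exits with BadCache unless
+ // the proven structure set already covers every variant.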
+ void compileMultiPutByOffset()
+ {
+ LValue base = lowCell(m_node->child1());
+ LValue value = lowJSValue(m_node->child2());
+
+ MultiPutByOffsetData& data = m_node->multiPutByOffsetData();
+
+ Vector<LBasicBlock, 2> blocks(data.variants.size());
+ for (unsigned i = data.variants.size(); i--;)
+ blocks[i] = m_out.newBlock();
+ LBasicBlock exit = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<SwitchCase, 2> cases;
+ RegisteredStructureSet baseSet;
+ for (unsigned i = data.variants.size(); i--;) {
+ PutByIdVariant variant = data.variants[i];
+ for (unsigned j = variant.oldStructure().size(); j--;) {
+ RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure()[j]);
+ baseSet.add(structure);
+ cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
+ }
+ }
+ m_out.switchInstruction(
+ m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
+
+ LBasicBlock lastNext = m_out.m_nextBlock;
+
+ for (unsigned i = data.variants.size(); i--;) {
+ m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);
+
+ PutByIdVariant variant = data.variants[i];
+
+ checkInferredType(m_node->child2(), value, variant.requiredType());
+
+ LValue storage;
+ if (variant.kind() == PutByIdVariant::Replace) {
+ if (isInlineOffset(variant.offset()))
+ storage = base;
+ else
+ storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
+ } else {
+ DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition);
+ m_graph.m_plan.transitions.addLazily(
+ codeBlock(), m_node->origin.semantic.codeOriginOwner(),
+ variant.oldStructureForTransition(), variant.newStructure());
+
+ storage = storageForTransition(
+ base, variant.offset(),
+ variant.oldStructureForTransition(), variant.newStructure());
+ }
+
+ storeProperty(value, storage, data.identifierNumber, variant.offset());
+
+ if (variant.kind() == PutByIdVariant::Transition) {
+ ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType());
+ ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
+ ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
+ m_out.store32(
+ weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);
+ }
+
+ m_out.jump(continuation);
+ }
+
+ m_out.appendTo(exit, continuation);
+ if (!m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet))
+ speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
+ m_out.unreachable();
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void compileGetGlobalVariable()
+ {
+ setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer())));
+ }
+
+ void compilePutGlobalVariable()
+ {
+ m_out.store64(
+ lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer()));
+ }
+
+ void compileNotifyWrite()
+ {
+ WatchpointSet* set = m_node->watchpointSet();
+
+ LBasicBlock isNotInvalidated = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue state = m_out.load8ZeroExt32(m_out.absolute(set->addressOfState()));
+ m_out.branch(
+ m_out.equal(state, m_out.constInt32(IsInvalidated)),
+ usually(continuation), rarely(isNotInvalidated));
+
+ LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);
+
+ lazySlowPath(
+ [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set));
+ });
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void compileGetCallee()
+ {
+ setJSValue(m_out.loadPtr(addressFor(CallFrameSlot::callee)));
+ }
+
+ void compileGetArgumentCountIncludingThis()
+ {
+ setInt32(m_out.load32(payloadFor(CallFrameSlot::argumentCount)));
+ }
+
+ void compileGetScope()
+ {
+ setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
+ }
+
+ void compileSkipScope()
+ {
+ setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
+ }
+
+ void compileGetGlobalObject()
+ {
+ LValue structure = loadStructure(lowCell(m_node->child1()));
+ setJSValue(m_out.loadPtr(structure, m_heaps.Structure_globalObject));
+ }
+
+ void compileGetClosureVar()
+ {
+ setJSValue(
+ m_out.load64(
+ lowCell(m_node->child1()),
+ m_heaps.JSEnvironmentRecord_variables[m_node->scopeOffset().offset()]));
+ }
+
+ void compilePutClosureVar()
+ {
+ m_out.store64(
+ lowJSValue(m_node->child2()),
+ lowCell(m_node->child1()),
+ m_heaps.JSEnvironmentRecord_variables[m_node->scopeOffset().offset()]);
+ }
+
+ void compileGetFromArguments()
+ {
+ setJSValue(
+ m_out.load64(
+ lowCell(m_node->child1()),
+ m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]));
+ }
+
+ void compilePutToArguments()
+ {
+ m_out.store64(
+ lowJSValue(m_node->child2()),
+ lowCell(m_node->child1()),
+ m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]);
+ }
+
+ void compileGetArgument()
+ {
+ LValue argumentCount = m_out.load32(payloadFor(AssemblyHelpers::argumentCount(m_node->origin.semantic)));
+
+ LBasicBlock inBounds = m_out.newBlock();
+ LBasicBlock outOfBounds = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(m_out.lessThanOrEqual(argumentCount, m_out.constInt32(m_node->argumentIndex())), unsure(outOfBounds), unsure(inBounds));
+
+ LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
+ VirtualRegister arg = AssemblyHelpers::argumentsStart(m_node->origin.semantic) + m_node->argumentIndex() - 1;
+ ValueFromBlock inBoundsResult = m_out.anchor(m_out.load64(addressFor(arg)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(outOfBounds, continuation);
+ ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueUndefined));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
+ }
+
+ void compileCompareEq()
+ {
+ if (m_node->isBinaryUseKind(Int32Use)
+ || m_node->isBinaryUseKind(Int52RepUse)
+ || m_node->isBinaryUseKind(DoubleRepUse)
+ || m_node->isBinaryUseKind(ObjectUse)
+ || m_node->isBinaryUseKind(BooleanUse)
+ || m_node->isBinaryUseKind(SymbolUse)
+ || m_node->isBinaryUseKind(StringIdentUse)
+ || m_node->isBinaryUseKind(StringUse)) {
+ compileCompareStrictEq();
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
+ compareEqObjectOrOtherToObject(m_node->child2(), m_node->child1());
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
+ compareEqObjectOrOtherToObject(m_node->child1(), m_node->child2());
+ return;
+ }
+
+ if (m_node->child1().useKind() == OtherUse) {
+ ASSERT(!m_interpreter.needsTypeCheck(m_node->child1(), SpecOther));
+ setBoolean(equalNullOrUndefined(m_node->child2(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
+ return;
+ }
+
+ if (m_node->child2().useKind() == OtherUse) {
+ ASSERT(!m_interpreter.needsTypeCheck(m_node->child2(), SpecOther));
+ setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
+ return;
+ }
+
+ DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse));
+ nonSpeculativeCompare(
+ [&] (LValue left, LValue right) {
+ return m_out.equal(left, right);
+ },
+ operationCompareEq);
+ }
+
+ void compileCompareStrictEq()
+ {
+ if (m_node->isBinaryUseKind(Int32Use)) {
+ setBoolean(
+ m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(Int52RepUse)) {
+ Int52Kind kind;
+ LValue left = lowWhicheverInt52(m_node->child1(), kind);
+ LValue right = lowInt52(m_node->child2(), kind);
+ setBoolean(m_out.equal(left, right));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(DoubleRepUse)) {
+ setBoolean(
+ m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(StringIdentUse)) {
+ setBoolean(
+ m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2())));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(StringUse)) {
+ LValue left = lowCell(m_node->child1());
+ LValue right = lowCell(m_node->child2());
+
+ LBasicBlock notTriviallyEqualCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ speculateString(m_node->child1(), left);
+
+ ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(
+ m_out.equal(left, right), unsure(continuation), unsure(notTriviallyEqualCase));
+
+ LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);
+
+ speculateString(m_node->child2(), right);
+
+ ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, fastResult, slowResult));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) {
+ setBoolean(
+ m_out.equal(
+ lowNonNullObject(m_node->child1()),
+ lowJSValue(m_node->child2())));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) {
+ setBoolean(
+ m_out.equal(
+ lowNonNullObject(m_node->child2()),
+ lowJSValue(m_node->child1())));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(ObjectUse)) {
+ setBoolean(
+ m_out.equal(
+ lowNonNullObject(m_node->child1()),
+ lowNonNullObject(m_node->child2())));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(BooleanUse)) {
+ setBoolean(
+ m_out.equal(lowBoolean(m_node->child1()), lowBoolean(m_node->child2())));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(SymbolUse)) {
+ LValue leftSymbol = lowSymbol(m_node->child1());
+ LValue rightSymbol = lowSymbol(m_node->child2());
+ setBoolean(m_out.equal(leftSymbol, rightSymbol));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(SymbolUse, UntypedUse)
+ || m_node->isBinaryUseKind(UntypedUse, SymbolUse)) {
+ Edge symbolEdge = m_node->child1();
+ Edge untypedEdge = m_node->child2();
+ if (symbolEdge.useKind() != SymbolUse)
+ std::swap(symbolEdge, untypedEdge);
+
+ LValue leftSymbol = lowSymbol(symbolEdge);
+ LValue untypedValue = lowJSValue(untypedEdge);
+
+ setBoolean(m_out.equal(leftSymbol, untypedValue));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(MiscUse, UntypedUse)
+ || m_node->isBinaryUseKind(UntypedUse, MiscUse)) {
+ speculate(m_node->child1());
+ speculate(m_node->child2());
+ LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
+ LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
+ setBoolean(m_out.equal(left, right));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(StringIdentUse, NotStringVarUse)
+ || m_node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
+ Edge leftEdge = m_node->childFor(StringIdentUse);
+ Edge rightEdge = m_node->childFor(NotStringVarUse);
+
+ LValue left = lowStringIdent(leftEdge);
+ LValue rightValue = lowJSValue(rightEdge, ManualOperandSpeculation);
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock isStringCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isCell(rightValue, provenType(rightEdge)),
+ unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
+ ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isString(rightValue, provenType(rightEdge)),
+ unsure(isStringCase), unsure(continuation));
+
+ m_out.appendTo(isStringCase, continuation);
+ LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value);
+ speculateStringIdent(rightEdge, rightValue, right);
+ ValueFromBlock isStringResult = m_out.anchor(m_out.equal(left, right));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, notCellResult, notStringResult, isStringResult));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(StringUse, UntypedUse)) {
+ compileStringToUntypedStrictEquality(m_node->child1(), m_node->child2());
+ return;
+ }
+ if (m_node->isBinaryUseKind(UntypedUse, StringUse)) {
+ compileStringToUntypedStrictEquality(m_node->child2(), m_node->child1());
+ return;
+ }
+
+ DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse));
+ nonSpeculativeCompare(
+ [&] (LValue left, LValue right) {
+ return m_out.equal(left, right);
+ },
+ operationCompareStrictEq);
+ }
+
+ void compileStringToUntypedStrictEquality(Edge stringEdge, Edge untypedEdge)
+ {
+ ASSERT(stringEdge.useKind() == StringUse);
+ ASSERT(untypedEdge.useKind() == UntypedUse);
+
+ LValue leftString = lowCell(stringEdge);
+ LValue rightValue = lowJSValue(untypedEdge);
+ SpeculatedType rightValueType = provenType(untypedEdge);
+
+ // Verify that left is a string.
+ speculateString(stringEdge, leftString);
+
+ LBasicBlock testUntypedEdgeIsCell = m_out.newBlock();
+ LBasicBlock testUntypedEdgeIsString = m_out.newBlock();
+ LBasicBlock testStringEquality = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ // Given that left is a string: if the values are strictly equal, rightValue has to be the same string.
+ ValueFromBlock fastTrue = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(m_out.equal(leftString, rightValue), unsure(continuation), unsure(testUntypedEdgeIsCell));
+
+ LBasicBlock lastNext = m_out.appendTo(testUntypedEdgeIsCell, testUntypedEdgeIsString);
+ ValueFromBlock fastFalse = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(isNotCell(rightValue, rightValueType), unsure(continuation), unsure(testUntypedEdgeIsString));
+
+ // Check if the untyped edge is a string.
+ m_out.appendTo(testUntypedEdgeIsString, testStringEquality);
+ m_out.branch(isNotString(rightValue, rightValueType), unsure(continuation), unsure(testStringEquality));
+
+ // Full String compare.
+ m_out.appendTo(testStringEquality, continuation);
+ ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue));
+ m_out.jump(continuation);
+
+ // Continuation.
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, fastTrue, fastFalse, slowResult));
+ }
+
+ void compileCompareEqPtr()
+ {
+ setBoolean(
+ m_out.equal(
+ lowJSValue(m_node->child1()),
+ weakPointer(m_node->cellOperand()->cell())));
+ }
+
+ void compileCompareLess()
+ {
+ compare(
+ [&] (LValue left, LValue right) {
+ return m_out.lessThan(left, right);
+ },
+ [&] (LValue left, LValue right) {
+ return m_out.doubleLessThan(left, right);
+ },
+ operationCompareStringImplLess,
+ operationCompareStringLess,
+ operationCompareLess);
+ }
+
+ void compileCompareLessEq()
+ {
+ compare(
+ [&] (LValue left, LValue right) {
+ return m_out.lessThanOrEqual(left, right);
+ },
+ [&] (LValue left, LValue right) {
+ return m_out.doubleLessThanOrEqual(left, right);
+ },
+ operationCompareStringImplLessEq,
+ operationCompareStringLessEq,
+ operationCompareLessEq);
+ }
+
+ void compileCompareGreater()
+ {
+ compare(
+ [&] (LValue left, LValue right) {
+ return m_out.greaterThan(left, right);
+ },
+ [&] (LValue left, LValue right) {
+ return m_out.doubleGreaterThan(left, right);
+ },
+ operationCompareStringImplGreater,
+ operationCompareStringGreater,
+ operationCompareGreater);
+ }
+
+ void compileCompareGreaterEq()
+ {
+ compare(
+ [&] (LValue left, LValue right) {
+ return m_out.greaterThanOrEqual(left, right);
+ },
+ [&] (LValue left, LValue right) {
+ return m_out.doubleGreaterThanOrEqual(left, right);
+ },
+ operationCompareStringImplGreaterEq,
+ operationCompareStringGreaterEq,
+ operationCompareGreaterEq);
+ }
+
+ void compileLogicalNot()
+ {
+ setBoolean(m_out.logicalNot(boolify(m_node->child1())));
+ }
+
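+ // Call/Construct are emitted as a B3 patchpoint: the arguments are pinned to their
+ // stack slots in the outgoing frame, the callee is pinned to regT0, and the generator
+ // emits the usual inline call IC (patchable callee check, near call, and a slow path
+ // through the link-call thunk).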
+ void compileCallOrConstruct()
+ {
+ Node* node = m_node;
+ unsigned numArgs = node->numChildren() - 1;
+
+ LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
+
+ unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
+ unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);
+
+ // JS->JS calling convention requires that the caller allows this much space on top of the
+ // stack to get trashed by the callee, even if not all of that space is used to pass arguments. We tell
+ // B3 this explicitly for two reasons:
+ //
+ // - We will only pass frameSize worth of stuff.
+ // - The trashed stack guarantee is logically separate from the act of passing arguments, so we
+ // shouldn't rely on Air to infer the trashed stack property based on the arguments it ends
+ // up seeing.
+ m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
+
+ // Collect the arguments, since this can generate code and we want to generate it before we emit
+ // the call.
+ Vector<ConstrainedValue> arguments;
+
+ // Make sure that the callee goes into GPR0 because that's where the slow path thunks expect the
+ // callee to be.
+ arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));
+
+ auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
+ intptr_t offsetFromSP =
+ (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
+ arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
+ };
+
+ addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
+ addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
+ for (unsigned i = 0; i < numArgs; ++i)
+ addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendVector(arguments);
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+
+ CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
+
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+
+ CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+
+ CCallHelpers::DataLabelPtr targetToCheck;
+ CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
+ CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
+ CCallHelpers::TrustedImmPtr(0));
+
+ CCallHelpers::Call fastCall = jit.nearCall();
+ CCallHelpers::Jump done = jit.jump();
+
+ slowPath.link(&jit);
+
+ jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
+ CCallHelpers::Call slowCall = jit.nearCall();
+ done.link(&jit);
+
+ callLinkInfo->setUpCall(
+ node->op() == Construct ? CallLinkInfo::Construct : CallLinkInfo::Call,
+ node->origin.semantic, GPRInfo::regT0);
+
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(-params.proc().frameSize()),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ MacroAssemblerCodePtr linkCall =
+ linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code();
+ linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress()));
+
+ callLinkInfo->setCallLocations(
+ CodeLocationLabel(linkBuffer.locationOfNearCall(slowCall)),
+ CodeLocationLabel(linkBuffer.locationOf(targetToCheck)),
+ linkBuffer.locationOfNearCall(fastCall));
+ });
+ });
+
+ setJSValue(patchpoint);
+ }
+
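+ // Direct calls know the callee executable at compile time, so there is no callee
+ // check. Non-tail calls pad the argument area with undefined up to numAllocatedArgs;
+ // DirectTailCall goes through the call frame shuffler. The slow path links the call
+ // via operationLinkDirectCall.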
+ void compileDirectCallOrConstruct()
+ {
+ Node* node = m_node;
+ bool isTail = node->op() == DirectTailCall;
+ bool isConstruct = node->op() == DirectConstruct;
+
+ ExecutableBase* executable = node->castOperand<ExecutableBase*>();
+ FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable);
+
+ unsigned numPassedArgs = node->numChildren() - 1;
+ unsigned numAllocatedArgs = numPassedArgs;
+
+ if (functionExecutable) {
+ numAllocatedArgs = std::max(
+ numAllocatedArgs,
+ std::min(
+ static_cast<unsigned>(functionExecutable->parameterCount()) + 1,
+ Options::maximumDirectCallStackSize()));
+ }
+
+ LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
+
+ if (!isTail) {
+ unsigned frameSize = (CallFrame::headerSizeInRegisters + numAllocatedArgs) * sizeof(EncodedJSValue);
+ unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);
+
+ m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
+ }
+
+ Vector<ConstrainedValue> arguments;
+
+ arguments.append(ConstrainedValue(jsCallee, ValueRep::SomeRegister));
+ if (!isTail) {
+ auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
+ intptr_t offsetFromSP =
+ (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
+ arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
+ };
+
+ addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
+ addArgument(m_out.constInt32(numPassedArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
+ for (unsigned i = 0; i < numPassedArgs; ++i)
+ addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
+ for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
+ addArgument(m_out.constInt64(JSValue::encode(jsUndefined())), virtualRegisterForArgument(i), 0);
+ } else {
+ for (unsigned i = 0; i < numPassedArgs; ++i)
+ arguments.append(ConstrainedValue(lowJSValue(m_graph.varArgChild(node, 1 + i)), ValueRep::WarmAny));
+ }
+
+ PatchpointValue* patchpoint = m_out.patchpoint(isTail ? Void : Int64);
+ patchpoint->appendVector(arguments);
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
+
+ if (isTail) {
+ // The shuffler needs tags.
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+ }
+
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ if (!isTail) {
+ patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+ }
+
+ CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
+
+ GPRReg calleeGPR = params[!isTail].gpr();
+
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ if (isTail) {
+ CallFrameShuffleData shuffleData;
+ shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
+
+ RegisterSet toSave = params.unavailableRegisters();
+ shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatCell);
+ toSave.set(calleeGPR);
+ for (unsigned i = 0; i < numPassedArgs; ++i) {
+ ValueRecovery recovery = params[1 + i].recoveryForJSValue();
+ shuffleData.args.append(recovery);
+ recovery.forEachReg(
+ [&] (Reg reg) {
+ toSave.set(reg);
+ });
+ }
+ for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
+ shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
+ shuffleData.numPassedArgs = numPassedArgs;
+ shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
+
+ CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+
+ CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
+ CCallHelpers::Label mainPath = jit.label();
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+
+ callLinkInfo->setFrameShuffleData(shuffleData);
+ CallFrameShuffler(jit, shuffleData).prepareForTailCall();
+
+ CCallHelpers::Call call = jit.nearTailCall();
+
+ jit.abortWithReason(JITDidReturnFromTailCall);
+
+ CCallHelpers::Label slowPath = jit.label();
+ patchableJump.m_jump.linkTo(slowPath, &jit);
+ callOperation(
+ *state, toSave, jit,
+ node->origin.semantic, exceptions.get(), operationLinkDirectCall,
+ InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo), calleeGPR).call();
+ jit.jump().linkTo(mainPath, &jit);
+
+ callLinkInfo->setUpCall(
+ CallLinkInfo::DirectTailCall, node->origin.semantic, InvalidGPRReg);
+ callLinkInfo->setExecutableDuringCompilation(executable);
+ if (numAllocatedArgs > numPassedArgs)
+ callLinkInfo->setMaxNumArguments(numAllocatedArgs);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ CodeLocationLabel patchableJumpLocation = linkBuffer.locationOf(patchableJump);
+ CodeLocationNearCall callLocation = linkBuffer.locationOfNearCall(call);
+ CodeLocationLabel slowPathLocation = linkBuffer.locationOf(slowPath);
+
+ callLinkInfo->setCallLocations(
+ patchableJumpLocation,
+ slowPathLocation,
+ callLocation);
+ });
+ return;
+ }
+
+ CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+
+ CCallHelpers::Label mainPath = jit.label();
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+
+ CCallHelpers::Call call = jit.nearCall();
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(-params.proc().frameSize()),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ callLinkInfo->setUpCall(
+ isConstruct ? CallLinkInfo::DirectConstruct : CallLinkInfo::DirectCall,
+ node->origin.semantic, InvalidGPRReg);
+ callLinkInfo->setExecutableDuringCompilation(executable);
+ if (numAllocatedArgs > numPassedArgs)
+ callLinkInfo->setMaxNumArguments(numAllocatedArgs);
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ CCallHelpers::Label slowPath = jit.label();
+ if (isX86())
+ jit.pop(CCallHelpers::selectScratchGPR(calleeGPR));
+
+ callOperation(
+ *state, params.unavailableRegisters(), jit,
+ node->origin.semantic, exceptions.get(), operationLinkDirectCall,
+ InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo),
+ calleeGPR).call();
+ jit.jump().linkTo(mainPath, &jit);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ CodeLocationNearCall callLocation = linkBuffer.locationOfNearCall(call);
+ CodeLocationLabel slowPathLocation = linkBuffer.locationOf(slowPath);
+
+ linkBuffer.link(call, slowPathLocation);
+
+ callLinkInfo->setCallLocations(
+ CodeLocationLabel(),
+ slowPathLocation,
+ callLocation);
+ });
+ });
+ });
+
+ if (isTail)
+ patchpoint->effects.terminal = true;
+ else
+ setJSValue(patchpoint);
+ }
+
+ void compileTailCall()
+ {
+ Node* node = m_node;
+ unsigned numArgs = node->numChildren() - 1;
+
+ LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
+
+ // We want B3 to give us all of the arguments using whatever mechanism it thinks is
+ // convenient. The generator then shuffles those arguments into our own call frame,
+ // destroying our frame in the process.
+
+ // Note that we don't have to do anything special for exceptions. A tail call is only a
+ // tail call if it is not inside a try block.
+
+ Vector<ConstrainedValue> arguments;
+
+ arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));
+
+ for (unsigned i = 0; i < numArgs; ++i) {
+ // Note: we could let the shuffler do boxing for us, but it's not super clear that this
+ // would be better. Also, if we wanted to do that, then we'd have to teach the shuffler
+ // that 32-bit values could land at 4-byte alignment but not 8-byte alignment.
+
+ ConstrainedValue constrainedValue(
+ lowJSValue(m_graph.varArgChild(node, 1 + i)),
+ ValueRep::WarmAny);
+ arguments.append(constrainedValue);
+ }
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Void);
+ patchpoint->appendVector(arguments);
+
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+
+ // Prevent any of the arguments from using the scratch register.
+ patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());
+
+ patchpoint->effects.terminal = true;
+
+ // We don't have to tell the patchpoint that we will clobber registers, since we won't return
+ // anyway.
+
+ CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
+
+ // Yes, this is really necessary. You could throw an exception in a host call on the
+ // slow path. That'll route us to lookupExceptionHandler(), which unwinds starting
+ // with the call site index of our frame. Bad things happen if it's not set.
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+
+ CallFrameShuffleData shuffleData;
+ shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
+ shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS);
+
+ for (unsigned i = 0; i < numArgs; ++i)
+ shuffleData.args.append(params[1 + i].recoveryForJSValue());
+
+ shuffleData.numPassedArgs = numArgs;
+
+ shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
+
+ CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+
+ CCallHelpers::DataLabelPtr targetToCheck;
+ CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
+ CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
+ CCallHelpers::TrustedImmPtr(nullptr));
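+ // Call inline cache: the patchable comparand starts out null, so this branch initially
+ // always takes the slow path; linking repatches it to check for the expected callee.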
+
+ callLinkInfo->setFrameShuffleData(shuffleData);
+ CallFrameShuffler(jit, shuffleData).prepareForTailCall();
+
+ CCallHelpers::Call fastCall = jit.nearTailCall();
+
+ slowPath.link(&jit);
+
+ CallFrameShuffler slowPathShuffler(jit, shuffleData);
+ slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
+ slowPathShuffler.prepareForSlowPath();
+
+ jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
+ CCallHelpers::Call slowCall = jit.nearCall();
+
+ jit.abortWithReason(JITDidReturnFromTailCall);
+
+ callLinkInfo->setUpCall(CallLinkInfo::TailCall, codeOrigin, GPRInfo::regT0);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ MacroAssemblerCodePtr linkCall =
+ linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code();
+ linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress()));
+
+ callLinkInfo->setCallLocations(
+ CodeLocationLabel(linkBuffer.locationOfNearCall(slowCall)),
+ CodeLocationLabel(linkBuffer.locationOf(targetToCheck)),
+ linkBuffer.locationOfNearCall(fastCall));
+ });
+ });
+ }
+
+ void compileCallOrConstructVarargsSpread()
+ {
+ Node* node = m_node;
+ LValue jsCallee = lowJSValue(m_node->child1());
+ LValue thisArg = lowJSValue(m_node->child2());
+
+ RELEASE_ASSERT(node->child3()->op() == PhantomNewArrayWithSpread);
+ Node* arrayWithSpread = node->child3().node();
+ BitVector* bitVector = arrayWithSpread->bitVector();
+ unsigned numNonSpreadParameters = 0;
+ Vector<LValue, 2> spreadLengths;
+ Vector<LValue, 8> patchpointArguments;
+ HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
+
+ for (unsigned i = 0; i < arrayWithSpread->numChildren(); i++) {
+ if (bitVector->get(i)) {
+ Node* spread = m_graph.varArgChild(arrayWithSpread, i).node();
+ RELEASE_ASSERT(spread->op() == PhantomSpread);
+ RELEASE_ASSERT(spread->child1()->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+ unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
+ LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
+ return m_out.zeroExtPtr(getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
+ }).iterator->value;
+ patchpointArguments.append(length);
+ spreadLengths.append(length);
+ } else {
+ ++numNonSpreadParameters;
+ LValue argument = lowJSValue(m_graph.varArgChild(arrayWithSpread, i));
+ patchpointArguments.append(argument);
+ }
+ }
+
+ LValue argumentCountIncludingThis = m_out.constIntPtr(numNonSpreadParameters + 1);
+ for (LValue length : spreadLengths)
+ argumentCountIncludingThis = m_out.add(length, argumentCountIncludingThis);
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+
+ patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
+ patchpoint->append(thisArg, ValueRep::WarmAny);
+ patchpoint->append(argumentCountIncludingThis, ValueRep::WarmAny);
+ patchpoint->appendVectorWithRep(patchpointArguments, ValueRep::WarmAny);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
+
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->clobber(RegisterSet::volatileRegistersForJSCall()); // No inputs will be in a volatile register.
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+
+ patchpoint->numGPScratchRegisters = 0;
+
+ // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
+ unsigned minimumJSCallAreaSize =
+ sizeof(CallerFrameAndPC) +
+ WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
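+ // For example, assuming a typical 64-bit target (sizeof(CallerFrameAndPC) == 16, 16-byte
+ // stack alignment), this is 16 + 48 = 64 bytes (5 * 8 = 40 rounded up to alignment is 48).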
+
+ m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);
+
+ CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CallSiteIndex callSiteIndex =
+ state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+
+ CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+
+ RegisterSet usedRegisters = RegisterSet::allRegisters();
+ usedRegisters.exclude(RegisterSet::volatileRegistersForJSCall());
+ GPRReg calleeGPR = params[1].gpr();
+ usedRegisters.set(calleeGPR);
+
+ ScratchRegisterAllocator allocator(usedRegisters);
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+ GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+ GPRReg scratchGPR3 = allocator.allocateScratchGPR();
+ GPRReg scratchGPR4 = allocator.allocateScratchGPR();
+ RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
+
+ auto getValueFromRep = [&] (B3::ValueRep rep, GPRReg result) {
+ ASSERT(!usedRegisters.get(result));
+
+ if (rep.isConstant()) {
+ jit.move(CCallHelpers::Imm64(rep.value()), result);
+ return;
+ }
+
+ // Note: in this function, we only request 64 bit values.
+ if (rep.isStack()) {
+ jit.load64(
+ CCallHelpers::Address(GPRInfo::callFrameRegister, rep.offsetFromFP()),
+ result);
+ return;
+ }
+
+ RELEASE_ASSERT(rep.isGPR());
+ ASSERT(usedRegisters.get(rep.gpr()));
+ jit.move(rep.gpr(), result);
+ };
+
+ auto callWithExceptionCheck = [&] (void* callee) {
+ jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
+ jit.call(GPRInfo::nonPreservedNonArgumentGPR);
+ exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+ };
+
+ CCallHelpers::JumpList slowCase;
+ unsigned originalStackHeight = params.proc().frameSize();
+
+ {
+ unsigned numUsedSlots = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), originalStackHeight / sizeof(EncodedJSValue));
+ B3::ValueRep argumentCountIncludingThisRep = params[3];
+ getValueFromRep(argumentCountIncludingThisRep, scratchGPR2);
+ slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR2, CCallHelpers::TrustedImm32(JSC::maxArguments + 1)));
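+ // If the dynamic argument count (including |this|) exceeds maxArguments, bail to the
+ // stack-overflow throwing path below.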
+
+ jit.move(scratchGPR2, scratchGPR1);
+ jit.addPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(numUsedSlots + CallFrame::headerSizeInRegisters)), scratchGPR1);
+ // scratchGPR1 now has the required frame size in Register units
+ // Round scratchGPR1 to next multiple of stackAlignmentRegisters()
+ jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), scratchGPR1);
+ jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), scratchGPR1);
+ jit.negPtr(scratchGPR1);
+ jit.lshiftPtr(CCallHelpers::Imm32(3), scratchGPR1);
+ jit.addPtr(GPRInfo::callFrameRegister, scratchGPR1);
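+ // scratchGPR1 now points to the start (lowest address) of the new frame:
+ // callFrameRegister minus the total frame size in bytes.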
+
+ // Before touching stack values, we should update the stack pointer so that a signal handler's use of the stack cannot clobber them.
+ jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR1, CCallHelpers::stackPointerRegister);
+
+ jit.store32(scratchGPR2, CCallHelpers::Address(scratchGPR1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));
+
+ int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));
+
+ for (unsigned i = arrayWithSpread->numChildren(); i--; ) {
+ unsigned paramsOffset = 4;
+
+ if (bitVector->get(i)) {
+ Node* spread = state->graph.varArgChild(arrayWithSpread, i).node();
+ RELEASE_ASSERT(spread->op() == PhantomSpread);
+ RELEASE_ASSERT(spread->child1()->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame;
+
+ unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
+
+ B3::ValueRep numArgumentsToCopy = params[paramsOffset + i];
+ getValueFromRep(numArgumentsToCopy, scratchGPR3);
+ int loadOffset = (AssemblyHelpers::argumentsStart(inlineCallFrame).offset() + numberOfArgumentsToSkip) * static_cast<int>(sizeof(Register));
+
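+ // Copy the spread arguments out of the inline call frame, walking from the last argument
+ // down; scratchGPR3 counts the source slots and scratchGPR2 tracks the destination slot.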
+ auto done = jit.branchTestPtr(MacroAssembler::Zero, scratchGPR3);
+ auto loopStart = jit.label();
+ jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR3);
+ jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
+ jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR3, CCallHelpers::TimesEight, loadOffset), scratchGPR4);
+ jit.store64(scratchGPR4,
+ CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
+ jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR3).linkTo(loopStart, &jit);
+ done.link(&jit);
+ } else {
+ jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
+ getValueFromRep(params[paramsOffset + i], scratchGPR3);
+ jit.store64(scratchGPR3,
+ CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
+ }
+ }
+ }
+
+ {
+ CCallHelpers::Jump dontThrow = jit.jump();
+ slowCase.link(&jit);
+ jit.setupArgumentsExecState();
+ callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
+ jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
+
+ dontThrow.link(&jit);
+ }
+
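+ // The argument area is now populated; install the callee and |this| into the new frame.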
+ ASSERT(calleeGPR == GPRInfo::regT0);
+ jit.store64(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
+ getValueFromRep(params[2], scratchGPR3);
+ jit.store64(scratchGPR3, CCallHelpers::calleeArgumentSlot(0));
+
+ CallLinkInfo::CallType callType;
+ if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
+ callType = CallLinkInfo::ConstructVarargs;
+ else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
+ callType = CallLinkInfo::TailCallVarargs;
+ else
+ callType = CallLinkInfo::CallVarargs;
+
+ bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
+
+ CCallHelpers::DataLabelPtr targetToCheck;
+ CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
+ CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
+ CCallHelpers::TrustedImmPtr(nullptr));
+
+ CCallHelpers::Call fastCall;
+ CCallHelpers::Jump done;
+
+ if (isTailCall) {
+ jit.emitRestoreCalleeSaves();
+ jit.prepareForTailCallSlow();
+ fastCall = jit.nearTailCall();
+ } else {
+ fastCall = jit.nearCall();
+ done = jit.jump();
+ }
+
+ slowPath.link(&jit);
+
+ if (isTailCall)
+ jit.emitRestoreCalleeSaves();
+ ASSERT(!usedRegisters.get(GPRInfo::regT2));
+ jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
+ CCallHelpers::Call slowCall = jit.nearCall();
+
+ if (isTailCall)
+ jit.abortWithReason(JITDidReturnFromTailCall);
+ else
+ done.link(&jit);
+
+ callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
+
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(-originalStackHeight),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ MacroAssemblerCodePtr linkCall =
+ linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code();
+ linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress()));
+
+ callLinkInfo->setCallLocations(
+ CodeLocationLabel(linkBuffer.locationOfNearCall(slowCall)),
+ CodeLocationLabel(linkBuffer.locationOf(targetToCheck)),
+ linkBuffer.locationOfNearCall(fastCall));
+ });
+ });
+
+ switch (node->op()) {
+ case TailCallForwardVarargs:
+ m_out.unreachable();
+ break;
+
+ default:
+ setJSValue(patchpoint);
+ break;
+ }
+ }
+
+ void compileCallOrConstructVarargs()
+ {
+ Node* node = m_node;
+ LValue jsCallee = lowJSValue(m_node->child1());
+ LValue thisArg = lowJSValue(m_node->child2());
+
+ LValue jsArguments = nullptr;
+ bool forwarding = false;
+
+ switch (node->op()) {
+ case CallVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
+ case ConstructVarargs:
+ jsArguments = lowJSValue(node->child3());
+ break;
+ case CallForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
+ case ConstructForwardVarargs:
+ forwarding = true;
+ break;
+ default:
+ DFG_CRASH(m_graph, node, "bad node type");
+ break;
+ }
+
+ if (forwarding && m_node->child3() && m_node->child3()->op() == PhantomNewArrayWithSpread) {
+ compileCallOrConstructVarargsSpread();
+ return;
+ }
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+
+ // Append the forms of the arguments that we will use before any clobbering happens.
+ patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
+ if (jsArguments)
+ patchpoint->appendSomeRegister(jsArguments);
+ patchpoint->appendSomeRegister(thisArg);
+
+ if (!forwarding) {
+ // Now append them again for after clobbering. Note that the compiler may ask us to use a
+ // different register for the late (post-clobbering) use of the value. This gives the
+ // compiler a chance to spill these values without having to burn any callee-saves.
+ patchpoint->append(jsCallee, ValueRep::LateColdAny);
+ patchpoint->append(jsArguments, ValueRep::LateColdAny);
+ patchpoint->append(thisArg, ValueRep::LateColdAny);
+ }
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+
+ // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
+ unsigned minimumJSCallAreaSize =
+ sizeof(CallerFrameAndPC) +
+ WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
+
+ m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);
+
+ CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CallSiteIndex callSiteIndex =
+ state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+
+ CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+ CallVarargsData* data = node->callVarargsData();
+
+ unsigned argIndex = 1;
+ GPRReg calleeGPR = params[argIndex++].gpr();
+ ASSERT(calleeGPR == GPRInfo::regT0);
+ GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg;
+ GPRReg thisGPR = params[argIndex++].gpr();
+
+ B3::ValueRep calleeLateRep;
+ B3::ValueRep argumentsLateRep;
+ B3::ValueRep thisLateRep;
+ if (!forwarding) {
+ // If we're not forwarding then we'll need callee, arguments, and this after we
+ // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique
+ // for this is to supply all of those operands as late uses in addition to
+ // specifying them as early uses. It's possible that the late use uses a spill
+ // while the early use uses a register, and it's possible for the late and early
+ // uses to use different registers. We do know that the late uses interfere with
+ // all volatile registers and so won't use those, but the early uses may use
+ // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it
+ // definitely will.
+ //
+ // Note that we have to be super careful with these. It's possible that these
+ // use a shuffling of the registers used for calleeGPR, argumentsGPR, and
+ // thisGPR. If that happens and we do for example:
+ //
+ // calleeLateRep.emitRestore(jit, calleeGPR);
+ // argumentsLateRep.emitRestore(jit, calleeGPR);
+ //
+ // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and
+ // argumentsLateRep.gpr() == calleeGPR.
+ //
+ // We do a variety of things to prevent this from happening. For example, we use
+ // argumentsLateRep before needing the other two and after we've already stopped
+ // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that
+ // the *LateReps cannot use volatile registers (so they cannot be regT0, so
+ // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()).
+ //
+ // An alternative would have been to just use early uses and early-clobber all
+ // volatile registers. But that would force callee, arguments, and this into
+ // callee-save registers even if we have to spill them. We don't want spilling to
+ // use up three callee-saves.
+ //
+ // TL;DR: The way we use LateReps here is dangerous and barely works but achieves
+ // some desirable performance properties, so don't mistake the cleverness for
+ // elegance.
+ calleeLateRep = params[argIndex++];
+ argumentsLateRep = params[argIndex++];
+ thisLateRep = params[argIndex++];
+ }
+
+ // Get some scratch registers.
+ RegisterSet usedRegisters;
+ usedRegisters.merge(RegisterSet::stackRegisters());
+ usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
+ usedRegisters.merge(RegisterSet::calleeSaveRegisters());
+ usedRegisters.set(calleeGPR);
+ if (argumentsGPR != InvalidGPRReg)
+ usedRegisters.set(argumentsGPR);
+ usedRegisters.set(thisGPR);
+ if (calleeLateRep.isReg())
+ usedRegisters.set(calleeLateRep.reg());
+ if (argumentsLateRep.isReg())
+ usedRegisters.set(argumentsLateRep.reg());
+ if (thisLateRep.isReg())
+ usedRegisters.set(thisLateRep.reg());
+ ScratchRegisterAllocator allocator(usedRegisters);
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+ GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+ GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg;
+ RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
+
+ auto callWithExceptionCheck = [&] (void* callee) {
+ jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
+ jit.call(GPRInfo::nonPreservedNonArgumentGPR);
+ exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+ };
+
+ unsigned originalStackHeight = params.proc().frameSize();
+
+ if (forwarding) {
+ jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
+
+ CCallHelpers::JumpList slowCase;
+ InlineCallFrame* inlineCallFrame;
+ if (node->child3())
+ inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame;
+ else
+ inlineCallFrame = node->origin.semantic.inlineCallFrame;
+
+ // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
+ emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
+
+ CCallHelpers::Jump done = jit.jump();
+ slowCase.link(&jit);
+ jit.setupArgumentsExecState();
+ callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
+ jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
+
+ done.link(&jit);
+ } else {
+ jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
+ jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
+ callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
+
+ jit.move(GPRInfo::returnValueGPR, scratchGPR1);
+ jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
+ argumentsLateRep.emitRestore(jit, argumentsGPR);
+ emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
+ jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister);
+ jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
+ callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
+
+ jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, CCallHelpers::stackPointerRegister);
+
+ calleeLateRep.emitRestore(jit, GPRInfo::regT0);
+
+ // This may not emit code if thisGPR was assigned a callee-save register. Also, we're
+ // guaranteed that thisGPR != GPRInfo::regT0 because regT0 interferes with it.
+ thisLateRep.emitRestore(jit, thisGPR);
+ }
+
+ jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
+ jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
+
+ CallLinkInfo::CallType callType;
+ if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
+ callType = CallLinkInfo::ConstructVarargs;
+ else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
+ callType = CallLinkInfo::TailCallVarargs;
+ else
+ callType = CallLinkInfo::CallVarargs;
+
+ bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
+
+ CCallHelpers::DataLabelPtr targetToCheck;
+ CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
+ CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
+ CCallHelpers::TrustedImmPtr(nullptr));
+
+ CCallHelpers::Call fastCall;
+ CCallHelpers::Jump done;
+
+ if (isTailCall) {
+ jit.emitRestoreCalleeSaves();
+ jit.prepareForTailCallSlow();
+ fastCall = jit.nearTailCall();
+ } else {
+ fastCall = jit.nearCall();
+ done = jit.jump();
+ }
+
+ slowPath.link(&jit);
+
+ if (isTailCall)
+ jit.emitRestoreCalleeSaves();
+ jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
+ CCallHelpers::Call slowCall = jit.nearCall();
+
+ if (isTailCall)
+ jit.abortWithReason(JITDidReturnFromTailCall);
+ else
+ done.link(&jit);
+
+ callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
+
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(-originalStackHeight),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ MacroAssemblerCodePtr linkCall =
+ linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code();
+ linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress()));
+
+ callLinkInfo->setCallLocations(
+ CodeLocationLabel(linkBuffer.locationOfNearCall(slowCall)),
+ CodeLocationLabel(linkBuffer.locationOf(targetToCheck)),
+ linkBuffer.locationOfNearCall(fastCall));
+ });
+ });
+
+ switch (node->op()) {
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ m_out.unreachable();
+ break;
+
+ default:
+ setJSValue(patchpoint);
+ break;
+ }
+ }
+
+ void compileCallEval()
+ {
+ Node* node = m_node;
+ unsigned numArgs = node->numChildren() - 1;
+
+ LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
+
+ unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
+ unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);
+
+ m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
+
+ Vector<ConstrainedValue> arguments;
+ arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));
+
+ auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
+ intptr_t offsetFromSP =
+ (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
+ arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
+ };
+
+ addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
+ addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
+ for (unsigned i = 0; i < numArgs; ++i)
+ addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendVector(arguments);
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
+
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
+ patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+
+ CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
+
+ Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
+
+ CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+ callLinkInfo->setUpCall(CallLinkInfo::Call, node->origin.semantic, GPRInfo::regT0);
+
+ jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), CCallHelpers::stackPointerRegister, GPRInfo::regT1);
+ jit.storePtr(GPRInfo::callFrameRegister, CCallHelpers::Address(GPRInfo::regT1, CallFrame::callerFrameOffset()));
+
+ // Now we need to make room for:
+ // - The caller frame and PC for a call to operationCallEval.
+ // - Potentially two arguments on the stack.
+ unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
+ requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
+ jit.subPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
+ jit.setupArgumentsWithExecState(GPRInfo::regT1);
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR);
+ jit.call(GPRInfo::nonPreservedNonArgumentGPR);
+ exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+
+ CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);
+
+ jit.addPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
+ jit.load64(CCallHelpers::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
+ jit.emitDumbVirtualCall(callLinkInfo);
+
+ done.link(&jit);
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(-params.proc().frameSize()),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+ });
+
+ setJSValue(patchpoint);
+ }
+
+ void compileLoadVarargs()
+ {
+ LoadVarargsData* data = m_node->loadVarargsData();
+ LValue jsArguments = lowJSValue(m_node->child1());
+
+ LValue length = vmCall(
+ Int32, m_out.operation(operationSizeOfVarargs), m_callFrame, jsArguments,
+ m_out.constInt32(data->offset));
+
+ // FIXME: There is a chance that we will call an effectful length property twice. This is safe
+ // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
+ // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
+ // past the sizing.
+ // https://bugs.webkit.org/show_bug.cgi?id=141448
+
+ LValue lengthIncludingThis = m_out.add(length, m_out.int32One);
+ speculate(
+ VarargsOverflow, noValue(), nullptr,
+ m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));
+
+ m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));
+
+ // FIXME: This computation is rather silly. If operationLoadVarargs just took a pointer instead
+ // of a VirtualRegister, we wouldn't have to do this.
+ // https://bugs.webkit.org/show_bug.cgi?id=141660
+ LValue machineStart = m_out.lShr(
+ m_out.sub(addressFor(data->machineStart.offset()).value(), m_callFrame),
+ m_out.constIntPtr(3));
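+ // machineStart is the virtual register index, recovered as (slot address - callFrame) / sizeof(Register).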
+
+ vmCall(
+ Void, m_out.operation(operationLoadVarargs), m_callFrame,
+ m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset),
+ length, m_out.constInt32(data->mandatoryMinimum));
+ }
+
+ void compileForwardVarargs()
+ {
+ if (m_node->child1() && m_node->child1()->op() == PhantomNewArrayWithSpread) {
+ compileForwardVarargsWithSpread();
+ return;
+ }
+
+ LoadVarargsData* data = m_node->loadVarargsData();
+ InlineCallFrame* inlineCallFrame;
+ if (m_node->child1())
+ inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
+ else
+ inlineCallFrame = m_node->origin.semantic.inlineCallFrame;
+
+ LValue length = nullptr;
+ LValue lengthIncludingThis = nullptr;
+ ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
+ if (argumentsLength.isKnown) {
+ unsigned knownLength = argumentsLength.known;
+ if (knownLength >= data->offset)
+ knownLength = knownLength - data->offset;
+ else
+ knownLength = 0;
+ length = m_out.constInt32(knownLength);
+ lengthIncludingThis = m_out.constInt32(knownLength + 1);
+ } else {
+ // We need to perform the same logical operation as the code above, but through dynamic operations.
+ if (!data->offset)
+ length = argumentsLength.value;
+ else {
+ LBasicBlock isLarger = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
+ m_out.branch(
+ m_out.above(argumentsLength.value, m_out.constInt32(data->offset)), unsure(isLarger), unsure(continuation));
+ LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
+ ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(data->offset)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ length = m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
+ }
+ lengthIncludingThis = m_out.add(length, m_out.constInt32(1));
+ }
+
+ speculate(
+ VarargsOverflow, noValue(), nullptr,
+ m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));
+
+ m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));
+
+ unsigned numberOfArgumentsToSkip = data->offset;
+ LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
+ LValue targetStart = addressFor(data->machineStart).value();
+
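+ // Two down-counting loops follow: the first fills slots [length, mandatoryMinimum) with
+ // undefined, and the second copies the actual arguments [0, length) from the source frame.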
+ LBasicBlock undefinedLoop = m_out.newBlock();
+ LBasicBlock mainLoopEntry = m_out.newBlock();
+ LBasicBlock mainLoop = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue lengthAsPtr = m_out.zeroExtPtr(length);
+ LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
+ ValueFromBlock loopBound = m_out.anchor(loopBoundValue);
+ m_out.branch(
+ m_out.above(loopBoundValue, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));
+
+ LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry);
+ LValue previousIndex = m_out.phi(pointerType(), loopBound);
+ LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
+ m_out.store64(
+ m_out.constInt64(JSValue::encode(jsUndefined())),
+ m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
+ ValueFromBlock nextIndex = m_out.anchor(currentIndex);
+ m_out.addIncomingToPhi(previousIndex, nextIndex);
+ m_out.branch(
+ m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));
+
+ m_out.appendTo(mainLoopEntry, mainLoop);
+ loopBound = m_out.anchor(lengthAsPtr);
+ m_out.branch(m_out.notNull(lengthAsPtr), unsure(mainLoop), unsure(continuation));
+
+ m_out.appendTo(mainLoop, continuation);
+ previousIndex = m_out.phi(pointerType(), loopBound);
+ currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
+ LValue value = m_out.load64(
+ m_out.baseIndex(m_heaps.variables, sourceStart, currentIndex));
+ m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
+ nextIndex = m_out.anchor(currentIndex);
+ m_out.addIncomingToPhi(previousIndex, nextIndex);
+ m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop));
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ LValue getSpreadLengthFromInlineCallFrame(InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip)
+ {
+ ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
+ if (argumentsLength.isKnown) {
+ unsigned knownLength = argumentsLength.known;
+ if (knownLength >= numberOfArgumentsToSkip)
+ knownLength = knownLength - numberOfArgumentsToSkip;
+ else
+ knownLength = 0;
+ return m_out.constInt32(knownLength);
+ }
+
+ // We need to perform the same logical operation as the code above, but through dynamic operations.
+ if (!numberOfArgumentsToSkip)
+ return argumentsLength.value;
+
+ LBasicBlock isLarger = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
+ m_out.branch(
+ m_out.above(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)), unsure(isLarger), unsure(continuation));
+ LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
+ ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
+ }
+
+ void compileForwardVarargsWithSpread()
+ {
+ HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
+
+ Node* arrayWithSpread = m_node->child1().node();
+ RELEASE_ASSERT(arrayWithSpread->op() == PhantomNewArrayWithSpread);
+ BitVector* bitVector = arrayWithSpread->bitVector();
+
+ unsigned numberOfStaticArguments = 0;
+ Vector<LValue, 2> spreadLengths;
+ for (unsigned i = 0; i < arrayWithSpread->numChildren(); i++) {
+ if (bitVector->get(i)) {
+ Node* child = m_graph.varArgChild(arrayWithSpread, i).node();
+ ASSERT(child->op() == PhantomSpread);
+ ASSERT(child->child1()->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = child->child1()->origin.semantic.inlineCallFrame;
+ LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
+ return getSpreadLengthFromInlineCallFrame(inlineCallFrame, child->child1()->numberOfArgumentsToSkip());
+ }).iterator->value;
+ spreadLengths.append(length);
+ } else
+ ++numberOfStaticArguments;
+ }
+
+ LValue lengthIncludingThis = m_out.constInt32(1 + numberOfStaticArguments);
+ for (LValue length : spreadLengths)
+ lengthIncludingThis = m_out.add(lengthIncludingThis, length);
+
+ LoadVarargsData* data = m_node->loadVarargsData();
+ speculate(
+ VarargsOverflow, noValue(), nullptr,
+ m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));
+
+ m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));
+
+ LValue targetStart = addressFor(data->machineStart).value();
+ LValue storeIndex = m_out.constIntPtr(0);
+ for (unsigned i = 0; i < arrayWithSpread->numChildren(); i++) {
+ if (bitVector->get(i)) {
+ Node* child = m_graph.varArgChild(arrayWithSpread, i).node();
+ RELEASE_ASSERT(child->op() == PhantomSpread);
+ RELEASE_ASSERT(child->child1()->op() == PhantomCreateRest);
+ InlineCallFrame* inlineCallFrame = child->child1()->origin.semantic.inlineCallFrame;
+
+ LValue sourceStart = getArgumentsStart(inlineCallFrame, child->child1()->numberOfArgumentsToSkip());
+ LValue spreadLength = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
+
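+ // Copy this spread's arguments: loadIndex walks the source from 0 upward, while storeIndex
+ // continues in the target area from wherever the previous children left off.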
+ LBasicBlock loop = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+ ValueFromBlock startLoadIndex = m_out.anchor(m_out.constIntPtr(0));
+ ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
+ ValueFromBlock startStoreIndexForEnd = m_out.anchor(storeIndex);
+
+ m_out.branch(m_out.isZero64(spreadLength), unsure(continuation), unsure(loop));
+
+ LBasicBlock lastNext = m_out.appendTo(loop, continuation);
+ LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
+ LValue loadIndex = m_out.phi(Int64, startLoadIndex);
+ LValue value = m_out.load64(
+ m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
+ m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
+ LValue nextLoadIndex = m_out.add(m_out.constIntPtr(1), loadIndex);
+ m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
+ LValue nextStoreIndex = m_out.add(m_out.constIntPtr(1), loopStoreIndex);
+ m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextStoreIndex));
+ ValueFromBlock loopStoreIndexForEnd = m_out.anchor(nextStoreIndex);
+ m_out.branch(m_out.below(nextLoadIndex, spreadLength), unsure(loop), unsure(continuation));
+
+ m_out.appendTo(continuation, lastNext);
+ storeIndex = m_out.phi(Int64, startStoreIndexForEnd, loopStoreIndexForEnd);
+ } else {
+ LValue value = lowJSValue(m_graph.varArgChild(arrayWithSpread, i));
+ m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
+ storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
+ }
+ }
+
+ LBasicBlock undefinedLoop = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
+ LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
+ m_out.branch(m_out.below(storeIndex, loopBoundValue),
+ unsure(undefinedLoop), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(undefinedLoop, continuation);
+ LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
+ m_out.store64(
+ m_out.constInt64(JSValue::encode(jsUndefined())),
+ m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
+ LValue nextIndex = m_out.add(loopStoreIndex, m_out.constIntPtr(1));
+ m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextIndex));
+ m_out.branch(
+ m_out.below(nextIndex, loopBoundValue), unsure(undefinedLoop), unsure(continuation));
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void compileJump()
+ {
+ m_out.jump(lowBlock(m_node->targetBlock()));
+ }
+
+ void compileBranch()
+ {
+ m_out.branch(
+ boolify(m_node->child1()),
+ WeightedTarget(
+ lowBlock(m_node->branchData()->taken.block),
+ m_node->branchData()->taken.count),
+ WeightedTarget(
+ lowBlock(m_node->branchData()->notTaken.block),
+ m_node->branchData()->notTaken.count));
+ }
+
+ void compileSwitch()
+ {
+ SwitchData* data = m_node->switchData();
+ switch (data->kind) {
+ case SwitchImm: {
+ Vector<ValueFromBlock, 2> intValues;
+ LBasicBlock switchOnInts = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts);
+
+ switch (m_node->child1().useKind()) {
+ case Int32Use: {
+ intValues.append(m_out.anchor(lowInt32(m_node->child1())));
+ m_out.jump(switchOnInts);
+ break;
+ }
+
+ case UntypedUse: {
+ LBasicBlock isInt = m_out.newBlock();
+ LBasicBlock isNotInt = m_out.newBlock();
+ LBasicBlock isDouble = m_out.newBlock();
+
+ LValue boxedValue = lowJSValue(m_node->child1());
+ m_out.branch(isNotInt32(boxedValue), unsure(isNotInt), unsure(isInt));
+
+ LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt);
+
+ intValues.append(m_out.anchor(unboxInt32(boxedValue)));
+ m_out.jump(switchOnInts);
+
+ m_out.appendTo(isNotInt, isDouble);
+ m_out.branch(
+ isCellOrMisc(boxedValue, provenType(m_node->child1())),
+ usually(lowBlock(data->fallThrough.block)), rarely(isDouble));
+
+ m_out.appendTo(isDouble, innerLastNext);
+ LValue doubleValue = unboxDouble(boxedValue);
+ LValue intInDouble = m_out.doubleToInt(doubleValue);
+ intValues.append(m_out.anchor(intInDouble));
+ m_out.branch(
+ m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue),
+ unsure(switchOnInts), unsure(lowBlock(data->fallThrough.block)));
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+
+ m_out.appendTo(switchOnInts, lastNext);
+ buildSwitch(data, Int32, m_out.phi(Int32, intValues));
+ return;
+ }
+
+ case SwitchChar: {
+ LValue stringValue;
+
+ // FIXME: We should use something other than unsure() for the branch weight
+ // of the fallThrough block. The main challenge is just that we have multiple
+ // branches to fallThrough but a single count, so we would need to divvy it up
+ // among the different lowered branches.
+ // https://bugs.webkit.org/show_bug.cgi?id=129082
+
+ switch (m_node->child1().useKind()) {
+ case StringUse: {
+ stringValue = lowString(m_node->child1());
+ break;
+ }
+
+ case UntypedUse: {
+ LValue unboxedValue = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock isStringCase = m_out.newBlock();
+
+ m_out.branch(
+ isNotCell(unboxedValue, provenType(m_node->child1())),
+ unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
+ LValue cellValue = unboxedValue;
+ m_out.branch(
+ isNotString(cellValue, provenType(m_node->child1())),
+ unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase));
+
+ m_out.appendTo(isStringCase, lastNext);
+ stringValue = cellValue;
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ break;
+ }
+
+ LBasicBlock lengthIs1 = m_out.newBlock();
+ LBasicBlock needResolution = m_out.newBlock();
+ LBasicBlock resolved = m_out.newBlock();
+ LBasicBlock is8Bit = m_out.newBlock();
+ LBasicBlock is16Bit = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
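+ // Only single-character strings can match a character switch; a rope (null value pointer)
+ // must be resolved before its character can be read.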
+ m_out.branch(
+ m_out.notEqual(
+ m_out.load32NonNegative(stringValue, m_heaps.JSString_length),
+ m_out.int32One),
+ unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1));
+
+ LBasicBlock lastNext = m_out.appendTo(lengthIs1, needResolution);
+ Vector<ValueFromBlock, 2> values;
+ LValue fastValue = m_out.loadPtr(stringValue, m_heaps.JSString_value);
+ values.append(m_out.anchor(fastValue));
+ m_out.branch(m_out.isNull(fastValue), rarely(needResolution), usually(resolved));
+
+ m_out.appendTo(needResolution, resolved);
+ values.append(m_out.anchor(
+ vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, stringValue)));
+ m_out.jump(resolved);
+
+ m_out.appendTo(resolved, is8Bit);
+ LValue value = m_out.phi(pointerType(), values);
+ LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
+ m_out.branch(
+ m_out.testNonZero32(
+ m_out.load32(value, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(is8Bit), unsure(is16Bit));
+
+ Vector<ValueFromBlock, 2> characters;
+ m_out.appendTo(is8Bit, is16Bit);
+ characters.append(m_out.anchor(m_out.load8ZeroExt32(characterData, m_heaps.characters8[0])));
+ m_out.jump(continuation);
+
+ m_out.appendTo(is16Bit, continuation);
+ characters.append(m_out.anchor(m_out.load16ZeroExt32(characterData, m_heaps.characters16[0])));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ buildSwitch(data, Int32, m_out.phi(Int32, characters));
+ return;
+ }
+
+ case SwitchString: {
+ switch (m_node->child1().useKind()) {
+ case StringIdentUse: {
+ LValue stringImpl = lowStringIdent(m_node->child1());
+
+ Vector<SwitchCase> cases;
+ for (unsigned i = 0; i < data->cases.size(); ++i) {
+ LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl());
+ LBasicBlock block = lowBlock(data->cases[i].target.block);
+ Weight weight = Weight(data->cases[i].target.count);
+ cases.append(SwitchCase(value, block, weight));
+ }
+
+ m_out.switchInstruction(
+ stringImpl, cases, lowBlock(data->fallThrough.block),
+ Weight(data->fallThrough.count));
+ return;
+ }
+
+ case StringUse: {
+ switchString(data, lowString(m_node->child1()));
+ return;
+ }
+
+ case UntypedUse: {
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellBlock = m_out.newBlock();
+ LBasicBlock isStringBlock = m_out.newBlock();
+
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())),
+ unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block)));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock);
+
+ m_out.branch(
+ isString(value, provenType(m_node->child1())),
+ unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block)));
+
+ m_out.appendTo(isStringBlock, lastNext);
+
+ switchString(data, value);
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return;
+ }
+ return;
+ }
+
+ case SwitchCell: {
+ LValue cell;
+ switch (m_node->child1().useKind()) {
+ case CellUse: {
+ cell = lowCell(m_node->child1());
+ break;
+ }
+
+ case UntypedUse: {
+ LValue value = lowJSValue(m_node->child1());
+ LBasicBlock cellCase = m_out.newBlock();
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())),
+ unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
+ m_out.appendTo(cellCase);
+ cell = value;
+ break;
+ }
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return;
+ }
+
+ buildSwitch(m_node->switchData(), pointerType(), cell);
+ return;
+ } }
+
+ DFG_CRASH(m_graph, m_node, "Bad switch kind");
+ }
+
+ void compileReturn()
+ {
+ m_out.ret(lowJSValue(m_node->child1()));
+ }
+
+ void compileForceOSRExit()
+ {
+ terminate(InadequateCoverage);
+ }
+
+ void compileThrow()
+ {
+ terminate(Uncountable);
+ }
+
+ void compileInvalidationPoint()
+ {
+ if (verboseCompilationEnabled())
+ dataLog(" Invalidation point with availability: ", availabilityMap(), "\n");
+
+ DFG_ASSERT(m_graph, m_node, m_origin.exitOK);
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Void);
+ OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr);
+ NodeOrigin origin = m_origin;
+ patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue()));
+
+ State* state = &m_ftlState;
+
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
+ // The MacroAssembler knows more about this than B3 does. The watchpointLabel() method
+ // will ensure that this is followed by a nop shadow, but only when this is actually
+ // necessary.
+ CCallHelpers::Label label = jit.watchpointLabel();
+
+ RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater(
+ *state, UncountableInvalidation, origin, params);
+
+ RefPtr<JITCode> jitCode = state->jitCode.get();
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ JumpReplacement jumpReplacement(
+ linkBuffer.locationOf(label),
+ linkBuffer.locationOf(handle->label));
+ jitCode->common.jumpReplacements.append(jumpReplacement);
+ });
+ });
+
+ // Set some obvious things.
+ patchpoint->effects.terminal = false;
+ patchpoint->effects.writesLocalState = false;
+ patchpoint->effects.readsLocalState = false;
+
+ // This is how we tell B3 about the possibility of jump replacement.
+ patchpoint->effects.exitsSideways = true;
+
+ // It's not possible for some prior branch to determine the safety of this operation. It's always
+ // fine to execute this on some path that wouldn't have originally executed it before
+ // optimization.
+ patchpoint->effects.controlDependent = false;
+
+ // If this falls through then it won't write anything.
+ patchpoint->effects.writes = HeapRange();
+
+ // When this abruptly terminates, it could read any heap location.
+ patchpoint->effects.reads = HeapRange::top();
+ }
+
+ void compileIsEmpty()
+ {
+ setBoolean(m_out.isZero64(lowJSValue(m_node->child1())));
+ }
+
+ void compileIsUndefined()
+ {
+ setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
+ }
+
+ void compileIsBoolean()
+ {
+ setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1())));
+ }
+
+ void compileIsNumber()
+ {
+ setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1())));
+ }
+
+ void compileIsCellWithType()
+ {
+ if (m_node->child1().useKind() == UntypedUse) {
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
+ ValueFromBlock cellResult = m_out.anchor(isCellWithType(value, m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, notCellResult, cellResult));
+ } else {
+ ASSERT(m_node->child1().useKind() == CellUse);
+ setBoolean(isCellWithType(lowCell(m_node->child1()), m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
+ }
+ }
+
+ void compileIsObject()
+ {
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
+ ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1())));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, notCellResult, cellResult));
+ }
+
+ LValue wangsInt64Hash(LValue input)
+ {
+ // key += ~(key << 32);
+ LValue key = input;
+ LValue temp = key;
+ temp = m_out.shl(temp, m_out.constInt32(32));
+ temp = m_out.bitNot(temp);
+ key = m_out.add(key, temp);
+ // key ^= (key >> 22);
+ temp = key;
+ temp = m_out.lShr(temp, m_out.constInt32(22));
+ key = m_out.bitXor(key, temp);
+ // key += ~(key << 13);
+ temp = key;
+ temp = m_out.shl(temp, m_out.constInt32(13));
+ temp = m_out.bitNot(temp);
+ key = m_out.add(key, temp);
+ // key ^= (key >> 8);
+ temp = key;
+ temp = m_out.lShr(temp, m_out.constInt32(8));
+ key = m_out.bitXor(key, temp);
+ // key += (key << 3);
+ temp = key;
+ temp = m_out.shl(temp, m_out.constInt32(3));
+ key = m_out.add(key, temp);
+ // key ^= (key >> 15);
+ temp = key;
+ temp = m_out.lShr(temp, m_out.constInt32(15));
+ key = m_out.bitXor(key, temp);
+ // key += ~(key << 27);
+ temp = key;
+ temp = m_out.shl(temp, m_out.constInt32(27));
+ temp = m_out.bitNot(temp);
+ key = m_out.add(key, temp);
+ // key ^= (key >> 31);
+ temp = key;
+ temp = m_out.lShr(temp, m_out.constInt32(31));
+ key = m_out.bitXor(key, temp);
+ key = m_out.castToInt32(key);
+
+ return key;
+ }
+
+ LValue mapHashString(LValue string)
+ {
+ LBasicBlock nonEmptyStringCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
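+ // Fast path: read the StringImpl's precomputed hash out of hashAndFlags. A rope (null impl)
+ // or a hash field that is still zero (not yet computed) falls back to operationMapHash.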
+ LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
+ m_out.branch(
+ m_out.equal(stringImpl, m_out.constIntPtr(0)), unsure(slowCase), unsure(nonEmptyStringCase));
+
+ LBasicBlock lastNext = m_out.appendTo(nonEmptyStringCase, slowCase);
+ LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
+ ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
+ m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
+ unsure(slowCase), unsure(continuation));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(
+ vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, string));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int32, slowResult, nonEmptyStringHashResult);
+ }
+
+ void compileMapHash()
+ {
+ switch (m_node->child1().useKind()) {
+ case BooleanUse:
+ case Int32Use:
+ case SymbolUse:
+ case ObjectUse: {
+ LValue key = lowJSValue(m_node->child1(), ManualOperandSpeculation);
+ speculate(m_node->child1());
+ setInt32(wangsInt64Hash(key));
+ return;
+ }
+
+ case CellUse: {
+ LBasicBlock isString = m_out.newBlock();
+ LBasicBlock notString = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue value = lowCell(m_node->child1());
+ LValue isStringValue = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
+ m_out.branch(
+ isStringValue, unsure(isString), unsure(notString));
+
+ LBasicBlock lastNext = m_out.appendTo(isString, notString);
+ ValueFromBlock stringResult = m_out.anchor(mapHashString(value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notString, continuation);
+ ValueFromBlock notStringResult = m_out.anchor(wangsInt64Hash(value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setInt32(m_out.phi(Int32, stringResult, notStringResult));
+ return;
+ }
+
+ case StringUse: {
+ LValue string = lowString(m_node->child1());
+ setInt32(mapHashString(string));
+ return;
+ }
+
+ default:
+ RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse);
+ break;
+ }
+
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock notCell = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock straightHash = m_out.newBlock();
+ LBasicBlock isNumberCase = m_out.newBlock();
+ LBasicBlock isStringCase = m_out.newBlock();
+ LBasicBlock nonEmptyStringCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(notCell));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
+ LValue isString = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
+ m_out.branch(
+ isString, unsure(isStringCase), unsure(straightHash));
+
+ m_out.appendTo(isStringCase, nonEmptyStringCase);
+ LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
+ m_out.branch(
+ m_out.equal(stringImpl, m_out.constIntPtr(0)), rarely(slowCase), usually(nonEmptyStringCase));
+
+ m_out.appendTo(nonEmptyStringCase, notCell);
+ LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
+ ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
+ m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
+ unsure(slowCase), unsure(continuation));
+
+ m_out.appendTo(notCell, isNumberCase);
+ m_out.branch(
+ isNumber(value), unsure(isNumberCase), unsure(straightHash));
+
+ m_out.appendTo(isNumberCase, straightHash);
+ m_out.branch(
+ isInt32(value), unsure(straightHash), unsure(slowCase));
+
+ m_out.appendTo(straightHash, slowCase);
+ ValueFromBlock fastResult = m_out.anchor(wangsInt64Hash(value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(
+ vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setInt32(m_out.phi(Int32, fastResult, slowResult, nonEmptyStringHashResult));
+ }
+
+ void compileGetMapBucket()
+ {
+ LBasicBlock loopStart = m_out.newBlock();
+ LBasicBlock loopAround = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock notPresentInTable = m_out.newBlock();
+ LBasicBlock notEmptyValue = m_out.newBlock();
+ LBasicBlock notDeletedValue = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);
+
+ LValue map;
+ if (m_node->child1().useKind() == MapObjectUse)
+ map = lowMapObject(m_node->child1());
+ else if (m_node->child1().useKind() == SetObjectUse)
+ map = lowSetObject(m_node->child1());
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+
+ LValue key = lowJSValue(m_node->child2(), ManualOperandSpeculation);
+ if (m_node->child2().useKind() != UntypedUse)
+ speculate(m_node->child2());
+
+ LValue hash = lowInt32(m_node->child3());
+
+ LValue hashMapImpl = m_out.loadPtr(map, m_node->child1().useKind() == MapObjectUse ? m_heaps.JSMap_hashMapImpl : m_heaps.JSSet_hashMapImpl);
+ LValue buffer = m_out.loadPtr(hashMapImpl, m_heaps.HashMapImpl_buffer);
+ LValue mask = m_out.sub(m_out.load32(hashMapImpl, m_heaps.HashMapImpl_capacity), m_out.int32One);
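+        // The table capacity is kept a power of two, so masking with (capacity - 1) wraps the
+        // probe index; the loop below performs linear probing starting from the key's hash.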
+
+ ValueFromBlock indexStart = m_out.anchor(hash);
+ m_out.jump(loopStart);
+
+ m_out.appendTo(loopStart, notEmptyValue);
+ LValue unmaskedIndex = m_out.phi(Int32, indexStart);
+ LValue index = m_out.bitAnd(mask, unmaskedIndex);
+ LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight));
+ ValueFromBlock bucketResult = m_out.anchor(hashMapBucket);
+ m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue()))),
+ unsure(notPresentInTable), unsure(notEmptyValue));
+
+ m_out.appendTo(notEmptyValue, notDeletedValue);
+ m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))),
+ unsure(loopAround), unsure(notDeletedValue));
+
+ m_out.appendTo(notDeletedValue, loopAround);
+ LValue bucketKey = m_out.load64(hashMapBucket, m_heaps.HashMapBucket_key);
+
+ // Perform Object.is()
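+        // Bit-equal keys match immediately. Pairs that might still compare equal despite
+        // differing bits (two strings, or numbers where either side is a double) defer to the
+        // slow path; everything else keeps probing.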
+ switch (m_node->child2().useKind()) {
+ case BooleanUse:
+ case Int32Use:
+ case SymbolUse:
+ case ObjectUse: {
+ m_out.branch(m_out.equal(key, bucketKey),
+ unsure(continuation), unsure(loopAround));
+ break;
+ }
+ case StringUse: {
+ LBasicBlock notBitEqual = m_out.newBlock();
+ LBasicBlock bucketKeyIsCell = m_out.newBlock();
+
+ m_out.branch(m_out.equal(key, bucketKey),
+ unsure(continuation), unsure(notBitEqual));
+
+ m_out.appendTo(notBitEqual, bucketKeyIsCell);
+ m_out.branch(isCell(bucketKey),
+ unsure(bucketKeyIsCell), unsure(loopAround));
+
+ m_out.appendTo(bucketKeyIsCell, loopAround);
+ m_out.branch(isString(bucketKey),
+ unsure(slowPath), unsure(loopAround));
+ break;
+ }
+ case CellUse: {
+ LBasicBlock notBitEqual = m_out.newBlock();
+ LBasicBlock bucketKeyIsCell = m_out.newBlock();
+ LBasicBlock bucketKeyIsString = m_out.newBlock();
+
+ m_out.branch(m_out.equal(key, bucketKey),
+ unsure(continuation), unsure(notBitEqual));
+
+ m_out.appendTo(notBitEqual, bucketKeyIsCell);
+ m_out.branch(isCell(bucketKey),
+ unsure(bucketKeyIsCell), unsure(loopAround));
+
+ m_out.appendTo(bucketKeyIsCell, bucketKeyIsString);
+ m_out.branch(isString(bucketKey),
+ unsure(bucketKeyIsString), unsure(loopAround));
+
+ m_out.appendTo(bucketKeyIsString, loopAround);
+ m_out.branch(isString(key),
+ unsure(slowPath), unsure(loopAround));
+ break;
+ }
+ case UntypedUse: {
+ LBasicBlock notBitEqual = m_out.newBlock();
+ LBasicBlock bucketKeyIsCell = m_out.newBlock();
+ LBasicBlock bothAreCells = m_out.newBlock();
+ LBasicBlock bucketKeyIsString = m_out.newBlock();
+ LBasicBlock bucketKeyNotCell = m_out.newBlock();
+ LBasicBlock bucketKeyIsNumber = m_out.newBlock();
+ LBasicBlock bothAreNumbers = m_out.newBlock();
+ LBasicBlock bucketKeyIsInt32 = m_out.newBlock();
+
+ m_out.branch(m_out.equal(key, bucketKey),
+ unsure(continuation), unsure(notBitEqual));
+
+ m_out.appendTo(notBitEqual, bucketKeyIsCell);
+ m_out.branch(isCell(bucketKey),
+ unsure(bucketKeyIsCell), unsure(bucketKeyNotCell));
+
+ m_out.appendTo(bucketKeyIsCell, bothAreCells);
+ m_out.branch(isCell(key),
+ unsure(bothAreCells), unsure(loopAround));
+
+ m_out.appendTo(bothAreCells, bucketKeyIsString);
+ m_out.branch(isString(bucketKey),
+ unsure(bucketKeyIsString), unsure(loopAround));
+
+ m_out.appendTo(bucketKeyIsString, bucketKeyNotCell);
+ m_out.branch(isString(key),
+ unsure(slowPath), unsure(loopAround));
+
+ m_out.appendTo(bucketKeyNotCell, bucketKeyIsNumber);
+ m_out.branch(isNotNumber(bucketKey),
+ unsure(loopAround), unsure(bucketKeyIsNumber));
+
+ m_out.appendTo(bucketKeyIsNumber, bothAreNumbers);
+ m_out.branch(isNotNumber(key),
+ unsure(loopAround), unsure(bothAreNumbers));
+
+ m_out.appendTo(bothAreNumbers, bucketKeyIsInt32);
+ m_out.branch(isNotInt32(bucketKey),
+ unsure(slowPath), unsure(bucketKeyIsInt32));
+
+ m_out.appendTo(bucketKeyIsInt32, loopAround);
+ m_out.branch(isNotInt32(key),
+ unsure(slowPath), unsure(loopAround));
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ m_out.appendTo(loopAround, slowPath);
+ m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
+ m_out.jump(loopStart);
+
+ m_out.appendTo(slowPath, notPresentInTable);
+ ValueFromBlock slowPathResult = m_out.anchor(vmCall(pointerType(),
+ m_out.operation(m_node->child1().useKind() == MapObjectUse ? operationJSMapFindBucket : operationJSSetFindBucket), m_callFrame, map, key, hash));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notPresentInTable, continuation);
+ ValueFromBlock notPresentResult = m_out.anchor(m_out.constIntPtr(0));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setMapBucket(m_out.phi(pointerType(), bucketResult, slowPathResult, notPresentResult));
+ }
+
+ void compileLoadFromJSMapBucket()
+ {
+ LValue mapBucket = lowMapBucket(m_node->child1());
+
+ LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock hasBucket = m_out.newBlock();
+
+ ValueFromBlock noBucketResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
+
+ m_out.branch(m_out.equal(mapBucket, m_out.constIntPtr(0)),
+ unsure(continuation), unsure(hasBucket));
+
+ LBasicBlock lastNext = m_out.appendTo(hasBucket, continuation);
+ ValueFromBlock bucketResult = m_out.anchor(m_out.load64(mapBucket, m_heaps.HashMapBucket_value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, noBucketResult, bucketResult));
+ }
+
+ void compileIsNonEmptyMapBucket()
+ {
+ LValue bucket = lowMapBucket(m_node->child1());
+ LValue result = m_out.notEqual(bucket, m_out.constIntPtr(0));
+ setBoolean(result);
+ }
+
+ void compileIsObjectOrNull()
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ Edge child = m_node->child1();
+ LValue value = lowJSValue(child);
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notFunctionCase = m_out.newBlock();
+ LBasicBlock objectCase = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
+ ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isFunction(value, provenType(child)),
+ unsure(continuation), unsure(notFunctionCase));
+
+ m_out.appendTo(notFunctionCase, objectCase);
+ ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isObject(value, provenType(child)),
+ unsure(objectCase), unsure(continuation));
+
+ m_out.appendTo(objectCase, slowPath);
+ ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(
+ isExoticForTypeof(value, provenType(child)),
+ rarely(slowPath), usually(continuation));
+
+ m_out.appendTo(slowPath, notCellCase);
+ LValue slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationObjectIsObject, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
+ }, value);
+ ValueFromBlock slowResult = m_out.anchor(m_out.notZero64(slowResultValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, continuation);
+ LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull())));
+ ValueFromBlock notCellResult = m_out.anchor(notCellResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.phi(
+ Int32,
+ isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult);
+ setBoolean(result);
+ }
+
+ void compileIsFunction()
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ Edge child = m_node->child1();
+ LValue value = lowJSValue(child);
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notFunctionCase = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isCell(value, provenType(child)), unsure(cellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
+ ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(
+ isFunction(value, provenType(child)),
+ unsure(continuation), unsure(notFunctionCase));
+
+ m_out.appendTo(notFunctionCase, slowPath);
+ ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(
+ isExoticForTypeof(value, provenType(child)),
+ rarely(slowPath), usually(continuation));
+
+ m_out.appendTo(slowPath, continuation);
+ LValue slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationObjectIsFunction, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
+ }, value);
+ ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.phi(
+ Int32, notCellResult, functionResult, objectResult, slowResult);
+ setBoolean(result);
+ }
+
+ void compileIsTypedArrayView()
+ {
+ LValue value = lowJSValue(m_node->child1());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
+ ValueFromBlock cellResult = m_out.anchor(isTypedArrayView(value, provenType(m_node->child1())));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, notCellResult, cellResult));
+ }
+
+ void compileTypeOf()
+ {
+ Edge child = m_node->child1();
+ LValue value = lowJSValue(child);
+
+ LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+
+ Vector<ValueFromBlock> results;
+
+ buildTypeOf(
+ child, value,
+ [&] (TypeofType type) {
+ results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type))));
+ m_out.jump(continuation);
+ });
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, results));
+ }
+
+ void compileIn()
+ {
+ DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse);
+
+ Node* node = m_node;
+ Edge base = node->child1();
+ LValue cell = lowCell(base);
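+        // When the property name is a constant atomic string we plant an inline cache: a
+        // patchpoint backed by a StructureStubInfo whose slow path calls operationInOptimize.
+        // Otherwise we fall through to the generic operationGenericIn call below.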
+ if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(vm())) {
+ if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
+ UniquedStringImpl* str = bitwise_cast<UniquedStringImpl*>(string->tryGetValueImpl());
+ B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(cell);
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
+
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ // This is the direct exit target for operation calls. We don't need a JS exceptionHandle because we don't
+ // cache Proxy objects.
+ Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ GPRReg baseGPR = params[1].gpr();
+ GPRReg resultGPR = params[0].gpr();
+
+ StructureStubInfo* stubInfo =
+ jit.codeBlock()->addStubInfo(AccessType::In);
+ stubInfo->callSiteIndex =
+ state->jitCode->common.addCodeOrigin(node->origin.semantic);
+ stubInfo->codeOrigin = node->origin.semantic;
+ stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
+ stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
+ stubInfo->patch.usedRegisters = params.unavailableRegisters();
+
+ CCallHelpers::PatchableJump jump = jit.patchableJump();
+ CCallHelpers::Label done = jit.label();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ jump.m_jump.link(&jit);
+ CCallHelpers::Label slowPathBegin = jit.label();
+ CCallHelpers::Call slowPathCall = callOperation(
+ *state, params.unavailableRegisters(), jit,
+ node->origin.semantic, exceptions.get(), operationInOptimize,
+ resultGPR, CCallHelpers::TrustedImmPtr(stubInfo), baseGPR,
+ CCallHelpers::TrustedImmPtr(str)).call();
+ jit.jump().linkTo(done, &jit);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ CodeLocationLabel start = linkBuffer.locationOf(jump);
+ stubInfo->patch.start = start;
+ ptrdiff_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(done));
+ RELEASE_ASSERT(inlineSize >= 0);
+ stubInfo->patch.inlineSize = inlineSize;
+
+ stubInfo->patch.deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(slowPathCall));
+
+ stubInfo->patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(slowPathBegin));
+
+ });
+ });
+ });
+
+ setJSValue(patchpoint);
+ return;
+ }
+ }
+
+ setJSValue(vmCall(Int64, m_out.operation(operationGenericIn), m_callFrame, cell, lowJSValue(m_node->child2())));
+ }
+
+ void compileHasOwnProperty()
+ {
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock lastNext = nullptr;
+
+ LValue object = lowObject(m_node->child1());
+ LValue uniquedStringImpl;
+ LValue keyAsValue = nullptr;
+ switch (m_node->child2().useKind()) {
+ case StringUse: {
+ LBasicBlock isNonEmptyString = m_out.newBlock();
+ LBasicBlock isAtomicString = m_out.newBlock();
+
+ keyAsValue = lowString(m_node->child2());
+ uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
+ m_out.branch(m_out.notNull(uniquedStringImpl), usually(isNonEmptyString), rarely(slowCase));
+
+ lastNext = m_out.appendTo(isNonEmptyString, isAtomicString);
+ LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
+ m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomicString));
+
+ m_out.appendTo(isAtomicString, slowCase);
+ break;
+ }
+ case SymbolUse: {
+ keyAsValue = lowSymbol(m_node->child2());
+ uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl);
+ lastNext = m_out.insertNewBlocksBefore(slowCase);
+ break;
+ }
+ case UntypedUse: {
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock isStringCase = m_out.newBlock();
+ LBasicBlock notStringCase = m_out.newBlock();
+ LBasicBlock isNonEmptyString = m_out.newBlock();
+ LBasicBlock isSymbolCase = m_out.newBlock();
+ LBasicBlock hasUniquedStringImpl = m_out.newBlock();
+
+ keyAsValue = lowJSValue(m_node->child2());
+ m_out.branch(isCell(keyAsValue), usually(isCellCase), rarely(slowCase));
+
+ lastNext = m_out.appendTo(isCellCase, isStringCase);
+ m_out.branch(isString(keyAsValue), unsure(isStringCase), unsure(notStringCase));
+
+ m_out.appendTo(isStringCase, isNonEmptyString);
+ LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
+ ValueFromBlock stringResult = m_out.anchor(implFromString);
+ m_out.branch(m_out.notNull(implFromString), usually(isNonEmptyString), rarely(slowCase));
+
+ m_out.appendTo(isNonEmptyString, notStringCase);
+ LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
+ m_out.branch(isNotAtomic, rarely(slowCase), usually(hasUniquedStringImpl));
+
+ m_out.appendTo(notStringCase, isSymbolCase);
+ m_out.branch(isSymbol(keyAsValue), unsure(isSymbolCase), unsure(slowCase));
+
+ m_out.appendTo(isSymbolCase, hasUniquedStringImpl);
+ ValueFromBlock symbolResult = m_out.anchor(m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl));
+ m_out.jump(hasUniquedStringImpl);
+
+ m_out.appendTo(hasUniquedStringImpl, slowCase);
+ uniquedStringImpl = m_out.phi(pointerType(), stringResult, symbolResult);
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ ASSERT(keyAsValue);
+
+        // Note that we don't test if the hash is zero here. AtomicStringImpls can't have a zero
+        // hash; a SymbolImpl, however, may. But because this is a cache, we don't care. We only
+        // ever load the result from the cache if the cache entry matches what we are querying for.
+        // So we either get super lucky and use zero for the hash and somehow collide with the entity
+        // we're looking for, or we realize we're comparing against another entity, and go to the
+        // slow path anyway.
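+        // StringImpl keeps the hash in the upper bits of m_hashAndFlags, so shifting right by
+        // s_flagCount recovers it.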
+ LValue hash = m_out.lShr(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
+
+ LValue structureID = m_out.load32(object, m_heaps.JSCell_structureID);
+ LValue index = m_out.add(hash, structureID);
+ index = m_out.zeroExtPtr(m_out.bitAnd(index, m_out.constInt32(HasOwnPropertyCache::mask)));
+ ASSERT(vm().hasOwnPropertyCache());
+ LValue cache = m_out.constIntPtr(vm().hasOwnPropertyCache());
+
+ IndexedAbstractHeap& heap = m_heaps.HasOwnPropertyCache;
+ LValue sameStructureID = m_out.equal(structureID, m_out.load32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfStructureID())));
+ LValue sameImpl = m_out.equal(uniquedStringImpl, m_out.loadPtr(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfImpl())));
+ ValueFromBlock fastResult = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfResult())));
+ LValue cacheHit = m_out.bitAnd(sameStructureID, sameImpl);
+
+ m_out.branch(m_out.notZero32(cacheHit), usually(continuation), rarely(slowCase));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult;
+ slowResult = m_out.anchor(vmCall(Int32, m_out.operation(operationHasOwnProperty), m_callFrame, object, keyAsValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, fastResult, slowResult));
+ }
+
+ void compileParseInt()
+ {
+ RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == StringUse);
+ LValue result;
+ if (m_node->child2()) {
+ LValue radix = lowInt32(m_node->child2());
+ if (m_node->child1().useKind() == UntypedUse)
+ result = vmCall(Int64, m_out.operation(operationParseIntGeneric), m_callFrame, lowJSValue(m_node->child1()), radix);
+ else
+ result = vmCall(Int64, m_out.operation(operationParseIntString), m_callFrame, lowString(m_node->child1()), radix);
+ } else {
+ if (m_node->child1().useKind() == UntypedUse)
+ result = vmCall(Int64, m_out.operation(operationParseIntNoRadixGeneric), m_callFrame, lowJSValue(m_node->child1()));
+ else
+ result = vmCall(Int64, m_out.operation(operationParseIntStringNoRadix), m_callFrame, lowString(m_node->child1()));
+ }
+ setJSValue(result);
+ }
+
+ void compileOverridesHasInstance()
+ {
+ FrozenValue* defaultHasInstanceFunction = m_node->cellOperand();
+ ASSERT(defaultHasInstanceFunction->cell()->inherits(vm(), JSFunction::info()));
+
+ LValue constructor = lowCell(m_node->child1());
+ LValue hasInstance = lowJSValue(m_node->child2());
+
+ LBasicBlock defaultHasInstance = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+        // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven that hasInstanceValue is a constant, since B3 should fix it up for us.
+
+ ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(m_out.notEqual(hasInstance, frozenPointer(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));
+
+ LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation);
+ ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32(
+ m_out.load8ZeroExt32(constructor, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(ImplementsDefaultHasInstance)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, implementsDefaultHasInstanceResult, notDefaultHasInstanceResult));
+ }
+
+ void compileCheckTypeInfoFlags()
+ {
+ speculate(
+ BadTypeInfoFlags, noValue(), 0,
+ m_out.testIsZero32(
+ m_out.load8ZeroExt32(lowCell(m_node->child1()), m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(m_node->typeInfoOperand())));
+ }
+
+ void compileInstanceOf()
+ {
+ LValue cell;
+
+ if (m_node->child1().useKind() == UntypedUse)
+ cell = lowJSValue(m_node->child1());
+ else
+ cell = lowCell(m_node->child1());
+
+ LValue prototype = lowCell(m_node->child2());
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock loop = m_out.newBlock();
+ LBasicBlock notYetInstance = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock loadPrototypeDirect = m_out.newBlock();
+ LBasicBlock defaultHasInstanceSlow = m_out.newBlock();
+
+ LValue condition;
+ if (m_node->child1().useKind() == UntypedUse)
+ condition = isCell(cell, provenType(m_node->child1()));
+ else
+ condition = m_out.booleanTrue;
+
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.branch(condition, unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, loop);
+
+ speculate(BadType, noValue(), 0, isNotObject(prototype, provenType(m_node->child2())));
+
+ ValueFromBlock originalValue = m_out.anchor(cell);
+ m_out.jump(loop);
+
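+        // Walk the prototype chain inline. Proxy objects bail to the slow path since their
+        // prototype is looked up through the handler and is observable.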
+ m_out.appendTo(loop, loadPrototypeDirect);
+ LValue value = m_out.phi(Int64, originalValue);
+ LValue type = m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType);
+ m_out.branch(
+ m_out.notEqual(type, m_out.constInt32(ProxyObjectType)),
+ usually(loadPrototypeDirect), rarely(defaultHasInstanceSlow));
+
+ m_out.appendTo(loadPrototypeDirect, notYetInstance);
+ LValue structure = loadStructure(value);
+ LValue currentPrototype = m_out.load64(structure, m_heaps.Structure_prototype);
+ ValueFromBlock isInstanceResult = m_out.anchor(m_out.booleanTrue);
+ m_out.branch(
+ m_out.equal(currentPrototype, prototype),
+ unsure(continuation), unsure(notYetInstance));
+
+ m_out.appendTo(notYetInstance, defaultHasInstanceSlow);
+ ValueFromBlock notInstanceResult = m_out.anchor(m_out.booleanFalse);
+ m_out.addIncomingToPhi(value, m_out.anchor(currentPrototype));
+ m_out.branch(isCell(currentPrototype), unsure(loop), unsure(continuation));
+
+ m_out.appendTo(defaultHasInstanceSlow, continuation);
+        // We can use the value that we're looping with because we can just continue
+        // from wherever we bailed out of the loop.
+ ValueFromBlock defaultHasInstanceResult = m_out.anchor(
+ vmCall(Int32, m_out.operation(operationDefaultHasInstance), m_callFrame, value, prototype));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(
+ m_out.phi(Int32, notCellResult, isInstanceResult, notInstanceResult, defaultHasInstanceResult));
+ }
+
+ void compileInstanceOfCustom()
+ {
+ LValue value = lowJSValue(m_node->child1());
+ LValue constructor = lowCell(m_node->child2());
+ LValue hasInstance = lowJSValue(m_node->child3());
+
+ setBoolean(m_out.logicalNot(m_out.equal(m_out.constInt32(0), vmCall(Int32, m_out.operation(operationInstanceOfCustom), m_callFrame, value, constructor, hasInstance))));
+ }
+
+ void compileCountExecution()
+ {
+ TypedPointer counter = m_out.absolute(m_node->executionCounter()->address());
+ m_out.store64(m_out.add(m_out.load64(counter), m_out.constInt64(1)), counter);
+ }
+
+ void compileStoreBarrier()
+ {
+ emitStoreBarrier(lowCell(m_node->child1()), m_node->op() == FencedStoreBarrier);
+ }
+
+ void compileHasIndexedProperty()
+ {
+ switch (m_node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Contiguous: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+ LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
+
+ IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
+ m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
+
+ LBasicBlock checkHole = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ if (!m_node->arrayMode().isInBounds()) {
+ m_out.branch(
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
+ rarely(slowCase), usually(checkHole));
+ } else
+ m_out.jump(checkHole);
+
+ LBasicBlock lastNext = m_out.appendTo(checkHole, slowCase);
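+            // A hole in an Int32/Contiguous butterfly is the empty JSValue, which encodes as zero.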
+ LValue checkHoleResultValue =
+ m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_node->child2())));
+ ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
+ m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(m_out.equal(
+ m_out.constInt64(JSValue::encode(jsBoolean(true))),
+ vmCall(Int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index, internalMethodType)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
+ return;
+ }
+ case Array::Double: {
+ LValue base = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+ LValue storage = lowStorage(m_node->child3());
+ LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
+
+ IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
+
+ LBasicBlock checkHole = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ if (!m_node->arrayMode().isInBounds()) {
+ m_out.branch(
+ m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
+ rarely(slowCase), usually(checkHole));
+ } else
+ m_out.jump(checkHole);
+
+ LBasicBlock lastNext = m_out.appendTo(checkHole, slowCase);
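+            // Double arrays represent holes as NaN, so a value is present exactly when it
+            // compares equal to itself.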
+ LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_node->child2()));
+ LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue);
+ ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
+ m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowResult = m_out.anchor(m_out.equal(
+ m_out.constInt64(JSValue::encode(jsBoolean(true))),
+ vmCall(Int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index, internalMethodType)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
+ return;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+ }
+
+ void compileHasGenericProperty()
+ {
+ LValue base = lowJSValue(m_node->child1());
+ LValue property = lowCell(m_node->child2());
+ setJSValue(vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property));
+ }
+
+ void compileHasStructureProperty()
+ {
+ LValue base = lowJSValue(m_node->child1());
+ LValue property = lowString(m_node->child2());
+ LValue enumerator = lowCell(m_node->child3());
+
+ LBasicBlock correctStructure = m_out.newBlock();
+ LBasicBlock wrongStructure = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(m_out.notEqual(
+ m_out.load32(base, m_heaps.JSCell_structureID),
+ m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
+ rarely(wrongStructure), usually(correctStructure));
+
+ LBasicBlock lastNext = m_out.appendTo(correctStructure, wrongStructure);
+ ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(wrongStructure, continuation);
+ ValueFromBlock wrongStructureResult = m_out.anchor(
+ m_out.equal(
+ m_out.constInt64(JSValue::encode(jsBoolean(true))),
+ vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, correctStructureResult, wrongStructureResult));
+ }
+
+ void compileGetDirectPname()
+ {
+ LValue base = lowCell(m_graph.varArgChild(m_node, 0));
+ LValue property = lowCell(m_graph.varArgChild(m_node, 1));
+ LValue index = lowInt32(m_graph.varArgChild(m_node, 2));
+ LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3));
+
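+        // If the base still has the enumerator's cached structure, the property can be loaded
+        // straight from inline or out-of-line storage by index; otherwise fall back to a
+        // generic operationGetByVal.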
+ LBasicBlock checkOffset = m_out.newBlock();
+ LBasicBlock inlineLoad = m_out.newBlock();
+ LBasicBlock outOfLineLoad = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(m_out.notEqual(
+ m_out.load32(base, m_heaps.JSCell_structureID),
+ m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
+ rarely(slowCase), usually(checkOffset));
+
+ LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad);
+ m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)),
+ unsure(outOfLineLoad), unsure(inlineLoad));
+
+ m_out.appendTo(inlineLoad, outOfLineLoad);
+ ValueFromBlock inlineResult = m_out.anchor(
+ m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(),
+ base, m_out.zeroExt(index, Int64), ScaleEight, JSObject::offsetOfInlineStorage())));
+ m_out.jump(continuation);
+
+ m_out.appendTo(outOfLineLoad, slowCase);
+ LValue storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
+ LValue realIndex = m_out.signExt32To64(
+ m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity))));
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ ValueFromBlock outOfLineResult = m_out.anchor(
+ m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowCase, continuation);
+ ValueFromBlock slowCaseResult = m_out.anchor(
+ vmCall(Int64, m_out.operation(operationGetByVal), m_callFrame, base, property));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, inlineResult, outOfLineResult, slowCaseResult));
+ }
+
+ void compileGetEnumerableLength()
+ {
+ LValue enumerator = lowCell(m_node->child1());
+ setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength));
+ }
+
+ void compileGetPropertyEnumerator()
+ {
+ LValue base = lowCell(m_node->child1());
+ setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumerator), m_callFrame, base));
+ }
+
+ void compileGetEnumeratorStructurePname()
+ {
+ LValue enumerator = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ LBasicBlock inBounds = m_out.newBlock();
+ LBasicBlock outOfBounds = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)),
+ usually(inBounds), rarely(outOfBounds));
+
+ LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
+ LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
+ ValueFromBlock inBoundsResult = m_out.anchor(
+ m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(outOfBounds, continuation);
+ ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
+ }
+
+ void compileGetEnumeratorGenericPname()
+ {
+ LValue enumerator = lowCell(m_node->child1());
+ LValue index = lowInt32(m_node->child2());
+
+ LBasicBlock inBounds = m_out.newBlock();
+ LBasicBlock outOfBounds = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)),
+ usually(inBounds), rarely(outOfBounds));
+
+ LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
+ LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
+ ValueFromBlock inBoundsResult = m_out.anchor(
+ m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(outOfBounds, continuation);
+ ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
+ }
+
+ void compileToIndexString()
+ {
+ LValue index = lowInt32(m_node->child1());
+ setJSValue(vmCall(Int64, m_out.operation(operationToIndexString), m_callFrame, index));
+ }
+
+ void compileCheckStructureImmediate()
+ {
+ LValue structure = lowCell(m_node->child1());
+ checkStructure(
+ structure, noValue(), BadCache, m_node->structureSet(),
+ [this] (RegisteredStructure structure) {
+ return weakStructure(structure);
+ });
+ }
+
+ void compileMaterializeNewObject()
+ {
+ ObjectMaterializationData& data = m_node->objectMaterializationData();
+
+ // Lower the values first, to avoid creating values inside a control flow diamond.
+
+ Vector<LValue, 8> values;
+ for (unsigned i = 0; i < data.m_properties.size(); ++i) {
+ Edge edge = m_graph.varArgChild(m_node, 1 + i);
+ switch (data.m_properties[i].kind()) {
+ case PublicLengthPLoc:
+ case VectorLengthPLoc:
+ values.append(lowInt32(edge));
+ break;
+ default:
+ values.append(lowJSValue(edge));
+ break;
+ }
+ }
+
+ RegisteredStructureSet set = m_node->structureSet();
+
+ Vector<LBasicBlock, 1> blocks(set.size());
+ for (unsigned i = set.size(); i--;)
+ blocks[i] = m_out.newBlock();
+ LBasicBlock dummyDefault = m_out.newBlock();
+ LBasicBlock outerContinuation = m_out.newBlock();
+
+ Vector<SwitchCase, 1> cases(set.size());
+ for (unsigned i = set.size(); i--;)
+ cases[i] = SwitchCase(weakStructure(set.at(i)), blocks[i], Weight(1));
+ m_out.switchInstruction(
+ lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
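+        // One block per possible structure: each materializes the object with that structure's
+        // inline capacity and indexing type, and the switch on the incoming structure value
+        // picks the right one.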
+
+ LBasicBlock outerLastNext = m_out.m_nextBlock;
+
+ Vector<ValueFromBlock, 1> results;
+
+ for (unsigned i = set.size(); i--;) {
+ m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
+
+ RegisteredStructure structure = set.at(i);
+
+ LValue object;
+ LValue butterfly;
+
+ if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
+ MarkedAllocator* cellAllocator = subspaceFor<JSFinalObject>(vm())->allocatorFor(allocationSize);
+ DFG_ASSERT(m_graph, m_node, cellAllocator);
+
+ bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
+ unsigned indexingHeaderSize = 0;
+ LValue indexingPayloadSizeInBytes = m_out.intPtrZero;
+ LValue vectorLength = m_out.int32Zero;
+ LValue publicLength = m_out.int32Zero;
+ if (hasIndexingHeader) {
+ indexingHeaderSize = sizeof(IndexingHeader);
+ for (unsigned i = data.m_properties.size(); i--;) {
+ PromotedLocationDescriptor descriptor = data.m_properties[i];
+ switch (descriptor.kind()) {
+ case PublicLengthPLoc:
+ publicLength = values[i];
+ break;
+ case VectorLengthPLoc:
+ vectorLength = values[i];
+ break;
+ default:
+ break;
+ }
+ }
+ indexingPayloadSizeInBytes =
+ m_out.mul(m_out.zeroExtPtr(vectorLength), m_out.intPtrEight);
+ }
+
+ LValue butterflySize = m_out.add(
+ m_out.constIntPtr(
+ structure->outOfLineCapacity() * sizeof(JSValue) + indexingHeaderSize),
+ indexingPayloadSizeInBytes);
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
+
+ LValue startOfStorage = allocateHeapCell(
+ allocatorForSize(vm().auxiliarySpace, butterflySize, slowPath),
+ slowPath);
+
+ LValue fastButterflyValue = m_out.add(
+ startOfStorage,
+ m_out.constIntPtr(
+ structure->outOfLineCapacity() * sizeof(JSValue) + sizeof(IndexingHeader)));
+
+ ValueFromBlock haveButterfly = m_out.anchor(fastButterflyValue);
+
+ splatWords(
+ fastButterflyValue,
+ m_out.constInt32(-structure->outOfLineCapacity() - 1),
+ m_out.constInt32(-1),
+ m_out.int64Zero, m_heaps.properties.atAnyNumber());
+
+ m_out.store32(vectorLength, fastButterflyValue, m_heaps.Butterfly_vectorLength);
+
+ LValue fastObjectValue = allocateObject(
+ m_out.constIntPtr(cellAllocator), structure, fastButterflyValue, slowPath);
+
+ ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
+ ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
+
+ LValue slowObjectValue;
+ if (hasIndexingHeader) {
+ slowObjectValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
+ locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure.get()),
+ locations[1].directGPR(), locations[2].directGPR());
+ },
+ vectorLength, butterflyValue);
+ } else {
+ slowObjectValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationNewObjectWithButterfly, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
+ },
+ butterflyValue);
+ }
+ ValueFromBlock slowObject = m_out.anchor(slowObjectValue);
+ ValueFromBlock slowButterfly = m_out.anchor(
+ m_out.loadPtr(slowObjectValue, m_heaps.JSObject_butterfly));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ object = m_out.phi(pointerType(), fastObject, slowObject);
+ butterfly = m_out.phi(pointerType(), fastButterfly, slowButterfly);
+
+ m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
+
+ initializeArrayElements(m_out.constInt32(structure->indexingType()), m_out.int32Zero, vectorLength, butterfly);
+
+ HashMap<int32_t, LValue, DefaultHash<int32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<int32_t>> indexMap;
+ Vector<int32_t> indices;
+ for (unsigned i = data.m_properties.size(); i--;) {
+ PromotedLocationDescriptor descriptor = data.m_properties[i];
+ if (descriptor.kind() != IndexedPropertyPLoc)
+ continue;
+ int32_t index = static_cast<int32_t>(descriptor.info());
+
+ auto result = indexMap.add(index, values[i]);
+ DFG_ASSERT(m_graph, m_node, result); // Duplicates are illegal.
+
+ indices.append(index);
+ }
+
+ if (!indices.isEmpty()) {
+ std::sort(indices.begin(), indices.end());
+
+ Vector<LBasicBlock> blocksWithStores(indices.size());
+ Vector<LBasicBlock> blocksWithChecks(indices.size());
+
+ for (unsigned i = indices.size(); i--;) {
+ blocksWithStores[i] = m_out.newBlock();
+ blocksWithChecks[i] = m_out.newBlock(); // blocksWithChecks[0] is the continuation.
+ }
+
+ LBasicBlock indexLastNext = m_out.m_nextBlock;
+
+ for (unsigned i = indices.size(); i--;) {
+ int32_t index = indices[i];
+ LValue value = indexMap.get(index);
+
+ m_out.branch(
+ m_out.below(m_out.constInt32(index), publicLength),
+ unsure(blocksWithStores[i]), unsure(blocksWithChecks[i]));
+
+ m_out.appendTo(blocksWithStores[i], blocksWithChecks[i]);
+
+ // This has to type-check and convert its inputs, but it cannot do so in a
+ // way that updates AI. That's a bit annoying, but if you think about how
+ // sinking works, it's actually not a bad thing. We are virtually guaranteed
+ // that these type checks will not fail, since the type checks that guarded
+ // the original stores to the array are still somewhere above this point.
+ Output::StoreType storeType;
+ IndexedAbstractHeap* heap;
+ switch (structure->indexingType()) {
+ case ALL_INT32_INDEXING_TYPES:
+ // FIXME: This could use the proven type if we had the Edge for the
+ // value. https://bugs.webkit.org/show_bug.cgi?id=155311
+ speculate(BadType, noValue(), nullptr, isNotInt32(value));
+ storeType = Output::Store64;
+ heap = &m_heaps.indexedInt32Properties;
+ break;
+
+ case ALL_DOUBLE_INDEXING_TYPES: {
+ // FIXME: If the source is ValueRep, we should avoid emitting any
+ // checks. We could also avoid emitting checks if we had the Edge of
+ // this value. https://bugs.webkit.org/show_bug.cgi?id=155311
+
+ LBasicBlock intCase = m_out.newBlock();
+ LBasicBlock doubleCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isInt32(value), unsure(intCase), unsure(doubleCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
+
+ ValueFromBlock intResult =
+ m_out.anchor(m_out.intToDouble(unboxInt32(value)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(doubleCase, continuation);
+
+ speculate(BadType, noValue(), nullptr, isNumber(value));
+ ValueFromBlock doubleResult = m_out.anchor(unboxDouble(value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ value = m_out.phi(Double, intResult, doubleResult);
+ storeType = Output::StoreDouble;
+ heap = &m_heaps.indexedDoubleProperties;
+ break;
+ }
+
+ case ALL_CONTIGUOUS_INDEXING_TYPES:
+ storeType = Output::Store64;
+ heap = &m_heaps.indexedContiguousProperties;
+ break;
+
+ default:
+ DFG_CRASH(m_graph, m_node, "Invalid indexing type");
+ break;
+ }
+
+ m_out.store(value, m_out.address(butterfly, heap->at(index)), storeType);
+
+ m_out.jump(blocksWithChecks[i]);
+ m_out.appendTo(
+ blocksWithChecks[i], i ? blocksWithStores[i - 1] : indexLastNext);
+ }
+ }
+ } else {
+                // In the easy case where we can do a one-shot allocation, we simply allocate the
+                // object directly with the desired structure.
+ object = allocateObject(structure);
+ butterfly = nullptr; // Don't have one, don't need one.
+ }
+
+ BitVector setInlineOffsets;
+ for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
+ for (unsigned i = data.m_properties.size(); i--;) {
+ PromotedLocationDescriptor descriptor = data.m_properties[i];
+ if (descriptor.kind() != NamedPropertyPLoc)
+ continue;
+ if (m_graph.identifiers()[descriptor.info()] != entry.key)
+ continue;
+
+ LValue base;
+ if (isInlineOffset(entry.offset)) {
+ setInlineOffsets.set(entry.offset);
+ base = object;
+ } else
+ base = butterfly;
+ storeProperty(values[i], base, descriptor.info(), entry.offset);
+ break;
+ }
+ }
+ for (unsigned i = structure->inlineCapacity(); i--;) {
+ if (!setInlineOffsets.get(i))
+ m_out.store64(m_out.int64Zero, m_out.address(m_heaps.properties.atAnyNumber(), object, offsetRelativeToBase(i)));
+ }
+
+ results.append(m_out.anchor(object));
+ m_out.jump(outerContinuation);
+ }
+
+ m_out.appendTo(dummyDefault, outerContinuation);
+ m_out.unreachable();
+
+ m_out.appendTo(outerContinuation, outerLastNext);
+ setJSValue(m_out.phi(pointerType(), results));
+ mutatorFence();
+ }
+
+ void compileMaterializeCreateActivation()
+ {
+ ObjectMaterializationData& data = m_node->objectMaterializationData();
+
+ Vector<LValue, 8> values;
+ for (unsigned i = 0; i < data.m_properties.size(); ++i)
+ values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i)));
+
+ LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
+ SymbolTable* table = m_node->castOperand<SymbolTable*>();
+ ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>(vm()));
+ RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ LValue fastObject = allocateObject<JSLexicalEnvironment>(
+ JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);
+
+ m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
+ m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);
+
+
+ ValueFromBlock fastResult = m_out.anchor(fastObject);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+        // We ensure allocation sinking explicitly sets bottom values for all field members.
+ // Therefore, it doesn't matter what JSValue we pass in as the initialization value
+ // because all fields will be overwritten.
+ // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that
+ // doesn't initialize every slot because we are guaranteed to do that here.
+ LValue callResult = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationCreateActivationDirect, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
+ CCallHelpers::TrustedImmPtr(table),
+ CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined())));
+ }, scope);
+ ValueFromBlock slowResult = m_out.anchor(callResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue activation = m_out.phi(pointerType(), fastResult, slowResult);
+ RELEASE_ASSERT(data.m_properties.size() == table->scopeSize());
+ for (unsigned i = 0; i < data.m_properties.size(); ++i) {
+ PromotedLocationDescriptor descriptor = data.m_properties[i];
+ ASSERT(descriptor.kind() == ClosureVarPLoc);
+ m_out.store64(
+ values[i], activation,
+ m_heaps.JSEnvironmentRecord_variables[descriptor.info()]);
+ }
+
+ if (validationEnabled()) {
+            // Validate to make sure every slot in the scope gets a value.
+ ConcurrentJSLocker locker(table->m_lock);
+ for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
+ bool found = false;
+ for (unsigned i = 0; i < data.m_properties.size(); ++i) {
+ PromotedLocationDescriptor descriptor = data.m_properties[i];
+ ASSERT(descriptor.kind() == ClosureVarPLoc);
+ if (iter->value.scopeOffset().offset() == descriptor.info()) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT_UNUSED(found, found);
+ }
+ }
+
+ mutatorFence();
+ setJSValue(activation);
+ }
+
+ void compileCheckWatchdogTimer()
+ {
+ LBasicBlock timerDidFire = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().watchdog()->timerDidFireAddress()));
+ m_out.branch(m_out.isZero32(state),
+ usually(continuation), rarely(timerDidFire));
+
+ LBasicBlock lastNext = m_out.appendTo(timerDidFire, continuation);
+
+ lazySlowPath(
+ [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(operationHandleWatchdogTimer, InvalidGPRReg);
+ });
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void compileRegExpExec()
+ {
+ LValue globalObject = lowCell(m_node->child1());
+
+ if (m_node->child2().useKind() == RegExpObjectUse) {
+ LValue base = lowRegExpObject(m_node->child2());
+
+ if (m_node->child3().useKind() == StringUse) {
+ LValue argument = lowString(m_node->child3());
+ LValue result = vmCall(
+ Int64, m_out.operation(operationRegExpExecString), m_callFrame, globalObject,
+ base, argument);
+ setJSValue(result);
+ return;
+ }
+
+ LValue argument = lowJSValue(m_node->child3());
+ LValue result = vmCall(
+ Int64, m_out.operation(operationRegExpExec), m_callFrame, globalObject, base,
+ argument);
+ setJSValue(result);
+ return;
+ }
+
+ LValue base = lowJSValue(m_node->child2());
+ LValue argument = lowJSValue(m_node->child3());
+ LValue result = vmCall(
+ Int64, m_out.operation(operationRegExpExecGeneric), m_callFrame, globalObject, base,
+ argument);
+ setJSValue(result);
+ }
+
+ void compileRegExpTest()
+ {
+ LValue globalObject = lowCell(m_node->child1());
+
+ if (m_node->child2().useKind() == RegExpObjectUse) {
+ LValue base = lowRegExpObject(m_node->child2());
+
+ if (m_node->child3().useKind() == StringUse) {
+ LValue argument = lowString(m_node->child3());
+ LValue result = vmCall(
+ Int32, m_out.operation(operationRegExpTestString), m_callFrame, globalObject,
+ base, argument);
+ setBoolean(result);
+ return;
+ }
+
+ LValue argument = lowJSValue(m_node->child3());
+ LValue result = vmCall(
+ Int32, m_out.operation(operationRegExpTest), m_callFrame, globalObject, base,
+ argument);
+ setBoolean(result);
+ return;
+ }
+
+ LValue base = lowJSValue(m_node->child2());
+ LValue argument = lowJSValue(m_node->child3());
+ LValue result = vmCall(
+ Int32, m_out.operation(operationRegExpTestGeneric), m_callFrame, globalObject, base,
+ argument);
+ setBoolean(result);
+ }
+
+ void compileNewRegexp()
+ {
+ FrozenValue* regexp = m_node->cellOperand();
+ ASSERT(regexp->cell()->inherits(vm(), RegExp::info()));
+ LValue result = vmCall(
+ pointerType(),
+ m_out.operation(operationNewRegexp), m_callFrame,
+ frozenPointer(regexp));
+
+ setJSValue(result);
+ }
+
+ void compileSetFunctionName()
+ {
+ vmCall(Void, m_out.operation(operationSetFunctionName), m_callFrame,
+ lowCell(m_node->child1()), lowJSValue(m_node->child2()));
+ }
+
+ void compileStringReplace()
+ {
+ if (m_node->child1().useKind() == StringUse
+ && m_node->child2().useKind() == RegExpObjectUse
+ && m_node->child3().useKind() == StringUse) {
+
+ if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>(vm())) {
+ if (!replace->length()) {
+ LValue string = lowString(m_node->child1());
+ LValue regExp = lowRegExpObject(m_node->child2());
+
+ LValue result = vmCall(
+ Int64, m_out.operation(operationStringProtoFuncReplaceRegExpEmptyStr),
+ m_callFrame, string, regExp);
+
+ setJSValue(result);
+ return;
+ }
+ }
+
+ LValue string = lowString(m_node->child1());
+ LValue regExp = lowRegExpObject(m_node->child2());
+ LValue replace = lowString(m_node->child3());
+
+ LValue result = vmCall(
+ Int64, m_out.operation(operationStringProtoFuncReplaceRegExpString),
+ m_callFrame, string, regExp, replace);
+
+ setJSValue(result);
+ return;
+ }
+
+ LValue search;
+ if (m_node->child2().useKind() == StringUse)
+ search = lowString(m_node->child2());
+ else
+ search = lowJSValue(m_node->child2());
+
+ LValue result = vmCall(
+ Int64, m_out.operation(operationStringProtoFuncReplaceGeneric), m_callFrame,
+ lowJSValue(m_node->child1()), search,
+ lowJSValue(m_node->child3()));
+
+ setJSValue(result);
+ }
+
+ void compileGetRegExpObjectLastIndex()
+ {
+ setJSValue(m_out.load64(lowRegExpObject(m_node->child1()), m_heaps.RegExpObject_lastIndex));
+ }
+
+ void compileSetRegExpObjectLastIndex()
+ {
+ LValue regExp = lowRegExpObject(m_node->child1());
+ LValue value = lowJSValue(m_node->child2());
+
+ speculate(
+ ExoticObjectMode, noValue(), nullptr,
+ m_out.isZero32(m_out.load8ZeroExt32(regExp, m_heaps.RegExpObject_lastIndexIsWritable)));
+
+ m_out.store64(value, regExp, m_heaps.RegExpObject_lastIndex);
+ }
+
+ void compileLogShadowChickenPrologue()
+ {
+ LValue packet = ensureShadowChickenPacket();
+ LValue scope = lowCell(m_node->child1());
+
+ m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
+ m_out.storePtr(m_out.loadPtr(addressFor(0)), packet, m_heaps.ShadowChicken_Packet_callerFrame);
+ m_out.storePtr(m_out.loadPtr(payloadFor(CallFrameSlot::callee)), packet, m_heaps.ShadowChicken_Packet_callee);
+ m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
+ }
+
+ void compileLogShadowChickenTail()
+ {
+ LValue packet = ensureShadowChickenPacket();
+ LValue thisValue = lowJSValue(m_node->child1());
+ LValue scope = lowCell(m_node->child2());
+ CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(m_node->origin.semantic);
+
+ m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
+ m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(ShadowChicken::Packet::tailMarker())), packet, m_heaps.ShadowChicken_Packet_callee);
+ m_out.store64(thisValue, packet, m_heaps.ShadowChicken_Packet_thisValue);
+ m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
+ // We don't want the CodeBlock to have a weak pointer to itself because
+ // that would cause it to always get collected.
+ m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), packet, m_heaps.ShadowChicken_Packet_codeBlock);
+ m_out.store32(m_out.constInt32(callSiteIndex.bits()), packet, m_heaps.ShadowChicken_Packet_callSiteIndex);
+ }
+
+ void compileRecordRegExpCachedResult()
+ {
+ Edge constructorEdge = m_graph.varArgChild(m_node, 0);
+ Edge regExpEdge = m_graph.varArgChild(m_node, 1);
+ Edge stringEdge = m_graph.varArgChild(m_node, 2);
+ Edge startEdge = m_graph.varArgChild(m_node, 3);
+ Edge endEdge = m_graph.varArgChild(m_node, 4);
+
+ LValue constructor = lowCell(constructorEdge);
+ LValue regExp = lowCell(regExpEdge);
+ LValue string = lowCell(stringEdge);
+ LValue start = lowInt32(startEdge);
+ LValue end = lowInt32(endEdge);
+
+ m_out.storePtr(regExp, constructor, m_heaps.RegExpConstructor_cachedResult_lastRegExp);
+ m_out.storePtr(string, constructor, m_heaps.RegExpConstructor_cachedResult_lastInput);
+ m_out.store32(start, constructor, m_heaps.RegExpConstructor_cachedResult_result_start);
+ m_out.store32(end, constructor, m_heaps.RegExpConstructor_cachedResult_result_end);
+ m_out.store32As8(
+ m_out.constInt32(0),
+ m_out.address(constructor, m_heaps.RegExpConstructor_cachedResult_reified));
+ }
+
+ struct ArgumentsLength {
+ ArgumentsLength()
+ : isKnown(false)
+ , known(UINT_MAX)
+ , value(nullptr)
+ {
+ }
+
+ bool isKnown;
+ unsigned known;
+ LValue value;
+ };
+ ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame)
+ {
+ ArgumentsLength length;
+
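+        // The recorded argument count includes |this|, so subtract one to get the number of
+        // actual arguments.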
+ if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
+ length.known = inlineCallFrame->arguments.size() - 1;
+ length.isKnown = true;
+ length.value = m_out.constInt32(length.known);
+ } else {
+ length.known = UINT_MAX;
+ length.isKnown = false;
+
+ VirtualRegister argumentCountRegister;
+ if (!inlineCallFrame)
+ argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
+ else
+ argumentCountRegister = inlineCallFrame->argumentCountRegister;
+ length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
+ }
+
+ return length;
+ }
+
+ ArgumentsLength getArgumentsLength()
+ {
+ return getArgumentsLength(m_node->origin.semantic.inlineCallFrame);
+ }
+
+ LValue getCurrentCallee()
+ {
+ if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame) {
+ if (frame->isClosureCall)
+ return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
+ return weakPointer(frame->calleeRecovery.constant().asCell());
+ }
+ return m_out.loadPtr(addressFor(CallFrameSlot::callee));
+ }
+
+ LValue getArgumentsStart(InlineCallFrame* inlineCallFrame, unsigned offset = 0)
+ {
+ VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame) + offset;
+ return addressFor(start).value();
+ }
+
+ LValue getArgumentsStart()
+ {
+ return getArgumentsStart(m_node->origin.semantic.inlineCallFrame);
+ }
+
+ template<typename Functor>
+ void checkStructure(
+ LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
+ RegisteredStructureSet set, const Functor& weakStructureDiscriminant)
+ {
+ if (set.isEmpty()) {
+ terminate(exitKind);
+ return;
+ }
+
+ if (set.size() == 1) {
+ speculate(
+ exitKind, formattedValue, 0,
+ m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0])));
+ return;
+ }
+
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+ for (unsigned i = 0; i < set.size() - 1; ++i) {
+ LBasicBlock nextStructure = m_out.newBlock();
+ m_out.branch(
+ m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])),
+ unsure(continuation), unsure(nextStructure));
+ m_out.appendTo(nextStructure);
+ }
+
+ speculate(
+ exitKind, formattedValue, 0,
+ m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last())));
+
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+ }
+
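+ // Converts a NumberUse or NotCellUse value to Int32: int32s are unboxed directly, doubles go
+ // through doubleToInt32(), and for NotCellUse the remaining non-number values are checked to
+ // be non-cells and collapse to 1 for |true| and 0 for everything else.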
+ LValue numberOrNotCellToInt32(Edge edge, LValue value)
+ {
+ LBasicBlock intCase = m_out.newBlock();
+ LBasicBlock notIntCase = m_out.newBlock();
+ LBasicBlock doubleCase = nullptr;
+ LBasicBlock notNumberCase = nullptr;
+ if (edge.useKind() == NotCellUse) {
+ doubleCase = m_out.newBlock();
+ notNumberCase = m_out.newBlock();
+ }
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock> results;
+
+ m_out.branch(isNotInt32(value), unsure(notIntCase), unsure(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase);
+ results.append(m_out.anchor(unboxInt32(value)));
+ m_out.jump(continuation);
+
+ if (edge.useKind() == NumberUse) {
+ m_out.appendTo(notIntCase, continuation);
+ FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isCellOrMisc(value));
+ results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
+ m_out.jump(continuation);
+ } else {
+ m_out.appendTo(notIntCase, doubleCase);
+ m_out.branch(
+ isCellOrMisc(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase));
+
+ m_out.appendTo(doubleCase, notNumberCase);
+ results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notNumberCase, continuation);
+
+ FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCell, isCell(value));
+
+ LValue specialResult = m_out.select(
+ m_out.equal(value, m_out.constInt64(JSValue::encode(jsBoolean(true)))),
+ m_out.int32One, m_out.int32Zero);
+ results.append(m_out.anchor(specialResult));
+ m_out.jump(continuation);
+ }
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int32, results);
+ }
+
+ void checkInferredType(Edge edge, LValue value, const InferredType::Descriptor& type)
+ {
+ // This cannot use FTL_TYPE_CHECK or typeCheck() because the check may run on only some paths. For example, a node like:
+ //
+ // MultiPutByOffset(...)
+ //
+ // may be lowered to:
+ //
+ // switch (object->structure) {
+ // case 42:
+ // checkInferredType(..., type1);
+ // ...
+ // break;
+ // case 43:
+ // checkInferredType(..., type2);
+ // ...
+ // break;
+ // }
+ //
+ // where type1 and type2 are different. Using typeCheck() would mean that the edge would be
+ // filtered by type1 & type2, instead of type1 | type2.
+
+ switch (type.kind()) {
+ case InferredType::Bottom:
+ speculate(BadType, jsValueValue(value), edge.node(), m_out.booleanTrue);
+ return;
+
+ case InferredType::Boolean:
+ speculate(BadType, jsValueValue(value), edge.node(), isNotBoolean(value, provenType(edge)));
+ return;
+
+ case InferredType::Other:
+ speculate(BadType, jsValueValue(value), edge.node(), isNotOther(value, provenType(edge)));
+ return;
+
+ case InferredType::Int32:
+ speculate(BadType, jsValueValue(value), edge.node(), isNotInt32(value, provenType(edge)));
+ return;
+
+ case InferredType::Number:
+ speculate(BadType, jsValueValue(value), edge.node(), isNotNumber(value, provenType(edge)));
+ return;
+
+ case InferredType::String:
+ speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge)));
+ speculate(BadType, jsValueValue(value), edge.node(), isNotString(value, provenType(edge)));
+ return;
+
+ case InferredType::Symbol:
+ speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge)));
+ speculate(BadType, jsValueValue(value), edge.node(), isNotSymbol(value, provenType(edge)));
+ return;
+
+ case InferredType::ObjectWithStructure: {
+ RegisteredStructure structure = m_graph.registerStructure(type.structure());
+ speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge)));
+ if (!abstractValue(edge).m_structure.isSubsetOf(RegisteredStructureSet(structure))) {
+ speculate(
+ BadType, jsValueValue(value), edge.node(),
+ m_out.notEqual(
+ m_out.load32(value, m_heaps.JSCell_structureID),
+ weakStructureID(structure)));
+ }
+ return;
+ }
+
+ case InferredType::ObjectWithStructureOrOther: {
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
+
+ RegisteredStructure structure = m_graph.registerStructure(type.structure());
+ if (!abstractValue(edge).m_structure.isSubsetOf(RegisteredStructureSet(structure))) {
+ speculate(
+ BadType, jsValueValue(value), edge.node(),
+ m_out.notEqual(
+ m_out.load32(value, m_heaps.JSCell_structureID),
+ weakStructureID(structure)));
+ }
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, continuation);
+
+ speculate(
+ BadType, jsValueValue(value), edge.node(),
+ isNotOther(value, provenType(edge) & ~SpecCell));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return;
+ }
+
+ case InferredType::Object:
+ speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge)));
+ speculate(BadType, jsValueValue(value), edge.node(), isNotObject(value, provenType(edge)));
+ return;
+
+ case InferredType::ObjectOrOther: {
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
+
+ speculate(
+ BadType, jsValueValue(value), edge.node(),
+ isNotObject(value, provenType(edge) & SpecCell));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, continuation);
+
+ speculate(
+ BadType, jsValueValue(value), edge.node(),
+ isNotOther(value, provenType(edge) & ~SpecCell));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return;
+ }
+
+ case InferredType::Top:
+ return;
+ }
+
+ DFG_CRASH(m_graph, m_node, "Bad inferred type");
+ }
+
+ LValue loadProperty(LValue storage, unsigned identifierNumber, PropertyOffset offset)
+ {
+ return m_out.load64(addressOfProperty(storage, identifierNumber, offset));
+ }
+
+ void storeProperty(
+ LValue value, LValue storage, unsigned identifierNumber, PropertyOffset offset)
+ {
+ m_out.store64(value, addressOfProperty(storage, identifierNumber, offset));
+ }
+
+ TypedPointer addressOfProperty(
+ LValue storage, unsigned identifierNumber, PropertyOffset offset)
+ {
+ return m_out.address(
+ m_heaps.properties[identifierNumber], storage, offsetRelativeToBase(offset));
+ }
+
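+ // Returns the storage to write into for a structure transition: the object itself for inline
+ // offsets, the existing butterfly when the out-of-line capacity is unchanged, and otherwise a
+ // freshly (re)allocated butterfly, installed via nukeStructureAndSetButterfly().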
+ LValue storageForTransition(
+ LValue object, PropertyOffset offset,
+ Structure* previousStructure, Structure* nextStructure)
+ {
+ if (isInlineOffset(offset))
+ return object;
+
+ if (previousStructure->outOfLineCapacity() == nextStructure->outOfLineCapacity())
+ return m_out.loadPtr(object, m_heaps.JSObject_butterfly);
+
+ LValue result;
+ if (!previousStructure->outOfLineCapacity())
+ result = allocatePropertyStorage(object, previousStructure);
+ else {
+ result = reallocatePropertyStorage(
+ object, m_out.loadPtr(object, m_heaps.JSObject_butterfly),
+ previousStructure, nextStructure);
+ }
+
+ nukeStructureAndSetButterfly(result, object);
+ return result;
+ }
+
+ void initializeArrayElements(LValue indexingType, LValue begin, LValue end, LValue butterfly)
+ {
+ if (begin == end)
+ return;
+
+ if (indexingType->hasInt32()) {
+ IndexingType rawIndexingType = static_cast<IndexingType>(indexingType->asInt32());
+ if (hasUndecided(rawIndexingType))
+ return;
+ IndexedAbstractHeap* heap = m_heaps.forIndexingType(rawIndexingType);
+ DFG_ASSERT(m_graph, m_node, heap);
+
+ LValue hole;
+ if (hasDouble(rawIndexingType))
+ hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
+ else
+ hole = m_out.constInt64(JSValue::encode(JSValue()));
+
+ splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
+ } else {
+ LValue hole = m_out.select(
+ m_out.equal(m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), m_out.constInt32(DoubleShape)),
+ m_out.constInt64(bitwise_cast<int64_t>(PNaN)),
+ m_out.constInt64(JSValue::encode(JSValue())));
+ splatWords(butterfly, begin, end, hole, m_heaps.root);
+ }
+ }
+
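+ // Fills the 64-bit words of |base| at indices [begin, end) with |value|. Constant, small
+ // ranges are fully unrolled; otherwise a loop advances the store pointer from |begin| while
+ // counting the index down from |end|.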
+ void splatWords(LValue base, LValue begin, LValue end, LValue value, const AbstractHeap& heap)
+ {
+ const uint64_t unrollingLimit = 10;
+ if (begin->hasInt() && end->hasInt()) {
+ uint64_t beginConst = static_cast<uint64_t>(begin->asInt());
+ uint64_t endConst = static_cast<uint64_t>(end->asInt());
+
+ if (endConst - beginConst <= unrollingLimit) {
+ for (uint64_t i = beginConst; i < endConst; ++i) {
+ LValue pointer = m_out.add(base, m_out.constIntPtr(i * sizeof(uint64_t)));
+ m_out.store64(value, TypedPointer(heap, pointer));
+ }
+ return;
+ }
+ }
+
+ LBasicBlock initLoop = m_out.newBlock();
+ LBasicBlock initDone = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(initLoop);
+
+ ValueFromBlock originalIndex = m_out.anchor(end);
+ ValueFromBlock originalPointer = m_out.anchor(
+ m_out.add(base, m_out.shl(m_out.signExt32ToPtr(begin), m_out.constInt32(3))));
+ m_out.branch(m_out.notEqual(end, begin), unsure(initLoop), unsure(initDone));
+
+ m_out.appendTo(initLoop, initDone);
+ LValue index = m_out.phi(Int32, originalIndex);
+ LValue pointer = m_out.phi(pointerType(), originalPointer);
+
+ m_out.store64(value, TypedPointer(heap, pointer));
+
+ LValue nextIndex = m_out.sub(index, m_out.int32One);
+ m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
+ m_out.addIncomingToPhi(pointer, m_out.anchor(m_out.add(pointer, m_out.intPtrEight)));
+ m_out.branch(
+ m_out.notEqual(nextIndex, begin), unsure(initLoop), unsure(initDone));
+
+ m_out.appendTo(initDone, lastNext);
+ }
+
+ LValue allocatePropertyStorage(LValue object, Structure* previousStructure)
+ {
+ if (previousStructure->couldHaveIndexingHeader()) {
+ return vmCall(
+ pointerType(),
+ m_out.operation(operationAllocateComplexPropertyStorageWithInitialCapacity),
+ m_callFrame, object);
+ }
+
+ LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity);
+
+ splatWords(
+ result,
+ m_out.constInt32(-initialOutOfLineCapacity - 1), m_out.constInt32(-1),
+ m_out.int64Zero, m_heaps.properties.atAnyNumber());
+
+ return result;
+ }
+
+ LValue reallocatePropertyStorage(
+ LValue object, LValue oldStorage, Structure* previous, Structure* next)
+ {
+ size_t oldSize = previous->outOfLineCapacity();
+ size_t newSize = oldSize * outOfLineGrowthFactor;
+
+ ASSERT_UNUSED(next, newSize == next->outOfLineCapacity());
+
+ if (previous->couldHaveIndexingHeader()) {
+ LValue newAllocSize = m_out.constIntPtr(newSize);
+ return vmCall(pointerType(), m_out.operation(operationAllocateComplexPropertyStorage), m_callFrame, object, newAllocSize);
+ }
+
+ LValue result = allocatePropertyStorageWithSizeImpl(newSize);
+
+ ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*);
+ ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue));
+
+ for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
+ LValue loaded =
+ m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
+ m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
+ }
+
+ splatWords(
+ result,
+ m_out.constInt32(-newSize - 1), m_out.constInt32(-oldSize - 1),
+ m_out.int64Zero, m_heaps.properties.atAnyNumber());
+
+ return result;
+ }
+
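+ // Allocates out-of-line property storage for |sizeInValues| JSValues. The fast path
+ // bump-allocates from the VM's auxiliary space and returns the butterfly pointer
+ // (startOfStorage + sizeInBytes + sizeof(IndexingHeader)); the slow path lazily calls the
+ // matching operationAllocateSimplePropertyStorage* function.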
+ LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues)
+ {
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ size_t sizeInBytes = sizeInValues * sizeof(JSValue);
+ MarkedAllocator* allocator = vm().auxiliarySpace.allocatorFor(sizeInBytes);
+ LValue startOfStorage = allocateHeapCell(m_out.constIntPtr(allocator), slowPath);
+ ValueFromBlock fastButterfly = m_out.anchor(
+ m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ LValue slowButterflyValue;
+ if (sizeInValues == initialOutOfLineCapacity) {
+ slowButterflyValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationAllocateSimplePropertyStorageWithInitialCapacity,
+ locations[0].directGPR());
+ });
+ } else {
+ slowButterflyValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationAllocateSimplePropertyStorage, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(sizeInValues));
+ });
+ }
+ ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue);
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ return m_out.phi(pointerType(), fastButterfly, slowButterfly);
+ }
+
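+ // Emits a patchpoint-based inline cache for get_by_id. The fast path is generated by
+ // JITGetByIdGenerator; the late path links the IC's slow-path jump to a call to
+ // operationGetByIdOptimize (or operationTryGetByIdOptimize for TryGet), with exception
+ // handling wired through the patchpoint exception handle.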
+ LValue getById(LValue base, AccessType type)
+ {
+ Node* node = m_node;
+ UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];
+
+ B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(base);
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+
+ // FIXME: If this is a GetByIdFlush, we might get some performance boost if we claim that it
+ // clobbers volatile registers late. It's not necessary for correctness, though, since the
+ // IC code is super smart about saving registers.
+ // https://bugs.webkit.org/show_bug.cgi?id=152848
+
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ CallSiteIndex callSiteIndex =
+ state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);
+
+ // This is the direct exit target for operation calls.
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ // This is the exit for call ICs created by the getById for getters. We don't have
+ // to do anything weird other than call this, since it will associate the exit with
+ // the callsite index.
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ auto generator = Box<JITGetByIdGenerator>::create(
+ jit.codeBlock(), node->origin.semantic, callSiteIndex,
+ params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
+ JSValueRegs(params[0].gpr()), type);
+
+ generator->generateFastPath(jit);
+ CCallHelpers::Label done = jit.label();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ J_JITOperation_ESsiJI optimizationFunction;
+ if (type == AccessType::Get)
+ optimizationFunction = operationGetByIdOptimize;
+ else
+ optimizationFunction = operationTryGetByIdOptimize;
+
+ generator->slowPathJump().link(&jit);
+ CCallHelpers::Label slowPathBegin = jit.label();
+ CCallHelpers::Call slowPathCall = callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), optimizationFunction, params[0].gpr(),
+ CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
+ CCallHelpers::TrustedImmPtr(uid)).call();
+ jit.jump().linkTo(done, &jit);
+
+ generator->reportSlowPathCall(slowPathBegin, slowPathCall);
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ generator->finalize(linkBuffer);
+ });
+ });
+ });
+
+ return patchpoint;
+ }
+
+ LValue isFastTypedArray(LValue object)
+ {
+ return m_out.equal(
+ m_out.load32(object, m_heaps.JSArrayBufferView_mode),
+ m_out.constInt32(FastTypedArray));
+ }
+
+ TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
+ {
+ return m_out.baseIndex(
+ heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset);
+ }
+
+ template<typename IntFunctor, typename DoubleFunctor>
+ void compare(
+ const IntFunctor& intFunctor, const DoubleFunctor& doubleFunctor,
+ C_JITOperation_TT stringIdentFunction,
+ C_JITOperation_B_EJssJss stringFunction,
+ S_JITOperation_EJJ fallbackFunction)
+ {
+ if (m_node->isBinaryUseKind(Int32Use)) {
+ LValue left = lowInt32(m_node->child1());
+ LValue right = lowInt32(m_node->child2());
+ setBoolean(intFunctor(left, right));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(Int52RepUse)) {
+ Int52Kind kind;
+ LValue left = lowWhicheverInt52(m_node->child1(), kind);
+ LValue right = lowInt52(m_node->child2(), kind);
+ setBoolean(intFunctor(left, right));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(DoubleRepUse)) {
+ LValue left = lowDouble(m_node->child1());
+ LValue right = lowDouble(m_node->child2());
+ setBoolean(doubleFunctor(left, right));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(StringIdentUse)) {
+ LValue left = lowStringIdent(m_node->child1());
+ LValue right = lowStringIdent(m_node->child2());
+ setBoolean(m_out.callWithoutSideEffects(Int32, stringIdentFunction, left, right));
+ return;
+ }
+
+ if (m_node->isBinaryUseKind(StringUse)) {
+ LValue left = lowCell(m_node->child1());
+ LValue right = lowCell(m_node->child2());
+ speculateString(m_node->child1(), left);
+ speculateString(m_node->child2(), right);
+
+ LValue result = vmCall(
+ Int32, m_out.operation(stringFunction),
+ m_callFrame, left, right);
+ setBoolean(result);
+ return;
+ }
+
+ DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse));
+ nonSpeculativeCompare(intFunctor, fallbackFunction);
+ }
+
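+ // Lowers ToLowerCase. The fast path scans a non-rope, 8-bit string and returns it unchanged
+ // while every character is ASCII and already lower case; ropes, 16-bit strings, and bad
+ // characters fall back to operationToLowerCase, passing the index where the fast scan gave up.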
+ void compileToLowerCase()
+ {
+ LBasicBlock notRope = m_out.newBlock();
+ LBasicBlock is8Bit = m_out.newBlock();
+ LBasicBlock loopTop = m_out.newBlock();
+ LBasicBlock loopBody = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue string = lowString(m_node->child1());
+ ValueFromBlock startIndex = m_out.anchor(m_out.constInt32(0));
+ ValueFromBlock startIndexForCall = m_out.anchor(m_out.constInt32(0));
+ LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
+ m_out.branch(m_out.isZero64(impl),
+ unsure(slowPath), unsure(notRope));
+
+ LBasicBlock lastNext = m_out.appendTo(notRope, is8Bit);
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load32(impl, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(slowPath), unsure(is8Bit));
+
+ m_out.appendTo(is8Bit, loopTop);
+ LValue length = m_out.load32(impl, m_heaps.StringImpl_length);
+ LValue buffer = m_out.loadPtr(impl, m_heaps.StringImpl_data);
+ ValueFromBlock fastResult = m_out.anchor(string);
+ m_out.jump(loopTop);
+
+ m_out.appendTo(loopTop, loopBody);
+ LValue index = m_out.phi(Int32, startIndex);
+ ValueFromBlock indexFromBlock = m_out.anchor(index);
+ m_out.branch(m_out.below(index, length),
+ unsure(loopBody), unsure(continuation));
+
+ m_out.appendTo(loopBody, slowPath);
+
+ LValue byte = m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, buffer, m_out.zeroExtPtr(index)));
+ LValue isInvalidAsciiRange = m_out.bitAnd(byte, m_out.constInt32(~0x7F));
+ LValue isUpperCase = m_out.belowOrEqual(m_out.sub(byte, m_out.constInt32('A')), m_out.constInt32('Z' - 'A'));
+ LValue isBadCharacter = m_out.bitOr(isInvalidAsciiRange, isUpperCase);
+ m_out.addIncomingToPhi(index, m_out.anchor(m_out.add(index, m_out.int32One)));
+ m_out.branch(isBadCharacter, unsure(slowPath), unsure(loopTop));
+
+ m_out.appendTo(slowPath, continuation);
+ LValue slowPathIndex = m_out.phi(Int32, startIndexForCall, indexFromBlock);
+ ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationToLowerCase), m_callFrame, string, slowPathIndex));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
+ }
+
+ void compileNumberToStringWithRadix()
+ {
+ bool validRadixIsGuaranteed = false;
+ if (m_node->child2()->isInt32Constant()) {
+ int32_t radix = m_node->child2()->asInt32();
+ if (radix >= 2 && radix <= 36)
+ validRadixIsGuaranteed = true;
+ }
+
+ switch (m_node->child1().useKind()) {
+ case Int32Use:
+ setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString), m_callFrame, lowInt32(m_node->child1()), lowInt32(m_node->child2())));
+ break;
+ case Int52RepUse:
+ setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString), m_callFrame, lowStrictInt52(m_node->child1()), lowInt32(m_node->child2())));
+ break;
+ case DoubleRepUse:
+ setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString), m_callFrame, lowDouble(m_node->child1()), lowInt32(m_node->child2())));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void compileResolveScope()
+ {
+ UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
+ setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScope),
+ m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
+ }
+
+ void compileGetDynamicVar()
+ {
+ UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
+ setJSValue(vmCall(Int64, m_out.operation(operationGetDynamicVar),
+ m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
+ }
+
+ void compilePutDynamicVar()
+ {
+ UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
+ setJSValue(vmCall(Void, m_out.operation(operationPutDynamicVar),
+ m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
+ }
+
+ void compileUnreachable()
+ {
+ // It's so tempting to assert that AI has proved that this is unreachable. But that's
+ // simply not a requirement of the Unreachable opcode at all. If you emit an opcode that
+ // *you* know will not return, then it's fine to end the basic block with Unreachable
+ // after that opcode. You don't have to also prove to AI that your opcode does not return.
+ // Hence, there is nothing to do here but emit code that will crash, so that we catch
+ // cases where you said Unreachable but you lied.
+
+ crash();
+ }
+
+ void compileCheckDOM()
+ {
+ LValue cell = lowCell(m_node->child1());
+
+ DOMJIT::Patchpoint* domJIT = m_node->checkDOMPatchpoint();
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Void);
+ patchpoint->appendSomeRegister(cell);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+
+ NodeOrigin origin = m_origin;
+ unsigned osrExitArgumentOffset = patchpoint->numChildren();
+ OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(jsValueValue(cell), m_node->child1().node());
+ patchpoint->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, jsValueValue(cell)));
+
+ patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
+ patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ State* state = &m_ftlState;
+ Node* node = m_node;
+ JSValue child1Constant = m_state.forNode(m_node->child1()).value();
+
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ Vector<GPRReg> gpScratch;
+ Vector<FPRReg> fpScratch;
+ Vector<DOMJIT::Value> regs;
+
+ regs.append(DOMJIT::Value(params[0].gpr(), child1Constant));
+
+ for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
+ gpScratch.append(params.gpScratch(i));
+
+ for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
+ fpScratch.append(params.fpScratch(i));
+
+ RefPtr<OSRExitHandle> handle = exitDescriptor->emitOSRExitLater(*state, BadType, origin, params, osrExitArgumentOffset);
+
+ DOMJITPatchpointParams domJITParams(*state, params, node, nullptr, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
+ CCallHelpers::JumpList failureCases = domJIT->generator()->run(jit, domJITParams);
+
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(failureCases, linkBuffer.locationOf(handle->label));
+ });
+ });
+ patchpoint->effects = Effects::forCheck();
+ }
+
+ void compileCallDOM()
+ {
+ const DOMJIT::Signature* signature = m_node->signature();
+
+ // FIXME: We should have a way to call functions with the vector of registers.
+ // https://bugs.webkit.org/show_bug.cgi?id=163099
+ Vector<LValue, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> operands;
+
+ unsigned index = 0;
+ DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, [&](Node*, Edge edge) {
+ if (!index)
+ operands.append(lowCell(edge));
+ else {
+ switch (signature->arguments[index - 1]) {
+ case SpecString:
+ operands.append(lowString(edge));
+ break;
+ case SpecInt32Only:
+ operands.append(lowInt32(edge));
+ break;
+ case SpecBoolean:
+ operands.append(lowBoolean(edge));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+ ++index;
+ });
+
+ unsigned argumentCountIncludingThis = signature->argumentCount + 1;
+ LValue result;
+ switch (argumentCountIncludingThis) {
+ case 1:
+ result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EP>(signature->unsafeFunction)), m_callFrame, operands[0]);
+ break;
+ case 2:
+ result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1]);
+ break;
+ case 3:
+ result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1], operands[2]);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ setJSValue(result);
+ }
+
+ void compileCallDOMGetter()
+ {
+ DOMJIT::CallDOMGetterPatchpoint* domJIT = m_node->callDOMGetterData()->patchpoint;
+
+ Edge& baseEdge = m_node->child1();
+ LValue base = lowCell(baseEdge);
+ JSValue baseConstant = m_state.forNode(baseEdge).value();
+
+ LValue globalObject;
+ JSValue globalObjectConstant;
+ if (domJIT->requireGlobalObject) {
+ Edge& globalObjectEdge = m_node->child2();
+ globalObject = lowCell(globalObjectEdge);
+ globalObjectConstant = m_state.forNode(globalObjectEdge).value();
+ }
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(base);
+ if (domJIT->requireGlobalObject)
+ patchpoint->appendSomeRegister(globalObject);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+ RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
+ patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
+ patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+
+ State* state = &m_ftlState;
+ Node* node = m_node;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ Vector<GPRReg> gpScratch;
+ Vector<FPRReg> fpScratch;
+ Vector<DOMJIT::Value> regs;
+
+ regs.append(JSValueRegs(params[0].gpr()));
+ regs.append(DOMJIT::Value(params[1].gpr(), baseConstant));
+ if (domJIT->requireGlobalObject)
+ regs.append(DOMJIT::Value(params[2].gpr(), globalObjectConstant));
+
+ for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
+ gpScratch.append(params.gpScratch(i));
+
+ for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
+ fpScratch.append(params.fpScratch(i));
+
+ Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ DOMJITPatchpointParams domJITParams(*state, params, node, exceptions, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
+ domJIT->generator()->run(jit, domJITParams);
+ });
+ patchpoint->effects = Effects::forCall();
+ setJSValue(patchpoint);
+ }
+
+ void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild)
+ {
+ LValue rightCell = lowCell(rightChild);
+ LValue leftValue = lowJSValue(leftChild, ManualOperandSpeculation);
+
+ speculateTruthyObject(rightChild, rightCell, SpecObject);
+
+ LBasicBlock leftCellCase = m_out.newBlock();
+ LBasicBlock leftNotCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ isCell(leftValue, provenType(leftChild)),
+ unsure(leftCellCase), unsure(leftNotCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase);
+ speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCell));
+ ValueFromBlock cellResult = m_out.anchor(m_out.equal(rightCell, leftValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(leftNotCellCase, continuation);
+ FTL_TYPE_CHECK(
+ jsValueValue(leftValue), leftChild, SpecOther | SpecCell, isNotOther(leftValue));
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, cellResult, notCellResult));
+ }
+
+ void speculateTruthyObject(Edge edge, LValue cell, SpeculatedType filter)
+ {
+ if (masqueradesAsUndefinedWatchpointIsStillValid()) {
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
+ return;
+ }
+
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
+ speculate(
+ BadType, jsValueValue(cell), edge.node(),
+ m_out.testNonZero32(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(MasqueradesAsUndefined)));
+ }
+
+ template<typename IntFunctor>
+ void nonSpeculativeCompare(const IntFunctor& intFunctor, S_JITOperation_EJJ helperFunction)
+ {
+ LValue left = lowJSValue(m_node->child1());
+ LValue right = lowJSValue(m_node->child2());
+
+ LBasicBlock leftIsInt = m_out.newBlock();
+ LBasicBlock fastPath = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isNotInt32(left, provenType(m_node->child1())), rarely(slowPath), usually(leftIsInt));
+
+ LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath);
+ m_out.branch(isNotInt32(right, provenType(m_node->child2())), rarely(slowPath), usually(fastPath));
+
+ m_out.appendTo(fastPath, slowPath);
+ ValueFromBlock fastResult = m_out.anchor(intFunctor(unboxInt32(left), unboxInt32(right)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
+ pointerType(), m_out.operation(helperFunction), m_callFrame, left, right)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setBoolean(m_out.phi(Int32, fastResult, slowResult));
+ }
+
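+ // Inline JSString equality: unequal lengths are trivially false and empty strings trivially
+ // true; otherwise both strings must be non-rope and 8-bit, and their bytes are compared in a
+ // loop. Anything else falls back to operationCompareStringEq.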
+ LValue stringsEqual(LValue leftJSString, LValue rightJSString)
+ {
+ LBasicBlock notTriviallyUnequalCase = m_out.newBlock();
+ LBasicBlock notEmptyCase = m_out.newBlock();
+ LBasicBlock leftReadyCase = m_out.newBlock();
+ LBasicBlock rightReadyCase = m_out.newBlock();
+ LBasicBlock left8BitCase = m_out.newBlock();
+ LBasicBlock right8BitCase = m_out.newBlock();
+ LBasicBlock loop = m_out.newBlock();
+ LBasicBlock bytesEqual = m_out.newBlock();
+ LBasicBlock trueCase = m_out.newBlock();
+ LBasicBlock falseCase = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue length = m_out.load32(leftJSString, m_heaps.JSString_length);
+
+ m_out.branch(
+ m_out.notEqual(length, m_out.load32(rightJSString, m_heaps.JSString_length)),
+ unsure(falseCase), unsure(notTriviallyUnequalCase));
+
+ LBasicBlock lastNext = m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
+
+ m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));
+
+ m_out.appendTo(notEmptyCase, leftReadyCase);
+
+ LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value);
+ LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value);
+
+ m_out.branch(m_out.notNull(left), usually(leftReadyCase), rarely(slowCase));
+
+ m_out.appendTo(leftReadyCase, rightReadyCase);
+
+ m_out.branch(m_out.notNull(right), usually(rightReadyCase), rarely(slowCase));
+
+ m_out.appendTo(rightReadyCase, left8BitCase);
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load32(left, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(slowCase), unsure(left8BitCase));
+
+ m_out.appendTo(left8BitCase, right8BitCase);
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load32(right, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(slowCase), unsure(right8BitCase));
+
+ m_out.appendTo(right8BitCase, loop);
+
+ LValue leftData = m_out.loadPtr(left, m_heaps.StringImpl_data);
+ LValue rightData = m_out.loadPtr(right, m_heaps.StringImpl_data);
+
+ ValueFromBlock indexAtStart = m_out.anchor(length);
+
+ m_out.jump(loop);
+
+ m_out.appendTo(loop, bytesEqual);
+
+ LValue indexAtLoopTop = m_out.phi(Int32, indexAtStart);
+ LValue indexInLoop = m_out.sub(indexAtLoopTop, m_out.int32One);
+
+ LValue leftByte = m_out.load8ZeroExt32(
+ m_out.baseIndex(m_heaps.characters8, leftData, m_out.zeroExtPtr(indexInLoop)));
+ LValue rightByte = m_out.load8ZeroExt32(
+ m_out.baseIndex(m_heaps.characters8, rightData, m_out.zeroExtPtr(indexInLoop)));
+
+ m_out.branch(m_out.notEqual(leftByte, rightByte), unsure(falseCase), unsure(bytesEqual));
+
+ m_out.appendTo(bytesEqual, trueCase);
+
+ ValueFromBlock indexForNextIteration = m_out.anchor(indexInLoop);
+ m_out.addIncomingToPhi(indexAtLoopTop, indexForNextIteration);
+ m_out.branch(m_out.notZero32(indexInLoop), unsure(loop), unsure(trueCase));
+
+ m_out.appendTo(trueCase, falseCase);
+
+ ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(falseCase, slowCase);
+
+ ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowCase, continuation);
+
+ LValue slowResultValue = vmCall(
+ Int64, m_out.operation(operationCompareStringEq), m_callFrame,
+ leftJSString, rightJSString);
+ ValueFromBlock slowResult = m_out.anchor(unboxBoolean(slowResultValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int32, trueResult, falseResult, slowResult);
+ }
+
+ enum ScratchFPRUsage {
+ DontNeedScratchFPR,
+ NeedScratchFPR
+ };
+ template<typename BinaryArithOpGenerator, ScratchFPRUsage scratchFPRUsage = DontNeedScratchFPR>
+ void emitBinarySnippet(J_JITOperation_EJJ slowPathFunction)
+ {
+ Node* node = m_node;
+
+ LValue left = lowJSValue(node->child1());
+ LValue right = lowJSValue(node->child2());
+
+ SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
+ SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(left);
+ patchpoint->appendSomeRegister(right);
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+ patchpoint->numGPScratchRegisters = 1;
+ patchpoint->numFPScratchRegisters = 2;
+ if (scratchFPRUsage == NeedScratchFPR)
+ patchpoint->numFPScratchRegisters++;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ auto generator = Box<BinaryArithOpGenerator>::create(
+ leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
+ JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
+ params.fpScratch(0), params.fpScratch(1), params.gpScratch(0),
+ scratchFPRUsage == NeedScratchFPR ? params.fpScratch(2) : InvalidFPRReg);
+
+ generator->generateFastPath(jit);
+
+ if (generator->didEmitFastPath()) {
+ generator->endJumpList().link(&jit);
+ CCallHelpers::Label done = jit.label();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generator->slowPathJumpList().link(&jit);
+ callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), slowPathFunction, params[0].gpr(),
+ params[1].gpr(), params[2].gpr());
+ jit.jump().linkTo(done, &jit);
+ });
+ } else {
+ callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), slowPathFunction, params[0].gpr(), params[1].gpr(),
+ params[2].gpr());
+ }
+ });
+
+ setJSValue(patchpoint);
+ }
+
+ template<typename BinaryBitOpGenerator>
+ void emitBinaryBitOpSnippet(J_JITOperation_EJJ slowPathFunction)
+ {
+ Node* node = m_node;
+
+ LValue left = lowJSValue(node->child1());
+ LValue right = lowJSValue(node->child2());
+
+ SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
+ SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(left);
+ patchpoint->appendSomeRegister(right);
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+ patchpoint->numGPScratchRegisters = 1;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ auto generator = Box<BinaryBitOpGenerator>::create(
+ leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
+ JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.gpScratch(0));
+
+ generator->generateFastPath(jit);
+ generator->endJumpList().link(&jit);
+ CCallHelpers::Label done = jit.label();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generator->slowPathJumpList().link(&jit);
+ callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), slowPathFunction, params[0].gpr(),
+ params[1].gpr(), params[2].gpr());
+ jit.jump().linkTo(done, &jit);
+ });
+ });
+
+ setJSValue(patchpoint);
+ }
+
+ void emitRightShiftSnippet(JITRightShiftGenerator::ShiftType shiftType)
+ {
+ Node* node = m_node;
+
+ // FIXME: Make this do exceptions.
+ // https://bugs.webkit.org/show_bug.cgi?id=151686
+
+ LValue left = lowJSValue(node->child1());
+ LValue right = lowJSValue(node->child2());
+
+ SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
+ SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
+
+ PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(left);
+ patchpoint->appendSomeRegister(right);
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+ patchpoint->numGPScratchRegisters = 1;
+ patchpoint->numFPScratchRegisters = 1;
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ auto generator = Box<JITRightShiftGenerator>::create(
+ leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
+ JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
+ params.fpScratch(0), params.gpScratch(0), InvalidFPRReg, shiftType);
+
+ generator->generateFastPath(jit);
+ generator->endJumpList().link(&jit);
+ CCallHelpers::Label done = jit.label();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ generator->slowPathJumpList().link(&jit);
+
+ J_JITOperation_EJJ slowPathFunction =
+ shiftType == JITRightShiftGenerator::SignedShift
+ ? operationValueBitRShift : operationValueBitURShift;
+
+ callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), slowPathFunction, params[0].gpr(),
+ params[1].gpr(), params[2].gpr());
+ jit.jump().linkTo(done, &jit);
+ });
+ });
+
+ setJSValue(patchpoint);
+ }
+
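+ // Emits the inline bump-allocation fast path for the given MarkedAllocator. The instruction
+ // sequence comes from AssemblyHelpers::emitAllocateWithNonNullAllocator(), wrapped in a
+ // terminal patchpoint whose two successors are the continuation and the caller's slow path.
+ // A null or unknown allocator gets an explicit null check first.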
+ LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
+ {
+ MarkedAllocator* actualAllocator = nullptr;
+ if (allocator->hasIntPtr())
+ actualAllocator = bitwise_cast<MarkedAllocator*>(allocator->asIntPtr());
+
+ if (!actualAllocator) {
+ // This means that either we know that the allocator is null or we don't know what the
+ // allocator is. In either case, we need the null check.
+ LBasicBlock haveAllocator = m_out.newBlock();
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
+ m_out.branch(allocator, usually(haveAllocator), rarely(slowPath));
+ m_out.appendTo(haveAllocator, lastNext);
+ }
+
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+
+ PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
+ patchpoint->effects.terminal = true;
+ patchpoint->appendSomeRegister(allocator);
+ patchpoint->numGPScratchRegisters++;
+ patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+
+ m_out.appendSuccessor(usually(continuation));
+ m_out.appendSuccessor(rarely(slowPath));
+
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ CCallHelpers::JumpList jumpToSlowPath;
+
+ // We use a patchpoint to emit the allocation path because whenever we mess with
+ // allocation paths, we already reason about them at the machine code level. We know
+ // exactly what instruction sequence we want. We're confident that no compiler
+ // optimization could make this code better. So, it's best to have the code in
+ // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by
+ // all of the compiler tiers.
+ jit.emitAllocateWithNonNullAllocator(
+ params[0].gpr(), actualAllocator, params[1].gpr(), params.gpScratch(0),
+ jumpToSlowPath);
+
+ CCallHelpers::Jump jumpToSuccess;
+ if (!params.fallsThroughToSuccessor(0))
+ jumpToSuccess = jit.jump();
+
+ Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ jumpToSlowPath.linkTo(*labels[1], &jit);
+ if (jumpToSuccess.isSet())
+ jumpToSuccess.linkTo(*labels[0], &jit);
+ });
+ });
+
+ m_out.appendTo(continuation, lastNext);
+ return patchpoint;
+ }
+
+ void storeStructure(LValue object, Structure* structure)
+ {
+ m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID);
+ m_out.store32(
+ m_out.constInt32(structure->objectInitializationBlob()),
+ object, m_heaps.JSCell_usefulBytes);
+ }
+
+ void storeStructure(LValue object, LValue structure)
+ {
+ if (structure->hasIntPtr()) {
+ storeStructure(object, bitwise_cast<Structure*>(structure->asIntPtr()));
+ return;
+ }
+
+ LValue id = m_out.load32(structure, m_heaps.Structure_structureID);
+ m_out.store32(id, object, m_heaps.JSCell_structureID);
+
+ LValue blob = m_out.load32(structure, m_heaps.Structure_indexingTypeIncludingHistory);
+ m_out.store32(blob, object, m_heaps.JSCell_usefulBytes);
+ }
+
+ template <typename StructureType>
+ LValue allocateCell(LValue allocator, StructureType structure, LBasicBlock slowPath)
+ {
+ LValue result = allocateHeapCell(allocator, slowPath);
+ storeStructure(result, structure);
+ return result;
+ }
+
+ LValue allocateObject(LValue allocator, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
+ {
+ return allocateObject(allocator, weakStructure(structure), butterfly, slowPath);
+ }
+
+ LValue allocateObject(LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
+ {
+ LValue result = allocateCell(allocator, structure, slowPath);
+ if (structure->hasIntPtr()) {
+ splatWords(
+ result,
+ m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
+ m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + bitwise_cast<Structure*>(structure->asIntPtr())->inlineCapacity()),
+ m_out.int64Zero,
+ m_heaps.properties.atAnyNumber());
+ } else {
+ LValue end = m_out.add(
+ m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
+ m_out.load8ZeroExt32(structure, m_heaps.Structure_inlineCapacity));
+ splatWords(
+ result,
+ m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
+ end,
+ m_out.int64Zero,
+ m_heaps.properties.atAnyNumber());
+ }
+
+ m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
+ return result;
+ }
+
+ template<typename ClassType, typename StructureType>
+ LValue allocateObject(
+ size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
+ {
+ MarkedAllocator* allocator = subspaceFor<ClassType>(vm())->allocatorFor(size);
+ return allocateObject(m_out.constIntPtr(allocator), structure, butterfly, slowPath);
+ }
+
+ template<typename ClassType, typename StructureType>
+ LValue allocateObject(StructureType structure, LValue butterfly, LBasicBlock slowPath)
+ {
+ return allocateObject<ClassType>(
+ ClassType::allocationSize(0), structure, butterfly, slowPath);
+ }
+
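+ // Selects the MarkedAllocator for an allocation of |size| bytes in |subspace|. When both are
+ // constants this folds to a constant allocator (or a jump to the slow path if none exists);
+ // otherwise the size is rounded up to a size class and the allocator is loaded from the
+ // subspace's allocatorForSizeStep table, branching to the slow path above largeCutoff.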
+ LValue allocatorForSize(LValue subspace, LValue size, LBasicBlock slowPath)
+ {
+ static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");
+
+ // Try to do some constant-folding here.
+ if (subspace->hasIntPtr() && size->hasIntPtr()) {
+ Subspace* actualSubspace = bitwise_cast<Subspace*>(subspace->asIntPtr());
+ size_t actualSize = size->asIntPtr();
+
+ MarkedAllocator* actualAllocator = actualSubspace->allocatorFor(actualSize);
+ if (!actualAllocator) {
+ LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+ m_out.jump(slowPath);
+ m_out.appendTo(continuation, lastNext);
+ return m_out.intPtrZero;
+ }
+
+ return m_out.constIntPtr(actualAllocator);
+ }
+
+ unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);
+
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+
+ LValue sizeClassIndex = m_out.lShr(
+ m_out.add(size, m_out.constIntPtr(MarkedSpace::sizeStep - 1)),
+ m_out.constInt32(stepShift));
+
+ m_out.branch(
+ m_out.above(sizeClassIndex, m_out.constIntPtr(MarkedSpace::largeCutoff >> stepShift)),
+ rarely(slowPath), usually(continuation));
+
+ m_out.appendTo(continuation, lastNext);
+
+ return m_out.loadPtr(
+ m_out.baseIndex(
+ m_heaps.Subspace_allocatorForSizeStep,
+ subspace, m_out.sub(sizeClassIndex, m_out.intPtrOne)));
+ }
+
+ LValue allocatorForSize(Subspace& subspace, LValue size, LBasicBlock slowPath)
+ {
+ return allocatorForSize(m_out.constIntPtr(&subspace), size, slowPath);
+ }
+
+ template<typename ClassType>
+ LValue allocateVariableSizedObject(
+ LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
+ {
+ LValue allocator = allocatorForSize(
+ *subspaceFor<ClassType>(vm()), size, slowPath);
+ return allocateObject(allocator, structure, butterfly, slowPath);
+ }
+
+ template<typename ClassType>
+ LValue allocateVariableSizedCell(
+ LValue size, Structure* structure, LBasicBlock slowPath)
+ {
+ LValue allocator = allocatorForSize(
+ *subspaceFor<ClassType>(vm()), size, slowPath);
+ return allocateCell(allocator, structure, slowPath);
+ }
+
+ LValue allocateObject(RegisteredStructure structure)
+ {
+ size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
+ MarkedAllocator* allocator = subspaceFor<JSFinalObject>(vm())->allocatorFor(allocationSize);
+
+ // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
+ // instead of putting it on the slow path.
+ // https://bugs.webkit.org/show_bug.cgi?id=161062
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ ValueFromBlock fastResult = m_out.anchor(allocateObject(
+ m_out.constIntPtr(allocator), structure, m_out.intPtrZero, slowPath));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ LValue slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationNewObject, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(structure.get()));
+ });
+ ValueFromBlock slowResult = m_out.anchor(slowResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(pointerType(), fastResult, slowResult);
+ }
+
+ struct ArrayValues {
+ ArrayValues()
+ : array(nullptr)
+ , butterfly(nullptr)
+ {
+ }
+
+ ArrayValues(LValue array, LValue butterfly)
+ : array(array)
+ , butterfly(butterfly)
+ {
+ }
+
+ LValue array;
+ LValue butterfly;
+ };
+
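+ // Inline JSArray allocation: computes a vector length, bump-allocates the butterfly from the
+ // auxiliary space, initializes the header (and, optionally, the elements), then allocates the
+ // JSArray cell itself. Requests that are large enough to want ArrayStorage, or that fail the
+ // fast path, call operationNewArrayWithSize with the appropriate structure.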
+ ArrayValues allocateJSArray(LValue publicLength, LValue structure, LValue indexingType, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+ if (indexingType->hasInt32()) {
+ IndexingType type = static_cast<IndexingType>(indexingType->asInt32());
+ ASSERT_UNUSED(type,
+ hasUndecided(type)
+ || hasInt32(type)
+ || hasDouble(type)
+ || hasContiguous(type));
+ }
+
+ LBasicBlock fastCase = m_out.newBlock();
+ LBasicBlock largeCase = m_out.newBlock();
+ LBasicBlock failCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+ LBasicBlock slowCase = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastCase);
+
+ ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
+
+ LValue predicate;
+ if (shouldLargeArraySizeCreateArrayStorage)
+ predicate = m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
+ else
+ predicate = m_out.booleanFalse;
+
+ m_out.branch(predicate, rarely(largeCase), usually(fastCase));
+
+ m_out.appendTo(fastCase, largeCase);
+
+ LValue vectorLength = nullptr;
+ if (publicLength->hasInt32() && structure->hasIntPtr()) {
+ unsigned publicLengthConst = static_cast<unsigned>(publicLength->asInt32());
+ if (publicLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
+ vectorLength = m_out.constInt32(
+ Butterfly::optimalContiguousVectorLength(
+ bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), publicLengthConst));
+ }
+ }
+
+ if (!vectorLength) {
+ // We don't compute the optimal vector length for new Array(blah) where blah is not
+ // statically known, since the computational effort of doing it here is probably not worth it.
+ vectorLength = publicLength;
+ }
+
+ LValue payloadSize =
+ m_out.shl(m_out.zeroExt(vectorLength, pointerType()), m_out.constIntPtr(3));
+
+ LValue butterflySize = m_out.add(
+ payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));
+
+ LValue allocator = allocatorForSize(vm().auxiliarySpace, butterflySize, failCase);
+ LValue startOfStorage = allocateHeapCell(allocator, failCase);
+
+ LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));
+
+ m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
+ m_out.store32(vectorLength, butterfly, m_heaps.Butterfly_vectorLength);
+
+ initializeArrayElements(
+ indexingType,
+ shouldInitializeElements ? m_out.int32Zero : publicLength, vectorLength,
+ butterfly);
+
+ ValueFromBlock haveButterfly = m_out.anchor(butterfly);
+
+ LValue object = allocateObject<JSArray>(structure, butterfly, failCase);
+
+ ValueFromBlock fastResult = m_out.anchor(object);
+ ValueFromBlock fastButterfly = m_out.anchor(butterfly);
+ m_out.jump(continuation);
+
+ m_out.appendTo(largeCase, failCase);
+ ValueFromBlock largeStructure = m_out.anchor(
+ weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))));
+ m_out.jump(slowCase);
+
+ m_out.appendTo(failCase, slowCase);
+ ValueFromBlock failStructure = m_out.anchor(structure);
+ m_out.jump(slowCase);
+
+ m_out.appendTo(slowCase, continuation);
+ LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
+ LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
+
+ LValue slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationNewArrayWithSize, locations[0].directGPR(),
+ locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
+ },
+ structureValue, publicLength, butterflyValue);
+ ValueFromBlock slowResult = m_out.anchor(slowResultValue);
+ ValueFromBlock slowButterfly = m_out.anchor(
+ m_out.loadPtr(slowResultValue, m_heaps.JSObject_butterfly));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return ArrayValues(
+ m_out.phi(pointerType(), fastResult, slowResult),
+ m_out.phi(pointerType(), fastButterfly, slowButterfly));
+ }
+
+ ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, RegisteredStructure structure)
+ {
+ bool shouldInitializeElements = false;
+ bool shouldLargeArraySizeCreateArrayStorage = false;
+ return allocateJSArray(
+ publicLength, weakStructure(structure), m_out.constInt32(structure->indexingType()), shouldInitializeElements,
+ shouldLargeArraySizeCreateArrayStorage);
+ }
+
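+ // Returns a pointer to the next free ShadowChicken log packet, flushing the log via
+ // operationProcessShadowChickenLog when the cursor has reached the end, and advances the
+ // cursor past the returned packet.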
+ LValue ensureShadowChickenPacket()
+ {
+ LBasicBlock slowCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ TypedPointer addressOfLogCursor = m_out.absolute(vm().shadowChicken().addressOfLogCursor());
+ LValue logCursor = m_out.loadPtr(addressOfLogCursor);
+
+ ValueFromBlock fastResult = m_out.anchor(logCursor);
+
+ m_out.branch(
+ m_out.below(logCursor, m_out.constIntPtr(vm().shadowChicken().logEnd())),
+ usually(continuation), rarely(slowCase));
+
+ LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
+
+ vmCall(Void, m_out.operation(operationProcessShadowChickenLog), m_callFrame);
+
+ ValueFromBlock slowResult = m_out.anchor(m_out.loadPtr(addressOfLogCursor));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ LValue result = m_out.phi(pointerType(), fastResult, slowResult);
+
+ m_out.storePtr(
+ m_out.add(result, m_out.constIntPtr(sizeof(ShadowChicken::Packet))),
+ addressOfLogCursor);
+
+ return result;
+ }
+
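+ // Produces the boolean value of |edge| according to its use kind: numbers compare against
+ // zero, strings test their length, ObjectOrOtherUse is the negation of an
+ // equal-to-null-or-undefined check, and UntypedUse dispatches on the value's runtime type.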
+ LValue boolify(Edge edge)
+ {
+ switch (edge.useKind()) {
+ case BooleanUse:
+ case KnownBooleanUse:
+ return lowBoolean(edge);
+ case Int32Use:
+ return m_out.notZero32(lowInt32(edge));
+ case DoubleRepUse:
+ return m_out.doubleNotEqualAndOrdered(lowDouble(edge), m_out.doubleZero);
+ case ObjectOrOtherUse:
+ return m_out.logicalNot(
+ equalNullOrUndefined(
+ edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
+ ManualOperandSpeculation));
+ case StringUse: {
+ LValue stringValue = lowString(edge);
+ LValue length = m_out.load32NonNegative(stringValue, m_heaps.JSString_length);
+ return m_out.notEqual(length, m_out.int32Zero);
+ }
+ case StringOrOtherUse: {
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
+
+ FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCell) | SpecString, isNotString(value));
+ LValue length = m_out.load32NonNegative(value, m_heaps.JSString_length);
+ ValueFromBlock cellResult = m_out.anchor(m_out.notEqual(length, m_out.int32Zero));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, continuation);
+
+ FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value));
+ ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+
+ return m_out.phi(Int32, cellResult, notCellResult);
+ }
+ case UntypedUse: {
+ LValue value = lowJSValue(edge);
+
+ // Implements the following control flow structure:
+ // if (value is cell) {
+ // if (value is string)
+ // result = !!value->length
+ // else {
+ // do evil things for masquerades-as-undefined
+ // result = true
+ // }
+ // } else if (value is int32) {
+ // result = !!unboxInt32(value)
+ // } else if (value is number) {
+ // result = !!unboxDouble(value)
+ // } else {
+ // result = value == jsTrue
+ // }
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock stringCase = m_out.newBlock();
+ LBasicBlock notStringCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock int32Case = m_out.newBlock();
+ LBasicBlock notInt32Case = m_out.newBlock();
+ LBasicBlock doubleCase = m_out.newBlock();
+ LBasicBlock notDoubleCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock> results;
+
+ m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, stringCase);
+ m_out.branch(
+ isString(value, provenType(edge) & SpecCell),
+ unsure(stringCase), unsure(notStringCase));
+
+ m_out.appendTo(stringCase, notStringCase);
+ LValue nonEmptyString = m_out.notZero32(
+ m_out.load32NonNegative(value, m_heaps.JSString_length));
+ results.append(m_out.anchor(nonEmptyString));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notStringCase, notCellCase);
+ LValue isTruthyObject;
+ if (masqueradesAsUndefinedWatchpointIsStillValid())
+ isTruthyObject = m_out.booleanTrue;
+ else {
+ LBasicBlock masqueradesCase = m_out.newBlock();
+
+ results.append(m_out.anchor(m_out.booleanTrue));
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(MasqueradesAsUndefined)),
+ usually(continuation), rarely(masqueradesCase));
+
+ m_out.appendTo(masqueradesCase);
+
+ isTruthyObject = m_out.notEqual(
+ weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
+ m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
+ }
+ results.append(m_out.anchor(isTruthyObject));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, int32Case);
+ m_out.branch(
+ isInt32(value, provenType(edge) & ~SpecCell),
+ unsure(int32Case), unsure(notInt32Case));
+
+ m_out.appendTo(int32Case, notInt32Case);
+ results.append(m_out.anchor(m_out.notZero32(unboxInt32(value))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notInt32Case, doubleCase);
+ m_out.branch(
+ isNumber(value, provenType(edge) & ~SpecCell),
+ unsure(doubleCase), unsure(notDoubleCase));
+
+ m_out.appendTo(doubleCase, notDoubleCase);
+ LValue doubleIsTruthy = m_out.doubleNotEqualAndOrdered(
+ unboxDouble(value), m_out.constDouble(0));
+ results.append(m_out.anchor(doubleIsTruthy));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notDoubleCase, continuation);
+ LValue miscIsTruthy = m_out.equal(
+ value, m_out.constInt64(JSValue::encode(jsBoolean(true))));
+ results.append(m_out.anchor(miscIsTruthy));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int32, results);
+ }
+ default:
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return 0;
+ }
+ }
+
+ enum StringOrObjectMode {
+ AllCellsAreFalse,
+ CellCaseSpeculatesObject
+ };
+ enum EqualNullOrUndefinedMode {
+ EqualNull,
+ EqualUndefined,
+ EqualNullOrUndefined,
+ SpeculateNullOrUndefined
+ };
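+ // equalNullOrUndefined() answers "is this value equivalent to null/undefined?". On the
+ // cell side both StringOrObjectMode flavors answer false unless the cell masquerades as
+ // undefined; CellCaseSpeculatesObject additionally speculates that the cell is an object.
+ // EqualNullOrUndefinedMode picks the primitive-side test: compare against null, against
+ // undefined, against either, or speculate that only null/undefined can occur.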
+ LValue equalNullOrUndefined(
+ Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode,
+ OperandSpeculationMode operandMode = AutomaticOperandSpeculation)
+ {
+ bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid();
+
+ LValue value = lowJSValue(edge, operandMode);
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock primitiveCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);
+
+ Vector<ValueFromBlock, 3> results;
+
+ switch (cellMode) {
+ case AllCellsAreFalse:
+ break;
+ case CellCaseSpeculatesObject:
+ FTL_TYPE_CHECK(
+ jsValueValue(value), edge, (~SpecCell) | SpecObject, isNotObject(value));
+ break;
+ }
+
+ if (validWatchpoint) {
+ results.append(m_out.anchor(m_out.booleanFalse));
+ m_out.jump(continuation);
+ } else {
+ LBasicBlock masqueradesCase = m_out.newBlock();
+
+ results.append(m_out.anchor(m_out.booleanFalse));
+
+ m_out.branch(
+ m_out.testNonZero32(
+ m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(MasqueradesAsUndefined)),
+ rarely(masqueradesCase), usually(continuation));
+
+ m_out.appendTo(masqueradesCase, primitiveCase);
+
+ LValue structure = loadStructure(value);
+
+ results.append(m_out.anchor(
+ m_out.equal(
+ weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
+ m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
+ m_out.jump(continuation);
+ }
+
+ m_out.appendTo(primitiveCase, continuation);
+
+ LValue primitiveResult;
+ switch (primitiveMode) {
+ case EqualNull:
+ primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull));
+ break;
+ case EqualUndefined:
+ primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
+ break;
+ case EqualNullOrUndefined:
+ primitiveResult = isOther(value, provenType(edge));
+ break;
+ case SpeculateNullOrUndefined:
+ FTL_TYPE_CHECK(
+ jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value));
+ primitiveResult = m_out.booleanTrue;
+ break;
+ }
+ results.append(m_out.anchor(primitiveResult));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ return m_out.phi(Int32, results);
+ }
+
+ template<typename FunctionType>
+ void contiguousPutByValOutOfBounds(
+ FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value,
+ LBasicBlock continuation)
+ {
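+ // When the array mode is already known in-bounds, this emits nothing and the caller's
+ // store proceeds unconditionally. Otherwise: an index below publicLength falls through to
+ // the store; an index past publicLength but still below vectorLength is a hole, so we
+ // grow publicLength before storing; an index past vectorLength either OSR exits (if the
+ // array mode forbids out-of-bounds writes) or calls slowPathFunction and skips the store.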
+ LValue isNotInBounds = m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength));
+ if (!m_node->arrayMode().isInBounds()) {
+ LBasicBlock notInBoundsCase = m_out.newBlock();
+ LBasicBlock performStore = m_out.newBlock();
+
+ m_out.branch(isNotInBounds, unsure(notInBoundsCase), unsure(performStore));
+
+ LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore);
+
+ LValue isOutOfBounds = m_out.aboveOrEqual(
+ index, m_out.load32NonNegative(storage, m_heaps.Butterfly_vectorLength));
+
+ if (!m_node->arrayMode().isOutOfBounds())
+ speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
+ else {
+ LBasicBlock outOfBoundsCase = m_out.newBlock();
+ LBasicBlock holeCase = m_out.newBlock();
+
+ m_out.branch(isOutOfBounds, rarely(outOfBoundsCase), usually(holeCase));
+
+ LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase);
+
+ vmCall(
+ Void, m_out.operation(slowPathFunction),
+ m_callFrame, base, index, value);
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(holeCase, innerLastNext);
+ }
+
+ m_out.store32(
+ m_out.add(index, m_out.int32One),
+ storage, m_heaps.Butterfly_publicLength);
+
+ m_out.jump(performStore);
+ m_out.appendTo(performStore, lastNext);
+ }
+ }
+
+ void buildSwitch(SwitchData* data, LType type, LValue switchValue)
+ {
+ ASSERT(type == pointerType() || type == Int32);
+
+ Vector<SwitchCase> cases;
+ for (unsigned i = 0; i < data->cases.size(); ++i) {
+ SwitchCase newCase;
+
+ if (type == pointerType()) {
+ newCase = SwitchCase(m_out.constIntPtr(data->cases[i].value.switchLookupValue(data->kind)),
+ lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
+ } else if (type == Int32) {
+ newCase = SwitchCase(m_out.constInt32(data->cases[i].value.switchLookupValue(data->kind)),
+ lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
+ } else
+ CRASH();
+
+ cases.append(newCase);
+ }
+
+ m_out.switchInstruction(
+ switchValue, cases,
+ lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
+ }
+
+ void switchString(SwitchData* data, LValue string)
+ {
+ bool canDoBinarySwitch = true;
+ unsigned totalLength = 0;
+
+ for (DFG::SwitchCase myCase : data->cases) {
+ StringImpl* string = myCase.value.stringImpl();
+ if (!string->is8Bit()) {
+ canDoBinarySwitch = false;
+ break;
+ }
+ if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
+ canDoBinarySwitch = false;
+ break;
+ }
+ totalLength += string->length();
+ }
+
+ if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
+ switchStringSlow(data, string);
+ return;
+ }
+
+ LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
+ LValue length = m_out.load32(string, m_heaps.JSString_length);
+
+ LBasicBlock hasImplBlock = m_out.newBlock();
+ LBasicBlock is8BitBlock = m_out.newBlock();
+ LBasicBlock slowBlock = m_out.newBlock();
+
+ m_out.branch(m_out.isNull(stringImpl), unsure(slowBlock), unsure(hasImplBlock));
+
+ LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);
+
+ m_out.branch(
+ m_out.testIsZero32(
+ m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIs8Bit())),
+ unsure(slowBlock), unsure(is8BitBlock));
+
+ m_out.appendTo(is8BitBlock, slowBlock);
+
+ LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
+
+ // FIXME: We should propagate branch weight data to the cases of this switch.
+ // https://bugs.webkit.org/show_bug.cgi?id=144368
+
+ Vector<StringSwitchCase> cases;
+ for (DFG::SwitchCase myCase : data->cases)
+ cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block)));
+ std::sort(cases.begin(), cases.end());
+ switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false);
+
+ m_out.appendTo(slowBlock, lastNext);
+ switchStringSlow(data, string);
+ }
+
+ // The code for string switching is based closely on the same code in the DFG backend. While it
+ // would be nice to reduce the amount of similar-looking code, it seems like this is one of
+ // those algorithms where factoring out the common bits would result in more code than just
+ // duplicating it.
+
+ struct StringSwitchCase {
+ StringSwitchCase() { }
+
+ StringSwitchCase(StringImpl* string, LBasicBlock target)
+ : string(string)
+ , target(target)
+ {
+ }
+
+ bool operator<(const StringSwitchCase& other) const
+ {
+ return stringLessThan(*string, *other.string);
+ }
+
+ StringImpl* string;
+ LBasicBlock target;
+ };
+
+ struct CharacterCase {
+ CharacterCase()
+ : character(0)
+ , begin(0)
+ , end(0)
+ {
+ }
+
+ CharacterCase(LChar character, unsigned begin, unsigned end)
+ : character(character)
+ , begin(begin)
+ , end(end)
+ {
+ }
+
+ bool operator<(const CharacterCase& other) const
+ {
+ return character < other.character;
+ }
+
+ LChar character;
+ unsigned begin;
+ unsigned end;
+ };
+
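+ // Illustrative example of the recursion below (not taken from any real program): for the
+ // sorted cases { "car", "card", "cat" } with numChecked = 0, commonChars works out to 2
+ // ("ca" is shared by all three), minLength is 3, and allLengthsEqual is false. So we check
+ // length >= 3, check characters 0..1 against "ca", then binary-switch on character 2:
+ // 'r' recurses on { "car", "card" } (where "car" becomes the prefix case) and 't' recurses
+ // on { "cat" }.
+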
+ void switchStringRecurse(
+ SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases,
+ unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength,
+ unsigned checkedExactLength)
+ {
+ LBasicBlock fallThrough = lowBlock(data->fallThrough.block);
+
+ if (begin == end) {
+ m_out.jump(fallThrough);
+ return;
+ }
+
+ unsigned minLength = cases[begin].string->length();
+ unsigned commonChars = minLength;
+ bool allLengthsEqual = true;
+ for (unsigned i = begin + 1; i < end; ++i) {
+ unsigned myCommonChars = numChecked;
+ unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length());
+ for (unsigned j = numChecked; j < limit; ++j) {
+ if (cases[begin].string->at(j) != cases[i].string->at(j))
+ break;
+ myCommonChars++;
+ }
+ commonChars = std::min(commonChars, myCommonChars);
+ if (minLength != cases[i].string->length())
+ allLengthsEqual = false;
+ minLength = std::min(minLength, cases[i].string->length());
+ }
+
+ if (checkedExactLength) {
+ DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength);
+ DFG_ASSERT(m_graph, m_node, allLengthsEqual);
+ }
+
+ DFG_ASSERT(m_graph, m_node, minLength >= commonChars);
+
+ if (!allLengthsEqual && alreadyCheckedLength < minLength)
+ m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough));
+ if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
+ m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough));
+
+ for (unsigned i = numChecked; i < commonChars; ++i) {
+ m_out.check(
+ m_out.notEqual(
+ m_out.load8ZeroExt32(buffer, m_heaps.characters8[i]),
+ m_out.constInt32(static_cast<uint16_t>(cases[begin].string->at(i)))),
+ unsure(fallThrough));
+ }
+
+ if (minLength == commonChars) {
+ // This is the case where one of the cases is a prefix of all of the other cases.
+ // We've already checked that the first commonChars characters of the input match that
+ // common prefix, so we just check the length to decide whether to jump to that case.
+
+ DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars);
+ for (unsigned i = begin + 1; i < end; ++i)
+ DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars);
+
+ if (allLengthsEqual) {
+ DFG_ASSERT(m_graph, m_node, end == begin + 1);
+ m_out.jump(cases[begin].target);
+ return;
+ }
+
+ m_out.check(
+ m_out.equal(length, m_out.constInt32(commonChars)),
+ unsure(cases[begin].target));
+
+ // We've checked if the length is >= minLength, and then we checked if the length is
+ // == commonChars. We get to this point if it is >= minLength but not == commonChars.
+ // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1.
+ switchStringRecurse(
+ data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false);
+ return;
+ }
+
+ // At this point we know that the string is longer than commonChars, and we've only verified
+ // commonChars. Use a binary switch on the next unchecked character, i.e.
+ // string[commonChars].
+
+ DFG_ASSERT(m_graph, m_node, end >= begin + 2);
+
+ LValue uncheckedChar = m_out.load8ZeroExt32(buffer, m_heaps.characters8[commonChars]);
+
+ Vector<CharacterCase> characterCases;
+ CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1);
+ for (unsigned i = begin + 1; i < end; ++i) {
+ LChar currentChar = cases[i].string->at(commonChars);
+ if (currentChar != currentCase.character) {
+ currentCase.end = i;
+ characterCases.append(currentCase);
+ currentCase = CharacterCase(currentChar, i, i + 1);
+ } else
+ currentCase.end = i + 1;
+ }
+ characterCases.append(currentCase);
+
+ Vector<LBasicBlock> characterBlocks;
+ for (unsigned i = characterCases.size(); i--;)
+ characterBlocks.append(m_out.newBlock());
+
+ Vector<SwitchCase> switchCases;
+ for (unsigned i = 0; i < characterCases.size(); ++i) {
+ if (i)
+ DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character);
+ switchCases.append(SwitchCase(
+ m_out.constInt32(characterCases[i].character), characterBlocks[i], Weight()));
+ }
+ m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight());
+
+ LBasicBlock lastNext = m_out.m_nextBlock;
+ characterBlocks.append(lastNext); // Makes it convenient to set nextBlock.
+ for (unsigned i = 0; i < characterCases.size(); ++i) {
+ m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]);
+ switchStringRecurse(
+ data, buffer, length, cases, commonChars + 1,
+ characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual);
+ }
+
+ DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext);
+ }
+
+ void switchStringSlow(SwitchData* data, LValue string)
+ {
+ // FIXME: We ought to be able to use computed gotos here. We would save the labels of the
+ // blocks we want to jump to, and then request their addresses after compilation completes.
+ // https://bugs.webkit.org/show_bug.cgi?id=144369
+
+ LValue branchOffset = vmCall(
+ Int32, m_out.operation(operationSwitchStringAndGetBranchOffset),
+ m_callFrame, m_out.constIntPtr(data->switchTableIndex), string);
+
+ StringJumpTable& table = codeBlock()->stringSwitchJumpTable(data->switchTableIndex);
+
+ Vector<SwitchCase> cases;
+ std::unordered_set<int32_t> alreadyHandled; // Branch offsets may be zero or negative, which runs into HashSet's reserved-key corner cases, and throughput doesn't matter here, so std::unordered_set is the simpler choice.
+ for (unsigned i = 0; i < data->cases.size(); ++i) {
+ // FIXME: The fact that we're using the bytecode's switch table means that the
+ // following DFG IR transformation would be invalid.
+ //
+ // Original code:
+ // switch (v) {
+ // case "foo":
+ // case "bar":
+ // things();
+ // break;
+ // default:
+ // break;
+ // }
+ //
+ // New code:
+ // switch (v) {
+ // case "foo":
+ // instrumentFoo();
+ // goto _things;
+ // case "bar":
+ // instrumentBar();
+ // _things:
+ // things();
+ // break;
+ // default:
+ // break;
+ // }
+ //
+ // Luckily, we don't currently do any such transformation. But it's kind of silly that
+ // this is an issue.
+ // https://bugs.webkit.org/show_bug.cgi?id=144635
+
+ DFG::SwitchCase myCase = data->cases[i];
+ StringJumpTable::StringOffsetTable::iterator iter =
+ table.offsetTable.find(myCase.value.stringImpl());
+ DFG_ASSERT(m_graph, m_node, iter != table.offsetTable.end());
+
+ if (!alreadyHandled.insert(iter->value.branchOffset).second)
+ continue;
+
+ cases.append(SwitchCase(
+ m_out.constInt32(iter->value.branchOffset),
+ lowBlock(myCase.target.block), Weight(myCase.target.count)));
+ }
+
+ m_out.switchInstruction(
+ branchOffset, cases, lowBlock(data->fallThrough.block),
+ Weight(data->fallThrough.count));
+ }
+
+ // Calls the functor at the point of code generation where we know what the result type is.
+ // You can emit whatever code you like at that point. Expects you to terminate the basic block.
+ // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if
+ // you aren't using this as the terminator of a high-level block, you should create your own
+ // continuation and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before
+ // calling this. For example:
+ //
+ // LBasicBlock continuation = m_out.newBlock();
+ // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
+ // buildTypeOf(
+ // child, value,
+ // [&] (TypeofType type) {
+ // do things;
+ // m_out.jump(continuation);
+ // });
+ // m_out.appendTo(continuation, lastNext);
+ template<typename Functor>
+ void buildTypeOf(Edge child, LValue value, const Functor& functor)
+ {
+ JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
+
+ // Implements the following branching structure:
+ //
+ // if (is cell) {
+ // if (is object) {
+ // if (is function) {
+ // return function;
+ // } else if (doesn't have call trap and doesn't masquerade as undefined) {
+ // return object
+ // } else {
+ // return slowPath();
+ // }
+ // } else if (is string) {
+ // return string
+ // } else {
+ // return symbol
+ // }
+ // } else if (is number) {
+ // return number
+ // } else if (is null) {
+ // return object
+ // } else if (is boolean) {
+ // return boolean
+ // } else {
+ // return undefined
+ // }
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock objectCase = m_out.newBlock();
+ LBasicBlock functionCase = m_out.newBlock();
+ LBasicBlock notFunctionCase = m_out.newBlock();
+ LBasicBlock reallyObjectCase = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock unreachable = m_out.newBlock();
+ LBasicBlock notObjectCase = m_out.newBlock();
+ LBasicBlock stringCase = m_out.newBlock();
+ LBasicBlock symbolCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock numberCase = m_out.newBlock();
+ LBasicBlock notNumberCase = m_out.newBlock();
+ LBasicBlock notNullCase = m_out.newBlock();
+ LBasicBlock booleanCase = m_out.newBlock();
+ LBasicBlock undefinedCase = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
+ m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase));
+
+ m_out.appendTo(objectCase, functionCase);
+ m_out.branch(
+ isFunction(value, provenType(child) & SpecObject),
+ unsure(functionCase), unsure(notFunctionCase));
+
+ m_out.appendTo(functionCase, notFunctionCase);
+ functor(TypeofType::Function);
+
+ m_out.appendTo(notFunctionCase, reallyObjectCase);
+ m_out.branch(
+ isExoticForTypeof(value, provenType(child) & (SpecObject - SpecFunction)),
+ rarely(slowPath), usually(reallyObjectCase));
+
+ m_out.appendTo(reallyObjectCase, slowPath);
+ functor(TypeofType::Object);
+
+ m_out.appendTo(slowPath, unreachable);
+ LValue result = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
+ CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
+ }, value);
+ Vector<SwitchCase, 3> cases;
+ cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase));
+ cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase));
+ cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase));
+ m_out.switchInstruction(m_out.castToInt32(result), cases, unreachable, Weight());
+
+ m_out.appendTo(unreachable, notObjectCase);
+ m_out.unreachable();
+
+ m_out.appendTo(notObjectCase, stringCase);
+ m_out.branch(
+ isString(value, provenType(child) & (SpecCell - SpecObject)),
+ unsure(stringCase), unsure(symbolCase));
+
+ m_out.appendTo(stringCase, symbolCase);
+ functor(TypeofType::String);
+
+ m_out.appendTo(symbolCase, notCellCase);
+ functor(TypeofType::Symbol);
+
+ m_out.appendTo(notCellCase, numberCase);
+ m_out.branch(
+ isNumber(value, provenType(child) & ~SpecCell),
+ unsure(numberCase), unsure(notNumberCase));
+
+ m_out.appendTo(numberCase, notNumberCase);
+ functor(TypeofType::Number);
+
+ m_out.appendTo(notNumberCase, notNullCase);
+ LValue isNull;
+ if (provenType(child) & SpecOther)
+ isNull = m_out.equal(value, m_out.constInt64(ValueNull));
+ else
+ isNull = m_out.booleanFalse;
+ m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));
+
+ m_out.appendTo(notNullCase, booleanCase);
+ m_out.branch(
+ isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)),
+ unsure(booleanCase), unsure(undefinedCase));
+
+ m_out.appendTo(booleanCase, undefinedCase);
+ functor(TypeofType::Boolean);
+
+ m_out.appendTo(undefinedCase, lastNext);
+ functor(TypeofType::Undefined);
+ }
+
+ LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
+ {
+ LBasicBlock greatEnough = m_out.newBlock();
+ LBasicBlock withinRange = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock, 2> results;
+
+ m_out.branch(
+ m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)),
+ unsure(greatEnough), unsure(slowPath));
+
+ LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange);
+ m_out.branch(
+ m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)),
+ unsure(withinRange), unsure(slowPath));
+
+ m_out.appendTo(withinRange, slowPath);
+ LValue fastResult;
+ if (isSigned)
+ fastResult = m_out.doubleToInt(doubleValue);
+ else
+ fastResult = m_out.doubleToUInt(doubleValue);
+ results.append(m_out.anchor(fastResult));
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+ results.append(m_out.anchor(m_out.call(Int32, m_out.operation(operationToInt32), doubleValue)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int32, results);
+ }
+
+ LValue doubleToInt32(LValue doubleValue)
+ {
+ if (hasSensibleDoubleToInt())
+ return sensibleDoubleToInt32(doubleValue);
+
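+ // Fall back to the range-checked conversion: only values in [-(2^31 - 1), 2^31 - 1] take
+ // the inline truncation. The symmetric bound is conservative; INT32_MIN itself fails the
+ // check and is handled by operationToInt32 on the slow path of the callee.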
+ double limit = pow(2, 31) - 1;
+ return doubleToInt32(doubleValue, -limit, limit);
+ }
+
+ LValue sensibleDoubleToInt32(LValue doubleValue)
+ {
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
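+ // Sketch of why this works, assuming the behavior that hasSensibleDoubleToInt() is meant
+ // to capture: the truncating double-to-int instruction produces 0x80000000 for NaN and
+ // for out-of-range inputs, so that single value doubles as the "take the slow call"
+ // signal. It can also be produced legitimately (for exactly INT32_MIN), in which case
+ // operationToInt32SensibleSlow just recomputes the same answer.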
+ LValue fastResultValue = m_out.doubleToInt(doubleValue);
+ ValueFromBlock fastResult = m_out.anchor(fastResultValue);
+ m_out.branch(
+ m_out.equal(fastResultValue, m_out.constInt32(0x80000000)),
+ rarely(slowPath), usually(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
+ ValueFromBlock slowResult = m_out.anchor(
+ m_out.call(Int32, m_out.operation(operationToInt32SensibleSlow), doubleValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int32, fastResult, slowResult);
+ }
+
+ // This is a mechanism for creating a code generator that fills in a gap in the code using our
+ // own MacroAssembler. This is useful for slow paths that involve a lot of code and we don't want
+ // to pay the price of B3 optimizing it. A lazy slow path will only be generated if it actually
+ // executes. On the other hand, a lazy slow path always incurs the cost of two additional jumps.
+ // Also, the lazy slow path's register allocation state is slaved to whatever B3 did, so you
+ // have to use a ScratchRegisterAllocator to try to use some unused registers and you may have
+ // to spill to top of stack if there aren't enough registers available.
+ //
+ // Lazy slow paths involve three different stages of execution. Each stage has unique
+ // capabilities and knowledge. The stages are:
+ //
+ // 1) DFG->B3 lowering, i.e. code that runs in this phase. Lowering is the last time you will
+ // have access to LValues. If there is an LValue that needs to be fed as input to a lazy slow
+ // path, then you must pass it as an argument here (as one of the varargs arguments after the
+ // functor). But, lowering doesn't know which registers will be used for those LValues. Hence
+ // you pass a lambda to lazySlowPath() and that lambda will run during stage (2):
+ //
+ // 2) FTLCompile.cpp's fixFunctionBasedOnStackMaps. This code is the only stage at which we know
+ // the mapping from arguments passed to this method in (1) and the registers that B3
+ // selected for those arguments. You don't actually want to generate any code here, since then
+ // the slow path wouldn't actually be lazily generated. Instead, you want to save the
+ // registers being used for the arguments and defer code generation to stage (3) by creating
+ // and returning a LazySlowPath::Generator:
+ //
+ // 3) LazySlowPath's generate() method. This code runs in response to the lazy slow path
+ // executing for the first time. It will call the generator you created in stage (2).
+ //
+ // Note that each time you invoke stage (1), stage (2) may be invoked zero, one, or many times.
+ // Stage (2) will usually be invoked once for stage (1). But, B3 may kill the code, in which
+ // case stage (2) won't run. B3 may duplicate the code (for example via tail duplication),
+ // leading to many calls to your stage (2) lambda. Stage (3) may be called zero or once for each
+ // stage (2). It will be called zero times if the slow path never runs. This is what you hope for
+ // whenever you use the lazySlowPath() mechanism.
+ //
+ // A typical use of lazySlowPath() will look like the example below, which just creates a slow
+ // path that adds some value to the input and returns it.
+ //
+ // // Stage (1) is here. This is your last chance to figure out which LValues to use as inputs.
+ // // Notice how we pass "input" as an argument to lazySlowPath().
+ // LValue input = ...;
+ // int addend = ...;
+ // LValue output = lazySlowPath(
+ // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ // // Stage (2) is here. This is your last chance to figure out which registers are used
+ // // for which values. Location zero is always the return value. You can ignore it if
+ // // you don't want to return anything. Location 1 is the register for the first
+ // // argument to the lazySlowPath(), i.e. "input". Note that the Location object could
+ // // also hold an FPR, if you are passing a double.
+ // GPRReg outputGPR = locations[0].directGPR();
+ // GPRReg inputGPR = locations[1].directGPR();
+ // return LazySlowPath::createGenerator(
+ // [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
+ // // Stage (3) is here. This is when you generate code. You have access to the
+ // // registers you collected in stage (2) because this lambda closes over those
+ // // variables (outputGPR and inputGPR). You also have access to whatever extra
+ // // data you collected in stage (1), such as the addend in this case.
+ // jit.add32(TrustedImm32(addend), inputGPR, outputGPR);
+ // // You have to end by jumping to done. There is nothing to fall through to.
+ // // You can also jump to the exception handler (see LazySlowPath.h for more
+ // // info). Note that currently you cannot OSR exit.
+ // params.doneJumps.append(jit.jump());
+ // });
+ // },
+ // input);
+ //
+ // You can basically pass as many inputs as you like, either using this varargs form, or by
+ // passing a Vector of LValues.
+ //
+ // Note that if your slow path is only doing a call, you can use the createLazyCallGenerator()
+ // helper. For example:
+ //
+ // LValue input = ...;
+ // LValue output = lazySlowPath(
+ // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ // return createLazyCallGenerator(
+ // operationDoThings, locations[0].directGPR(), locations[1].directGPR());
+ // }, input);
+ //
+ // Finally, note that all of the lambdas - both the stage (2) lambda and the stage (3) lambda -
+ // run after the function that created them returns. Hence, you should not use by-reference
+ // capture (i.e. [&]) in any of these lambdas.
+ template<typename Functor, typename... ArgumentTypes>
+ PatchpointValue* lazySlowPath(const Functor& functor, ArgumentTypes... arguments)
+ {
+ return lazySlowPath(functor, Vector<LValue>{ arguments... });
+ }
+
+ template<typename Functor>
+ PatchpointValue* lazySlowPath(const Functor& functor, const Vector<LValue>& userArguments)
+ {
+ CodeOrigin origin = m_node->origin.semantic;
+
+ PatchpointValue* result = m_out.patchpoint(B3::Int64);
+ for (LValue arg : userArguments)
+ result->append(ConstrainedValue(arg, B3::ValueRep::SomeRegister));
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(result);
+
+ result->clobber(RegisterSet::macroScratchRegisters());
+ State* state = &m_ftlState;
+
+ result->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ Vector<Location> locations;
+ for (const B3::ValueRep& rep : params)
+ locations.append(Location::forValueRep(rep));
+
+ RefPtr<LazySlowPath::Generator> generator = functor(locations);
+
+ CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
+ CCallHelpers::Label done = jit.label();
+
+ RegisterSet usedRegisters = params.unavailableRegisters();
+
+ RefPtr<ExceptionTarget> exceptionTarget =
+ exceptionHandle->scheduleExitCreation(params);
+
+ // FIXME: As part of handling exceptions, we need to create a concrete OSRExit here.
+ // Doing so should automagically register late paths that emit exit thunks.
+
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ patchableJump.m_jump.link(&jit);
+ unsigned index = state->jitCode->lazySlowPaths.size();
+ state->jitCode->lazySlowPaths.append(nullptr);
+ jit.pushToSaveImmediateWithoutTouchingRegisters(
+ CCallHelpers::TrustedImm32(index));
+ CCallHelpers::Jump generatorJump = jit.jump();
+
+ // Note that so long as we're here, we don't really know if our late path
+ // runs before or after any other late paths that we might depend on, like
+ // the exception thunk.
+
+ RefPtr<JITCode> jitCode = state->jitCode;
+ VM* vm = &state->graph.m_vm;
+
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(
+ generatorJump, CodeLocationLabel(
+ vm->getCTIStub(
+ lazySlowPathGenerationThunkGenerator).code()));
+
+ CodeLocationJump linkedPatchableJump = CodeLocationJump(
+ linkBuffer.locationOf(patchableJump));
+ CodeLocationLabel linkedDone = linkBuffer.locationOf(done);
+
+ CallSiteIndex callSiteIndex =
+ jitCode->common.addUniqueCallSiteIndex(origin);
+
+ std::unique_ptr<LazySlowPath> lazySlowPath =
+ std::make_unique<LazySlowPath>(
+ linkedPatchableJump, linkedDone,
+ exceptionTarget->label(linkBuffer), usedRegisters,
+ callSiteIndex, generator);
+
+ jitCode->lazySlowPaths[index] = WTFMove(lazySlowPath);
+ });
+ });
+ });
+ return result;
+ }
+
+ void speculate(
+ ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
+ {
+ appendOSRExit(kind, lowValue, highValue, failCondition, m_origin);
+ }
+
+ void terminate(ExitKind kind)
+ {
+ speculate(kind, noValue(), nullptr, m_out.booleanTrue);
+ didAlreadyTerminate();
+ }
+
+ void didAlreadyTerminate()
+ {
+ m_state.setIsValid(false);
+ }
+
+ void typeCheck(
+ FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
+ LValue failCondition, ExitKind exitKind = BadType)
+ {
+ appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition, exitKind);
+ }
+
+ void appendTypeCheck(
+ FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
+ LValue failCondition, ExitKind exitKind)
+ {
+ if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough))
+ return;
+ ASSERT(mayHaveTypeCheck(highValue.useKind()));
+ appendOSRExit(exitKind, lowValue, highValue.node(), failCondition, m_origin);
+ m_interpreter.filter(highValue, typesPassedThrough);
+ }
+
+ LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
+
+ if (edge->hasConstant()) {
+ JSValue value = edge->asJSValue();
+ if (!value.isInt32()) {
+ terminate(Uncountable);
+ return m_out.int32Zero;
+ }
+ return m_out.constInt32(value.asInt32());
+ }
+
+ LoweredNodeValue value = m_int32Values.get(edge.node());
+ if (isValid(value))
+ return value.value();
+
+ value = m_strictInt52Values.get(edge.node());
+ if (isValid(value))
+ return strictInt52ToInt32(edge, value.value());
+
+ value = m_int52Values.get(edge.node());
+ if (isValid(value))
+ return strictInt52ToInt32(edge, int52ToStrictInt52(value.value()));
+
+ value = m_jsValueValues.get(edge.node());
+ if (isValid(value)) {
+ LValue boxedResult = value.value();
+ FTL_TYPE_CHECK(
+ jsValueValue(boxedResult), edge, SpecInt32Only, isNotInt32(boxedResult));
+ LValue result = unboxInt32(boxedResult);
+ setInt32(edge.node(), result);
+ return result;
+ }
+
+ DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecInt32Only));
+ terminate(Uncountable);
+ return m_out.int32Zero;
+ }
+
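+ // Int52 values have two machine representations. StrictInt52 is the plain two's-complement
+ // value; Int52 is that value shifted left by JSValue::int52ShiftAmount, which places the
+ // 52 significant bits at the top of the register (so, roughly, 52-bit overflow can be
+ // observed as ordinary 64-bit overflow).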
+ enum Int52Kind { StrictInt52, Int52 };
+ LValue lowInt52(Edge edge, Int52Kind kind)
+ {
+ DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse);
+
+ LoweredNodeValue value;
+
+ switch (kind) {
+ case Int52:
+ value = m_int52Values.get(edge.node());
+ if (isValid(value))
+ return value.value();
+
+ value = m_strictInt52Values.get(edge.node());
+ if (isValid(value))
+ return strictInt52ToInt52(value.value());
+ break;
+
+ case StrictInt52:
+ value = m_strictInt52Values.get(edge.node());
+ if (isValid(value))
+ return value.value();
+
+ value = m_int52Values.get(edge.node());
+ if (isValid(value))
+ return int52ToStrictInt52(value.value());
+ break;
+ }
+
+ DFG_ASSERT(m_graph, m_node, !provenType(edge));
+ terminate(Uncountable);
+ return m_out.int64Zero;
+ }
+
+ LValue lowInt52(Edge edge)
+ {
+ return lowInt52(edge, Int52);
+ }
+
+ LValue lowStrictInt52(Edge edge)
+ {
+ return lowInt52(edge, StrictInt52);
+ }
+
+ bool betterUseStrictInt52(Node* node)
+ {
+ return !isValid(m_int52Values.get(node));
+ }
+ bool betterUseStrictInt52(Edge edge)
+ {
+ return betterUseStrictInt52(edge.node());
+ }
+ template<typename T>
+ Int52Kind bestInt52Kind(T node)
+ {
+ return betterUseStrictInt52(node) ? StrictInt52 : Int52;
+ }
+ Int52Kind opposite(Int52Kind kind)
+ {
+ switch (kind) {
+ case Int52:
+ return StrictInt52;
+ case StrictInt52:
+ return Int52;
+ }
+ DFG_CRASH(m_graph, m_node, "Bad use kind");
+ return Int52;
+ }
+
+ LValue lowWhicheverInt52(Edge edge, Int52Kind& kind)
+ {
+ kind = bestInt52Kind(edge);
+ return lowInt52(edge, kind);
+ }
+
+ LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()));
+
+ if (edge->op() == JSConstant) {
+ FrozenValue* value = edge->constant();
+ if (!value->value().isCell()) {
+ terminate(Uncountable);
+ return m_out.intPtrZero;
+ }
+ return frozenPointer(value);
+ }
+
+ LoweredNodeValue value = m_jsValueValues.get(edge.node());
+ if (isValid(value)) {
+ LValue uncheckedValue = value.value();
+ FTL_TYPE_CHECK(
+ jsValueValue(uncheckedValue), edge, SpecCell, isNotCell(uncheckedValue));
+ return uncheckedValue;
+ }
+
+ DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecCell));
+ terminate(Uncountable);
+ return m_out.intPtrZero;
+ }
+
+ LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
+
+ LValue result = lowCell(edge, mode);
+ speculateObject(edge, result);
+ return result;
+ }
+
+ LValue lowRegExpObject(Edge edge)
+ {
+ LValue result = lowCell(edge);
+ speculateRegExpObject(edge, result);
+ return result;
+ }
+
+ LValue lowMapObject(Edge edge)
+ {
+ LValue result = lowCell(edge);
+ speculateMapObject(edge, result);
+ return result;
+ }
+
+ LValue lowSetObject(Edge edge)
+ {
+ LValue result = lowCell(edge);
+ speculateSetObject(edge, result);
+ return result;
+ }
+
+ LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringIdentUse);
+
+ LValue result = lowCell(edge, mode);
+ speculateString(edge, result);
+ return result;
+ }
+
+ LValue lowStringIdent(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringIdentUse);
+
+ LValue string = lowString(edge, mode);
+ LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
+ speculateStringIdent(edge, string, stringImpl);
+ return stringImpl;
+ }
+
+ LValue lowSymbol(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == SymbolUse);
+
+ LValue result = lowCell(edge, mode);
+ speculateSymbol(edge, result);
+ return result;
+ }
+
+ LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
+
+ LValue result = lowCell(edge, mode);
+ speculateNonNullObject(edge, result);
+ return result;
+ }
+
+ LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);
+
+ if (edge->hasConstant()) {
+ JSValue value = edge->asJSValue();
+ if (!value.isBoolean()) {
+ terminate(Uncountable);
+ return m_out.booleanFalse;
+ }
+ return m_out.constBool(value.asBoolean());
+ }
+
+ LoweredNodeValue value = m_booleanValues.get(edge.node());
+ if (isValid(value))
+ return value.value();
+
+ value = m_jsValueValues.get(edge.node());
+ if (isValid(value)) {
+ LValue unboxedResult = value.value();
+ FTL_TYPE_CHECK(
+ jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult));
+ LValue result = unboxBoolean(unboxedResult);
+ setBoolean(edge.node(), result);
+ return result;
+ }
+
+ DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecBoolean));
+ terminate(Uncountable);
+ return m_out.booleanFalse;
+ }
+
+ LValue lowDouble(Edge edge)
+ {
+ DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind()));
+
+ LoweredNodeValue value = m_doubleValues.get(edge.node());
+ if (isValid(value))
+ return value.value();
+ DFG_ASSERT(m_graph, m_node, !provenType(edge));
+ terminate(Uncountable);
+ return m_out.doubleZero;
+ }
+
+ LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
+ {
+ DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
+ DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()));
+ DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse);
+
+ if (edge->hasConstant())
+ return m_out.constInt64(JSValue::encode(edge->asJSValue()));
+
+ LoweredNodeValue value = m_jsValueValues.get(edge.node());
+ if (isValid(value))
+ return value.value();
+
+ value = m_int32Values.get(edge.node());
+ if (isValid(value)) {
+ LValue result = boxInt32(value.value());
+ setJSValue(edge.node(), result);
+ return result;
+ }
+
+ value = m_booleanValues.get(edge.node());
+ if (isValid(value)) {
+ LValue result = boxBoolean(value.value());
+ setJSValue(edge.node(), result);
+ return result;
+ }
+
+ DFG_CRASH(m_graph, m_node, "Value not defined");
+ return 0;
+ }
+
+ LValue lowNotCell(Edge edge)
+ {
+ LValue result = lowJSValue(edge, ManualOperandSpeculation);
+ FTL_TYPE_CHECK(jsValueValue(result), edge, ~SpecCell, isCell(result));
+ return result;
+ }
+
+ LValue lowStorage(Edge edge)
+ {
+ LoweredNodeValue value = m_storageValues.get(edge.node());
+ if (isValid(value))
+ return value.value();
+
+ LValue result = lowCell(edge);
+ setStorage(edge.node(), result);
+ return result;
+ }
+
+ LValue lowMapBucket(Edge edge)
+ {
+ LoweredNodeValue value = m_mapBucketValues.get(edge.node());
+ if (isValid(value))
+ return value.value();
+
+ LValue result = lowCell(edge);
+ setStorage(edge.node(), result);
+ return result;
+ }
+
+ LValue strictInt52ToInt32(Edge edge, LValue value)
+ {
+ LValue result = m_out.castToInt32(value);
+ FTL_TYPE_CHECK(
+ noValue(), edge, SpecInt32Only,
+ m_out.notEqual(m_out.signExt32To64(result), value));
+ setInt32(edge.node(), result);
+ return result;
+ }
+
+ LValue strictInt52ToDouble(LValue value)
+ {
+ return m_out.intToDouble(value);
+ }
+
+ LValue strictInt52ToJSValue(LValue value)
+ {
+ LBasicBlock isInt32 = m_out.newBlock();
+ LBasicBlock isDouble = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ Vector<ValueFromBlock, 2> results;
+
+ LValue int32Value = m_out.castToInt32(value);
+ m_out.branch(
+ m_out.equal(m_out.signExt32To64(int32Value), value),
+ unsure(isInt32), unsure(isDouble));
+
+ LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble);
+
+ results.append(m_out.anchor(boxInt32(int32Value)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(isDouble, continuation);
+
+ results.append(m_out.anchor(boxDouble(m_out.intToDouble(value))));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(Int64, results);
+ }
+
+ LValue strictInt52ToInt52(LValue value)
+ {
+ return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount));
+ }
+
+ LValue int52ToStrictInt52(LValue value)
+ {
+ return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
+ }
+
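+ // The predicates and box/unbox helpers below assume the standard JSVALUE64 encoding, in
+ // which m_tagTypeNumber is 0xffff000000000000: a boxed int32 is the 32-bit value plus that
+ // tag, a boxed double is its IEEE bit pattern offset by the same constant (boxDouble
+ // subtracts it, unboxDouble adds it back), cells have all bits of m_tagMask clear, and
+ // null/undefined/booleans carry the low "other"/bool/undefined tag bits.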
+ LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecInt32Only))
+ return proven;
+ return m_out.aboveOrEqual(jsValue, m_tagTypeNumber);
+ }
+ LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, ~SpecInt32Only))
+ return proven;
+ return m_out.below(jsValue, m_tagTypeNumber);
+ }
+ LValue unboxInt32(LValue jsValue)
+ {
+ return m_out.castToInt32(jsValue);
+ }
+ LValue boxInt32(LValue value)
+ {
+ return m_out.add(m_out.zeroExt(value, Int64), m_tagTypeNumber);
+ }
+
+ LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecCell | SpecMisc))
+ return proven;
+ return m_out.testIsZero64(jsValue, m_tagTypeNumber);
+ }
+ LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, ~(SpecCell | SpecMisc)))
+ return proven;
+ return m_out.testNonZero64(jsValue, m_tagTypeNumber);
+ }
+
+ LValue unboxDouble(LValue jsValue)
+ {
+ return m_out.bitCast(m_out.add(jsValue, m_tagTypeNumber), Double);
+ }
+ LValue boxDouble(LValue doubleValue)
+ {
+ return m_out.sub(m_out.bitCast(doubleValue, Int64), m_tagTypeNumber);
+ }
+
+ LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
+ {
+ LBasicBlock intCase = m_out.newBlock();
+ LBasicBlock doubleCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue isNotInt32;
+ if (!m_interpreter.needsTypeCheck(edge, SpecInt32Only))
+ isNotInt32 = m_out.booleanFalse;
+ else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32Only))
+ isNotInt32 = m_out.booleanTrue;
+ else
+ isNotInt32 = this->isNotInt32(boxedValue);
+ m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
+
+ ValueFromBlock intToInt52 = m_out.anchor(
+ m_out.signExt32To64(unboxInt32(boxedValue)));
+ m_out.jump(continuation);
+
+ m_out.appendTo(doubleCase, continuation);
+
+ LValue possibleResult = m_out.call(
+ Int64, m_out.operation(operationConvertBoxedDoubleToInt52), boxedValue);
+ FTL_TYPE_CHECK(
+ jsValueValue(boxedValue), edge, SpecInt32Only | SpecAnyIntAsDouble,
+ m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));
+
+ ValueFromBlock doubleToInt52 = m_out.anchor(possibleResult);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ return m_out.phi(Int64, intToInt52, doubleToInt52);
+ }
+
+ LValue doubleToStrictInt52(Edge edge, LValue value)
+ {
+ LValue possibleResult = m_out.call(
+ Int64, m_out.operation(operationConvertDoubleToInt52), value);
+ FTL_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
+ doubleValue(value), edge, SpecAnyIntAsDouble,
+ m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));
+
+ return possibleResult;
+ }
+
+ LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
+ {
+ LValue integerValue = m_out.doubleToInt(value);
+ LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
+ LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
+ speculate(Overflow, FormattedValue(DataFormatDouble, value), m_node, valueNotConvertibleToInteger);
+
+ if (shouldCheckNegativeZero) {
+ LBasicBlock valueIsZero = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+ m_out.branch(m_out.isZero32(integerValue), unsure(valueIsZero), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation);
+
+ LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
+ LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));
+
+ speculate(NegativeZero, FormattedValue(DataFormatDouble, value), m_node, signBitSet);
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+ }
+ return integerValue;
+ }
+
+ LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecFullNumber))
+ return proven;
+ return isNotCellOrMisc(jsValue);
+ }
+ LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, ~SpecFullNumber))
+ return proven;
+ return isCellOrMisc(jsValue);
+ }
+
+ LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, ~SpecCell))
+ return proven;
+ return m_out.testNonZero64(jsValue, m_tagMask);
+ }
+
+ LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecCell))
+ return proven;
+ return m_out.testIsZero64(jsValue, m_tagMask);
+ }
+
+ LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, ~SpecMisc))
+ return proven;
+ return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined));
+ }
+
+ LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecMisc))
+ return proven;
+ return m_out.logicalNot(isNotMisc(value));
+ }
+
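+ // With the standard encoding, ValueFalse and ValueTrue differ only in their low bit, so
+ // xor-ing with ValueFalse maps them to 0 and 1; any other value leaves some bit outside
+ // the low bit set, which is what the testNonZero64 against ~1 detects.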
+ LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, ~SpecBoolean))
+ return proven;
+ return m_out.testNonZero64(
+ m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
+ m_out.constInt64(~1));
+ }
+ LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecBoolean))
+ return proven;
+ return m_out.logicalNot(isNotBoolean(jsValue));
+ }
+ LValue unboxBoolean(LValue jsValue)
+ {
+ // We want to use a cast that guarantees that B3 knows that even the integer
+ // value is just 0 or 1. But for now we do it the dumb way.
+ return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1)));
+ }
+ LValue boxBoolean(LValue value)
+ {
+ return m_out.select(
+ value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
+ }
+
+ LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, ~SpecOther))
+ return proven;
+ return m_out.notEqual(
+ m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
+ m_out.constInt64(ValueNull));
+ }
+ LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type, SpecOther))
+ return proven;
+ return m_out.equal(
+ m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
+ m_out.constInt64(ValueNull));
+ }
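+
+ // With the standard encoding, ValueNull and ValueUndefined differ only in TagBitUndefined,
+ // so masking that bit off maps both to ValueNull; nothing else maps there, which is why a
+ // single compare distinguishes null/undefined from every other value.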
+
+ LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
+ {
+ if (!(provenType & ~wantedType))
+ return m_out.booleanTrue;
+ if (!(provenType & wantedType))
+ return m_out.booleanFalse;
+ return nullptr;
+ }
+
+ void speculate(Edge edge)
+ {
+ switch (edge.useKind()) {
+ case UntypedUse:
+ break;
+ case KnownInt32Use:
+ case KnownStringUse:
+ case KnownPrimitiveUse:
+ case DoubleRepUse:
+ case Int52RepUse:
+ ASSERT(!m_interpreter.needsTypeCheck(edge));
+ break;
+ case Int32Use:
+ speculateInt32(edge);
+ break;
+ case CellUse:
+ speculateCell(edge);
+ break;
+ case CellOrOtherUse:
+ speculateCellOrOther(edge);
+ break;
+ case KnownCellUse:
+ ASSERT(!m_interpreter.needsTypeCheck(edge));
+ break;
+ case AnyIntUse:
+ speculateAnyInt(edge);
+ break;
+ case ObjectUse:
+ speculateObject(edge);
+ break;
+ case ArrayUse:
+ speculateArray(edge);
+ break;
+ case FunctionUse:
+ speculateFunction(edge);
+ break;
+ case ObjectOrOtherUse:
+ speculateObjectOrOther(edge);
+ break;
+ case FinalObjectUse:
+ speculateFinalObject(edge);
+ break;
+ case RegExpObjectUse:
+ speculateRegExpObject(edge);
+ break;
+ case ProxyObjectUse:
+ speculateProxyObject(edge);
+ break;
+ case DerivedArrayUse:
+ speculateDerivedArray(edge);
+ break;
+ case MapObjectUse:
+ speculateMapObject(edge);
+ break;
+ case SetObjectUse:
+ speculateSetObject(edge);
+ break;
+ case StringUse:
+ speculateString(edge);
+ break;
+ case StringOrOtherUse:
+ speculateStringOrOther(edge);
+ break;
+ case StringIdentUse:
+ speculateStringIdent(edge);
+ break;
+ case SymbolUse:
+ speculateSymbol(edge);
+ break;
+ case StringObjectUse:
+ speculateStringObject(edge);
+ break;
+ case StringOrStringObjectUse:
+ speculateStringOrStringObject(edge);
+ break;
+ case NumberUse:
+ speculateNumber(edge);
+ break;
+ case RealNumberUse:
+ speculateRealNumber(edge);
+ break;
+ case DoubleRepRealUse:
+ speculateDoubleRepReal(edge);
+ break;
+ case DoubleRepAnyIntUse:
+ speculateDoubleRepAnyInt(edge);
+ break;
+ case BooleanUse:
+ speculateBoolean(edge);
+ break;
+ case NotStringVarUse:
+ speculateNotStringVar(edge);
+ break;
+ case NotCellUse:
+ speculateNotCell(edge);
+ break;
+ case OtherUse:
+ speculateOther(edge);
+ break;
+ case MiscUse:
+ speculateMisc(edge);
+ break;
+ default:
+ DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind");
+ }
+ }
+
+ void speculate(Node*, Edge edge)
+ {
+ speculate(edge);
+ }
+
+ void speculateInt32(Edge edge)
+ {
+ lowInt32(edge);
+ }
+
+ void speculateCell(Edge edge)
+ {
+ lowCell(edge);
+ }
+
+ void speculateNotCell(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+ lowNotCell(edge);
+ }
+
+ void speculateCellOrOther(Edge edge)
+ {
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+
+ LBasicBlock isNotCell = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(edge)), unsure(continuation), unsure(isNotCell));
+
+ LBasicBlock lastNext = m_out.appendTo(isNotCell, continuation);
+ FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void speculateAnyInt(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
+ }
+
+ LValue isCellWithType(LValue cell, JSType queriedType, SpeculatedType speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery))
+ return proven;
+ return m_out.equal(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(queriedType));
+ }
+
+ LValue isTypedArrayView(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, SpecTypedArrayView))
+ return proven;
+ LValue jsType = m_out.sub(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(Int8ArrayType));
+ return m_out.belowOrEqual(
+ jsType,
+ m_out.constInt32(Float64ArrayType - Int8ArrayType));
+ }
+
+ LValue isObject(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, SpecObject))
+ return proven;
+ return m_out.aboveOrEqual(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(ObjectType));
+ }
+
+ LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject))
+ return proven;
+ return m_out.below(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(ObjectType));
+ }
+
+ LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, ~SpecString))
+ return proven;
+ return m_out.notEqual(
+ m_out.load32(cell, m_heaps.JSCell_structureID),
+ m_out.constInt32(vm().stringStructure->id()));
+ }
+
+ LValue isString(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, SpecString))
+ return proven;
+ return m_out.equal(
+ m_out.load32(cell, m_heaps.JSCell_structureID),
+ m_out.constInt32(vm().stringStructure->id()));
+ }
+
+ LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol))
+ return proven;
+ return m_out.notEqual(
+ m_out.load32(cell, m_heaps.JSCell_structureID),
+ m_out.constInt32(vm().symbolStructure->id()));
+ }
+
+ LValue isSymbol(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, SpecSymbol))
+ return proven;
+ return m_out.equal(
+ m_out.load32(cell, m_heaps.JSCell_structureID),
+ m_out.constInt32(vm().symbolStructure->id()));
+ }
+
+ LValue isArrayType(LValue cell, ArrayMode arrayMode)
+ {
+ switch (arrayMode.type()) {
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous: {
+ LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);
+
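+ // For indexed storage the check is against the indexing type byte: Array must have
+ // the IsArray bit set, NonArray must have it clear, and PossiblyArray ignores it;
+ // all of them compare the indexing shape against the mode's shape mask.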
+ switch (arrayMode.arrayClass()) {
+ case Array::OriginalArray:
+ DFG_CRASH(m_graph, m_node, "Unexpected original array");
+ return 0;
+
+ case Array::Array:
+ return m_out.equal(
+ m_out.bitAnd(indexingType, m_out.constInt32(IsArray | IndexingShapeMask)),
+ m_out.constInt32(IsArray | arrayMode.shapeMask()));
+
+ case Array::NonArray:
+ case Array::OriginalNonArray:
+ return m_out.equal(
+ m_out.bitAnd(indexingType, m_out.constInt32(IsArray | IndexingShapeMask)),
+ m_out.constInt32(arrayMode.shapeMask()));
+
+ case Array::PossiblyArray:
+ return m_out.equal(
+ m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)),
+ m_out.constInt32(arrayMode.shapeMask()));
+ }
+
+ DFG_CRASH(m_graph, m_node, "Corrupt array class");
+ }
+
+ case Array::DirectArguments:
+ return m_out.equal(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(DirectArgumentsType));
+
+ case Array::ScopedArguments:
+ return m_out.equal(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(ScopedArgumentsType));
+
+ default:
+ return m_out.equal(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(typeForTypedArrayType(arrayMode.typedArrayType())));
+ }
+ }
+
+ LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, SpecFunction))
+ return proven;
+ return isType(cell, JSFunctionType);
+ }
+ LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop)
+ {
+ if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction))
+ return proven;
+ return isNotType(cell, JSFunctionType);
+ }
+
+ LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop)
+ {
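+ // Only SpecObjectOther cells can masquerade as undefined or require getCallData for
+ // typeof, so anything provably outside that type is not exotic.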
+ if (!(type & SpecObjectOther))
+ return m_out.booleanFalse;
+ return m_out.testNonZero32(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));
+ }
+
+ LValue isType(LValue cell, JSType type)
+ {
+ return m_out.equal(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
+ m_out.constInt32(type));
+ }
+
+ LValue isNotType(LValue cell, JSType type)
+ {
+ return m_out.logicalNot(isType(cell, type));
+ }
+
+ void speculateObject(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
+ }
+
+ void speculateObject(Edge edge)
+ {
+ speculateObject(edge, lowCell(edge));
+ }
+
+ void speculateArray(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(
+ jsValueValue(cell), edge, SpecArray, isNotType(cell, ArrayType));
+ }
+
+ void speculateArray(Edge edge)
+ {
+ speculateArray(edge, lowCell(edge));
+ }
+
+ void speculateFunction(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell));
+ }
+
+ void speculateFunction(Edge edge)
+ {
+ speculateFunction(edge, lowCell(edge));
+ }
+
+ void speculateObjectOrOther(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock primitiveCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);
+
+ FTL_TYPE_CHECK(
+ jsValueValue(value), edge, (~SpecCell) | SpecObject, isNotObject(value));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(primitiveCase, continuation);
+
+ FTL_TYPE_CHECK(
+ jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value));
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void speculateFinalObject(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(
+ jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType));
+ }
+
+ void speculateFinalObject(Edge edge)
+ {
+ speculateFinalObject(edge, lowCell(edge));
+ }
+
+ void speculateRegExpObject(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(
+ jsValueValue(cell), edge, SpecRegExpObject, isNotType(cell, RegExpObjectType));
+ }
+
+ void speculateRegExpObject(Edge edge)
+ {
+ speculateRegExpObject(edge, lowCell(edge));
+ }
+
+ void speculateProxyObject(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(
+ jsValueValue(cell), edge, SpecProxyObject, isNotType(cell, ProxyObjectType));
+ }
+
+ void speculateProxyObject(Edge edge)
+ {
+ speculateProxyObject(edge, lowCell(edge));
+ }
+
+ void speculateDerivedArray(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(
+ jsValueValue(cell), edge, SpecDerivedArray, isNotType(cell, DerivedArrayType));
+ }
+
+ void speculateDerivedArray(Edge edge)
+ {
+ speculateDerivedArray(edge, lowCell(edge));
+ }
+
+ void speculateMapObject(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(
+ jsValueValue(cell), edge, SpecMapObject, isNotType(cell, JSMapType));
+ }
+
+ void speculateMapObject(Edge edge)
+ {
+ speculateMapObject(edge, lowCell(edge));
+ }
+
+ void speculateSetObject(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(
+ jsValueValue(cell), edge, SpecSetObject, isNotType(cell, JSSetType));
+ }
+
+ void speculateSetObject(Edge edge)
+ {
+ speculateSetObject(edge, lowCell(edge));
+ }
+
+ void speculateString(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString | ~SpecCell, isNotString(cell));
+ }
+
+ void speculateString(Edge edge)
+ {
+ speculateString(edge, lowCell(edge));
+ }
+
+ void speculateStringOrOther(Edge edge, LValue value)
+ {
+ LBasicBlock cellCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
+
+ FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCell) | SpecString, isNotString(value));
+
+ m_out.jump(continuation);
+ m_out.appendTo(notCellCase, continuation);
+
+ FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value));
+
+ m_out.jump(continuation);
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void speculateStringOrOther(Edge edge)
+ {
+ speculateStringOrOther(edge, lowJSValue(edge, ManualOperandSpeculation));
+ }
+
+ void speculateStringIdent(Edge edge, LValue string, LValue stringImpl)
+ {
+ if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString))
+ return;
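+ // An ident string must not be a rope (its StringImpl pointer must be non-null) and
+ // its StringImpl must have the atomic flag set; speculate on both conditions.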
+
+ speculate(BadType, jsValueValue(string), edge.node(), m_out.isNull(stringImpl));
+ speculate(
+ BadType, jsValueValue(string), edge.node(),
+ m_out.testIsZero32(
+ m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
+ m_out.constInt32(StringImpl::flagIsAtomic())));
+ m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
+ }
+
+ void speculateStringIdent(Edge edge)
+ {
+ lowStringIdent(edge);
+ }
+
+ void speculateStringObject(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
+ return;
+
+ speculateStringObjectForCell(edge, lowCell(edge));
+ m_interpreter.filter(edge, SpecStringObject);
+ }
+
+ void speculateStringOrStringObject(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
+ return;
+
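+ // Accept a cell whose structure is the string structure; otherwise it must have the
+ // StringObject structure of this code origin's global object.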
+ LBasicBlock notString = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue structureID = m_out.load32(lowCell(edge), m_heaps.JSCell_structureID);
+ m_out.branch(
+ m_out.equal(structureID, m_out.constInt32(vm().stringStructure->id())),
+ unsure(continuation), unsure(notString));
+
+ LBasicBlock lastNext = m_out.appendTo(notString, continuation);
+ speculateStringObjectForStructureID(edge, structureID);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ m_interpreter.filter(edge, SpecString | SpecStringObject);
+ }
+
+ void speculateStringObjectForCell(Edge edge, LValue cell)
+ {
+ speculateStringObjectForStructureID(edge, m_out.load32(cell, m_heaps.JSCell_structureID));
+ }
+
+ void speculateStringObjectForStructureID(Edge edge, LValue structureID)
+ {
+ RegisteredStructure stringObjectStructure =
+ m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->stringObjectStructure());
+
+ if (abstractStructure(edge).isSubsetOf(RegisteredStructureSet(stringObjectStructure)))
+ return;
+
+ speculate(
+ NotStringObject, noValue(), 0,
+ m_out.notEqual(structureID, weakStructureID(stringObjectStructure)));
+ }
+
+ void speculateSymbol(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecSymbol | ~SpecCell, isNotSymbol(cell));
+ }
+
+ void speculateSymbol(Edge edge)
+ {
+ speculateSymbol(edge, lowCell(edge));
+ }
+
+ void speculateNonNullObject(Edge edge, LValue cell)
+ {
+ FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
+ if (masqueradesAsUndefinedWatchpointIsStillValid())
+ return;
+
+ speculate(
+ BadType, jsValueValue(cell), edge.node(),
+ m_out.testNonZero32(
+ m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
+ m_out.constInt32(MasqueradesAsUndefined)));
+ }
+
+ void speculateNumber(Edge edge)
+ {
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+ FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value));
+ }
+
+ void speculateRealNumber(Edge edge)
+ {
+ // Do an early return here because lowDouble() can create a lot of control flow.
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+ LValue doubleValue = unboxDouble(value);
+
+ LBasicBlock intCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ m_out.doubleEqual(doubleValue, doubleValue),
+ usually(continuation), rarely(intCase));
+
+ LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
+
+ typeCheck(
+ jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
+ isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void speculateDoubleRepReal(Edge edge)
+ {
+ // Do an early return here because lowDouble() can create a lot of control flow.
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ LValue value = lowDouble(edge);
+ FTL_TYPE_CHECK(
+ doubleValue(value), edge, SpecDoubleReal,
+ m_out.doubleNotEqualOrUnordered(value, value));
+ }
+
+ void speculateDoubleRepAnyInt(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ doubleToStrictInt52(edge, lowDouble(edge));
+ }
+
+ void speculateBoolean(Edge edge)
+ {
+ lowBoolean(edge);
+ }
+
+ void speculateNotStringVar(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge, ~SpecStringVar))
+ return;
+
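+ // Values that are not cells, or cells that are not strings, trivially satisfy
+ // ~SpecStringVar; an actual string only passes if it is an ident (atomic) string.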
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+
+ LBasicBlock isCellCase = m_out.newBlock();
+ LBasicBlock isStringCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
+ m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation));
+
+ m_out.appendTo(isStringCase, continuation);
+ speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void speculateOther(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+ typeCheck(jsValueValue(value), edge, SpecOther, isNotOther(value));
+ }
+
+ void speculateMisc(Edge edge)
+ {
+ if (!m_interpreter.needsTypeCheck(edge))
+ return;
+
+ LValue value = lowJSValue(edge, ManualOperandSpeculation);
+ typeCheck(jsValueValue(value), edge, SpecMisc, isNotMisc(value));
+ }
+
+ void speculateTypedArrayIsNotNeutered(LValue base)
+ {
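+ // Only wasteful typed arrays can be neutered; for those, a null vector pointer
+ // means the view has been neutered, so speculate on the vector being non-null.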
+ LBasicBlock isWasteful = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LValue mode = m_out.load32(base, m_heaps.JSArrayBufferView_mode);
+ m_out.branch(m_out.equal(mode, m_out.constInt32(WastefulTypedArray)),
+ unsure(isWasteful), unsure(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(isWasteful, continuation);
+ LValue vector = m_out.loadPtr(base, m_heaps.JSArrayBufferView_vector);
+ speculate(Uncountable, jsValueValue(vector), m_node, m_out.isZero64(vector));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ bool masqueradesAsUndefinedWatchpointIsStillValid()
+ {
+ return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic);
+ }
+
+ LValue loadCellState(LValue base)
+ {
+ return m_out.load8ZeroExt32(base, m_heaps.JSCell_cellState);
+ }
+
+ void emitStoreBarrier(LValue base, bool isFenced)
+ {
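+ // Fast path: skip the slow path when the cell's state is already above the barrier
+ // threshold. The fenced variant loads the live threshold, then re-checks against
+ // blackThreshold after a load fence before calling operationWriteBarrierSlowPath.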
+ LBasicBlock recheckPath = nullptr;
+ if (isFenced)
+ recheckPath = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(isFenced ? recheckPath : slowPath);
+
+ LValue threshold;
+ if (isFenced)
+ threshold = m_out.load32(m_out.absolute(vm().heap.addressOfBarrierThreshold()));
+ else
+ threshold = m_out.constInt32(blackThreshold);
+
+ m_out.branch(
+ m_out.above(loadCellState(base), threshold),
+ usually(continuation), rarely(isFenced ? recheckPath : slowPath));
+
+ if (isFenced) {
+ m_out.appendTo(recheckPath, slowPath);
+
+ m_out.fence(&m_heaps.root, &m_heaps.JSCell_cellState);
+
+ m_out.branch(
+ m_out.above(loadCellState(base), m_out.constInt32(blackThreshold)),
+ usually(continuation), rarely(slowPath));
+ }
+
+ m_out.appendTo(slowPath, continuation);
+
+ LValue call = vmCall(Void, m_out.operation(operationWriteBarrierSlowPath), m_callFrame, base);
+ m_heaps.decorateCCallRead(&m_heaps.root, call);
+ m_heaps.decorateCCallWrite(&m_heaps.JSCell_cellState, call);
+
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void mutatorFence()
+ {
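+ // On x86 we simply fence. Elsewhere, check the heap's mutatorShouldBeFenced flag
+ // and only fence on the rarely-taken path when it is set.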
+ if (isX86()) {
+ m_out.fence(&m_heaps.root, nullptr);
+ return;
+ }
+
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
+
+ m_out.branch(
+ m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
+ rarely(slowPath), usually(continuation));
+
+ m_out.appendTo(slowPath, continuation);
+
+ m_out.fence(&m_heaps.root, nullptr);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ void nukeStructureAndSetButterfly(LValue butterfly, LValue object)
+ {
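+ // When fencing is required, nuke the structure ID before storing the new butterfly
+ // so a concurrent marker never sees the new butterfly paired with the old, valid
+ // structure; otherwise the fast path just stores the butterfly.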
+ if (isX86()) {
+ m_out.store32(
+ m_out.bitOr(
+ m_out.load32(object, m_heaps.JSCell_structureID),
+ m_out.constInt32(nukedStructureIDBit())),
+ object, m_heaps.JSCell_structureID);
+ m_out.fence(&m_heaps.root, nullptr);
+ m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
+ m_out.fence(&m_heaps.root, nullptr);
+ return;
+ }
+
+ LBasicBlock fastPath = m_out.newBlock();
+ LBasicBlock slowPath = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastPath);
+
+ m_out.branch(
+ m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
+ rarely(slowPath), usually(fastPath));
+
+ m_out.appendTo(fastPath, slowPath);
+
+ m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
+ m_out.jump(continuation);
+
+ m_out.appendTo(slowPath, continuation);
+
+ m_out.store32(
+ m_out.bitOr(
+ m_out.load32(object, m_heaps.JSCell_structureID),
+ m_out.constInt32(nukedStructureIDBit())),
+ object, m_heaps.JSCell_structureID);
+ m_out.fence(&m_heaps.root, nullptr);
+ m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
+ m_out.fence(&m_heaps.root, nullptr);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ }
+
+ template<typename... Args>
+ LValue vmCall(LType type, LValue function, Args... args)
+ {
+ callPreflight();
+ LValue result = m_out.call(type, function, args...);
+ callCheck();
+ return result;
+ }
+
+ void callPreflight(CodeOrigin codeOrigin)
+ {
+ CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(codeOrigin);
+ m_out.store32(
+ m_out.constInt32(callSiteIndex.bits()),
+ tagFor(CallFrameSlot::argumentCount));
+ }
+
+ void callPreflight()
+ {
+ callPreflight(codeOriginDescriptionOfCallSite());
+ }
+
+ CodeOrigin codeOriginDescriptionOfCallSite() const
+ {
+ CodeOrigin codeOrigin = m_node->origin.semantic;
+ if (m_node->op() == TailCallInlinedCaller
+ || m_node->op() == TailCallVarargsInlinedCaller
+ || m_node->op() == TailCallForwardVarargsInlinedCaller
+ || m_node->op() == DirectTailCallInlinedCaller) {
+ // This case arises when you have a situation like this:
+ // foo makes a call to bar, and bar is inlined in foo. bar then makes a call
+ // to baz, and baz is inlined in bar. Then baz makes a tail call to jaz,
+ // and jaz is inlined in baz. We want the call frame for jaz to appear to
+ // have bar as its caller.
+ codeOrigin = *codeOrigin.inlineCallFrame->getCallerSkippingTailCalls();
+ }
+
+ return codeOrigin;
+ }
+
+ void callCheck()
+ {
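+ // After a VM call, check for a pending exception. If an enclosing handler in this
+ // machine frame will catch it, emit an OSR exit for the exception check; otherwise
+ // branch to the shared exception handling block.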
+ if (Options::useExceptionFuzz())
+ m_out.call(Void, m_out.operation(operationExceptionFuzz), m_callFrame);
+
+ LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
+ LValue hadException = m_out.notZero64(exception);
+
+ CodeOrigin opCatchOrigin;
+ HandlerInfo* exceptionHandler;
+ if (m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler)) {
+ bool exitOK = true;
+ bool isExceptionHandler = true;
+ appendOSRExit(
+ ExceptionCheck, noValue(), nullptr, hadException,
+ m_origin.withForExitAndExitOK(opCatchOrigin, exitOK), isExceptionHandler);
+ return;
+ }
+
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ hadException, rarely(m_handleExceptions), usually(continuation));
+
+ m_out.appendTo(continuation);
+ }
+
+ RefPtr<PatchpointExceptionHandle> preparePatchpointForExceptions(PatchpointValue* value)
+ {
+ CodeOrigin opCatchOrigin;
+ HandlerInfo* exceptionHandler;
+ bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler);
+ if (!willCatchException)
+ return PatchpointExceptionHandle::defaultHandle(m_ftlState);
+
+ if (verboseCompilationEnabled()) {
+ dataLog(" Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap(), "\n");
+ if (!m_availableRecoveries.isEmpty())
+ dataLog(" Available recoveries: ", listDump(m_availableRecoveries), "\n");
+ }
+
+ bool exitOK = true;
+ NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK);
+
+ OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(noValue(), nullptr);
+
+ // Compute the offset into the StackmapGenerationParams where we will find the exit arguments
+ // we are about to append. We need to account for both the children we've already added, and
+ // for the possibility of a result value if the patchpoint is not void.
+ unsigned offset = value->numChildren();
+ if (value->type() != Void)
+ offset++;
+
+ // Use LateColdAny to ensure that the stackmap arguments interfere with the patchpoint's
+ // result and with any late-clobbered registers.
+ value->appendVectorWithRep(
+ buildExitArguments(exitDescriptor, opCatchOrigin, noValue()),
+ ValueRep::LateColdAny);
+
+ return PatchpointExceptionHandle::create(
+ m_ftlState, exitDescriptor, origin, offset, *exceptionHandler);
+ }
+
+ LBasicBlock lowBlock(DFG::BasicBlock* block)
+ {
+ return m_blocks.get(block);
+ }
+
+ OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue)
+ {
+ return &m_ftlState.jitCode->osrExitDescriptors.alloc(
+ lowValue.format(), m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
+ availabilityMap().m_locals.numberOfArguments(),
+ availabilityMap().m_locals.numberOfLocals());
+ }
+
+ void appendOSRExit(
+ ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition,
+ NodeOrigin origin, bool isExceptionHandler = false)
+ {
+ if (verboseCompilationEnabled()) {
+ dataLog(" OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap(), "\n");
+ if (!m_availableRecoveries.isEmpty())
+ dataLog(" Available recoveries: ", listDump(m_availableRecoveries), "\n");
+ }
+
+ DFG_ASSERT(m_graph, m_node, origin.exitOK);
+
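+ // OSR exit fuzzing: count fuzz checks globally and force this exit to fire by
+ // OR-ing extra conditions into failCondition once the configured thresholds are hit.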
+ if (!isExceptionHandler
+ && Options::useOSRExitFuzz()
+ && canUseOSRExitFuzzing(m_graph.baselineCodeBlockFor(m_node->origin.semantic))
+ && doOSRExitFuzzing()) {
+ LValue numberOfFuzzChecks = m_out.add(
+ m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)),
+ m_out.int32One);
+
+ m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks));
+
+ if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) {
+ failCondition = m_out.bitOr(
+ failCondition,
+ m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter)));
+ }
+ if (unsigned at = Options::fireOSRExitFuzzAt()) {
+ failCondition = m_out.bitOr(
+ failCondition,
+ m_out.equal(numberOfFuzzChecks, m_out.constInt32(at)));
+ }
+ }
+
+ if (failCondition == m_out.booleanFalse)
+ return;
+
+ blessSpeculation(
+ m_out.speculate(failCondition), kind, lowValue, highValue, origin);
+ }
+
+ void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin)
+ {
+ OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, highValue);
+
+ value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue));
+
+ State* state = &m_ftlState;
+ value->setGenerator(
+ [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
+ exitDescriptor->emitOSRExit(
+ *state, kind, origin, jit, params, 0);
+ });
+ }
+
+ StackmapArgumentList buildExitArguments(
+ OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, FormattedValue lowValue,
+ unsigned offsetOfExitArgumentsInStackmapLocations = 0)
+ {
+ StackmapArgumentList result;
+ buildExitArguments(
+ exitDescriptor, exitOrigin, result, lowValue, offsetOfExitArgumentsInStackmapLocations);
+ return result;
+ }
+
+ void buildExitArguments(
+ OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, StackmapArgumentList& arguments, FormattedValue lowValue,
+ unsigned offsetOfExitArgumentsInStackmapLocations = 0)
+ {
+ if (!!lowValue)
+ arguments.append(lowValue.value());
+
+ AvailabilityMap availabilityMap = this->availabilityMap();
+ availabilityMap.pruneByLiveness(m_graph, exitOrigin);
+
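+ // Collect an ExitTimeObjectMaterialization for each phantom allocation that is still
+ // available, so the exit values below can refer to objects materialized at exit time.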
+ HashMap<Node*, ExitTimeObjectMaterialization*> map;
+ availabilityMap.forEachAvailability(
+ [&] (Availability availability) {
+ if (!availability.shouldUseNode())
+ return;
+
+ Node* node = availability.node();
+ if (!node->isPhantomAllocation())
+ return;
+
+ auto result = map.add(node, nullptr);
+ if (result.isNewEntry) {
+ result.iterator->value =
+ exitDescriptor->m_materializations.add(node->op(), node->origin.semantic);
+ }
+ });
+
+ for (unsigned i = 0; i < exitDescriptor->m_values.size(); ++i) {
+ int operand = exitDescriptor->m_values.operandForIndex(i);
+
+ Availability availability = availabilityMap.m_locals[i];
+
+ if (Options::validateFTLOSRExitLiveness()
+ && m_graph.m_plan.mode != FTLForOSREntryMode) {
+
+ if (availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))
+ DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", VirtualRegister(operand), ", availability = ", availability, ", origin = ", exitOrigin).data());
+ }
+ ExitValue exitValue = exitValueForAvailability(arguments, map, availability);
+ if (exitValue.hasIndexInStackmapLocations())
+ exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
+ exitDescriptor->m_values[i] = exitValue;
+ }
+
+ for (auto heapPair : availabilityMap.m_heap) {
+ Node* node = heapPair.key.base();
+ ExitTimeObjectMaterialization* materialization = map.get(node);
+ ExitValue exitValue = exitValueForAvailability(arguments, map, heapPair.value);
+ if (exitValue.hasIndexInStackmapLocations())
+ exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
+ materialization->add(
+ heapPair.key.descriptor(),
+ exitValue);
+ }
+
+ if (verboseCompilationEnabled()) {
+ dataLog(" Exit values: ", exitDescriptor->m_values, "\n");
+ if (!exitDescriptor->m_materializations.isEmpty()) {
+ dataLog(" Materializations: \n");
+ for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
+ dataLog(" ", pointerDump(materialization), "\n");
+ }
+ }
+ }
+
+ ExitValue exitValueForAvailability(
+ StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
+ Availability availability)
+ {
+ FlushedAt flush = availability.flushedAt();
+ switch (flush.format()) {
+ case DeadFlush:
+ case ConflictingFlush:
+ if (availability.hasNode())
+ return exitValueForNode(arguments, map, availability.node());
+
+ // This means that the value is dead. It could be dead in bytecode or it could have
+ // been killed by our DCE, which can sometimes kill things even if they were live in
+ // bytecode.
+ return ExitValue::dead();
+
+ case FlushedJSValue:
+ case FlushedCell:
+ case FlushedBoolean:
+ return ExitValue::inJSStack(flush.virtualRegister());
+
+ case FlushedInt32:
+ return ExitValue::inJSStackAsInt32(flush.virtualRegister());
+
+ case FlushedInt52:
+ return ExitValue::inJSStackAsInt52(flush.virtualRegister());
+
+ case FlushedDouble:
+ return ExitValue::inJSStackAsDouble(flush.virtualRegister());
+ }
+
+ DFG_CRASH(m_graph, m_node, "Invalid flush format");
+ return ExitValue::dead();
+ }
+
+ ExitValue exitValueForNode(
+ StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
+ Node* node)
+ {
+ // NOTE: In FTL->B3, we cannot generate code here, because m_out is positioned after the
+ // stackmap value. Like all values, the stackmap value cannot use a child that is defined
+ // after it.
+
+ ASSERT(node->shouldGenerate());
+ ASSERT(node->hasResult());
+
+ if (node) {
+ switch (node->op()) {
+ case BottomValue:
+ // This might arise in object materializations. I actually doubt that it would,
+ // but it seems worthwhile to be conservative.
+ return ExitValue::dead();
+
+ case JSConstant:
+ case Int52Constant:
+ case DoubleConstant:
+ return ExitValue::constant(node->asJSValue());
+
+ default:
+ if (node->isPhantomAllocation())
+ return ExitValue::materializeNewObject(map.get(node));
+ break;
+ }
+ }
+
+ for (unsigned i = 0; i < m_availableRecoveries.size(); ++i) {
+ AvailableRecovery recovery = m_availableRecoveries[i];
+ if (recovery.node() != node)
+ continue;
+ ExitValue result = ExitValue::recovery(
+ recovery.opcode(), arguments.size(), arguments.size() + 1,
+ recovery.format());
+ arguments.append(recovery.left());
+ arguments.append(recovery.right());
+ return result;
+ }
+
+ LoweredNodeValue value = m_int32Values.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatInt32, value.value());
+
+ value = m_int52Values.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatInt52, value.value());
+
+ value = m_strictInt52Values.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatStrictInt52, value.value());
+
+ value = m_booleanValues.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatBoolean, value.value());
+
+ value = m_jsValueValues.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatJS, value.value());
+
+ value = m_doubleValues.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatDouble, value.value());
+
+ DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
+ return ExitValue::dead();
+ }
+
+ ExitValue exitArgument(StackmapArgumentList& arguments, DataFormat format, LValue value)
+ {
+ ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
+ arguments.append(value);
+ return result;
+ }
+
+ ExitValue exitValueForTailCall(StackmapArgumentList& arguments, Node* node)
+ {
+ ASSERT(node->shouldGenerate());
+ ASSERT(node->hasResult());
+
+ switch (node->op()) {
+ case JSConstant:
+ case Int52Constant:
+ case DoubleConstant:
+ return ExitValue::constant(node->asJSValue());
+
+ default:
+ break;
+ }
+
+ LoweredNodeValue value = m_jsValueValues.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatJS, value.value());
+
+ value = m_int32Values.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatJS, boxInt32(value.value()));
+
+ value = m_booleanValues.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatJS, boxBoolean(value.value()));
+
+ // Doubles and Int52 have been converted by ValueRep()
+ DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
+ }
+
+ void addAvailableRecovery(
+ Node* node, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format)
+ {
+ m_availableRecoveries.append(AvailableRecovery(node, opcode, left, right, format));
+ }
+
+ void addAvailableRecovery(
+ Edge edge, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format)
+ {
+ addAvailableRecovery(edge.node(), opcode, left, right, format);
+ }
+
+ void setInt32(Node* node, LValue value)
+ {
+ m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+ void setInt52(Node* node, LValue value)
+ {
+ m_int52Values.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+ void setStrictInt52(Node* node, LValue value)
+ {
+ m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+ void setInt52(Node* node, LValue value, Int52Kind kind)
+ {
+ switch (kind) {
+ case Int52:
+ setInt52(node, value);
+ return;
+
+ case StrictInt52:
+ setStrictInt52(node, value);
+ return;
+ }
+
+ DFG_CRASH(m_graph, m_node, "Corrupt int52 kind");
+ }
+ void setJSValue(Node* node, LValue value)
+ {
+ m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+ void setBoolean(Node* node, LValue value)
+ {
+ m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+ void setStorage(Node* node, LValue value)
+ {
+ m_storageValues.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+ void setMapBucket(Node* node, LValue value)
+ {
+ m_mapBucketValues.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+ void setDouble(Node* node, LValue value)
+ {
+ m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock));
+ }
+
+ void setInt32(LValue value)
+ {
+ setInt32(m_node, value);
+ }
+ void setInt52(LValue value)
+ {
+ setInt52(m_node, value);
+ }
+ void setStrictInt52(LValue value)
+ {
+ setStrictInt52(m_node, value);
+ }
+ void setInt52(LValue value, Int52Kind kind)
+ {
+ setInt52(m_node, value, kind);
+ }
+ void setJSValue(LValue value)
+ {
+ setJSValue(m_node, value);
+ }
+ void setBoolean(LValue value)
+ {
+ setBoolean(m_node, value);
+ }
+ void setStorage(LValue value)
+ {
+ setStorage(m_node, value);
+ }
+ void setMapBucket(LValue value)
+ {
+ setMapBucket(m_node, value);
+ }
+ void setDouble(LValue value)
+ {
+ setDouble(m_node, value);
+ }
+
+ bool isValid(const LoweredNodeValue& value)
+ {
+ if (!value)
+ return false;
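+ // A lowered value is only reusable if its defining block dominates the block we are
+ // currently lowering; otherwise it may not be defined along this path.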
+ if (!m_graph.m_dominators->dominates(value.block(), m_highBlock))
+ return false;
+ return true;
+ }
+
+ void addWeakReference(JSCell* target)
+ {
+ m_graph.m_plan.weakReferences.addLazily(target);
+ }
+
+ LValue loadStructure(LValue value)
+ {
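+ // A structure ID is an index into the VM's structure ID table; load the table base
+ // and index it by the ID to recover the Structure pointer.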
+ LValue tableIndex = m_out.load32(value, m_heaps.JSCell_structureID);
+ LValue tableBase = m_out.loadPtr(
+ m_out.absolute(vm().heap.structureIDTable().base()));
+ TypedPointer address = m_out.baseIndex(
+ m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
+ return m_out.loadPtr(address);
+ }
+
+ LValue weakPointer(JSCell* pointer)
+ {
+ // There are weird relationships in how optimized CodeBlocks
+ // point to other CodeBlocks. We don't want them to be part of
+ // the weak pointer set. For example, an optimized CodeBlock
+ // having a weak pointer to itself would cause it to get collected.
+ RELEASE_ASSERT(!jsDynamicCast<CodeBlock*>(vm(), pointer));
+
+ addWeakReference(pointer);
+ return m_out.weakPointer(m_graph, pointer);
+ }
+
+ LValue frozenPointer(FrozenValue* value)
+ {
+ return m_out.weakPointer(value);
+ }
+
+ LValue weakStructureID(RegisteredStructure structure)
+ {
+ return m_out.constInt32(structure->id());
+ }
+
+ LValue weakStructure(RegisteredStructure structure)
+ {
+ ASSERT(!!structure.get());
+ return m_out.weakPointer(m_graph, structure.get());
+ }
+
+ TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
+ {
+ return m_out.address(base, m_heaps.variables[operand], offset);
+ }
+ TypedPointer payloadFor(LValue base, int operand)
+ {
+ return addressFor(base, operand, PayloadOffset);
+ }
+ TypedPointer tagFor(LValue base, int operand)
+ {
+ return addressFor(base, operand, TagOffset);
+ }
+ TypedPointer addressFor(int operand, ptrdiff_t offset = 0)
+ {
+ return addressFor(VirtualRegister(operand), offset);
+ }
+ TypedPointer addressFor(VirtualRegister operand, ptrdiff_t offset = 0)
+ {
+ if (operand.isLocal())
+ return addressFor(m_captured, operand.offset(), offset);
+ return addressFor(m_callFrame, operand.offset(), offset);
+ }
+ TypedPointer payloadFor(int operand)
+ {
+ return payloadFor(VirtualRegister(operand));
+ }
+ TypedPointer payloadFor(VirtualRegister operand)
+ {
+ return addressFor(operand, PayloadOffset);
+ }
+ TypedPointer tagFor(int operand)
+ {
+ return tagFor(VirtualRegister(operand));
+ }
+ TypedPointer tagFor(VirtualRegister operand)
+ {
+ return addressFor(operand, TagOffset);
+ }
+
+ AbstractValue abstractValue(Node* node)
+ {
+ return m_state.forNode(node);
+ }
+ AbstractValue abstractValue(Edge edge)
+ {
+ return abstractValue(edge.node());
+ }
+
+ SpeculatedType provenType(Node* node)
+ {
+ return abstractValue(node).m_type;
+ }
+ SpeculatedType provenType(Edge edge)
+ {
+ return provenType(edge.node());
+ }
+
+ JSValue provenValue(Node* node)
+ {
+ return abstractValue(node).m_value;
+ }
+ JSValue provenValue(Edge edge)
+ {
+ return provenValue(edge.node());
+ }
+
+ StructureAbstractValue abstractStructure(Node* node)
+ {
+ return abstractValue(node).m_structure;
+ }
+ StructureAbstractValue abstractStructure(Edge edge)
+ {
+ return abstractStructure(edge.node());
+ }
+
+#if ENABLE(MASM_PROBE)
+ void probe(std::function<void (CCallHelpers::ProbeContext*)> probeFunc)
+ {
+ UNUSED_PARAM(probeFunc);
+ }
+#endif
+
+ void crash()
+ {
+ crash(m_highBlock, m_node);
+ }
+ void crash(DFG::BasicBlock* block, Node* node)
+ {
+ BlockIndex blockIndex = block->index;
+ unsigned nodeIndex = node ? node->index() : UINT_MAX;
+#if ASSERT_DISABLED
+ m_out.patchpoint(Void)->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ jit.move(CCallHelpers::TrustedImm32(blockIndex), GPRInfo::regT0);
+ jit.move(CCallHelpers::TrustedImm32(nodeIndex), GPRInfo::regT1);
+ if (node)
+ jit.move(CCallHelpers::TrustedImm32(node->op()), GPRInfo::regT2);
+ jit.abortWithReason(FTLCrash);
+ });
+#else
+ m_out.call(
+ Void,
+ m_out.constIntPtr(ftlUnreachable),
+ // We don't want the CodeBlock to have a weak pointer to itself because
+ // that would cause it to always get collected.
+ m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), m_out.constInt32(blockIndex),
+ m_out.constInt32(nodeIndex));
+#endif
+ m_out.unreachable();
+ }
+
+ AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; }
+
+ VM& vm() { return m_graph.m_vm; }
+ CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
+
+ Graph& m_graph;
+ State& m_ftlState;
+ AbstractHeapRepository m_heaps;
+ Output m_out;
+ Procedure& m_proc;
+
+ LBasicBlock m_prologue;
+ LBasicBlock m_handleExceptions;
+ HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks;
+
+ LValue m_callFrame;
+ LValue m_captured;
+ LValue m_tagTypeNumber;
+ LValue m_tagMask;
+
+ HashMap<Node*, LoweredNodeValue> m_int32Values;
+ HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
+ HashMap<Node*, LoweredNodeValue> m_int52Values;
+ HashMap<Node*, LoweredNodeValue> m_jsValueValues;
+ HashMap<Node*, LoweredNodeValue> m_booleanValues;
+ HashMap<Node*, LoweredNodeValue> m_storageValues;
+ HashMap<Node*, LoweredNodeValue> m_mapBucketValues;
+ HashMap<Node*, LoweredNodeValue> m_doubleValues;
+
+ // This is a bit of a hack. It prevents B3 from having to do CSE on loading of arguments.
+ // It's nice to have these optimizations on our end because we can guarantee them a bit better.
+ // Probably also saves B3 compile time.
+ HashMap<Node*, LValue> m_loadedArgumentValues;
+
+ HashMap<Node*, LValue> m_phis;
+
+ LocalOSRAvailabilityCalculator m_availabilityCalculator;
+
+ Vector<AvailableRecovery, 3> m_availableRecoveries;
+
+ InPlaceAbstractState m_state;
+ AbstractInterpreter<InPlaceAbstractState> m_interpreter;
+ DFG::BasicBlock* m_highBlock;
+ DFG::BasicBlock* m_nextHighBlock;
+ LBasicBlock m_nextLowBlock;
+
+ NodeOrigin m_origin;
+ unsigned m_nodeIndex;
+ Node* m_node;
+};
+
+} // anonymous namespace
+
+void lowerDFGToB3(State& state)
+{
+ LowerDFGToB3 lowering(state);
+ lowering.lower();
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.h b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h
index ad7d44c4d..e9569f4d3 100644
--- a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.h
+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLLowerDFGToLLVM_h
-#define FTLLowerDFGToLLVM_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -35,11 +32,8 @@
namespace JSC { namespace FTL {
-void lowerDFGToLLVM(State&);
+void lowerDFGToB3(State&);
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLLowerDFGToLLVM_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
deleted file mode 100644
index be2a40c7d..000000000
--- a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
+++ /dev/null
@@ -1,4515 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "FTLLowerDFGToLLVM.h"
-
-#if ENABLE(FTL_JIT)
-
-#include "CodeBlockWithJITType.h"
-#include "DFGAbstractInterpreterInlines.h"
-#include "DFGInPlaceAbstractState.h"
-#include "FTLAbstractHeapRepository.h"
-#include "FTLForOSREntryJITCode.h"
-#include "FTLFormattedValue.h"
-#include "FTLInlineCacheSize.h"
-#include "FTLLoweredNodeValue.h"
-#include "FTLOutput.h"
-#include "FTLThunks.h"
-#include "LinkBuffer.h"
-#include "OperandsInlines.h"
-#include "Operations.h"
-#include "VirtualRegister.h"
-#include <atomic>
-#include <wtf/ProcessID.h>
-
-namespace JSC { namespace FTL {
-
-using namespace DFG;
-
-static std::atomic<int> compileCounter;
-
-// Using this instead of typeCheck() helps to reduce the load on LLVM, by creating
-// significantly less dead code.
-#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) do { \
- FormattedValue _ftc_lowValue = (lowValue); \
- Edge _ftc_highValue = (highValue); \
- SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
- if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
- break; \
- typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition)); \
- } while (false)
-
-class LowerDFGToLLVM {
-public:
- LowerDFGToLLVM(State& state)
- : m_graph(state.graph)
- , m_ftlState(state)
- , m_heaps(state.context)
- , m_out(state.context)
- , m_availability(OperandsLike, state.graph.block(0)->variablesAtHead)
- , m_state(state.graph)
- , m_interpreter(state.graph, m_state)
- , m_stackmapIDs(0)
- {
- }
-
- void lower()
- {
- CString name;
- if (verboseCompilationEnabled()) {
- name = toCString(
- "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
- "_", codeBlock()->hash());
- } else
- name = "jsBody";
-
- m_graph.m_dominators.computeIfNecessary(m_graph);
-
- m_ftlState.module =
- llvm->ModuleCreateWithNameInContext(name.data(), m_ftlState.context);
-
- m_ftlState.function = addFunction(
- m_ftlState.module, name.data(), functionType(m_out.int64, m_out.intPtr));
- setFunctionCallingConv(m_ftlState.function, LLVMCCallConv);
-
- m_out.initialize(m_ftlState.module, m_ftlState.function, m_heaps);
-
- m_prologue = appendBasicBlock(m_ftlState.context, m_ftlState.function);
- m_out.appendTo(m_prologue);
- createPhiVariables();
-
- m_callFrame = m_out.param(0);
- m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
- m_tagMask = m_out.constInt64(TagMask);
-
- for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
- m_highBlock = m_graph.block(blockIndex);
- if (!m_highBlock)
- continue;
- m_blocks.add(m_highBlock, FTL_NEW_BLOCK(m_out, ("Block ", *m_highBlock)));
- }
-
- m_out.appendTo(m_prologue);
- m_out.jump(lowBlock(m_graph.block(0)));
-
- Vector<BasicBlock*> depthFirst;
- m_graph.getBlocksInDepthFirstOrder(depthFirst);
- for (unsigned i = 0; i < depthFirst.size(); ++i)
- compileBlock(depthFirst[i]);
-
- if (Options::dumpLLVMIR())
- dumpModule(m_ftlState.module);
-
- if (verboseCompilationEnabled())
- m_ftlState.dumpState("after lowering");
- if (validationEnabled())
- verifyModule(m_ftlState.module);
- }
-
-private:
-
- void createPhiVariables()
- {
- for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
- BasicBlock* block = m_graph.block(blockIndex);
- if (!block)
- continue;
- for (unsigned nodeIndex = block->size(); nodeIndex--;) {
- Node* node = block->at(nodeIndex);
- if (node->op() != Phi)
- continue;
- LType type;
- switch (node->flags() & NodeResultMask) {
- case NodeResultNumber:
- type = m_out.doubleType;
- break;
- case NodeResultInt32:
- type = m_out.int32;
- break;
- case NodeResultInt52:
- type = m_out.int64;
- break;
- case NodeResultBoolean:
- type = m_out.boolean;
- break;
- case NodeResultJS:
- type = m_out.int64;
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- m_phis.add(node, buildAlloca(m_out.m_builder, type));
- }
- }
- }
-
- void compileBlock(BasicBlock* block)
- {
- if (!block)
- return;
-
- if (verboseCompilationEnabled())
- dataLog("Compiling block ", *block, "\n");
-
- m_highBlock = block;
-
- LBasicBlock lowBlock = m_blocks.get(m_highBlock);
-
- m_nextHighBlock = 0;
- for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
- m_nextHighBlock = m_graph.block(nextBlockIndex);
- if (m_nextHighBlock)
- break;
- }
- m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
-
- // All of this effort to find the next block gives us the ability to keep the
- // generated IR in roughly program order. This ought not affect the performance
- // of the generated code (since we expect LLVM to reorder things) but it will
- // make IR dumps easier to read.
- m_out.appendTo(lowBlock, m_nextLowBlock);
-
- if (Options::ftlCrashes())
- m_out.crashNonTerminal();
-
- if (!m_highBlock->cfaHasVisited) {
- m_out.crash();
- return;
- }
-
- initializeOSRExitStateForBlock();
-
- m_state.reset();
- m_state.beginBasicBlock(m_highBlock);
-
- for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
- if (!compileNode(m_nodeIndex))
- break;
- }
- }
-
- bool compileNode(unsigned nodeIndex)
- {
- if (!m_state.isValid()) {
- m_out.unreachable();
- return false;
- }
-
- m_node = m_highBlock->at(nodeIndex);
- m_codeOriginForExitProfile = m_node->codeOrigin;
- m_codeOriginForExitTarget = m_node->codeOriginForExitTarget;
-
- if (verboseCompilationEnabled())
- dataLog("Lowering ", m_node, "\n");
-
- bool shouldExecuteEffects = m_interpreter.startExecuting(m_node);
-
- switch (m_node->op()) {
- case Upsilon:
- compileUpsilon();
- break;
- case Phi:
- compilePhi();
- break;
- case JSConstant:
- break;
- case WeakJSConstant:
- compileWeakJSConstant();
- break;
- case GetArgument:
- compileGetArgument();
- break;
- case ExtractOSREntryLocal:
- compileExtractOSREntryLocal();
- break;
- case GetLocal:
- compileGetLocal();
- break;
- case SetLocal:
- compileSetLocal();
- break;
- case MovHint:
- compileMovHint();
- break;
- case ZombieHint:
- compileZombieHint();
- break;
- case Phantom:
- compilePhantom();
- break;
- case ValueAdd:
- compileValueAdd();
- break;
- case ArithAdd:
- compileAddSub();
- break;
- case ArithSub:
- compileAddSub();
- break;
- case ArithMul:
- compileArithMul();
- break;
- case ArithDiv:
- compileArithDivMod();
- break;
- case ArithMod:
- compileArithDivMod();
- break;
- case ArithMin:
- case ArithMax:
- compileArithMinOrMax();
- break;
- case ArithAbs:
- compileArithAbs();
- break;
- case ArithNegate:
- compileArithNegate();
- break;
- case BitAnd:
- compileBitAnd();
- break;
- case BitOr:
- compileBitOr();
- break;
- case BitXor:
- compileBitXor();
- break;
- case BitRShift:
- compileBitRShift();
- break;
- case BitLShift:
- compileBitLShift();
- break;
- case BitURShift:
- compileBitURShift();
- break;
- case UInt32ToNumber:
- compileUInt32ToNumber();
- break;
- case Int32ToDouble:
- compileInt32ToDouble();
- break;
- case CheckStructure:
- compileCheckStructure();
- break;
- case StructureTransitionWatchpoint:
- compileStructureTransitionWatchpoint();
- break;
- case CheckFunction:
- compileCheckFunction();
- break;
- case ArrayifyToStructure:
- compileArrayifyToStructure();
- break;
- case PutStructure:
- compilePutStructure();
- break;
- case PhantomPutStructure:
- compilePhantomPutStructure();
- break;
- case GetById:
- compileGetById();
- break;
- case PutById:
- compilePutById();
- break;
- case GetButterfly:
- compileGetButterfly();
- break;
- case ConstantStoragePointer:
- compileConstantStoragePointer();
- break;
- case GetIndexedPropertyStorage:
- compileGetIndexedPropertyStorage();
- break;
- case CheckArray:
- compileCheckArray();
- break;
- case GetArrayLength:
- compileGetArrayLength();
- break;
- case CheckInBounds:
- compileCheckInBounds();
- break;
- case GetByVal:
- compileGetByVal();
- break;
- case PutByVal:
- case PutByValAlias:
- case PutByValDirect:
- compilePutByVal();
- break;
- case NewObject:
- compileNewObject();
- break;
- case NewArray:
- compileNewArray();
- break;
- case NewArrayBuffer:
- compileNewArrayBuffer();
- break;
- case AllocatePropertyStorage:
- compileAllocatePropertyStorage();
- break;
- case StringCharAt:
- compileStringCharAt();
- break;
- case StringCharCodeAt:
- compileStringCharCodeAt();
- break;
- case GetByOffset:
- compileGetByOffset();
- break;
- case PutByOffset:
- compilePutByOffset();
- break;
- case GetGlobalVar:
- compileGetGlobalVar();
- break;
- case PutGlobalVar:
- compilePutGlobalVar();
- break;
- case NotifyWrite:
- compileNotifyWrite();
- break;
- case GetMyScope:
- compileGetMyScope();
- break;
- case SkipScope:
- compileSkipScope();
- break;
- case GetClosureRegisters:
- compileGetClosureRegisters();
- break;
- case GetClosureVar:
- compileGetClosureVar();
- break;
- case PutClosureVar:
- compilePutClosureVar();
- break;
- case CompareEq:
- compileCompareEq();
- break;
- case CompareEqConstant:
- compileCompareEqConstant();
- break;
- case CompareStrictEq:
- compileCompareStrictEq();
- break;
- case CompareStrictEqConstant:
- compileCompareStrictEqConstant();
- break;
- case CompareLess:
- compileCompareLess();
- break;
- case CompareLessEq:
- compileCompareLessEq();
- break;
- case CompareGreater:
- compileCompareGreater();
- break;
- case CompareGreaterEq:
- compileCompareGreaterEq();
- break;
- case LogicalNot:
- compileLogicalNot();
- break;
- case Call:
- case Construct:
- compileCallOrConstruct();
- break;
- case Jump:
- compileJump();
- break;
- case Branch:
- compileBranch();
- break;
- case Switch:
- compileSwitch();
- break;
- case Return:
- compileReturn();
- break;
- case ForceOSRExit:
- compileForceOSRExit();
- break;
- case InvalidationPoint:
- compileInvalidationPoint();
- break;
- case ValueToInt32:
- compileValueToInt32();
- break;
- case Int52ToValue:
- compileInt52ToValue();
- break;
- case StoreBarrier:
- compileStoreBarrier();
- break;
- case ConditionalStoreBarrier:
- compileConditionalStoreBarrier();
- break;
- case StoreBarrierWithNullCheck:
- compileStoreBarrierWithNullCheck();
- break;
- case Flush:
- case PhantomLocal:
- case SetArgument:
- case LoopHint:
- case VariableWatchpoint:
- case FunctionReentryWatchpoint:
- case TypedArrayWatchpoint:
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- if (shouldExecuteEffects)
- m_interpreter.executeEffects(nodeIndex);
-
- return true;
- }
-
- void compileValueToInt32()
- {
- switch (m_node->child1().useKind()) {
- case Int32Use:
- setInt32(lowInt32(m_node->child1()));
- break;
-
- case MachineIntUse:
- setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
- break;
-
- case NumberUse:
- case NotCellUse: {
- LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
- if (isValid(value)) {
- setInt32(value.value());
- break;
- }
-
- value = m_jsValueValues.get(m_node->child1().node());
- if (isValid(value)) {
- LBasicBlock intCase = FTL_NEW_BLOCK(m_out, ("ValueToInt32 int case"));
- LBasicBlock notIntCase = FTL_NEW_BLOCK(m_out, ("ValueToInt32 not int case"));
- LBasicBlock doubleCase = 0;
- LBasicBlock notNumberCase = 0;
- if (m_node->child1().useKind() == NotCellUse) {
- doubleCase = FTL_NEW_BLOCK(m_out, ("ValueToInt32 double case"));
- notNumberCase = FTL_NEW_BLOCK(m_out, ("ValueToInt32 not number case"));
- }
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ValueToInt32 continuation"));
-
- Vector<ValueFromBlock> results;
-
- m_out.branch(isNotInt32(value.value()), notIntCase, intCase);
-
- LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase);
- results.append(m_out.anchor(unboxInt32(value.value())));
- m_out.jump(continuation);
-
- if (m_node->child1().useKind() == NumberUse) {
- m_out.appendTo(notIntCase, continuation);
- FTL_TYPE_CHECK(
- jsValueValue(value.value()), m_node->child1(), SpecFullNumber,
- isCellOrMisc(value.value()));
- results.append(m_out.anchor(doubleToInt32(unboxDouble(value.value()))));
- m_out.jump(continuation);
- } else {
- m_out.appendTo(notIntCase, doubleCase);
- m_out.branch(isCellOrMisc(value.value()), notNumberCase, doubleCase);
-
- m_out.appendTo(doubleCase, notNumberCase);
- results.append(m_out.anchor(doubleToInt32(unboxDouble(value.value()))));
- m_out.jump(continuation);
-
- m_out.appendTo(notNumberCase, continuation);
-
- FTL_TYPE_CHECK(
- jsValueValue(value.value()), m_node->child1(), ~SpecCell,
- isCell(value.value()));
-
- LValue specialResult = m_out.select(
- m_out.equal(
- value.value(),
- m_out.constInt64(JSValue::encode(jsBoolean(true)))),
- m_out.int32One, m_out.int32Zero);
- results.append(m_out.anchor(specialResult));
- m_out.jump(continuation);
- }
-
- m_out.appendTo(continuation, lastNext);
- setInt32(m_out.phi(m_out.int32, results));
- break;
- }
-
- value = m_doubleValues.get(m_node->child1().node());
- if (isValid(value)) {
- setInt32(doubleToInt32(value.value()));
- break;
- }
-
- terminate(Uncountable);
- break;
- }
-
- case BooleanUse:
- setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), m_out.int32));
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileInt52ToValue()
- {
- setJSValue(lowJSValue(m_node->child1()));
- }
-
- void compileStoreBarrier()
- {
- emitStoreBarrier(lowCell(m_node->child1()));
- }
-
- void compileConditionalStoreBarrier()
- {
- LValue base = lowCell(m_node->child1());
- LValue value = lowJSValue(m_node->child2());
- emitStoreBarrier(base, value, m_node->child2());
- }
-
- void compileStoreBarrierWithNullCheck()
- {
-#if ENABLE(GGC)
- LBasicBlock isNotNull = FTL_NEW_BLOCK(m_out, ("Store barrier with null check value not null"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Store barrier continuation"));
-
- LValue base = lowJSValue(m_node->child1());
- m_out.branch(m_out.isZero64(base), continuation, isNotNull);
- LBasicBlock lastNext = m_out.appendTo(isNotNull, continuation);
- emitStoreBarrier(base);
- m_out.appendTo(continuation, lastNext);
-#else
- speculate(m_node->child1());
-#endif
- }
-
- void compileUpsilon()
- {
- LValue destination = m_phis.get(m_node->phi());
-
- switch (m_node->child1().useKind()) {
- case NumberUse:
- m_out.set(lowDouble(m_node->child1()), destination);
- break;
- case Int32Use:
- m_out.set(lowInt32(m_node->child1()), destination);
- break;
- case MachineIntUse:
- m_out.set(lowInt52(m_node->child1()), destination);
- break;
- case BooleanUse:
- m_out.set(lowBoolean(m_node->child1()), destination);
- break;
- case CellUse:
- m_out.set(lowCell(m_node->child1()), destination);
- break;
- case UntypedUse:
- m_out.set(lowJSValue(m_node->child1()), destination);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compilePhi()
- {
- LValue source = m_phis.get(m_node);
-
- switch (m_node->flags() & NodeResultMask) {
- case NodeResultNumber:
- setDouble(m_out.get(source));
- break;
- case NodeResultInt32:
- setInt32(m_out.get(source));
- break;
- case NodeResultInt52:
- setInt52(m_out.get(source));
- break;
- case NodeResultBoolean:
- setBoolean(m_out.get(source));
- break;
- case NodeResultJS:
- setJSValue(m_out.get(source));
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileWeakJSConstant()
- {
- setJSValue(weakPointer(m_node->weakConstant()));
- }
-
- void compileGetArgument()
- {
- VariableAccessData* variable = m_node->variableAccessData();
- VirtualRegister operand = variable->machineLocal();
- RELEASE_ASSERT(operand.isArgument());
-
- LValue jsValue = m_out.load64(addressFor(operand));
-
- switch (useKindFor(variable->flushFormat())) {
- case Int32Use:
- speculate(BadType, jsValueValue(jsValue), m_node, isNotInt32(jsValue));
- setInt32(unboxInt32(jsValue));
- break;
- case CellUse:
- speculate(BadType, jsValueValue(jsValue), m_node, isNotCell(jsValue));
- setJSValue(jsValue);
- break;
- case BooleanUse:
- speculate(BadType, jsValueValue(jsValue), m_node, isNotBoolean(jsValue));
- setBoolean(unboxBoolean(jsValue));
- break;
- case UntypedUse:
- setJSValue(jsValue);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileExtractOSREntryLocal()
- {
- EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
- m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
- setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
- }
-
- void compileGetLocal()
- {
- // GetLocals arise only for captured variables.
-
- VariableAccessData* variable = m_node->variableAccessData();
- AbstractValue& value = m_state.variables().operand(variable->local());
-
- RELEASE_ASSERT(variable->isCaptured());
-
- if (isInt32Speculation(value.m_type))
- setInt32(m_out.load32(payloadFor(variable->machineLocal())));
- else
- setJSValue(m_out.load64(addressFor(variable->machineLocal())));
- }
-
- void compileSetLocal()
- {
- VariableAccessData* variable = m_node->variableAccessData();
- switch (variable->flushFormat()) {
- case FlushedJSValue: {
- LValue value = lowJSValue(m_node->child1());
- m_out.store64(value, addressFor(variable->machineLocal()));
- break;
- }
-
- case FlushedDouble: {
- LValue value = lowDouble(m_node->child1());
- m_out.storeDouble(value, addressFor(variable->machineLocal()));
- break;
- }
-
- case FlushedInt32: {
- LValue value = lowInt32(m_node->child1());
- m_out.store32(value, payloadFor(variable->machineLocal()));
- break;
- }
-
- case FlushedInt52: {
- LValue value = lowInt52(m_node->child1());
- m_out.store64(value, addressFor(variable->machineLocal()));
- break;
- }
-
- case FlushedCell: {
- LValue value = lowCell(m_node->child1());
- m_out.store64(value, addressFor(variable->machineLocal()));
- break;
- }
-
- case FlushedBoolean: {
- speculateBoolean(m_node->child1());
- m_out.store64(
- lowJSValue(m_node->child1(), ManualOperandSpeculation),
- addressFor(variable->machineLocal()));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- m_availability.operand(variable->local()) = Availability(variable->flushedAt());
- }
-
- void compileMovHint()
- {
- ASSERT(m_node->containsMovHint());
- ASSERT(m_node->op() != ZombieHint);
-
- VirtualRegister operand = m_node->unlinkedLocal();
- m_availability.operand(operand) = Availability(m_node->child1().node());
- }
-
- void compileZombieHint()
- {
- m_availability.operand(m_node->unlinkedLocal()) = Availability::unavailable();
- }
-
- void compilePhantom()
- {
- DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
- }
-
- void compileValueAdd()
- {
- J_JITOperation_EJJ operation;
- if (!(m_state.forNode(m_node->child1()).m_type & SpecFullNumber)
- && !(m_state.forNode(m_node->child2()).m_type & SpecFullNumber))
- operation = operationValueAddNotNumber;
- else
- operation = operationValueAdd;
- setJSValue(vmCall(
- m_out.operation(operation), m_callFrame,
- lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
- }
-
- void compileAddSub()
- {
- bool isSub = m_node->op() == ArithSub;
- switch (m_node->binaryUseKind()) {
- case Int32Use: {
- LValue left = lowInt32(m_node->child1());
- LValue right = lowInt32(m_node->child2());
- LValue result = isSub ? m_out.sub(left, right) : m_out.add(left, right);
-
- if (!shouldCheckOverflow(m_node->arithMode())) {
- setInt32(result);
- break;
- }
-
- LValue overflow = isSub ? m_out.subWithOverflow32(left, right) : m_out.addWithOverflow32(left, right);
-
- speculate(Overflow, noValue(), 0, m_out.extractValue(overflow, 1));
- setInt32(result);
- break;
- }
-
- case MachineIntUse: {
- if (!m_state.forNode(m_node->child1()).couldBeType(SpecInt52)
- && !m_state.forNode(m_node->child2()).couldBeType(SpecInt52)) {
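-                // Neither operand can be outside the int32 range here (neither could be
-                // SpecInt52), so the 52-bit add/sub cannot overflow and no overflow
-                // check is needed on this path.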
- Int52Kind kind;
- LValue left = lowWhicheverInt52(m_node->child1(), kind);
- LValue right = lowInt52(m_node->child2(), kind);
- setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
- break;
- }
-
- LValue left = lowInt52(m_node->child1());
- LValue right = lowInt52(m_node->child2());
- LValue result = isSub ? m_out.sub(left, right) : m_out.add(left, right);
-
- LValue overflow = isSub ? m_out.subWithOverflow64(left, right) : m_out.addWithOverflow64(left, right);
- speculate(Int52Overflow, noValue(), 0, m_out.extractValue(overflow, 1));
- setInt52(result);
- break;
- }
-
- case NumberUse: {
- LValue C1 = lowDouble(m_node->child1());
- LValue C2 = lowDouble(m_node->child2());
-
- setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileArithMul()
- {
- switch (m_node->binaryUseKind()) {
- case Int32Use: {
- LValue left = lowInt32(m_node->child1());
- LValue right = lowInt32(m_node->child2());
- LValue result = m_out.mul(left, right);
-
- if (shouldCheckOverflow(m_node->arithMode())) {
- LValue overflowResult = m_out.mulWithOverflow32(left, right);
- speculate(Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
- }
-
- if (shouldCheckNegativeZero(m_node->arithMode())) {
- LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArithMul slow case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMul continuation"));
-
- m_out.branch(m_out.notZero32(result), continuation, slowCase);
-
- LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
- LValue cond = m_out.bitOr(m_out.lessThan(left, m_out.int32Zero), m_out.lessThan(right, m_out.int32Zero));
- speculate(NegativeZero, noValue(), 0, cond);
- m_out.jump(continuation);
- m_out.appendTo(continuation, lastNext);
- }
-
- setInt32(result);
- break;
- }
-
- case MachineIntUse: {
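-            // Lower one operand in whichever Int52 representation it already has and
-            // force the other into the opposite representation, so that exactly one
-            // of the two carries the Int52 shift and the product comes out already in
-            // (shifted) Int52 form.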
- Int52Kind kind;
- LValue left = lowWhicheverInt52(m_node->child1(), kind);
- LValue right = lowInt52(m_node->child2(), opposite(kind));
- LValue result = m_out.mul(left, right);
-
- LValue overflowResult = m_out.mulWithOverflow64(left, right);
- speculate(Int52Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
-
- if (shouldCheckNegativeZero(m_node->arithMode())) {
- LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArithMul slow case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMul continuation"));
-
- m_out.branch(m_out.notZero64(result), continuation, slowCase);
-
- LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
- LValue cond = m_out.bitOr(m_out.lessThan(left, m_out.int64Zero), m_out.lessThan(right, m_out.int64Zero));
- speculate(NegativeZero, noValue(), 0, cond);
- m_out.jump(continuation);
- m_out.appendTo(continuation, lastNext);
- }
-
- setInt52(result);
- break;
- }
-
- case NumberUse: {
- setDouble(
- m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileArithDivMod()
- {
- switch (m_node->binaryUseKind()) {
- case Int32Use: {
- LValue numerator = lowInt32(m_node->child1());
- LValue denominator = lowInt32(m_node->child2());
-
- LBasicBlock unsafeDenominator = FTL_NEW_BLOCK(m_out, ("ArithDivMod unsafe denominator"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithDivMod continuation"));
- LBasicBlock done = FTL_NEW_BLOCK(m_out, ("ArithDivMod done"));
-
- Vector<ValueFromBlock, 3> results;
-
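-            // denominator + 1 > 1 (unsigned) is false exactly when the denominator is
-            // 0 or -1, so a single unsigned compare routes both special denominators
-            // to the unsafe path.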
- LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
-
- m_out.branch(m_out.above(adjustedDenominator, m_out.int32One), continuation, unsafeDenominator);
-
- LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
-
- LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
-
- if (shouldCheckOverflow(m_node->arithMode())) {
- LValue cond = m_out.bitOr(m_out.isZero32(denominator), m_out.equal(numerator, neg2ToThe31));
- speculate(Overflow, noValue(), 0, cond);
- m_out.jump(continuation);
- } else {
-                // This is the case where we convert the result to an int after we're done. So,
-                // if the denominator is zero, the result should be zero.
-                // If the denominator is not zero, it can only be -1 (the check above already
-                // filtered out everything else), and if the numerator is then -2^31 the true
-                // quotient 2^31 is unrepresentable, so the result should be -2^31.
-
- LBasicBlock divByZero = FTL_NEW_BLOCK(m_out, ("ArithDiv divide by zero"));
- LBasicBlock notDivByZero = FTL_NEW_BLOCK(m_out, ("ArithDiv not divide by zero"));
- LBasicBlock neg2ToThe31ByNeg1 = FTL_NEW_BLOCK(m_out, ("ArithDiv -2^31/-1"));
-
- m_out.branch(m_out.isZero32(denominator), divByZero, notDivByZero);
-
- m_out.appendTo(divByZero, notDivByZero);
- results.append(m_out.anchor(m_out.int32Zero));
- m_out.jump(done);
-
- m_out.appendTo(notDivByZero, neg2ToThe31ByNeg1);
- m_out.branch(m_out.equal(numerator, neg2ToThe31), neg2ToThe31ByNeg1, continuation);
-
- m_out.appendTo(neg2ToThe31ByNeg1, continuation);
- results.append(m_out.anchor(neg2ToThe31));
- m_out.jump(done);
- }
-
- m_out.appendTo(continuation, done);
-
- if (shouldCheckNegativeZero(m_node->arithMode())) {
- LBasicBlock zeroNumerator = FTL_NEW_BLOCK(m_out, ("ArithDivMod zero numerator"));
- LBasicBlock numeratorContinuation = FTL_NEW_BLOCK(m_out, ("ArithDivMod numerator continuation"));
-
- m_out.branch(m_out.isZero32(numerator), zeroNumerator, numeratorContinuation);
-
- LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
-
- speculate(
- NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
-
- m_out.jump(numeratorContinuation);
-
- m_out.appendTo(numeratorContinuation, innerLastNext);
- }
-
- LValue divModResult = m_node->op() == ArithDiv
- ? m_out.div(numerator, denominator)
- : m_out.rem(numerator, denominator);
-
- if (shouldCheckOverflow(m_node->arithMode())) {
- speculate(
- Overflow, noValue(), 0,
- m_out.notEqual(m_out.mul(divModResult, denominator), numerator));
- }
-
- results.append(m_out.anchor(divModResult));
- m_out.jump(done);
-
- m_out.appendTo(done, lastNext);
-
- setInt32(m_out.phi(m_out.int32, results));
- break;
- }
-
- case NumberUse: {
- LValue C1 = lowDouble(m_node->child1());
- LValue C2 = lowDouble(m_node->child2());
- setDouble(m_node->op() == ArithDiv ? m_out.doubleDiv(C1, C2) : m_out.doubleRem(C1, C2));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileArithMinOrMax()
- {
- switch (m_node->binaryUseKind()) {
- case Int32Use: {
- LValue left = lowInt32(m_node->child1());
- LValue right = lowInt32(m_node->child2());
-
- setInt32(
- m_out.select(
- m_node->op() == ArithMin
- ? m_out.lessThan(left, right)
- : m_out.lessThan(right, left),
- left, right));
- break;
- }
-
- case NumberUse: {
- LValue left = lowDouble(m_node->child1());
- LValue right = lowDouble(m_node->child2());
-
- LBasicBlock notLessThan = FTL_NEW_BLOCK(m_out, ("ArithMin/ArithMax not less than"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMin/ArithMax continuation"));
-
- Vector<ValueFromBlock, 2> results;
-
- results.append(m_out.anchor(left));
- m_out.branch(
- m_node->op() == ArithMin
- ? m_out.doubleLessThan(left, right)
- : m_out.doubleGreaterThan(left, right),
- continuation, notLessThan);
-
- LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
- results.append(m_out.anchor(m_out.select(
- m_node->op() == ArithMin
- ? m_out.doubleGreaterThanOrEqual(left, right)
- : m_out.doubleLessThanOrEqual(left, right),
- right, m_out.constDouble(0.0 / 0.0))));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- setDouble(m_out.phi(m_out.doubleType, results));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileArithAbs()
- {
- switch (m_node->child1().useKind()) {
- case Int32Use: {
- LValue value = lowInt32(m_node->child1());
-
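-            // Branchless abs: mask is 0 for non-negative values and -1 for negative
-            // ones, so (value + mask) ^ mask flips the sign of negatives. The only
-            // unrepresentable input is INT32_MIN, which the speculation below catches.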
- LValue mask = m_out.aShr(value, m_out.constInt32(31));
- LValue result = m_out.bitXor(mask, m_out.add(mask, value));
-
- speculate(Overflow, noValue(), 0, m_out.equal(result, m_out.constInt32(1 << 31)));
-
- setInt32(result);
- break;
- }
-
- case NumberUse: {
- setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileArithNegate()
- {
- switch (m_node->child1().useKind()) {
- case Int32Use: {
- LValue value = lowInt32(m_node->child1());
-
- LValue result = m_out.neg(value);
- if (shouldCheckOverflow(m_node->arithMode())) {
- if (!shouldCheckNegativeZero(m_node->arithMode())) {
- // We don't have a negate-with-overflow intrinsic. Hopefully this
- // does the trick, though.
- LValue overflowResult = m_out.subWithOverflow32(m_out.int32Zero, value);
- speculate(Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
- } else
- speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
-
- }
-
- setInt32(result);
- break;
- }
-
- case MachineIntUse: {
- if (!m_state.forNode(m_node->child1()).couldBeType(SpecInt52)) {
- Int52Kind kind;
- LValue value = lowWhicheverInt52(m_node->child1(), kind);
- LValue result = m_out.neg(value);
- if (shouldCheckNegativeZero(m_node->arithMode()))
- speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
- setInt52(result, kind);
- break;
- }
-
- LValue value = lowInt52(m_node->child1());
- LValue overflowResult = m_out.subWithOverflow64(m_out.int64Zero, value);
- speculate(Int52Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
- LValue result = m_out.neg(value);
- speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
- setInt52(result);
- break;
- }
-
- case NumberUse: {
- setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileBitAnd()
- {
- setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
- }
-
- void compileBitOr()
- {
- setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
- }
-
- void compileBitXor()
- {
- setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
- }
-
- void compileBitRShift()
- {
- setInt32(m_out.aShr(
- lowInt32(m_node->child1()),
- m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
- }
-
- void compileBitLShift()
- {
- setInt32(m_out.shl(
- lowInt32(m_node->child1()),
- m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
- }
-
- void compileBitURShift()
- {
- setInt32(m_out.lShr(
- lowInt32(m_node->child1()),
- m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
- }
-
- void compileUInt32ToNumber()
- {
- LValue value = lowInt32(m_node->child1());
-
- if (doesOverflow(m_node->arithMode())) {
- setDouble(m_out.unsignedToDouble(value));
- return;
- }
-
- speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
- setInt32(value);
- }
-
- void compileInt32ToDouble()
- {
- setDouble(lowDouble(m_node->child1()));
- }
-
- void compileCheckStructure()
- {
- LValue cell = lowCell(m_node->child1());
-
- ExitKind exitKind;
- if (m_node->child1()->op() == WeakJSConstant)
- exitKind = BadWeakConstantCache;
- else
- exitKind = BadCache;
-
- LValue structure = m_out.loadPtr(cell, m_heaps.JSCell_structure);
-
- if (m_node->structureSet().size() == 1) {
- speculate(
- exitKind, jsValueValue(cell), 0,
- m_out.notEqual(structure, weakPointer(m_node->structureSet()[0])));
- return;
- }
-
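-        // More than one admissible structure: branch straight to the continuation on
-        // each match and only treat a mismatch against the last structure as an exit.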
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CheckStructure continuation"));
-
- LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
- for (unsigned i = 0; i < m_node->structureSet().size() - 1; ++i) {
- LBasicBlock nextStructure = FTL_NEW_BLOCK(m_out, ("CheckStructure nextStructure"));
- m_out.branch(
- m_out.equal(structure, weakPointer(m_node->structureSet()[i])),
- continuation, nextStructure);
- m_out.appendTo(nextStructure);
- }
-
- speculate(
- exitKind, jsValueValue(cell), 0,
- m_out.notEqual(structure, weakPointer(m_node->structureSet().last())));
-
- m_out.jump(continuation);
- m_out.appendTo(continuation, lastNext);
- }
-
- void compileStructureTransitionWatchpoint()
- {
- addWeakReference(m_node->structure());
- speculateCell(m_node->child1());
- }
-
- void compileCheckFunction()
- {
- LValue cell = lowCell(m_node->child1());
-
- speculate(
- BadFunction, jsValueValue(cell), m_node->child1().node(),
- m_out.notEqual(cell, weakPointer(m_node->function())));
- }
-
- void compileArrayifyToStructure()
- {
- LValue cell = lowCell(m_node->child1());
- LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
-
- LBasicBlock unexpectedStructure = FTL_NEW_BLOCK(m_out, ("ArrayifyToStructure unexpected structure"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArrayifyToStructure continuation"));
-
- LValue structure = m_out.loadPtr(cell, m_heaps.JSCell_structure);
-
- m_out.branch(
- m_out.notEqual(structure, weakPointer(m_node->structure())),
- unexpectedStructure, continuation);
-
- LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
-
- if (property) {
- switch (m_node->arrayMode().type()) {
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous:
- speculate(
- Uncountable, noValue(), 0,
- m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
- break;
- default:
- break;
- }
- }
-
- switch (m_node->arrayMode().type()) {
- case Array::Int32:
- vmCall(m_out.operation(operationEnsureInt32), m_callFrame, cell);
- break;
- case Array::Double:
- vmCall(m_out.operation(operationEnsureDouble), m_callFrame, cell);
- break;
- case Array::Contiguous:
- if (m_node->arrayMode().conversion() == Array::RageConvert)
- vmCall(m_out.operation(operationRageEnsureContiguous), m_callFrame, cell);
- else
- vmCall(m_out.operation(operationEnsureContiguous), m_callFrame, cell);
- break;
- case Array::ArrayStorage:
- case Array::SlowPutArrayStorage:
- vmCall(m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- structure = m_out.loadPtr(cell, m_heaps.JSCell_structure);
- speculate(
- BadIndexingType, jsValueValue(cell), 0,
- m_out.notEqual(structure, weakPointer(m_node->structure())));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- }
-
- void compilePutStructure()
- {
- m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
-
- m_out.store64(
- m_out.constIntPtr(m_node->structureTransitionData().newStructure),
- lowCell(m_node->child1()), m_heaps.JSCell_structure);
- }
-
- void compilePhantomPutStructure()
- {
- m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
- }
-
- void compileGetById()
- {
- // UntypedUse is a bit harder to reason about and I'm not sure how best to do it, yet.
- // Basically we need to emit a cell branch that takes you to the slow path, but the slow
- // path is generated by the IC generator so we can't jump to it from here. And the IC
- // generator currently doesn't know how to emit such a branch. So, for now, we just
- // restrict this to CellUse.
- ASSERT(m_node->child1().useKind() == CellUse);
-
- LValue base = lowCell(m_node->child1());
- StringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
-
- // Arguments: id, bytes, target, numArgs, args...
- unsigned stackmapID = m_stackmapIDs++;
-
- if (Options::verboseCompilation())
- dataLog(" Emitting GetById patchpoint with stackmap #", stackmapID, "\n");
-
- LValue call = m_out.call(
- m_out.patchpointInt64Intrinsic(),
- m_out.constInt32(stackmapID), m_out.constInt32(sizeOfGetById()),
- constNull(m_out.ref8), m_out.constInt32(2), m_callFrame, base);
- setInstructionCallingConvention(call, LLVMAnyRegCallConv);
- setJSValue(call);
-
- m_ftlState.getByIds.append(GetByIdDescriptor(stackmapID, m_node->codeOrigin, uid));
- }
-
- void compilePutById()
- {
- // See above; CellUse is easier so we do only that for now.
- ASSERT(m_node->child1().useKind() == CellUse);
-
- LValue base = lowCell(m_node->child1());
- LValue value = lowJSValue(m_node->child2());
- StringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
-
- // Arguments: id, bytes, target, numArgs, args...
- unsigned stackmapID = m_stackmapIDs++;
-
- if (Options::verboseCompilation())
- dataLog(" Emitting PutById patchpoint with stackmap #", stackmapID, "\n");
-
- LValue call = m_out.call(
- m_out.patchpointVoidIntrinsic(),
- m_out.constInt32(stackmapID), m_out.constInt32(sizeOfPutById()),
- constNull(m_out.ref8), m_out.constInt32(3), m_callFrame, base, value);
- setInstructionCallingConvention(call, LLVMAnyRegCallConv);
-
- m_ftlState.putByIds.append(PutByIdDescriptor(
- stackmapID, m_node->codeOrigin, uid,
- m_graph.executableFor(m_node->codeOrigin)->ecmaMode(),
- m_node->op() == PutByIdDirect ? Direct : NotDirect));
- }
-
- void compileGetButterfly()
- {
- setStorage(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly));
- }
-
- void compileConstantStoragePointer()
- {
- setStorage(m_out.constIntPtr(m_node->storagePointer()));
- }
-
- void compileGetIndexedPropertyStorage()
- {
- LValue cell = lowCell(m_node->child1());
-
- if (m_node->arrayMode().type() == Array::String) {
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("GetIndexedPropertyStorage String slow case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetIndexedPropertyStorage String continuation"));
-
- ValueFromBlock fastResult = m_out.anchor(
- m_out.loadPtr(cell, m_heaps.JSString_value));
-
- m_out.branch(m_out.notNull(fastResult.value()), continuation, slowPath);
-
- LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
-
- ValueFromBlock slowResult = m_out.anchor(
- vmCall(m_out.operation(operationResolveRope), m_callFrame, cell));
-
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-
- setStorage(m_out.loadPtr(m_out.phi(m_out.intPtr, fastResult, slowResult), m_heaps.StringImpl_data));
- return;
- }
-
- setStorage(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector));
- }
-
- void compileCheckArray()
- {
- Edge edge = m_node->child1();
- LValue cell = lowCell(edge);
-
- if (m_node->arrayMode().alreadyChecked(m_graph, m_node, m_state.forNode(edge)))
- return;
-
- speculate(
- BadIndexingType, jsValueValue(cell), 0,
- m_out.bitNot(isArrayType(cell, m_node->arrayMode())));
- }
-
- void compileGetArrayLength()
- {
- switch (m_node->arrayMode().type()) {
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous: {
- setInt32(m_out.load32(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
- return;
- }
-
- case Array::String: {
- LValue string = lowCell(m_node->child1());
- setInt32(m_out.load32(string, m_heaps.JSString_length));
- return;
- }
-
- default:
- if (isTypedView(m_node->arrayMode().typedArrayType())) {
- setInt32(
- m_out.load32(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return;
- }
- }
-
- void compileCheckInBounds()
- {
- speculate(
- OutOfBounds, noValue(), 0,
- m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
- }
-
- void compileGetByVal()
- {
- switch (m_node->arrayMode().type()) {
- case Array::Int32:
- case Array::Contiguous: {
- LValue index = lowInt32(m_node->child2());
- LValue storage = lowStorage(m_node->child3());
-
- IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
- m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
-
- if (m_node->arrayMode().isInBounds()) {
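-                // Holes in Int32/Contiguous butterflies are stored as the empty
-                // JSValue, which encodes to zero, so a zero load means we read a hole.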
- LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
- speculate(LoadFromHole, noValue(), 0, m_out.isZero64(result));
- setJSValue(result);
- return;
- }
-
- LValue base = lowCell(m_node->child1());
-
- LBasicBlock fastCase = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous fast case"));
- LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous slow case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous continuation"));
-
- m_out.branch(
- m_out.aboveOrEqual(
- index, m_out.load32(storage, m_heaps.Butterfly_publicLength)),
- slowCase, fastCase);
-
- LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
-
- ValueFromBlock fastResult = m_out.anchor(
- m_out.load64(baseIndex(heap, storage, index, m_node->child2())));
- m_out.branch(m_out.isZero64(fastResult.value()), slowCase, continuation);
-
- m_out.appendTo(slowCase, continuation);
- ValueFromBlock slowResult = m_out.anchor(
- vmCall(m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
- return;
- }
-
- case Array::Double: {
- LValue index = lowInt32(m_node->child2());
- LValue storage = lowStorage(m_node->child3());
-
- IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
-
- if (m_node->arrayMode().isInBounds()) {
- LValue result = m_out.loadDouble(
- baseIndex(heap, storage, index, m_node->child2()));
-
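-                // Double butterflies mark holes with NaN; unless the sane-chain
-                // guarantee applies we must speculate that the loaded value is not a
-                // hole, i.e. that it compares equal to itself.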
- if (!m_node->arrayMode().isSaneChain()) {
- speculate(
- LoadFromHole, noValue(), 0,
- m_out.doubleNotEqualOrUnordered(result, result));
- }
- setDouble(result);
- break;
- }
-
- LValue base = lowCell(m_node->child1());
-
- LBasicBlock inBounds = FTL_NEW_BLOCK(m_out, ("GetByVal double in bounds"));
- LBasicBlock boxPath = FTL_NEW_BLOCK(m_out, ("GetByVal double boxing"));
- LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("GetByVal double slow case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal double continuation"));
-
- m_out.branch(
- m_out.aboveOrEqual(
- index, m_out.load32(storage, m_heaps.Butterfly_publicLength)),
- slowCase, inBounds);
-
- LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
- LValue doubleValue = m_out.loadDouble(
- baseIndex(heap, storage, index, m_node->child2()));
- m_out.branch(
- m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), slowCase, boxPath);
-
- m_out.appendTo(boxPath, slowCase);
- ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
- m_out.jump(continuation);
-
- m_out.appendTo(slowCase, continuation);
- ValueFromBlock slowResult = m_out.anchor(
- vmCall(m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
- return;
- }
-
- case Array::Generic: {
- setJSValue(vmCall(
- m_out.operation(operationGetByVal), m_callFrame,
- lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
- return;
- }
-
- case Array::String: {
- compileStringCharAt();
- return;
- }
-
- default: {
- LValue index = lowInt32(m_node->child2());
- LValue storage = lowStorage(m_node->child3());
-
- TypedArrayType type = m_node->arrayMode().typedArrayType();
-
- if (isTypedView(type)) {
- TypedPointer pointer = TypedPointer(
- m_heaps.typedArrayProperties,
- m_out.add(
- storage,
- m_out.shl(
- m_out.zeroExt(index, m_out.intPtr),
- m_out.constIntPtr(logElementSize(type)))));
-
- if (isInt(type)) {
- LValue result;
- switch (elementSize(type)) {
- case 1:
- result = m_out.load8(pointer);
- break;
- case 2:
- result = m_out.load16(pointer);
- break;
- case 4:
- result = m_out.load32(pointer);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- if (elementSize(type) < 4) {
- if (isSigned(type))
- result = m_out.signExt(result, m_out.int32);
- else
- result = m_out.zeroExt(result, m_out.int32);
- setInt32(result);
- return;
- }
-
- if (isSigned(type)) {
- setInt32(result);
- return;
- }
-
- if (m_node->shouldSpeculateInt32()) {
- speculate(
- Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
- setInt32(result);
- return;
- }
-
- setDouble(m_out.unsignedToFP(result, m_out.doubleType));
- return;
- }
-
- ASSERT(isFloat(type));
-
- LValue result;
- switch (type) {
- case TypeFloat32:
- result = m_out.fpCast(m_out.loadFloat(pointer), m_out.doubleType);
- break;
- case TypeFloat64:
- result = m_out.loadDouble(pointer);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
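-                // Purify any NaN loaded from the typed array into the canonical quiet
-                // NaN so that an arbitrary NaN bit pattern can never be mistaken for a
-                // tagged JSValue later on.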
- result = m_out.select(
- m_out.doubleEqual(result, result), result, m_out.constDouble(QNaN));
- setDouble(result);
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return;
-        }
-        }
- }
-
- void compilePutByVal()
- {
- Edge child1 = m_graph.varArgChild(m_node, 0);
- Edge child2 = m_graph.varArgChild(m_node, 1);
- Edge child3 = m_graph.varArgChild(m_node, 2);
- Edge child4 = m_graph.varArgChild(m_node, 3);
-
- switch (m_node->arrayMode().type()) {
- case Array::Generic: {
- V_JITOperation_EJJJ operation;
- if (m_node->op() == PutByValDirect) {
- if (m_graph.isStrictModeFor(m_node->codeOrigin))
- operation = operationPutByValDirectStrict;
- else
- operation = operationPutByValDirectNonStrict;
- } else {
- if (m_graph.isStrictModeFor(m_node->codeOrigin))
- operation = operationPutByValStrict;
- else
- operation = operationPutByValNonStrict;
- }
-
- vmCall(
- m_out.operation(operation), m_callFrame,
- lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
- return;
- }
-
- default:
- break;
- }
-
- LValue base = lowCell(child1);
- LValue index = lowInt32(child2);
- LValue storage = lowStorage(child4);
-
- switch (m_node->arrayMode().type()) {
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous: {
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal continuation"));
- LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);
-
- switch (m_node->arrayMode().type()) {
- case Array::Int32:
- case Array::Contiguous: {
- LValue value = lowJSValue(child3, ManualOperandSpeculation);
-
- if (m_node->arrayMode().type() == Array::Int32)
- FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32, isNotInt32(value));
-
- TypedPointer elementPointer = m_out.baseIndex(
- m_node->arrayMode().type() == Array::Int32 ?
- m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(child2).m_value);
-
- if (m_node->op() == PutByValAlias) {
- m_out.store64(value, elementPointer);
- break;
- }
-
- contiguousPutByValOutOfBounds(
- codeBlock()->isStrictMode()
- ? operationPutByValBeyondArrayBoundsStrict
- : operationPutByValBeyondArrayBoundsNonStrict,
- base, storage, index, value, continuation);
-
- m_out.store64(value, elementPointer);
- break;
- }
-
- case Array::Double: {
- LValue value = lowDouble(child3);
-
- FTL_TYPE_CHECK(
- doubleValue(value), child3, SpecFullRealNumber,
- m_out.doubleNotEqualOrUnordered(value, value));
-
- TypedPointer elementPointer = m_out.baseIndex(
- m_heaps.indexedDoubleProperties,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(child2).m_value);
-
- if (m_node->op() == PutByValAlias) {
- m_out.storeDouble(value, elementPointer);
- break;
- }
-
- contiguousPutByValOutOfBounds(
- codeBlock()->isStrictMode()
- ? operationPutDoubleByValBeyondArrayBoundsStrict
- : operationPutDoubleByValBeyondArrayBoundsNonStrict,
- base, storage, index, value, continuation);
-
- m_out.storeDouble(value, elementPointer);
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- m_out.jump(continuation);
- m_out.appendTo(continuation, outerLastNext);
- return;
- }
-
- default:
- TypedArrayType type = m_node->arrayMode().typedArrayType();
-
- if (isTypedView(type)) {
- TypedPointer pointer = TypedPointer(
- m_heaps.typedArrayProperties,
- m_out.add(
- storage,
- m_out.shl(
- m_out.zeroExt(index, m_out.intPtr),
- m_out.constIntPtr(logElementSize(type)))));
-
- if (isInt(type)) {
- LValue intValue;
- switch (child3.useKind()) {
- case MachineIntUse:
- case Int32Use: {
- if (child3.useKind() == Int32Use)
- intValue = lowInt32(child3);
- else
- intValue = m_out.castToInt32(lowStrictInt52(child3));
-
- if (isClamped(type)) {
- ASSERT(elementSize(type) == 1);
-
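-                            // Uint8Clamped store: saturate the int32 value into
-                            // [0, 255] (negatives become 0, values above 255 become 255).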
- LBasicBlock atLeastZero = FTL_NEW_BLOCK(m_out, ("PutByVal int clamp atLeastZero"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal int clamp continuation"));
-
- Vector<ValueFromBlock, 2> intValues;
- intValues.append(m_out.anchor(m_out.int32Zero));
- m_out.branch(
- m_out.lessThan(intValue, m_out.int32Zero),
- continuation, atLeastZero);
-
- LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);
-
- intValues.append(m_out.anchor(m_out.select(
- m_out.greaterThan(intValue, m_out.constInt32(255)),
- m_out.constInt32(255),
- intValue)));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- intValue = m_out.phi(m_out.int32, intValues);
- }
- break;
- }
-
- case NumberUse: {
- LValue doubleValue = lowDouble(child3);
-
- if (isClamped(type)) {
- ASSERT(elementSize(type) == 1);
-
- LBasicBlock atLeastZero = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp atLeastZero"));
- LBasicBlock withinRange = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp withinRange"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp continuation"));
-
- Vector<ValueFromBlock, 3> intValues;
- intValues.append(m_out.anchor(m_out.int32Zero));
- m_out.branch(
- m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
- continuation, atLeastZero);
-
- LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
- intValues.append(m_out.anchor(m_out.constInt32(255)));
- m_out.branch(
- m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
- continuation, withinRange);
-
- m_out.appendTo(withinRange, continuation);
- intValues.append(m_out.anchor(m_out.fpToInt32(doubleValue)));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- intValue = m_out.phi(m_out.int32, intValues);
- } else
- intValue = doubleToInt32(doubleValue);
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- switch (elementSize(type)) {
- case 1:
- m_out.store8(m_out.intCast(intValue, m_out.int8), pointer);
- break;
- case 2:
- m_out.store16(m_out.intCast(intValue, m_out.int16), pointer);
- break;
- case 4:
- m_out.store32(intValue, pointer);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- return;
- }
-
- ASSERT(isFloat(type));
-
- LValue value = lowDouble(child3);
- switch (type) {
- case TypeFloat32:
- m_out.storeFloat(m_out.fpCast(value, m_out.floatType), pointer);
- break;
- case TypeFloat64:
- m_out.storeDouble(value, pointer);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- void compileNewObject()
- {
- Structure* structure = m_node->structure();
- size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
- MarkedAllocator* allocator = &vm().heap.allocatorForObjectWithoutDestructor(allocationSize);
-
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("NewObject slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NewObject continuation"));
-
- LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
-
- ValueFromBlock fastResult = m_out.anchor(allocateObject(
- m_out.constIntPtr(allocator), m_out.constIntPtr(structure), m_out.intPtrZero, slowPath));
-
- m_out.jump(continuation);
-
- m_out.appendTo(slowPath, continuation);
-
- ValueFromBlock slowResult = m_out.anchor(vmCall(
- m_out.operation(operationNewObject), m_callFrame, m_out.constIntPtr(structure)));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult));
- }
-
- void compileNewArray()
- {
- // First speculate appropriately on all of the children. Do this unconditionally up here
- // because some of the slow paths may otherwise forget to do it. It's sort of arguable
- // that doing the speculations up here might be unprofitable for RA - so we can consider
- // sinking this to below the allocation fast path if we find that this has a lot of
- // register pressure.
- for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
- speculate(m_graph.varArgChild(m_node, operandIndex));
-
- JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->codeOrigin);
- Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
- m_node->indexingType());
-
- RELEASE_ASSERT(structure->indexingType() == m_node->indexingType());
-
- if (!globalObject->isHavingABadTime() && !hasArrayStorage(m_node->indexingType())) {
- unsigned numElements = m_node->numChildren();
-
- ArrayValues arrayValues = allocateJSArray(structure, numElements);
-
- for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
- Edge edge = m_graph.varArgChild(m_node, operandIndex);
-
- switch (m_node->indexingType()) {
- case ALL_BLANK_INDEXING_TYPES:
- case ALL_UNDECIDED_INDEXING_TYPES:
- CRASH();
- break;
-
- case ALL_DOUBLE_INDEXING_TYPES:
- m_out.storeDouble(
- lowDouble(edge),
- arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
- break;
-
- case ALL_INT32_INDEXING_TYPES:
- case ALL_CONTIGUOUS_INDEXING_TYPES:
- m_out.store64(
- lowJSValue(edge, ManualOperandSpeculation),
- arrayValues.butterfly,
- m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
- break;
-
- default:
- CRASH();
- }
- }
-
- setJSValue(arrayValues.array);
- return;
- }
-
- if (!m_node->numChildren()) {
- setJSValue(vmCall(
- m_out.operation(operationNewEmptyArray), m_callFrame,
- m_out.constIntPtr(structure)));
- return;
- }
-
- size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
- ASSERT(scratchSize);
- ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
- EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
-
- for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
- Edge edge = m_graph.varArgChild(m_node, operandIndex);
- m_out.store64(
- lowJSValue(edge, ManualOperandSpeculation),
- m_out.absolute(buffer + operandIndex));
- }
-
- m_out.storePtr(
- m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->activeLengthPtr()));
-
- LValue result = vmCall(
- m_out.operation(operationNewArray), m_callFrame,
- m_out.constIntPtr(structure), m_out.constIntPtr(buffer),
- m_out.constIntPtr(m_node->numChildren()));
-
- m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->activeLengthPtr()));
-
- setJSValue(result);
- }
-
- void compileNewArrayBuffer()
- {
- JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->codeOrigin);
- Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(
- m_node->indexingType());
-
- RELEASE_ASSERT(structure->indexingType() == m_node->indexingType());
-
- if (!globalObject->isHavingABadTime() && !hasArrayStorage(m_node->indexingType())) {
- unsigned numElements = m_node->numConstants();
-
- ArrayValues arrayValues = allocateJSArray(structure, numElements);
-
- JSValue* data = codeBlock()->constantBuffer(m_node->startConstant());
- for (unsigned index = 0; index < m_node->numConstants(); ++index) {
- int64_t value;
- if (hasDouble(m_node->indexingType()))
- value = bitwise_cast<int64_t>(data[index].asNumber());
- else
- value = JSValue::encode(data[index]);
-
- m_out.store64(
- m_out.constInt64(value),
- arrayValues.butterfly,
- m_heaps.forIndexingType(m_node->indexingType())->at(index));
- }
-
- setJSValue(arrayValues.array);
- return;
- }
-
- setJSValue(vmCall(
- m_out.operation(operationNewArrayBuffer), m_callFrame,
- m_out.constIntPtr(structure), m_out.constIntPtr(m_node->startConstant()),
- m_out.constIntPtr(m_node->numConstants())));
- }
-
- void compileAllocatePropertyStorage()
- {
- StructureTransitionData& data = m_node->structureTransitionData();
-
- LValue object = lowCell(m_node->child1());
-
- if (data.previousStructure->couldHaveIndexingHeader()) {
- setStorage(vmCall(
- m_out.operation(
- operationReallocateButterflyToHavePropertyStorageWithInitialCapacity),
- m_callFrame, object));
- return;
- }
-
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("AllocatePropertyStorage slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("AllocatePropertyStorage continuation"));
-
- LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
-
- LValue endOfStorage = allocateBasicStorageAndGetEnd(
- m_out.constIntPtr(initialOutOfLineCapacity * sizeof(JSValue)), slowPath);
-
- ValueFromBlock fastButterfly = m_out.anchor(
- m_out.add(m_out.constIntPtr(sizeof(IndexingHeader)), endOfStorage));
-
- m_out.jump(continuation);
-
- m_out.appendTo(slowPath, continuation);
-
- ValueFromBlock slowButterfly = m_out.anchor(vmCall(
- m_out.operation(operationAllocatePropertyStorageWithInitialCapacity), m_callFrame));
-
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-
- LValue result = m_out.phi(m_out.intPtr, fastButterfly, slowButterfly);
- m_out.storePtr(result, object, m_heaps.JSObject_butterfly);
-
- setStorage(result);
- }
-
- void compileStringCharAt()
- {
- LValue base = lowCell(m_node->child1());
- LValue index = lowInt32(m_node->child2());
- LValue storage = lowStorage(m_node->child3());
-
- LBasicBlock fastPath = FTL_NEW_BLOCK(m_out, ("GetByVal String fast path"));
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("GetByVal String slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal String continuation"));
-
- m_out.branch(
- m_out.aboveOrEqual(
- index, m_out.load32(base, m_heaps.JSString_length)),
- slowPath, fastPath);
-
- LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
-
- LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
-
- LBasicBlock is8Bit = FTL_NEW_BLOCK(m_out, ("GetByVal String 8-bit case"));
- LBasicBlock is16Bit = FTL_NEW_BLOCK(m_out, ("GetByVal String 16-bit case"));
- LBasicBlock bitsContinuation = FTL_NEW_BLOCK(m_out, ("GetByVal String bitness continuation"));
- LBasicBlock bigCharacter = FTL_NEW_BLOCK(m_out, ("GetByVal String big character"));
-
- m_out.branch(
- m_out.testIsZero32(
- m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
- m_out.constInt32(StringImpl::flagIs8Bit())),
- is16Bit, is8Bit);
-
- m_out.appendTo(is8Bit, is16Bit);
-
- ValueFromBlock char8Bit = m_out.anchor(m_out.zeroExt(
- m_out.load8(m_out.baseIndex(
- m_heaps.characters8,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
- m_out.int32));
- m_out.jump(bitsContinuation);
-
- m_out.appendTo(is16Bit, bigCharacter);
-
- ValueFromBlock char16Bit = m_out.anchor(m_out.zeroExt(
- m_out.load16(m_out.baseIndex(
- m_heaps.characters16,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
- m_out.int32));
- m_out.branch(m_out.aboveOrEqual(char16Bit.value(), m_out.constInt32(0x100)), bigCharacter, bitsContinuation);
-
- m_out.appendTo(bigCharacter, bitsContinuation);
-
- Vector<ValueFromBlock, 4> results;
- results.append(m_out.anchor(vmCall(
- m_out.operation(operationSingleCharacterString),
- m_callFrame, char16Bit.value())));
- m_out.jump(continuation);
-
- m_out.appendTo(bitsContinuation, slowPath);
-
- LValue character = m_out.phi(m_out.int32, char8Bit, char16Bit);
-
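-        // Characters below 0x100 are materialized directly from the VM's
-        // single-character string cache; larger code units took the bigCharacter path
-        // above and called into the runtime instead.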
- LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
-
- results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
- m_heaps.singleCharacterStrings, smallStrings,
- m_out.zeroExt(character, m_out.intPtr)))));
- m_out.jump(continuation);
-
- m_out.appendTo(slowPath, continuation);
-
- if (m_node->arrayMode().isInBounds()) {
- speculate(OutOfBounds, noValue(), 0, m_out.booleanTrue);
- results.append(m_out.anchor(m_out.intPtrZero));
- } else {
- JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->codeOrigin);
-
- if (globalObject->stringPrototypeChainIsSane()) {
- LBasicBlock negativeIndex = FTL_NEW_BLOCK(m_out, ("GetByVal String negative index"));
-
- results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
- m_out.branch(m_out.lessThan(index, m_out.int32Zero), negativeIndex, continuation);
-
- m_out.appendTo(negativeIndex, continuation);
- }
-
- results.append(m_out.anchor(vmCall(
- m_out.operation(operationGetByValStringInt), m_callFrame, base, index)));
- }
-
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- setJSValue(m_out.phi(m_out.int64, results));
- }
-
- void compileStringCharCodeAt()
- {
- LBasicBlock is8Bit = FTL_NEW_BLOCK(m_out, ("StringCharCodeAt 8-bit case"));
- LBasicBlock is16Bit = FTL_NEW_BLOCK(m_out, ("StringCharCodeAt 16-bit case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("StringCharCodeAt continuation"));
-
- LValue base = lowCell(m_node->child1());
- LValue index = lowInt32(m_node->child2());
- LValue storage = lowStorage(m_node->child3());
-
- speculate(
- Uncountable, noValue(), 0,
- m_out.aboveOrEqual(index, m_out.load32(base, m_heaps.JSString_length)));
-
- LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
-
- m_out.branch(
- m_out.testIsZero32(
- m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
- m_out.constInt32(StringImpl::flagIs8Bit())),
- is16Bit, is8Bit);
-
- LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);
-
- ValueFromBlock char8Bit = m_out.anchor(m_out.zeroExt(
- m_out.load8(m_out.baseIndex(
- m_heaps.characters8,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
- m_out.int32));
- m_out.jump(continuation);
-
- m_out.appendTo(is16Bit, continuation);
-
- ValueFromBlock char16Bit = m_out.anchor(m_out.zeroExt(
- m_out.load16(m_out.baseIndex(
- m_heaps.characters16,
- storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(m_node->child2()).m_value)),
- m_out.int32));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-
- setInt32(m_out.phi(m_out.int32, char8Bit, char16Bit));
- }
-
- void compileGetByOffset()
- {
- StorageAccessData& data =
- m_graph.m_storageAccessData[m_node->storageAccessDataIndex()];
-
- setJSValue(
- m_out.load64(
- m_out.address(
- m_heaps.properties[data.identifierNumber],
- lowStorage(m_node->child1()),
- offsetRelativeToBase(data.offset))));
- }
-
- void compilePutByOffset()
- {
- StorageAccessData& data =
- m_graph.m_storageAccessData[m_node->storageAccessDataIndex()];
-
- m_out.store64(
- lowJSValue(m_node->child3()),
- m_out.address(
- m_heaps.properties[data.identifierNumber],
- lowStorage(m_node->child1()),
- offsetRelativeToBase(data.offset)));
- }
-
- void compileGetGlobalVar()
- {
- setJSValue(m_out.load64(m_out.absolute(m_node->registerPointer())));
- }
-
- void compilePutGlobalVar()
- {
- m_out.store64(
- lowJSValue(m_node->child1()), m_out.absolute(m_node->registerPointer()));
- }
-
- void compileNotifyWrite()
- {
- VariableWatchpointSet* set = m_node->variableWatchpointSet();
-
- LValue value = lowJSValue(m_node->child1());
-
- LBasicBlock isNotInvalidated = FTL_NEW_BLOCK(m_out, ("NotifyWrite not invalidated case"));
- LBasicBlock isClear = FTL_NEW_BLOCK(m_out, ("NotifyWrite clear case"));
- LBasicBlock isWatched = FTL_NEW_BLOCK(m_out, ("NotifyWrite watched case"));
- LBasicBlock invalidate = FTL_NEW_BLOCK(m_out, ("NotifyWrite invalidate case"));
- LBasicBlock invalidateFast = FTL_NEW_BLOCK(m_out, ("NotifyWrite invalidate fast case"));
- LBasicBlock invalidateSlow = FTL_NEW_BLOCK(m_out, ("NotifyWrite invalidate slow case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NotifyWrite continuation"));
-
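-        // The watchpoint set is a small state machine: ClearWatchpoint records the
-        // first value written and becomes IsWatched; IsWatched stays put while the
-        // same value is rewritten and otherwise invalidates, firing the watchpoints
-        // through the slow path only when the set is non-empty.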
- LValue state = m_out.load8(m_out.absolute(set->addressOfState()));
-
- m_out.branch(
- m_out.equal(state, m_out.constInt8(IsInvalidated)),
- continuation, isNotInvalidated);
-
- LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, isClear);
-
- LValue isClearValue;
- if (set->state() == ClearWatchpoint)
- isClearValue = m_out.equal(state, m_out.constInt8(ClearWatchpoint));
- else
- isClearValue = m_out.booleanFalse;
- m_out.branch(isClearValue, isClear, isWatched);
-
- m_out.appendTo(isClear, isWatched);
-
- m_out.store64(value, m_out.absolute(set->addressOfInferredValue()));
- m_out.store8(m_out.constInt8(IsWatched), m_out.absolute(set->addressOfState()));
- m_out.jump(continuation);
-
- m_out.appendTo(isWatched, invalidate);
-
- m_out.branch(
- m_out.equal(value, m_out.load64(m_out.absolute(set->addressOfInferredValue()))),
- continuation, invalidate);
-
- m_out.appendTo(invalidate, invalidateFast);
-
- m_out.branch(
- m_out.notZero8(m_out.load8(m_out.absolute(set->addressOfSetIsNotEmpty()))),
- invalidateSlow, invalidateFast);
-
- m_out.appendTo(invalidateFast, invalidateSlow);
-
- m_out.store64(
- m_out.constInt64(JSValue::encode(JSValue())),
- m_out.absolute(set->addressOfInferredValue()));
- m_out.store8(m_out.constInt8(IsInvalidated), m_out.absolute(set->addressOfState()));
- m_out.jump(continuation);
-
- m_out.appendTo(invalidateSlow, continuation);
-
- vmCall(m_out.operation(operationInvalidate), m_callFrame, m_out.constIntPtr(set));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- }
-
- void compileGetMyScope()
- {
- setJSValue(m_out.loadPtr(addressFor(
- m_node->codeOrigin.stackOffset() + JSStack::ScopeChain)));
- }
-
- void compileSkipScope()
- {
- setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
- }
-
- void compileGetClosureRegisters()
- {
- if (WriteBarrierBase<Unknown>* registers = m_graph.tryGetRegisters(m_node->child1().node())) {
- setStorage(m_out.constIntPtr(registers));
- return;
- }
-
- setStorage(m_out.loadPtr(
- lowCell(m_node->child1()), m_heaps.JSVariableObject_registers));
- }
-
- void compileGetClosureVar()
- {
- setJSValue(m_out.load64(
- addressFor(lowStorage(m_node->child1()), m_node->varNumber())));
- }
-
- void compilePutClosureVar()
- {
- m_out.store64(
- lowJSValue(m_node->child3()),
- addressFor(lowStorage(m_node->child2()), m_node->varNumber()));
- }
-
- void compileCompareEq()
- {
- if (m_node->isBinaryUseKind(Int32Use)
- || m_node->isBinaryUseKind(MachineIntUse)
- || m_node->isBinaryUseKind(NumberUse)
- || m_node->isBinaryUseKind(ObjectUse)) {
- compileCompareStrictEq();
- return;
- }
-
- if (m_node->isBinaryUseKind(UntypedUse)) {
- nonSpeculativeCompare(LLVMIntEQ, operationCompareEq);
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- void compileCompareEqConstant()
- {
- ASSERT(m_graph.valueOfJSConstant(m_node->child2().node()).isNull());
- setBoolean(
- equalNullOrUndefined(
- m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined));
- }
-
- void compileCompareStrictEq()
- {
- if (m_node->isBinaryUseKind(Int32Use)) {
- setBoolean(
- m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
- return;
- }
-
- if (m_node->isBinaryUseKind(MachineIntUse)) {
- Int52Kind kind;
- LValue left = lowWhicheverInt52(m_node->child1(), kind);
- LValue right = lowInt52(m_node->child2(), kind);
- setBoolean(m_out.equal(left, right));
- return;
- }
-
- if (m_node->isBinaryUseKind(NumberUse)) {
- setBoolean(
- m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
- return;
- }
-
- if (m_node->isBinaryUseKind(ObjectUse)) {
- setBoolean(
- m_out.equal(
- lowNonNullObject(m_node->child1()),
- lowNonNullObject(m_node->child2())));
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- void compileCompareStrictEqConstant()
- {
- JSValue constant = m_graph.valueOfJSConstant(m_node->child2().node());
-
- if (constant.isUndefinedOrNull()
- && !masqueradesAsUndefinedWatchpointIsStillValid()) {
- if (constant.isNull()) {
- setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNull));
- return;
- }
-
- ASSERT(constant.isUndefined());
- setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
- return;
- }
-
- setBoolean(
- m_out.equal(
- lowJSValue(m_node->child1()),
- m_out.constInt64(JSValue::encode(constant))));
- }
-
- void compileCompareLess()
- {
- compare(LLVMIntSLT, LLVMRealOLT, operationCompareLess);
- }
-
- void compileCompareLessEq()
- {
- compare(LLVMIntSLE, LLVMRealOLE, operationCompareLessEq);
- }
-
- void compileCompareGreater()
- {
- compare(LLVMIntSGT, LLVMRealOGT, operationCompareGreater);
- }
-
- void compileCompareGreaterEq()
- {
- compare(LLVMIntSGE, LLVMRealOGE, operationCompareGreaterEq);
- }
-
- void compileLogicalNot()
- {
- setBoolean(m_out.bitNot(boolify(m_node->child1())));
- }
-
- void compileCallOrConstruct()
- {
- // FIXME: This is unacceptably slow.
- // https://bugs.webkit.org/show_bug.cgi?id=113621
-
- J_JITOperation_E function =
- m_node->op() == Call ? operationFTLCall : operationFTLConstruct;
-
- int dummyThisArgument = m_node->op() == Call ? 0 : 1;
-
- int numPassedArgs = m_node->numChildren() - 1;
-
- LValue calleeFrame = m_out.add(
- m_callFrame,
- m_out.constIntPtr(sizeof(Register) * virtualRegisterForLocal(m_graph.frameRegisterCount()).offset()));
-
- m_out.store32(
- m_out.constInt32(numPassedArgs + dummyThisArgument),
- payloadFor(calleeFrame, JSStack::ArgumentCount));
- m_out.store64(m_callFrame, calleeFrame, m_heaps.CallFrame_callerFrame);
- m_out.store64(
- lowJSValue(m_graph.varArgChild(m_node, 0)),
- addressFor(calleeFrame, JSStack::Callee));
-
- for (int i = 0; i < numPassedArgs; ++i) {
- m_out.store64(
- lowJSValue(m_graph.varArgChild(m_node, 1 + i)),
- addressFor(calleeFrame, virtualRegisterForArgument(i + dummyThisArgument).offset()));
- }
-
- setJSValue(vmCall(m_out.operation(function), calleeFrame));
- }
-
- void compileJump()
- {
- m_out.jump(lowBlock(m_node->takenBlock()));
- }
-
- void compileBranch()
- {
- m_out.branch(
- boolify(m_node->child1()),
- lowBlock(m_node->takenBlock()),
- lowBlock(m_node->notTakenBlock()));
- }
-
- void compileSwitch()
- {
- SwitchData* data = m_node->switchData();
- switch (data->kind) {
- case SwitchImm: {
- Vector<ValueFromBlock, 2> intValues;
- LBasicBlock switchOnInts = FTL_NEW_BLOCK(m_out, ("Switch/SwitchImm int case"));
-
- LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts);
-
- switch (m_node->child1().useKind()) {
- case Int32Use: {
- intValues.append(m_out.anchor(lowInt32(m_node->child1())));
- m_out.jump(switchOnInts);
- break;
- }
-
- case UntypedUse: {
- LBasicBlock isInt = FTL_NEW_BLOCK(m_out, ("Switch/SwitchImm is int"));
- LBasicBlock isNotInt = FTL_NEW_BLOCK(m_out, ("Switch/SwitchImm is not int"));
- LBasicBlock isDouble = FTL_NEW_BLOCK(m_out, ("Switch/SwitchImm is double"));
-
- LValue boxedValue = lowJSValue(m_node->child1());
- m_out.branch(isNotInt32(boxedValue), isNotInt, isInt);
-
- LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt);
-
- intValues.append(m_out.anchor(unboxInt32(boxedValue)));
- m_out.jump(switchOnInts);
-
- m_out.appendTo(isNotInt, isDouble);
- m_out.branch(
- isCellOrMisc(boxedValue), lowBlock(data->fallThrough), isDouble);
-
- m_out.appendTo(isDouble, innerLastNext);
- LValue doubleValue = unboxDouble(boxedValue);
- LValue intInDouble = m_out.fpToInt32(doubleValue);
- intValues.append(m_out.anchor(intInDouble));
- m_out.branch(
- m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue),
- switchOnInts, lowBlock(data->fallThrough));
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- m_out.appendTo(switchOnInts, lastNext);
- buildSwitch(data, m_out.int32, m_out.phi(m_out.int32, intValues));
- return;
- }
-
- case SwitchChar: {
- LValue stringValue;
-
- switch (m_node->child1().useKind()) {
- case StringUse: {
- stringValue = lowString(m_node->child1());
- break;
- }
-
- case UntypedUse: {
- LValue unboxedValue = lowJSValue(m_node->child1());
-
- LBasicBlock isCellCase = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar is cell"));
- LBasicBlock isStringCase = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar is string"));
-
- m_out.branch(
- isNotCell(unboxedValue), lowBlock(data->fallThrough), isCellCase);
-
- LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
- LValue cellValue = unboxedValue;
- m_out.branch(isNotString(cellValue), lowBlock(data->fallThrough), isStringCase);
-
- m_out.appendTo(isStringCase, lastNext);
- stringValue = cellValue;
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- LBasicBlock lengthIs1 = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar length is 1"));
- LBasicBlock needResolution = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar resolution"));
- LBasicBlock resolved = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar resolved"));
- LBasicBlock is8Bit = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar 8bit"));
- LBasicBlock is16Bit = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar 16bit"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Switch/SwitchChar continuation"));
-
- m_out.branch(
- m_out.notEqual(
- m_out.load32(stringValue, m_heaps.JSString_length),
- m_out.int32One),
- lowBlock(data->fallThrough), lengthIs1);
-
- LBasicBlock lastNext = m_out.appendTo(lengthIs1, needResolution);
- Vector<ValueFromBlock, 2> values;
- LValue fastValue = m_out.loadPtr(stringValue, m_heaps.JSString_value);
- values.append(m_out.anchor(fastValue));
- m_out.branch(m_out.isNull(fastValue), needResolution, resolved);
-
- m_out.appendTo(needResolution, resolved);
- values.append(m_out.anchor(
- vmCall(m_out.operation(operationResolveRope), m_callFrame, stringValue)));
- m_out.jump(resolved);
-
- m_out.appendTo(resolved, is8Bit);
- LValue value = m_out.phi(m_out.intPtr, values);
- LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
- m_out.branch(
- m_out.testNonZero32(
- m_out.load32(value, m_heaps.StringImpl_hashAndFlags),
- m_out.constInt32(StringImpl::flagIs8Bit())),
- is8Bit, is16Bit);
-
- Vector<ValueFromBlock, 2> characters;
- m_out.appendTo(is8Bit, is16Bit);
- characters.append(m_out.anchor(
- m_out.zeroExt(m_out.load8(characterData, m_heaps.characters8[0]), m_out.int16)));
- m_out.jump(continuation);
-
- m_out.appendTo(is16Bit, continuation);
- characters.append(m_out.anchor(m_out.load16(characterData, m_heaps.characters16[0])));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- buildSwitch(data, m_out.int16, m_out.phi(m_out.int16, characters));
- return;
- }
-
- case SwitchString:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- void compileReturn()
- {
- // FIXME: have a real epilogue when we switch to using our calling convention.
- // https://bugs.webkit.org/show_bug.cgi?id=113621
- m_out.ret(lowJSValue(m_node->child1()));
- }
-
- void compileForceOSRExit()
- {
- terminate(InadequateCoverage);
- }
-
- void compileInvalidationPoint()
- {
- if (verboseCompilationEnabled())
- dataLog(" Invalidation point with availability: ", m_availability, "\n");
-
- m_ftlState.jitCode->osrExit.append(OSRExit(
- UncountableInvalidation, InvalidValueFormat, MethodOfGettingAValueProfile(),
- m_codeOriginForExitTarget, m_codeOriginForExitProfile,
- m_availability.numberOfArguments(), m_availability.numberOfLocals()));
- m_ftlState.finalizer->osrExit.append(OSRExitCompilationInfo());
-
- OSRExit& exit = m_ftlState.jitCode->osrExit.last();
- OSRExitCompilationInfo& info = m_ftlState.finalizer->osrExit.last();
-
- ExitArgumentList arguments;
-
- buildExitArguments(exit, arguments, FormattedValue(), exit.m_codeOrigin);
- callStackmap(exit, arguments);
-
- info.m_isInvalidationPoint = true;
- }
-
- TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge)
- {
- return m_out.baseIndex(
- heap, storage, m_out.zeroExt(index, m_out.intPtr),
- m_state.forNode(edge).m_value);
- }
-
- void compare(
- LIntPredicate intCondition, LRealPredicate realCondition,
- S_JITOperation_EJJ helperFunction)
- {
- if (m_node->isBinaryUseKind(Int32Use)) {
- LValue left = lowInt32(m_node->child1());
- LValue right = lowInt32(m_node->child2());
- setBoolean(m_out.icmp(intCondition, left, right));
- return;
- }
-
- if (m_node->isBinaryUseKind(MachineIntUse)) {
- Int52Kind kind;
- LValue left = lowWhicheverInt52(m_node->child1(), kind);
- LValue right = lowInt52(m_node->child2(), kind);
- setBoolean(m_out.icmp(intCondition, left, right));
- return;
- }
-
- if (m_node->isBinaryUseKind(NumberUse)) {
- LValue left = lowDouble(m_node->child1());
- LValue right = lowDouble(m_node->child2());
- setBoolean(m_out.fcmp(realCondition, left, right));
- return;
- }
-
- if (m_node->isBinaryUseKind(UntypedUse)) {
- nonSpeculativeCompare(intCondition, helperFunction);
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- void nonSpeculativeCompare(LIntPredicate intCondition, S_JITOperation_EJJ helperFunction)
- {
- LValue left = lowJSValue(m_node->child1());
- LValue right = lowJSValue(m_node->child2());
-
- LBasicBlock leftIsInt = FTL_NEW_BLOCK(m_out, ("CompareEq untyped left is int"));
- LBasicBlock fastPath = FTL_NEW_BLOCK(m_out, ("CompareEq untyped fast path"));
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("CompareEq untyped slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CompareEq untyped continuation"));
-
- m_out.branch(isNotInt32(left), slowPath, leftIsInt);
-
- LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath);
- m_out.branch(isNotInt32(right), slowPath, fastPath);
-
- m_out.appendTo(fastPath, slowPath);
- ValueFromBlock fastResult = m_out.anchor(
- m_out.icmp(intCondition, unboxInt32(left), unboxInt32(right)));
- m_out.jump(continuation);
-
- m_out.appendTo(slowPath, continuation);
- ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
- m_out.operation(helperFunction), m_callFrame, left, right)));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- setBoolean(m_out.phi(m_out.boolean, fastResult, slowResult));
- }
-
- LValue allocateCell(LValue allocator, LValue structure, LBasicBlock slowPath)
- {
- LBasicBlock success = FTL_NEW_BLOCK(m_out, ("object allocation success"));
-
- LValue result = m_out.loadPtr(
- allocator, m_heaps.MarkedAllocator_freeListHead);
-
- m_out.branch(m_out.notNull(result), success, slowPath);
-
- m_out.appendTo(success);
-
- m_out.storePtr(
- m_out.loadPtr(result, m_heaps.JSCell_freeListNext),
- allocator, m_heaps.MarkedAllocator_freeListHead);
-
- m_out.storePtr(structure, result, m_heaps.JSCell_structure);
-
- return result;
- }
-
- LValue allocateObject(
- LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
- {
- LValue result = allocateCell(allocator, structure, slowPath);
- m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
- return result;
- }
-
- template<typename ClassType>
- LValue allocateObject(LValue structure, LValue butterfly, LBasicBlock slowPath)
- {
- MarkedAllocator* allocator;
- size_t size = ClassType::allocationSize(0);
- if (ClassType::needsDestruction && ClassType::hasImmortalStructure)
- allocator = &vm().heap.allocatorForObjectWithImmortalStructureDestructor(size);
- else if (ClassType::needsDestruction)
- allocator = &vm().heap.allocatorForObjectWithNormalDestructor(size);
- else
- allocator = &vm().heap.allocatorForObjectWithoutDestructor(size);
- return allocateObject(m_out.constIntPtr(allocator), structure, butterfly, slowPath);
- }
-
- // Returns a pointer to the end of the allocation.
- LValue allocateBasicStorageAndGetEnd(LValue size, LBasicBlock slowPath)
- {
- CopiedAllocator& allocator = vm().heap.storageAllocator();
-
- LBasicBlock success = FTL_NEW_BLOCK(m_out, ("storage allocation success"));
-
- LValue remaining = m_out.loadPtr(m_out.absolute(&allocator.m_currentRemaining));
- LValue newRemaining = m_out.sub(remaining, size);
-
- m_out.branch(m_out.lessThan(newRemaining, m_out.intPtrZero), slowPath, success);
-
- m_out.appendTo(success);
-
- m_out.storePtr(newRemaining, m_out.absolute(&allocator.m_currentRemaining));
- return m_out.sub(
- m_out.loadPtr(m_out.absolute(&allocator.m_currentPayloadEnd)), newRemaining);
- }
-
- struct ArrayValues {
- ArrayValues()
- : array(0)
- , butterfly(0)
- {
- }
-
- ArrayValues(LValue array, LValue butterfly)
- : array(array)
- , butterfly(butterfly)
- {
- }
-
- LValue array;
- LValue butterfly;
- };
- ArrayValues allocateJSArray(
- Structure* structure, unsigned numElements, LBasicBlock slowPath)
- {
- ASSERT(
- hasUndecided(structure->indexingType())
- || hasInt32(structure->indexingType())
- || hasDouble(structure->indexingType())
- || hasContiguous(structure->indexingType()));
-
- unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
-
- LValue endOfStorage = allocateBasicStorageAndGetEnd(
- m_out.constIntPtr(sizeof(JSValue) * vectorLength + sizeof(IndexingHeader)),
- slowPath);
-
- LValue butterfly = m_out.sub(
- endOfStorage, m_out.constIntPtr(sizeof(JSValue) * vectorLength));
-
- LValue object = allocateObject<JSArray>(
- m_out.constIntPtr(structure), butterfly, slowPath);
-
- m_out.store32(m_out.constInt32(numElements), butterfly, m_heaps.Butterfly_publicLength);
- m_out.store32(m_out.constInt32(vectorLength), butterfly, m_heaps.Butterfly_vectorLength);
-
- if (hasDouble(structure->indexingType())) {
- for (unsigned i = numElements; i < vectorLength; ++i) {
- m_out.store64(
- m_out.constInt64(bitwise_cast<int64_t>(QNaN)),
- butterfly, m_heaps.indexedDoubleProperties[i]);
- }
- }
-
- return ArrayValues(object, butterfly);
- }
-
- ArrayValues allocateJSArray(Structure* structure, unsigned numElements)
- {
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("JSArray allocation slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("JSArray allocation continuation"));
-
- LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
-
- ArrayValues fastValues = allocateJSArray(structure, numElements, slowPath);
- ValueFromBlock fastArray = m_out.anchor(fastValues.array);
- ValueFromBlock fastButterfly = m_out.anchor(fastValues.butterfly);
-
- m_out.jump(continuation);
-
- m_out.appendTo(slowPath, continuation);
-
- ValueFromBlock slowArray = m_out.anchor(vmCall(
- m_out.operation(operationNewArrayWithSize), m_callFrame,
- m_out.constIntPtr(structure), m_out.constInt32(numElements)));
- ValueFromBlock slowButterfly = m_out.anchor(
- m_out.loadPtr(slowArray.value(), m_heaps.JSObject_butterfly));
-
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-
- return ArrayValues(
- m_out.phi(m_out.intPtr, fastArray, slowArray),
- m_out.phi(m_out.intPtr, fastButterfly, slowButterfly));
- }
-
- LValue typedArrayLength(Edge baseEdge, ArrayMode arrayMode, LValue base)
- {
- if (JSArrayBufferView* view = m_graph.tryGetFoldableView(baseEdge.node(), arrayMode))
- return m_out.constInt32(view->length());
- return m_out.load32(base, m_heaps.JSArrayBufferView_length);
- }
-
- LValue typedArrayLength(Edge baseEdge, ArrayMode arrayMode)
- {
- return typedArrayLength(baseEdge, arrayMode, lowCell(baseEdge));
- }
-
- LValue boolify(Edge edge)
- {
- switch (edge.useKind()) {
- case BooleanUse:
- return lowBoolean(m_node->child1());
- case Int32Use:
- return m_out.notZero32(lowInt32(m_node->child1()));
- case NumberUse:
- return m_out.doubleNotEqual(lowDouble(edge), m_out.doubleZero);
- case ObjectOrOtherUse:
- return m_out.bitNot(
- equalNullOrUndefined(
- edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
- ManualOperandSpeculation));
- case StringUse: {
- LValue stringValue = lowString(m_node->child1());
- LValue length = m_out.load32(stringValue, m_heaps.JSString_length);
- return m_out.notEqual(length, m_out.int32Zero);
- }
- case UntypedUse: {
- LValue value = lowJSValue(m_node->child1());
-
- LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped slow case"));
- LBasicBlock fastCase = FTL_NEW_BLOCK(m_out, ("Boolify untyped fast case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Boolify untyped continuation"));
-
- m_out.branch(isNotBoolean(value), slowCase, fastCase);
-
- LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
- ValueFromBlock fastResult = m_out.anchor(unboxBoolean(value));
- m_out.jump(continuation);
-
- m_out.appendTo(slowCase, continuation);
- ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
- m_out.operation(operationConvertJSValueToBoolean), m_callFrame, value)));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- return m_out.phi(m_out.boolean, fastResult, slowResult);
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
- }
-
- enum StringOrObjectMode {
- AllCellsAreFalse,
- CellCaseSpeculatesObject
- };
- enum EqualNullOrUndefinedMode {
- EqualNull,
- EqualUndefined,
- EqualNullOrUndefined,
- SpeculateNullOrUndefined
- };
- LValue equalNullOrUndefined(
- Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode,
- OperandSpeculationMode operandMode = AutomaticOperandSpeculation)
- {
- bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid();
-
- LValue value = lowJSValue(edge, operandMode);
-
- LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("EqualNullOrUndefined cell case"));
- LBasicBlock primitiveCase = FTL_NEW_BLOCK(m_out, ("EqualNullOrUndefined primitive case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("EqualNullOrUndefined continuation"));
-
- m_out.branch(isNotCell(value), primitiveCase, cellCase);
-
- LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);
-
- Vector<ValueFromBlock, 3> results;
-
- switch (cellMode) {
- case AllCellsAreFalse:
- break;
- case CellCaseSpeculatesObject:
- FTL_TYPE_CHECK(
- jsValueValue(value), edge, (~SpecCell) | SpecObject,
- m_out.equal(
- m_out.loadPtr(value, m_heaps.JSCell_structure),
- m_out.constIntPtr(vm().stringStructure.get())));
- break;
- }
-
- if (validWatchpoint) {
- results.append(m_out.anchor(m_out.booleanFalse));
- m_out.jump(continuation);
- } else {
- LBasicBlock masqueradesCase =
- FTL_NEW_BLOCK(m_out, ("EqualNullOrUndefined masquerades case"));
-
- LValue structure = m_out.loadPtr(value, m_heaps.JSCell_structure);
-
- results.append(m_out.anchor(m_out.booleanFalse));
-
- m_out.branch(
- m_out.testNonZero8(
- m_out.load8(structure, m_heaps.Structure_typeInfoFlags),
- m_out.constInt8(MasqueradesAsUndefined)),
- masqueradesCase, continuation);
-
- m_out.appendTo(masqueradesCase, primitiveCase);
-
- results.append(m_out.anchor(
- m_out.equal(
- m_out.constIntPtr(m_graph.globalObjectFor(m_node->codeOrigin)),
- m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
- m_out.jump(continuation);
- }
-
- m_out.appendTo(primitiveCase, continuation);
-
- LValue primitiveResult;
- switch (primitiveMode) {
- case EqualNull:
- primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull));
- break;
- case EqualUndefined:
- primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
- break;
- case EqualNullOrUndefined:
- primitiveResult = m_out.equal(
- m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
- m_out.constInt64(ValueNull));
- break;
- case SpeculateNullOrUndefined:
- FTL_TYPE_CHECK(
- jsValueValue(value), edge, SpecCell | SpecOther,
- m_out.notEqual(
- m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
- m_out.constInt64(ValueNull)));
- primitiveResult = m_out.booleanTrue;
- break;
- }
- results.append(m_out.anchor(primitiveResult));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-
- return m_out.phi(m_out.boolean, results);
- }
-
- template<typename FunctionType>
- void contiguousPutByValOutOfBounds(
- FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value,
- LBasicBlock continuation)
- {
- LValue isNotInBounds = m_out.aboveOrEqual(
- index, m_out.load32(storage, m_heaps.Butterfly_publicLength));
- if (!m_node->arrayMode().isInBounds()) {
- LBasicBlock notInBoundsCase =
- FTL_NEW_BLOCK(m_out, ("PutByVal not in bounds"));
- LBasicBlock performStore =
- FTL_NEW_BLOCK(m_out, ("PutByVal perform store"));
-
- m_out.branch(isNotInBounds, notInBoundsCase, performStore);
-
- LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore);
-
- LValue isOutOfBounds = m_out.aboveOrEqual(
- index, m_out.load32(storage, m_heaps.Butterfly_vectorLength));
-
- if (!m_node->arrayMode().isOutOfBounds())
- speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
- else {
- LBasicBlock outOfBoundsCase =
- FTL_NEW_BLOCK(m_out, ("PutByVal out of bounds"));
- LBasicBlock holeCase =
- FTL_NEW_BLOCK(m_out, ("PutByVal hole case"));
-
- m_out.branch(isOutOfBounds, outOfBoundsCase, holeCase);
-
- LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase);
-
- vmCall(
- m_out.operation(slowPathFunction),
- m_callFrame, base, index, value);
-
- m_out.jump(continuation);
-
- m_out.appendTo(holeCase, innerLastNext);
- }
-
- m_out.store32(
- m_out.add(index, m_out.int32One),
- storage, m_heaps.Butterfly_publicLength);
-
- m_out.jump(performStore);
- m_out.appendTo(performStore, lastNext);
- }
- }
-
- void buildSwitch(SwitchData* data, LType type, LValue switchValue)
- {
- Vector<SwitchCase> cases;
- for (unsigned i = 0; i < data->cases.size(); ++i) {
- cases.append(SwitchCase(
- constInt(type, data->cases[i].value.switchLookupValue()),
- lowBlock(data->cases[i].target)));
- }
-
- m_out.switchInstruction(switchValue, cases, lowBlock(data->fallThrough));
- }
-
- LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
- {
- // FIXME: Optimize double-to-int conversions.
- // <rdar://problem/14938465>
-
- LBasicBlock greatEnough = FTL_NEW_BLOCK(m_out, ("doubleToInt32 greatEnough"));
- LBasicBlock withinRange = FTL_NEW_BLOCK(m_out, ("doubleToInt32 withinRange"));
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("doubleToInt32 slowPath"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("doubleToInt32 continuation"));
-
- Vector<ValueFromBlock, 2> results;
-
- m_out.branch(
- m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)),
- greatEnough, slowPath);
-
- LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange);
- m_out.branch(
- m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)),
- withinRange, slowPath);
-
- m_out.appendTo(withinRange, slowPath);
- LValue fastResult;
- if (isSigned)
- fastResult = m_out.fpToInt32(doubleValue);
- else
- fastResult = m_out.fpToUInt32(doubleValue);
- results.append(m_out.anchor(fastResult));
- m_out.jump(continuation);
-
- m_out.appendTo(slowPath, continuation);
- results.append(m_out.anchor(m_out.call(m_out.operation(toInt32), doubleValue)));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- return m_out.phi(m_out.int32, results);
- }
-
- LValue doubleToInt32(LValue doubleValue)
- {
- if (Output::hasSensibleDoubleToInt())
- return sensibleDoubleToInt32(doubleValue);
-
- double limit = pow(2, 31) - 1;
- return doubleToInt32(doubleValue, -limit, limit);
- }
-
- LValue sensibleDoubleToInt32(LValue doubleValue)
- {
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("sensible doubleToInt32 slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("sensible doubleToInt32 continuation"));
-
- ValueFromBlock fastResult = m_out.anchor(
- m_out.sensibleDoubleToInt(doubleValue));
- m_out.branch(
- m_out.equal(fastResult.value(), m_out.constInt32(0x80000000)),
- slowPath, continuation);
-
- LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
- ValueFromBlock slowResult = m_out.anchor(
- m_out.call(m_out.operation(toInt32), doubleValue));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- return m_out.phi(m_out.int32, fastResult, slowResult);
- }
-
- void speculate(
- ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
- {
- appendOSRExit(kind, lowValue, highValue, failCondition);
- }
-
- void terminate(ExitKind kind)
- {
- speculate(kind, noValue(), 0, m_out.booleanTrue);
- }
-
- void typeCheck(
- FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
- LValue failCondition)
- {
- appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition);
- }
-
- void appendTypeCheck(
- FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
- LValue failCondition)
- {
- if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough))
- return;
- ASSERT(mayHaveTypeCheck(highValue.useKind()));
- appendOSRExit(BadType, lowValue, highValue.node(), failCondition);
- m_interpreter.filter(highValue, typesPassedThrough);
- }
-
- LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
-
- if (edge->hasConstant()) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
- if (!value.isInt32()) {
- terminate(Uncountable);
- return m_out.int32Zero;
- }
- return m_out.constInt32(value.asInt32());
- }
-
- LoweredNodeValue value = m_int32Values.get(edge.node());
- if (isValid(value))
- return value.value();
-
- value = m_strictInt52Values.get(edge.node());
- if (isValid(value))
- return strictInt52ToInt32(edge, value.value());
-
- value = m_int52Values.get(edge.node());
- if (isValid(value))
- return strictInt52ToInt32(edge, int52ToStrictInt52(value.value()));
-
- value = m_jsValueValues.get(edge.node());
- if (isValid(value)) {
- LValue boxedResult = value.value();
- FTL_TYPE_CHECK(
- jsValueValue(boxedResult), edge, SpecInt32, isNotInt32(boxedResult));
- LValue result = unboxInt32(boxedResult);
- setInt32(edge.node(), result);
- return result;
- }
-
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecInt32));
- terminate(Uncountable);
- return m_out.int32Zero;
- }
-
- enum Int52Kind { StrictInt52, Int52 };
- LValue lowInt52(Edge edge, Int52Kind kind, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == MachineIntUse);
-
- if (edge->hasConstant()) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
- if (!value.isMachineInt()) {
- terminate(Uncountable);
- return m_out.int64Zero;
- }
- int64_t result = value.asMachineInt();
- if (kind == Int52)
- result <<= JSValue::int52ShiftAmount;
- return m_out.constInt64(result);
- }
-
- LoweredNodeValue value;
-
- switch (kind) {
- case Int52:
- value = m_int52Values.get(edge.node());
- if (isValid(value))
- return value.value();
-
- value = m_strictInt52Values.get(edge.node());
- if (isValid(value))
- return strictInt52ToInt52(value.value());
- break;
-
- case StrictInt52:
- value = m_strictInt52Values.get(edge.node());
- if (isValid(value))
- return value.value();
-
- value = m_int52Values.get(edge.node());
- if (isValid(value))
- return int52ToStrictInt52(value.value());
- break;
- }
-
- value = m_int32Values.get(edge.node());
- if (isValid(value)) {
- return setInt52WithStrictValue(
- edge.node(), m_out.signExt(value.value(), m_out.int64), kind);
- }
-
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecInt52));
-
- value = m_jsValueValues.get(edge.node());
- if (isValid(value)) {
- LValue boxedResult = value.value();
- FTL_TYPE_CHECK(
- jsValueValue(boxedResult), edge, SpecMachineInt, isNotInt32(boxedResult));
- return setInt52WithStrictValue(
- edge.node(), m_out.signExt(unboxInt32(boxedResult), m_out.int64), kind);
- }
-
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecMachineInt));
- terminate(Uncountable);
- return m_out.int64Zero;
- }
-
- LValue lowInt52(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- return lowInt52(edge, Int52, mode);
- }
-
- LValue lowStrictInt52(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- return lowInt52(edge, StrictInt52, mode);
- }
-
- bool betterUseStrictInt52(Node* node)
- {
- return !isValid(m_int52Values.get(node));
- }
- bool betterUseStrictInt52(Edge edge)
- {
- return betterUseStrictInt52(edge.node());
- }
- template<typename T>
- Int52Kind bestInt52Kind(T node)
- {
- return betterUseStrictInt52(node) ? StrictInt52 : Int52;
- }
- Int52Kind opposite(Int52Kind kind)
- {
- switch (kind) {
- case Int52:
- return StrictInt52;
- case StrictInt52:
- return Int52;
- }
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- LValue lowWhicheverInt52(Edge edge, Int52Kind& kind, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- kind = bestInt52Kind(edge);
- return lowInt52(edge, kind, mode);
- }
-
- LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()));
-
- if (edge->op() == JSConstant) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
- if (!value.isCell()) {
- terminate(Uncountable);
- return m_out.intPtrZero;
- }
- return m_out.constIntPtr(value.asCell());
- }
-
- LoweredNodeValue value = m_jsValueValues.get(edge.node());
- if (isValid(value)) {
- LValue uncheckedValue = value.value();
- FTL_TYPE_CHECK(
- jsValueValue(uncheckedValue), edge, SpecCell, isNotCell(uncheckedValue));
- return uncheckedValue;
- }
-
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecCell));
- terminate(Uncountable);
- return m_out.intPtrZero;
- }
-
- LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
-
- LValue result = lowCell(edge, mode);
- speculateObject(edge, result);
- return result;
- }
-
- LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse);
-
- LValue result = lowCell(edge, mode);
- speculateString(edge, result);
- return result;
- }
-
- LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
-
- LValue result = lowCell(edge, mode);
- speculateNonNullObject(edge, result);
- return result;
- }
-
- LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse);
-
- if (edge->hasConstant()) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
- if (!value.isBoolean()) {
- terminate(Uncountable);
- return m_out.booleanFalse;
- }
- return m_out.constBool(value.asBoolean());
- }
-
- LoweredNodeValue value = m_booleanValues.get(edge.node());
- if (isValid(value))
- return value.value();
-
- value = m_jsValueValues.get(edge.node());
- if (isValid(value)) {
- LValue unboxedResult = value.value();
- FTL_TYPE_CHECK(
- jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult));
- LValue result = unboxBoolean(unboxedResult);
- setBoolean(edge.node(), result);
- return result;
- }
-
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecBoolean));
- terminate(Uncountable);
- return m_out.booleanFalse;
- }
-
- LValue lowDouble(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || isDouble(edge.useKind()));
-
- if (edge->hasConstant()) {
- JSValue value = m_graph.valueOfJSConstant(edge.node());
- if (!value.isNumber()) {
- terminate(Uncountable);
- return m_out.doubleZero;
- }
- return m_out.constDouble(value.asNumber());
- }
-
- LoweredNodeValue value = m_doubleValues.get(edge.node());
- if (isValid(value))
- return value.value();
-
- value = m_int32Values.get(edge.node());
- if (isValid(value)) {
- LValue result = m_out.intToDouble(value.value());
- setDouble(edge.node(), result);
- return result;
- }
-
- value = m_strictInt52Values.get(edge.node());
- if (isValid(value))
- return strictInt52ToDouble(edge, value.value());
-
- value = m_int52Values.get(edge.node());
- if (isValid(value))
- return strictInt52ToDouble(edge, int52ToStrictInt52(value.value()));
-
- value = m_jsValueValues.get(edge.node());
- if (isValid(value)) {
- LValue boxedResult = value.value();
-
- LBasicBlock intCase = FTL_NEW_BLOCK(m_out, ("Double unboxing int case"));
- LBasicBlock doubleCase = FTL_NEW_BLOCK(m_out, ("Double unboxing double case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Double unboxing continuation"));
-
- m_out.branch(isNotInt32(boxedResult), doubleCase, intCase);
-
- LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
-
- ValueFromBlock intToDouble = m_out.anchor(
- m_out.intToDouble(unboxInt32(boxedResult)));
- m_out.jump(continuation);
-
- m_out.appendTo(doubleCase, continuation);
-
- FTL_TYPE_CHECK(
- jsValueValue(boxedResult), edge, SpecFullNumber, isCellOrMisc(boxedResult));
-
- ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(boxedResult));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-
- LValue result = m_out.phi(m_out.doubleType, intToDouble, unboxedDouble);
-
- setDouble(edge.node(), result);
- return result;
- }
-
- RELEASE_ASSERT(!(m_state.forNode(edge).m_type & SpecFullNumber));
- terminate(Uncountable);
- return m_out.doubleZero;
- }
-
- LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
- {
- ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
-
- if (edge->hasConstant())
- return m_out.constInt64(JSValue::encode(m_graph.valueOfJSConstant(edge.node())));
-
- LoweredNodeValue value = m_jsValueValues.get(edge.node());
- if (isValid(value))
- return value.value();
-
- value = m_int32Values.get(edge.node());
- if (isValid(value)) {
- LValue result = boxInt32(value.value());
- setJSValue(edge.node(), result);
- return result;
- }
-
- value = m_strictInt52Values.get(edge.node());
- if (isValid(value))
- return strictInt52ToJSValue(value.value());
-
- value = m_int52Values.get(edge.node());
- if (isValid(value))
- return strictInt52ToJSValue(int52ToStrictInt52(value.value()));
-
- value = m_booleanValues.get(edge.node());
- if (isValid(value)) {
- LValue result = boxBoolean(value.value());
- setJSValue(edge.node(), result);
- return result;
- }
-
- value = m_doubleValues.get(edge.node());
- if (isValid(value)) {
- LValue result = boxDouble(value.value());
- setJSValue(edge.node(), result);
- return result;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
-
- LValue lowStorage(Edge edge)
- {
- LoweredNodeValue value = m_storageValues.get(edge.node());
- if (isValid(value))
- return value.value();
-
- LValue result = lowCell(edge);
- setStorage(edge.node(), result);
- return result;
- }
-
- LValue strictInt52ToInt32(Edge edge, LValue value)
- {
- LValue result = m_out.castToInt32(value);
- FTL_TYPE_CHECK(
- noValue(), edge, SpecInt32,
- m_out.notEqual(m_out.signExt(result, m_out.int64), value));
- setInt32(edge.node(), result);
- return result;
- }
-
- LValue strictInt52ToDouble(Edge edge, LValue value)
- {
- LValue result = m_out.intToDouble(value);
- setDouble(edge.node(), result);
- return result;
- }
-
- LValue strictInt52ToJSValue(LValue value)
- {
- LBasicBlock isInt32 = FTL_NEW_BLOCK(m_out, ("strictInt52ToJSValue isInt32 case"));
- LBasicBlock isDouble = FTL_NEW_BLOCK(m_out, ("strictInt52ToJSValue isDouble case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("strictInt52ToJSValue continuation"));
-
- Vector<ValueFromBlock, 2> results;
-
- LValue int32Value = m_out.castToInt32(value);
- m_out.branch(
- m_out.equal(m_out.signExt(int32Value, m_out.int64), value),
- isInt32, isDouble);
-
- LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble);
-
- results.append(m_out.anchor(boxInt32(int32Value)));
- m_out.jump(continuation);
-
- m_out.appendTo(isDouble, continuation);
-
- results.append(m_out.anchor(boxDouble(m_out.intToDouble(value))));
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- return m_out.phi(m_out.int64, results);
- }
-
- LValue setInt52WithStrictValue(Node* node, LValue value, Int52Kind kind)
- {
- switch (kind) {
- case StrictInt52:
- setStrictInt52(node, value);
- return value;
-
- case Int52:
- value = strictInt52ToInt52(value);
- setInt52(node, value);
- return value;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
-
- LValue strictInt52ToInt52(LValue value)
- {
- return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount));
- }
-
- LValue int52ToStrictInt52(LValue value)
- {
- return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
- }
-
- LValue isNotInt32(LValue jsValue)
- {
- return m_out.below(jsValue, m_tagTypeNumber);
- }
- LValue unboxInt32(LValue jsValue)
- {
- return m_out.castToInt32(jsValue);
- }
- LValue boxInt32(LValue value)
- {
- return m_out.add(m_out.zeroExt(value, m_out.int64), m_tagTypeNumber);
- }
-
- LValue isCellOrMisc(LValue jsValue)
- {
- return m_out.testIsZero64(jsValue, m_tagTypeNumber);
- }
- LValue unboxDouble(LValue jsValue)
- {
- return m_out.bitCast(m_out.add(jsValue, m_tagTypeNumber), m_out.doubleType);
- }
- LValue boxDouble(LValue doubleValue)
- {
- return m_out.sub(m_out.bitCast(doubleValue, m_out.int64), m_tagTypeNumber);
- }
-
- LValue isNotCell(LValue jsValue)
- {
- return m_out.testNonZero64(jsValue, m_tagMask);
- }
-
- LValue isCell(LValue jsValue)
- {
- return m_out.testIsZero64(jsValue, m_tagMask);
- }
-
- LValue isNotBoolean(LValue jsValue)
- {
- return m_out.testNonZero64(
- m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
- m_out.constInt64(~1));
- }
- LValue unboxBoolean(LValue jsValue)
- {
- // We want to use a cast that guarantees that LLVM knows that even the integer
- // value is just 0 or 1. But for now we do it the dumb way.
- return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1)));
- }
- LValue boxBoolean(LValue value)
- {
- return m_out.select(
- value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
- }
-
- void speculate(Edge edge)
- {
- switch (edge.useKind()) {
- case UntypedUse:
- break;
- case KnownInt32Use:
- case KnownNumberUse:
- ASSERT(!m_interpreter.needsTypeCheck(edge));
- break;
- case Int32Use:
- speculateInt32(edge);
- break;
- case CellUse:
- speculateCell(edge);
- break;
- case KnownCellUse:
- ASSERT(!m_interpreter.needsTypeCheck(edge));
- break;
- case ObjectUse:
- speculateObject(edge);
- break;
- case ObjectOrOtherUse:
- speculateObjectOrOther(edge);
- break;
- case FinalObjectUse:
- speculateFinalObject(edge);
- break;
- case StringUse:
- speculateString(edge);
- break;
- case RealNumberUse:
- speculateRealNumber(edge);
- break;
- case NumberUse:
- speculateNumber(edge);
- break;
- case MachineIntUse:
- speculateMachineInt(edge);
- break;
- case BooleanUse:
- speculateBoolean(edge);
- break;
- default:
- dataLog("Unsupported speculation use kind: ", edge.useKind(), "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
- void speculate(Node*, Edge edge)
- {
- speculate(edge);
- }
-
- void speculateInt32(Edge edge)
- {
- lowInt32(edge);
- }
-
- void speculateCell(Edge edge)
- {
- lowCell(edge);
- }
-
- LValue isObject(LValue cell)
- {
- return m_out.notEqual(
- m_out.loadPtr(cell, m_heaps.JSCell_structure),
- m_out.constIntPtr(vm().stringStructure.get()));
- }
-
- LValue isNotString(LValue cell)
- {
- return isObject(cell);
- }
-
- LValue isString(LValue cell)
- {
- return m_out.equal(
- m_out.loadPtr(cell, m_heaps.JSCell_structure),
- m_out.constIntPtr(vm().stringStructure.get()));
- }
-
- LValue isNotObject(LValue cell)
- {
- return isString(cell);
- }
-
- LValue isArrayType(LValue cell, ArrayMode arrayMode)
- {
- switch (arrayMode.type()) {
- case Array::Int32:
- case Array::Double:
- case Array::Contiguous: {
- LValue indexingType = m_out.load8(
- m_out.loadPtr(cell, m_heaps.JSCell_structure),
- m_heaps.Structure_indexingType);
-
- switch (arrayMode.arrayClass()) {
- case Array::OriginalArray:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
-
- case Array::Array:
- return m_out.equal(
- m_out.bitAnd(indexingType, m_out.constInt8(IsArray | IndexingShapeMask)),
- m_out.constInt8(IsArray | arrayMode.shapeMask()));
-
- case Array::NonArray:
- case Array::OriginalNonArray:
- return m_out.equal(
- m_out.bitAnd(indexingType, m_out.constInt8(IsArray | IndexingShapeMask)),
- m_out.constInt8(arrayMode.shapeMask()));
-
- case Array::PossiblyArray:
- return m_out.equal(
- m_out.bitAnd(indexingType, m_out.constInt8(IndexingShapeMask)),
- m_out.constInt8(arrayMode.shapeMask()));
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- default:
- return hasClassInfo(cell, classInfoForType(arrayMode.typedArrayType()));
- }
- }
-
- LValue hasClassInfo(LValue cell, const ClassInfo* classInfo)
- {
- return m_out.equal(
- m_out.loadPtr(
- m_out.loadPtr(cell, m_heaps.JSCell_structure),
- m_heaps.Structure_classInfo),
- m_out.constIntPtr(classInfo));
- }
-
- LValue isType(LValue cell, JSType type)
- {
- return m_out.equal(
- m_out.load8(
- m_out.loadPtr(cell, m_heaps.JSCell_structure),
- m_heaps.Structure_typeInfoType),
- m_out.constInt8(type));
- }
-
- LValue isNotType(LValue cell, JSType type)
- {
- return m_out.bitNot(isType(cell, type));
- }
-
- void speculateObject(Edge edge, LValue cell)
- {
- FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
- }
-
- void speculateObject(Edge edge)
- {
- speculateObject(edge, lowCell(edge));
- }
-
- void speculateObjectOrOther(Edge edge)
- {
- if (!m_interpreter.needsTypeCheck(edge))
- return;
-
- LValue value = lowJSValue(edge);
-
- LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("speculateObjectOrOther cell case"));
- LBasicBlock primitiveCase = FTL_NEW_BLOCK(m_out, ("speculateObjectOrOther primitive case"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("speculateObjectOrOther continuation"));
-
- m_out.branch(isNotCell(value), primitiveCase, cellCase);
-
- LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);
-
- FTL_TYPE_CHECK(
- jsValueValue(value), edge, (~SpecCell) | SpecObject,
- m_out.equal(
- m_out.loadPtr(value, m_heaps.JSCell_structure),
- m_out.constIntPtr(vm().stringStructure.get())));
-
- m_out.jump(continuation);
-
- m_out.appendTo(primitiveCase, continuation);
-
- FTL_TYPE_CHECK(
- jsValueValue(value), edge, SpecCell | SpecOther,
- m_out.notEqual(
- m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
- m_out.constInt64(ValueNull)));
-
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
- }
-
- void speculateFinalObject(Edge edge, LValue cell)
- {
- FTL_TYPE_CHECK(
- jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType));
- }
-
- void speculateFinalObject(Edge edge)
- {
- speculateFinalObject(edge, lowCell(edge));
- }
-
- void speculateString(Edge edge, LValue cell)
- {
- FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString, isNotString(cell));
- }
-
- void speculateString(Edge edge)
- {
- speculateString(edge, lowCell(edge));
- }
-
- void speculateNonNullObject(Edge edge, LValue cell)
- {
- LValue structure = m_out.loadPtr(cell, m_heaps.JSCell_structure);
- FTL_TYPE_CHECK(
- jsValueValue(cell), edge, SpecObject,
- m_out.equal(structure, m_out.constIntPtr(vm().stringStructure.get())));
- if (masqueradesAsUndefinedWatchpointIsStillValid())
- return;
-
- speculate(
- BadType, jsValueValue(cell), edge.node(),
- m_out.testNonZero8(
- m_out.load8(structure, m_heaps.Structure_typeInfoFlags),
- m_out.constInt8(MasqueradesAsUndefined)));
- }
-
- void speculateNumber(Edge edge)
- {
- // Do an early return here because lowDouble() can create a lot of control flow.
- if (!m_interpreter.needsTypeCheck(edge))
- return;
-
- lowDouble(edge);
- }
-
- void speculateRealNumber(Edge edge)
- {
- // Do an early return here because lowDouble() can create a lot of control flow.
- if (!m_interpreter.needsTypeCheck(edge))
- return;
-
- LValue value = lowDouble(edge);
- FTL_TYPE_CHECK(
- doubleValue(value), edge, SpecFullRealNumber,
- m_out.doubleNotEqualOrUnordered(value, value));
- }
-
- void speculateMachineInt(Edge edge)
- {
- if (!m_interpreter.needsTypeCheck(edge))
- return;
-
- Int52Kind kind;
- lowWhicheverInt52(edge, kind);
- }
-
- void speculateBoolean(Edge edge)
- {
- lowBoolean(edge);
- }
-
- bool masqueradesAsUndefinedWatchpointIsStillValid()
- {
- return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->codeOrigin);
- }
-
- LValue loadMarkByte(LValue base)
- {
- LValue markedBlock = m_out.bitAnd(base, m_out.constInt64(MarkedBlock::blockMask));
- LValue baseOffset = m_out.bitAnd(base, m_out.constInt64(~MarkedBlock::blockMask));
- LValue markByteIndex = m_out.lShr(baseOffset, m_out.constInt64(MarkedBlock::atomShiftAmount + MarkedBlock::markByteShiftAmount));
- return m_out.load8(m_out.baseIndex(m_heaps.MarkedBlock_markBits, markedBlock, markByteIndex, ScaleOne, MarkedBlock::offsetOfMarks()));
- }
-
- void emitStoreBarrier(LValue base, LValue value, Edge& valueEdge)
- {
-#if ENABLE(GGC)
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Store barrier continuation"));
- LBasicBlock isCell = FTL_NEW_BLOCK(m_out, ("Store barrier is cell block"));
-
- if (m_state.forNode(valueEdge.node()).couldBeType(SpecCell))
- m_out.branch(isNotCell(value), continuation, isCell);
- else
- m_out.jump(isCell);
-
- LBasicBlock lastNext = m_out.appendTo(isCell, continuation);
- emitStoreBarrier(base);
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-#else
- UNUSED_PARAM(base);
- UNUSED_PARAM(value);
- UNUSED_PARAM(valueEdge);
-#endif
- }
-
- void emitStoreBarrier(LValue base)
- {
-#if ENABLE(GGC)
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Store barrier continuation"));
- LBasicBlock isMarked = FTL_NEW_BLOCK(m_out, ("Store barrier is marked block"));
-        LBasicBlock bufferHasSpace = FTL_NEW_BLOCK(m_out, ("Store barrier buffer has space"));
- LBasicBlock bufferIsFull = FTL_NEW_BLOCK(m_out, ("Store barrier buffer is full"));
-
- // Check the mark byte.
- m_out.branch(m_out.isZero8(loadMarkByte(base)), continuation, isMarked);
-
- // Append to the write barrier buffer.
- LBasicBlock lastNext = m_out.appendTo(isMarked, bufferHasSpace);
- LValue currentBufferIndex = m_out.load32(m_out.absolute(&vm().heap.writeBarrierBuffer().m_currentIndex));
- LValue bufferCapacity = m_out.load32(m_out.absolute(&vm().heap.writeBarrierBuffer().m_capacity));
- m_out.branch(m_out.lessThan(currentBufferIndex, bufferCapacity), bufferHasSpace, bufferIsFull);
-
- // Buffer has space, store to it.
- m_out.appendTo(bufferHasSpace, bufferIsFull);
- LValue writeBarrierBufferBase = m_out.loadPtr(m_out.absolute(&vm().heap.writeBarrierBuffer().m_buffer));
- m_out.storePtr(base, m_out.baseIndex(m_heaps.WriteBarrierBuffer_bufferContents, writeBarrierBufferBase, m_out.zeroExt(currentBufferIndex, m_out.intPtr), ScalePtr));
- m_out.store32(m_out.add(currentBufferIndex, m_out.constInt32(1)), m_out.absolute(&vm().heap.writeBarrierBuffer().m_currentIndex));
- m_out.jump(continuation);
-
- // Buffer is out of space, flush it.
- m_out.appendTo(bufferIsFull, continuation);
- vmCall(m_out.operation(operationFlushWriteBarrierBuffer), m_callFrame, base);
- m_out.jump(continuation);
-
- m_out.appendTo(continuation, lastNext);
-#else
- UNUSED_PARAM(base);
-#endif
- }
-
- enum ExceptionCheckMode { NoExceptions, CheckExceptions };
-
- LValue vmCall(LValue function, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function);
- callCheck(mode);
- return result;
- }
- LValue vmCall(LValue function, LValue arg1, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function, arg1);
- callCheck(mode);
- return result;
- }
- LValue vmCall(LValue function, LValue arg1, LValue arg2, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function, arg1, arg2);
- callCheck(mode);
- return result;
- }
- LValue vmCall(LValue function, LValue arg1, LValue arg2, LValue arg3, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function, arg1, arg2, arg3);
- callCheck(mode);
- return result;
- }
- LValue vmCall(LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, ExceptionCheckMode mode = CheckExceptions)
- {
- callPreflight();
- LValue result = m_out.call(function, arg1, arg2, arg3, arg4);
- callCheck(mode);
- return result;
- }
-
- void callPreflight(CodeOrigin codeOrigin)
- {
- m_out.store32(
- m_out.constInt32(
- CallFrame::Location::encodeAsCodeOriginIndex(
- m_ftlState.jitCode->common.addCodeOrigin(codeOrigin))),
- tagFor(JSStack::ArgumentCount));
- }
- void callPreflight()
- {
- callPreflight(m_node->codeOrigin);
- }
-
- void callCheck(ExceptionCheckMode mode = CheckExceptions)
- {
- if (mode == NoExceptions)
- return;
-
- LBasicBlock didHaveException = FTL_NEW_BLOCK(m_out, ("Did have exception"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("Exception check continuation"));
-
- m_out.branch(
- m_out.notZero64(m_out.load64(m_out.absolute(vm().addressOfException()))),
- didHaveException, continuation);
-
- LBasicBlock lastNext = m_out.appendTo(didHaveException, continuation);
- // FIXME: Handle exceptions. https://bugs.webkit.org/show_bug.cgi?id=113622
- m_out.crash();
-
- m_out.appendTo(continuation, lastNext);
- }
-
- LBasicBlock lowBlock(BasicBlock* block)
- {
- return m_blocks.get(block);
- }
-
- void initializeOSRExitStateForBlock()
- {
- m_availability = m_highBlock->ssa->availabilityAtHead;
- }
-
- void appendOSRExit(
- ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
- {
- if (verboseCompilationEnabled())
- dataLog(" OSR exit #", m_ftlState.jitCode->osrExit.size(), " with availability: ", m_availability, "\n");
-
- ASSERT(m_ftlState.jitCode->osrExit.size() == m_ftlState.finalizer->osrExit.size());
-
- m_ftlState.jitCode->osrExit.append(OSRExit(
- kind, lowValue.format(), m_graph.methodOfGettingAValueProfileFor(highValue),
- m_codeOriginForExitTarget, m_codeOriginForExitProfile,
- m_availability.numberOfArguments(), m_availability.numberOfLocals()));
- m_ftlState.finalizer->osrExit.append(OSRExitCompilationInfo());
-
- OSRExit& exit = m_ftlState.jitCode->osrExit.last();
-
- LBasicBlock lastNext = 0;
- LBasicBlock continuation = 0;
-
- LBasicBlock failCase = FTL_NEW_BLOCK(m_out, ("OSR exit failCase for ", m_node));
- continuation = FTL_NEW_BLOCK(m_out, ("OSR exit continuation for ", m_node));
-
- m_out.branch(failCondition, failCase, continuation);
-
- lastNext = m_out.appendTo(failCase, continuation);
-
- emitOSRExitCall(exit, lowValue);
-
- m_out.unreachable();
-
- m_out.appendTo(continuation, lastNext);
- }
-
- void emitOSRExitCall(OSRExit& exit, FormattedValue lowValue)
- {
- ExitArgumentList arguments;
-
- CodeOrigin codeOrigin = exit.m_codeOrigin;
-
- buildExitArguments(exit, arguments, lowValue, codeOrigin);
-
- callStackmap(exit, arguments);
- }
-
- void buildExitArguments(
- OSRExit& exit, ExitArgumentList& arguments, FormattedValue lowValue,
- CodeOrigin codeOrigin)
- {
- arguments.append(m_callFrame);
- if (!!lowValue)
- arguments.append(lowValue.value());
-
- for (unsigned i = 0; i < exit.m_values.size(); ++i) {
- int operand = exit.m_values.operandForIndex(i);
- bool isLive = m_graph.isLiveInBytecode(VirtualRegister(operand), codeOrigin);
- if (!isLive) {
- exit.m_values[i] = ExitValue::dead();
- continue;
- }
-
- Availability availability = m_availability[i];
- FlushedAt flush = availability.flushedAt();
- switch (flush.format()) {
- case DeadFlush:
- case ConflictingFlush:
- if (availability.hasNode()) {
- addExitArgumentForNode(exit, arguments, i, availability.node());
- break;
- }
-
- if (Options::validateFTLOSRExitLiveness()) {
- dataLog("Expected r", operand, " to be available but it wasn't.\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- // This means that the DFG's DCE proved that the value is dead in bytecode
- // even though the bytecode liveness analysis thinks it's live. This is
- // acceptable since the DFG's DCE is by design more aggressive while still
- // being sound.
- exit.m_values[i] = ExitValue::dead();
- break;
-
- case FlushedJSValue:
- case FlushedCell:
- case FlushedBoolean:
- exit.m_values[i] = ExitValue::inJSStack(flush.virtualRegister());
- break;
-
- case FlushedInt32:
- exit.m_values[i] = ExitValue::inJSStackAsInt32(flush.virtualRegister());
- break;
-
- case FlushedInt52:
- exit.m_values[i] = ExitValue::inJSStackAsInt52(flush.virtualRegister());
- break;
-
- case FlushedDouble:
- exit.m_values[i] = ExitValue::inJSStackAsDouble(flush.virtualRegister());
- break;
-
- case FlushedArguments:
- // FIXME: implement PhantomArguments.
- // https://bugs.webkit.org/show_bug.cgi?id=113986
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- }
-
- if (verboseCompilationEnabled())
- dataLog(" Exit values: ", exit.m_values, "\n");
- }
-
- void callStackmap(OSRExit& exit, ExitArgumentList& arguments)
- {
- exit.m_stackmapID = m_stackmapIDs++;
- arguments.insert(0, m_out.constInt32(MacroAssembler::maxJumpReplacementSize()));
- arguments.insert(0, m_out.constInt32(exit.m_stackmapID));
-
- m_out.call(m_out.stackmapIntrinsic(), arguments);
- }
-
- void addExitArgumentForNode(
- OSRExit& exit, ExitArgumentList& arguments, unsigned index, Node* node)
- {
- ASSERT(node->shouldGenerate());
- ASSERT(node->hasResult());
-
- if (tryToSetConstantExitArgument(exit, index, node))
- return;
-
- LoweredNodeValue value = m_int32Values.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatInt32, value.value());
- return;
- }
-
- value = m_int52Values.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatInt52, value.value());
- return;
- }
-
- value = m_strictInt52Values.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatStrictInt52, value.value());
- return;
- }
-
- value = m_booleanValues.get(node);
- if (isValid(value)) {
- LValue valueToPass = m_out.zeroExt(value.value(), m_out.int32);
- addExitArgument(exit, arguments, index, ValueFormatBoolean, valueToPass);
- return;
- }
-
- value = m_jsValueValues.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatJSValue, value.value());
- return;
- }
-
- value = m_doubleValues.get(node);
- if (isValid(value)) {
- addExitArgument(exit, arguments, index, ValueFormatDouble, value.value());
- return;
- }
-
- dataLog("Cannot find value for node: ", node, "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- bool tryToSetConstantExitArgument(OSRExit& exit, unsigned index, Node* node)
- {
- if (!node)
- return false;
-
- switch (node->op()) {
- case JSConstant:
- case WeakJSConstant:
- exit.m_values[index] = ExitValue::constant(m_graph.valueOfJSConstant(node));
- return true;
- case PhantomArguments:
- // FIXME: implement PhantomArguments.
- // https://bugs.webkit.org/show_bug.cgi?id=113986
- RELEASE_ASSERT_NOT_REACHED();
- return true;
- default:
- return false;
- }
- }
-
- void addExitArgument(
- OSRExit& exit, ExitArgumentList& arguments, unsigned index, ValueFormat format,
- LValue value)
- {
- exit.m_values[index] = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
- arguments.append(value);
- }
-
- void setInt32(Node* node, LValue value)
- {
- m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
- }
- void setInt52(Node* node, LValue value)
- {
- m_int52Values.set(node, LoweredNodeValue(value, m_highBlock));
- }
- void setStrictInt52(Node* node, LValue value)
- {
- m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock));
- }
- void setInt52(Node* node, LValue value, Int52Kind kind)
- {
- switch (kind) {
- case Int52:
- setInt52(node, value);
- return;
-
- case StrictInt52:
- setStrictInt52(node, value);
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
- void setJSValue(Node* node, LValue value)
- {
- m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock));
- }
- void setBoolean(Node* node, LValue value)
- {
- m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock));
- }
- void setStorage(Node* node, LValue value)
- {
- m_storageValues.set(node, LoweredNodeValue(value, m_highBlock));
- }
- void setDouble(Node* node, LValue value)
- {
- m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock));
- }
-
- void setInt32(LValue value)
- {
- setInt32(m_node, value);
- }
- void setInt52(LValue value)
- {
- setInt52(m_node, value);
- }
- void setStrictInt52(LValue value)
- {
- setStrictInt52(m_node, value);
- }
- void setInt52(LValue value, Int52Kind kind)
- {
- setInt52(m_node, value, kind);
- }
- void setJSValue(LValue value)
- {
- setJSValue(m_node, value);
- }
- void setBoolean(LValue value)
- {
- setBoolean(m_node, value);
- }
- void setStorage(LValue value)
- {
- setStorage(m_node, value);
- }
- void setDouble(LValue value)
- {
- setDouble(m_node, value);
- }
-
- bool isValid(const LoweredNodeValue& value)
- {
- if (!value)
- return false;
- if (!m_graph.m_dominators.dominates(value.block(), m_highBlock))
- return false;
- return true;
- }
-
- void addWeakReference(JSCell* target)
- {
- m_graph.m_plan.weakReferences.addLazily(target);
- }
-
- LValue weakPointer(JSCell* pointer)
- {
- addWeakReference(pointer);
- return m_out.constIntPtr(pointer);
- }
-
- TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
- {
- return m_out.address(base, m_heaps.variables[operand], offset);
- }
- TypedPointer payloadFor(LValue base, int operand)
- {
- return addressFor(base, operand, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- }
- TypedPointer tagFor(LValue base, int operand)
- {
- return addressFor(base, operand, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- }
- TypedPointer addressFor(int operand)
- {
- return addressFor(m_callFrame, operand);
- }
- TypedPointer addressFor(VirtualRegister operand)
- {
- return addressFor(m_callFrame, operand.offset());
- }
- TypedPointer payloadFor(int operand)
- {
- return payloadFor(m_callFrame, operand);
- }
- TypedPointer payloadFor(VirtualRegister operand)
- {
- return payloadFor(m_callFrame, operand.offset());
- }
- TypedPointer tagFor(int operand)
- {
- return tagFor(m_callFrame, operand);
- }
- TypedPointer tagFor(VirtualRegister operand)
- {
- return tagFor(m_callFrame, operand.offset());
- }
-
- VM& vm() { return m_graph.m_vm; }
- CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
-
- Graph& m_graph;
- State& m_ftlState;
- AbstractHeapRepository m_heaps;
- Output m_out;
-
- LBasicBlock m_prologue;
- HashMap<BasicBlock*, LBasicBlock> m_blocks;
-
- LValue m_callFrame;
- LValue m_tagTypeNumber;
- LValue m_tagMask;
-
- HashMap<Node*, LoweredNodeValue> m_int32Values;
- HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
- HashMap<Node*, LoweredNodeValue> m_int52Values;
- HashMap<Node*, LoweredNodeValue> m_jsValueValues;
- HashMap<Node*, LoweredNodeValue> m_booleanValues;
- HashMap<Node*, LoweredNodeValue> m_storageValues;
- HashMap<Node*, LoweredNodeValue> m_doubleValues;
-
- HashMap<Node*, LValue> m_phis;
-
- Operands<Availability> m_availability;
-
- InPlaceAbstractState m_state;
- AbstractInterpreter<InPlaceAbstractState> m_interpreter;
- BasicBlock* m_highBlock;
- BasicBlock* m_nextHighBlock;
- LBasicBlock m_nextLowBlock;
-
- CodeOrigin m_codeOriginForExitTarget;
- CodeOrigin m_codeOriginForExitProfile;
- unsigned m_nodeIndex;
- Node* m_node;
-
- uint32_t m_stackmapIDs;
-};
-
-void lowerDFGToLLVM(State& state)
-{
- LowerDFGToLLVM lowering(state);
- lowering.lower();
-}
-
-} } // namespace JSC::FTL
-
-#endif // ENABLE(FTL_JIT)
-
diff --git a/Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h b/Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h
index 93e6c4d09..059667b7f 100644
--- a/Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h
+++ b/Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLLoweredNodeValue_h
-#define FTLLoweredNodeValue_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -73,6 +70,3 @@ private:
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLLoweredNodeValue_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLOSREntry.cpp b/Source/JavaScriptCore/ftl/FTLOSREntry.cpp
index 185046054..9a391e34b 100644
--- a/Source/JavaScriptCore/ftl/FTLOSREntry.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSREntry.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,19 +30,22 @@
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "FTLForOSREntryJITCode.h"
-#include "JSStackInlines.h"
#include "OperandsInlines.h"
+#include "JSCInlines.h"
+#include "VMInlines.h"
#if ENABLE(FTL_JIT)
namespace JSC { namespace FTL {
+SUPPRESS_ASAN
void* prepareOSREntry(
ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
unsigned bytecodeIndex, unsigned streamIndex)
{
VM& vm = exec->vm();
CodeBlock* baseline = dfgCodeBlock->baselineVersion();
+ ExecutableBase* executable = dfgCodeBlock->ownerExecutable();
DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();
@@ -52,9 +55,12 @@ void* prepareOSREntry(
bytecodeIndex, ".\n");
}
+ if (bytecodeIndex)
+ jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true);
+
if (bytecodeIndex != entryCode->bytecodeIndex()) {
if (Options::verboseOSR())
- dataLog(" OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex());
+ dataLog(" OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
return 0;
}
@@ -66,12 +72,18 @@ void* prepareOSREntry(
dataLog(" Values at entry: ", values, "\n");
for (int argument = values.numberOfArguments(); argument--;) {
- RELEASE_ASSERT(
- exec->r(virtualRegisterForArgument(argument).offset()).jsValue() == values.argument(argument));
+ JSValue valueOnStack = exec->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue();
+ JSValue reconstructedValue = values.argument(argument);
+ if (valueOnStack == reconstructedValue || !argument)
+ continue;
+            dataLog("Mismatch between reconstructed values and the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n");
+ dataLog(" Value on stack: ", valueOnStack, "\n");
+ dataLog(" Reconstructed value: ", reconstructedValue, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
}
RELEASE_ASSERT(
- static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);
+ static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeLocals);
EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
entryCode->entryBuffer()->dataBuffer());
@@ -80,7 +92,7 @@ void* prepareOSREntry(
scratch[local] = JSValue::encode(values.local(local));
int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
- if (!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(stackFrameSize).offset()])) {
+ if (UNLIKELY(!vm.ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()]))) {
if (Options::verboseOSR())
dataLog(" OSR failed because stack growth failed.\n");
return 0;
@@ -88,7 +100,7 @@ void* prepareOSREntry(
exec->setCodeBlock(entryCodeBlock);
- void* result = entryCode->addressForCall().executableAddress();
+ void* result = entryCode->addressForCall(ArityCheckNotRequired).executableAddress();
if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address ", RawPointer(result), "\n");
diff --git a/Source/JavaScriptCore/ftl/FTLOSREntry.h b/Source/JavaScriptCore/ftl/FTLOSREntry.h
index d19f10e74..439c4c20a 100644
--- a/Source/JavaScriptCore/ftl/FTLOSREntry.h
+++ b/Source/JavaScriptCore/ftl/FTLOSREntry.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLOSREntry_h
-#define FTLOSREntry_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -44,6 +41,3 @@ void* prepareOSREntry(
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLOSREntry_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExit.cpp b/Source/JavaScriptCore/ftl/FTLOSRExit.cpp
index 00ea014cb..d7b7838d7 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExit.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSRExit.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,37 +28,90 @@
#if ENABLE(FTL_JIT)
+#include "AirGenerationContext.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3StackmapValue.h"
#include "CodeBlock.h"
#include "DFGBasicBlock.h"
#include "DFGNode.h"
#include "FTLExitArgument.h"
-#include "FTLExitArgumentList.h"
#include "FTLJITCode.h"
-#include "Operations.h"
+#include "FTLLocation.h"
+#include "FTLState.h"
+#include "JSCInlines.h"
namespace JSC { namespace FTL {
+using namespace B3;
using namespace DFG;
-OSRExit::OSRExit(
- ExitKind exitKind, ValueFormat profileValueFormat,
- MethodOfGettingAValueProfile valueProfile, CodeOrigin codeOrigin,
- CodeOrigin originForProfile, unsigned numberOfArguments,
- unsigned numberOfLocals)
- : OSRExitBase(exitKind, codeOrigin, originForProfile)
- , m_profileValueFormat(profileValueFormat)
+OSRExitDescriptor::OSRExitDescriptor(
+ DataFormat profileDataFormat, MethodOfGettingAValueProfile valueProfile,
+ unsigned numberOfArguments, unsigned numberOfLocals)
+ : m_profileDataFormat(profileDataFormat)
, m_valueProfile(valueProfile)
- , m_patchableCodeOffset(0)
, m_values(numberOfArguments, numberOfLocals)
{
}
+void OSRExitDescriptor::validateReferences(const TrackedReferences& trackedReferences)
+{
+ for (unsigned i = m_values.size(); i--;)
+ m_values[i].validateReferences(trackedReferences);
+
+ for (ExitTimeObjectMaterialization* materialization : m_materializations)
+ materialization->validateReferences(trackedReferences);
+}
+
+RefPtr<OSRExitHandle> OSRExitDescriptor::emitOSRExit(
+ State& state, ExitKind exitKind, const NodeOrigin& nodeOrigin, CCallHelpers& jit,
+ const StackmapGenerationParams& params, unsigned offset)
+{
+ RefPtr<OSRExitHandle> handle =
+ prepareOSRExitHandle(state, exitKind, nodeOrigin, params, offset);
+ handle->emitExitThunk(state, jit);
+ return handle;
+}
+
+RefPtr<OSRExitHandle> OSRExitDescriptor::emitOSRExitLater(
+ State& state, ExitKind exitKind, const NodeOrigin& nodeOrigin,
+ const StackmapGenerationParams& params, unsigned offset)
+{
+ RefPtr<OSRExitHandle> handle =
+ prepareOSRExitHandle(state, exitKind, nodeOrigin, params, offset);
+ params.addLatePath(
+ [handle, &state] (CCallHelpers& jit) {
+ handle->emitExitThunk(state, jit);
+ });
+ return handle;
+}
+
+RefPtr<OSRExitHandle> OSRExitDescriptor::prepareOSRExitHandle(
+ State& state, ExitKind exitKind, const NodeOrigin& nodeOrigin,
+ const StackmapGenerationParams& params, unsigned offset)
+{
+ unsigned index = state.jitCode->osrExit.size();
+ OSRExit& exit = state.jitCode->osrExit.alloc(
+ this, exitKind, nodeOrigin.forExit, nodeOrigin.semantic, nodeOrigin.wasHoisted);
+ RefPtr<OSRExitHandle> handle = adoptRef(new OSRExitHandle(index, exit));
+ for (unsigned i = offset; i < params.size(); ++i)
+ exit.m_valueReps.append(params[i]);
+ exit.m_valueReps.shrinkToFit();
+ return handle;
+}
+
+OSRExit::OSRExit(
+ OSRExitDescriptor* descriptor, ExitKind exitKind, CodeOrigin codeOrigin,
+ CodeOrigin codeOriginForExitProfile, bool wasHoisted)
+ : OSRExitBase(exitKind, codeOrigin, codeOriginForExitProfile, wasHoisted)
+ , m_descriptor(descriptor)
+{
+}
+
CodeLocationJump OSRExit::codeLocationForRepatch(CodeBlock* ftlCodeBlock) const
{
- return CodeLocationJump(
- reinterpret_cast<char*>(
- ftlCodeBlock->jitCode()->ftl()->exitThunks().dataLocation()) +
- m_patchableCodeOffset);
+ UNUSED_PARAM(ftlCodeBlock);
+ return m_patchableJump;
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExit.h b/Source/JavaScriptCore/ftl/FTLOSRExit.h
index 4e479ebdd..d17909b0a 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExit.h
+++ b/Source/JavaScriptCore/ftl/FTLOSRExit.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,154 +23,114 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLOSRExit_h
-#define FTLOSRExit_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
+#include "B3ValueRep.h"
#include "CodeOrigin.h"
#include "DFGExitProfile.h"
+#include "DFGNodeOrigin.h"
#include "DFGOSRExitBase.h"
-#include "FTLAbbreviations.h"
-#include "FTLExitArgumentList.h"
+#include "FTLAbbreviatedTypes.h"
+#include "FTLExitTimeObjectMaterialization.h"
#include "FTLExitValue.h"
#include "FTLFormattedValue.h"
+#include "FTLOSRExitHandle.h"
+#include "FTLStackmapArgumentList.h"
+#include "HandlerInfo.h"
#include "MethodOfGettingAValueProfile.h"
#include "Operands.h"
+#include "Reg.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
-namespace JSC { namespace FTL {
-
-// Tracks one OSR exit site within the FTL JIT. OSR exit in FTL works by deconstructing
-// the crazy that is OSR down to simple SSA CFG primitives that any compiler backend
-// (including of course LLVM) can grok and do meaningful things to. Except for
-// watchpoint-based exits, which haven't yet been implemented (see webkit.org/b/113647),
-// an exit is just a conditional branch in the emitted code where one destination is the
-// continuation and the other is a basic block that performs a no-return tail-call to an
-// exit thunk. This thunk takes as its arguments the live non-constant
-// not-already-accounted-for bytecode state. To appreciate how this works consider the
-// following JavaScript program, and its lowering down to LLVM IR including the relevant
-// exits:
-//
-// function foo(o) {
-// var a = o.a; // predicted int
-// var b = o.b;
-// var c = o.c; // NB this is dead
-// a = a | 5; // our example OSR exit: need to check if a is an int
-// return a + b;
-// }
-//
-// Just consider the "a | 5". In the DFG IR, this looks like:
-//
-// BitOr(Check:Int32:@a, Int32:5)
-//
-// Where @a is the node for the GetLocal node that gets the value of the 'a' variable.
-// Conceptually, this node can be further broken down to the following (note that this
-// particular lowering never actually happens - we skip this step and go straight to
-// LLVM IR - but it's still useful to see this):
-//
-// exitIf(@a is not int32);
-// continuation;
-//
-// Where 'exitIf()' is a function that will exit if the argument is true, and
-// 'continuation' is the stuff that we will do after the exitIf() check. (Note that
-// FTL refers to 'exitIf()' as 'speculate()', which is in line with DFG terminology.)
-// This then gets broken down to the following LLVM IR, assuming that %0 is the LLVM
-// value corresponding to variable 'a', and %1 is the LLVM value for variable 'b':
-//
-// %2 = ... // the predictate corresponding to '@a is not int32'
-// br i1 %2, label %3, label %4
-// ; <label>:3
-// call void exitThunk1(%0, %1) // pass 'a' and 'b', since they're both live-in-bytecode
-// unreachable
-// ; <label>:4
-// ... // code for the continuation
-//
-// Where 'exitThunk1' is the IR to get the exit thunk for *this* OSR exit. Each OSR
-// exit will appear to LLVM to have a distinct exit thunk.
-//
-// Note that this didn't have to pass '5', 'o', or 'c' to the exit thunk. 5 is a
-// constant and the DFG already knows that, and can already tell the OSR exit machinery
-// what that contant is and which bytecode variables (if any) it needs to be dropped
-// into. This is conveyed to the exit statically, via the OSRExit data structure below.
-// See the code for ExitValue for details. 'o' is an argument, and arguments are always
-// "flushed" - if you never assign them then their values are still in the argument
-// stack slots, and if you do assign them then we eagerly store them into those slots.
-// 'c' is dead in bytecode, and the DFG knows this; we statically tell the exit thunk
-// that it's dead and don't have to pass anything. The exit thunk will "initialize" its
-// value to Undefined.
-//
-// This approach to OSR exit has a number of virtues:
-//
-// - It is an entirely unsurprising representation for a compiler that already groks
-// CFG-like IRs for C-like languages. All existing analyses and transformations just
-// work.
-//
-// - It lends itself naturally to modern approaches to code motion. For example, you
-// could sink operations from above the exit to below it, if you just duplicate the
-// operation into the OSR exit block. This is both legal and desirable. It works
-// because the backend sees the OSR exit block as being no different than any other,
-// and LLVM already supports sinking if it sees that a value is only partially used.
-// Hence there exists a value that dominates the exit but is only used by the exit
-// thunk and not by the continuation, sinking ought to kick in for that value.
-// Hoisting operations from below it to above it is also possible, for similar
-// reasons.
-//
-// - The no-return tail-call to the OSR exit thunk can be subjected to specialized
-// code-size reduction optimizations, though this is optional. For example, instead
-// of actually emitting a call along with all that goes with it (like placing the
-// arguments into argument position), the backend could choose to simply inform us
-// where it had placed the arguments and expect the callee (i.e. the exit thunk) to
-// figure it out from there. It could also tell us what we need to do to pop stack,
-// although again, it doesn't have to; it could just emit that code normally. Though
-// we don't support this yet, we could; the only thing that would change on our end
-// is that we'd need feedback from the backend about the location of the arguments
-// and a description of the things that need to be done to pop stack. This would
-// involve switching the m_values array to use something more akin to ValueRecovery
-// rather than the current ExitValue, albeit possibly with some hacks to better
-// understand the kinds of places where the LLVM backend would put values.
-//
-// - It could be extended to allow the backend to do its own exit hoisting, by using
-// intrinsics (or meta-data, or something) to inform the backend that it's safe to
-// make the predicate passed to 'exitIf()' more truthy.
-//
-// - It could be extended to support watchpoints (see webkit.org/b/113647) by making
-// the predicate passed to 'exitIf()' be an intrinsic that the backend knows to be
-// true at compile-time. The backend could then turn the conditional branch into a
-// replaceable jump, much like the DFG does.
+namespace JSC {
-struct OSRExit : public DFG::OSRExitBase {
- OSRExit(
- ExitKind, ValueFormat profileValueFormat, MethodOfGettingAValueProfile,
- CodeOrigin, CodeOrigin originForProfile,
+class TrackedReferences;
+
+namespace B3 {
+class StackmapGenerationParams;
+namespace Air {
+struct GenerationContext;
+} // namespace Air
+} // namespace B3
+
+namespace DFG {
+struct NodeOrigin;
+} // namespace DFG
+
+namespace FTL {
+
+class State;
+struct OSRExitDescriptorImpl;
+struct OSRExitHandle;
+
+struct OSRExitDescriptor {
+ OSRExitDescriptor(
+ DataFormat profileDataFormat, MethodOfGettingAValueProfile,
unsigned numberOfArguments, unsigned numberOfLocals);
-
- MacroAssemblerCodeRef m_code;
-
+
// The first argument to the exit call may be a value we wish to profile.
// If that's the case, the format will be not Invalid and we'll have a
// method of getting a value profile. Note that all of the ExitArgument's
// are already aware of this possible off-by-one, so there is no need to
// correct them.
- ValueFormat m_profileValueFormat;
+ DataFormat m_profileDataFormat;
MethodOfGettingAValueProfile m_valueProfile;
- // Offset within the exit stubs of the stub for this exit.
- unsigned m_patchableCodeOffset;
-
Operands<ExitValue> m_values;
-
- uint32_t m_stackmapID;
-
+ Bag<ExitTimeObjectMaterialization> m_materializations;
+
+ void validateReferences(const TrackedReferences&);
+
+ // Call this once we have a place to emit the OSR exit jump and we have data about how the state
+ // should be recovered. This effectively emits code that does the exit, though the code is really a
+ // patchable jump and we emit the real code lazily. The description of how to emit the real code is
+ // up to the OSRExit object, which this creates. Note that it's OK to drop the OSRExitHandle object
+ // on the ground. It contains information that is mostly not useful if you use this API, since after
+ // this call, the OSRExit is simply ready to go.
+ RefPtr<OSRExitHandle> emitOSRExit(
+ State&, ExitKind, const DFG::NodeOrigin&, CCallHelpers&, const B3::StackmapGenerationParams&,
+ unsigned offset = 0);
+
+ // In some cases you want an OSRExit to come into existence, but you don't want to emit it right now.
+ // This will emit the OSR exit in a late path. You can't be sure exactly when that will happen, but
+ // you know that it will be done by the time late path emission is done. So, a linker task will
+ // surely happen after that. You can use the OSRExitHandle to retrieve the exit's label.
+ //
+ // This API is meant to be used for things like exception handling, where some patchpoint wants to
+ // have a place to jump to for OSR exit. It doesn't care where that OSR exit is emitted so long as it
+ // eventually gets access to its label.
+ RefPtr<OSRExitHandle> emitOSRExitLater(
+ State&, ExitKind, const DFG::NodeOrigin&, const B3::StackmapGenerationParams&,
+ unsigned offset = 0);
+
+private:
+ // This is the low-level interface. It will create a handle representing the desire to emit code for
+ // an OSR exit. You can call OSRExitHandle::emitExitThunk() once you have a place to emit it. Note
+ // that the above two APIs are written in terms of this and OSRExitHandle::emitExitThunk().
+ RefPtr<OSRExitHandle> prepareOSRExitHandle(
+ State&, ExitKind, const DFG::NodeOrigin&, const B3::StackmapGenerationParams&,
+ unsigned offset = 0);
+};
+
+struct OSRExit : public DFG::OSRExitBase {
+ OSRExit(OSRExitDescriptor*, ExitKind, CodeOrigin, CodeOrigin codeOriginForExitProfile, bool wasHoisted);
+
+ OSRExitDescriptor* m_descriptor;
+ MacroAssemblerCodeRef m_code;
+ // This tells us where to place a jump.
+ CodeLocationJump m_patchableJump;
+ Vector<B3::ValueRep> m_valueReps;
+
CodeLocationJump codeLocationForRepatch(CodeBlock* ftlCodeBlock) const;
+ void considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock)
+ {
+ OSRExitBase::considerAddingAsFrequentExitSite(profiledCodeBlock, ExitFromFTL);
+ }
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLOSRExit_h
-
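
A minimal usage sketch of the two-step exit API declared above, assuming a lowering-phase context where state (FTL::State&), descriptor (OSRExitDescriptor*), origin (DFG::NodeOrigin), and a B3 check/patchpoint are in scope; the names and the wiring are illustrative, and only the emitOSRExit()/emitOSRExitLater()/OSRExitHandle shapes come from this patch:

    // Eager case, from inside a B3 Check's generator: B3 only runs this generator on the
    // check's slow path, so emitting the exit thunk right here makes a failed check fall
    // straight into the OSR exit.
    check->setGenerator(
        [&state, descriptor, origin] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
            descriptor->emitOSRExit(state, Uncountable, origin, jit, params);
        });

    // Deferred case, from inside a patchpoint generator that merely needs somewhere to jump
    // (e.g. an exception check): let the thunk be emitted on a late path and read the label
    // off the handle from a later link task.
    RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater(state, ExceptionCheck, origin, params);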
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
index 076a495ea..9919e71d7 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,155 +32,513 @@
#include "DFGOSRExitPreparation.h"
#include "FTLExitArgumentForOperand.h"
#include "FTLJITCode.h"
+#include "FTLLocation.h"
#include "FTLOSRExit.h"
+#include "FTLOperations.h"
+#include "FTLState.h"
#include "FTLSaveRestore.h"
+#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
#include "OperandsInlines.h"
-#include "Operations.h"
-#include "RepatchBuffer.h"
+#include "JSCInlines.h"
namespace JSC { namespace FTL {
using namespace DFG;
-static void compileStub(
- unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
+static void reboxAccordingToFormat(
+ DataFormat format, AssemblyHelpers& jit, GPRReg value, GPRReg scratch1, GPRReg scratch2)
{
- StackMaps::Record* record;
-
- for (unsigned i = jitCode->stackmaps.records.size(); i--;) {
- record = &jitCode->stackmaps.records[i];
- if (record->patchpointID == exit.m_stackmapID)
+ switch (format) {
+ case DataFormatInt32: {
+ jit.zeroExtend32ToPtr(value, value);
+ jit.or64(GPRInfo::tagTypeNumberRegister, value);
+ break;
+ }
+
+ case DataFormatInt52: {
+ jit.rshift64(AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), value);
+ jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
+ jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
+ jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
+ break;
+ }
+
+ case DataFormatStrictInt52: {
+ jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
+ jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
+ jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
+ break;
+ }
+
+ case DataFormatBoolean: {
+ jit.zeroExtend32ToPtr(value, value);
+ jit.or32(MacroAssembler::TrustedImm32(ValueFalse), value);
+ break;
+ }
+
+ case DataFormatJS: {
+ // Done already!
+ break;
+ }
+
+ case DataFormatDouble: {
+ jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch1);
+ jit.move64ToDouble(value, FPRInfo::fpRegT0);
+ jit.purifyNaN(FPRInfo::fpRegT0);
+ jit.boxDouble(FPRInfo::fpRegT0, value);
+ jit.move64ToDouble(scratch1, FPRInfo::fpRegT0);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+}
+
+static void compileRecovery(
+ CCallHelpers& jit, const ExitValue& value,
+ Vector<B3::ValueRep>& valueReps,
+ char* registerScratch,
+ const HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*>& materializationToPointer)
+{
+ switch (value.kind()) {
+ case ExitValueDead:
+ jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
+ break;
+
+ case ExitValueConstant:
+ jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
+ break;
+
+ case ExitValueArgument:
+ Location::forValueRep(valueReps[value.exitArgument().argument()]).restoreInto(
+ jit, registerScratch, GPRInfo::regT0);
+ break;
+
+ case ExitValueInJSStack:
+ case ExitValueInJSStackAsInt32:
+ case ExitValueInJSStackAsInt52:
+ case ExitValueInJSStackAsDouble:
+ jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
+ break;
+
+ case ExitValueRecovery:
+ Location::forValueRep(valueReps[value.rightRecoveryArgument()]).restoreInto(
+ jit, registerScratch, GPRInfo::regT1);
+ Location::forValueRep(valueReps[value.leftRecoveryArgument()]).restoreInto(
+ jit, registerScratch, GPRInfo::regT0);
+ switch (value.recoveryOpcode()) {
+ case AddRecovery:
+ switch (value.recoveryFormat()) {
+ case DataFormatInt32:
+ jit.add32(GPRInfo::regT1, GPRInfo::regT0);
+ break;
+ case DataFormatInt52:
+ jit.add64(GPRInfo::regT1, GPRInfo::regT0);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
break;
+ case SubRecovery:
+ switch (value.recoveryFormat()) {
+ case DataFormatInt32:
+ jit.sub32(GPRInfo::regT1, GPRInfo::regT0);
+ break;
+ case DataFormatInt52:
+ jit.sub64(GPRInfo::regT1, GPRInfo::regT0);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ break;
+
+ case ExitValueMaterializeNewObject:
+ jit.loadPtr(materializationToPointer.get(value.objectMaterialization()), GPRInfo::regT0);
+ break;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
-
- RELEASE_ASSERT(record->patchpointID == exit.m_stackmapID);
-
+
+ reboxAccordingToFormat(
+ value.dataFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
+}
+
+static void compileStub(
+ unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
+{
// This code requires framePointerRegister is the same as callFrameRegister
static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");
CCallHelpers jit(vm, codeBlock);
+
+    // The first thing we need to do is re-establish our frame in the case of an exception.
+ if (exit.isGenericUnwindHandler()) {
+ RELEASE_ASSERT(vm->callFrameForCatch); // The first time we hit this exit, like at all other times, this field should be non-null.
+ jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+ jit.loadPtr(vm->addressOfCallFrameForCatch(), MacroAssembler::framePointerRegister);
+ jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
+ MacroAssembler::framePointerRegister, CCallHelpers::stackPointerRegister);
+
+ // Do a pushToSave because that's what the exit compiler below expects the stack
+ // to look like because that's the last thing the ExitThunkGenerator does. The code
+ // below doesn't actually use the value that was pushed, but it does rely on the
+ // general shape of the stack being as it is in the non-exception OSR case.
+ jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(0xbadbeef));
+ }
+
+ // We need scratch space to save all registers, to build up the JS stack, to deal with unwind
+ // fixup, pointers to all of the objects we materialize, and the elements inside those objects
+ // that we materialize.
+
+ // Figure out how much space we need for those object allocations.
+ unsigned numMaterializations = 0;
+ size_t maxMaterializationNumArguments = 0;
+ for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
+ numMaterializations++;
+
+ maxMaterializationNumArguments = std::max(
+ maxMaterializationNumArguments,
+ materialization->properties().size());
+ }
- // We need scratch space to save all registers and to build up the JSStack.
- // Use a scratch buffer to transfer all values.
- ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(sizeof(EncodedJSValue) * exit.m_values.size() + requiredScratchMemorySizeInBytes());
+ ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(
+ sizeof(EncodedJSValue) * (
+ exit.m_descriptor->m_values.size() + numMaterializations + maxMaterializationNumArguments) +
+ requiredScratchMemorySizeInBytes() +
+ codeBlock->calleeSaveRegisters()->size() * sizeof(uint64_t));
EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
- char* registerScratch = bitwise_cast<char*>(scratch + exit.m_values.size());
+ EncodedJSValue* materializationPointers = scratch + exit.m_descriptor->m_values.size();
+ EncodedJSValue* materializationArguments = materializationPointers + numMaterializations;
+ char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments);
+ uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes());
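+    // The scratch buffer is carved up, from low addresses to high, as follows:
+    //   one EncodedJSValue per exit value            <- scratch
+    //   one pointer per object materialization       <- materializationPointers
+    //   arguments for the largest materialization    <- materializationArguments
+    //   the register save area                       <- registerScratch
+    //   one uint64_t per FTL callee-save register    <- unwindScratch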
+
+ HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer;
+ unsigned materializationCount = 0;
+ for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
+ materializationToPointer.add(
+ materialization, materializationPointers + materializationCount++);
+ }
+
+ auto recoverValue = [&] (const ExitValue& value) {
+ compileRecovery(
+ jit, value,
+ exit.m_valueReps,
+ registerScratch, materializationToPointer);
+ };
- // Make sure that saveAllRegisters() has a place on top of the stack to spill things. That
- // function expects to be able to use top of stack for scratch memory.
- jit.push(GPRInfo::regT0);
+    // Note that when we come in here, the stack is as B3 left it except that someone called pushToSave().
+ // We don't care about the value they saved. But, we do appreciate the fact that they did it, because we use
+ // that slot for saveAllRegisters().
+
saveAllRegisters(jit, registerScratch);
- // Bring the stack back into a sane form.
- jit.pop(GPRInfo::regT0);
- jit.pop(GPRInfo::regT0);
+ // Bring the stack back into a sane form and assert that it's sane.
+ jit.popToRestore(GPRInfo::regT0);
+ jit.checkStackPointerAlignment();
+ if (vm->m_perBytecodeProfiler && jitCode->dfgCommon()->compilation) {
+ Profiler::Database& database = *vm->m_perBytecodeProfiler;
+ Profiler::Compilation* compilation = jitCode->dfgCommon()->compilation.get();
+
+ Profiler::OSRExit* profilerExit = compilation->addOSRExit(
+ exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
+ exit.m_kind, exit.m_kind == UncountableInvalidation);
+ jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
+ }
+
// The remaining code assumes that SP/FP are in the same state that they were in the FTL's
// call frame.
// Get the call frame and tag thingies.
// Restore the exiting function's callFrame value into a regT4
- record->locations[0].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::regT4);
jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);
// Do some value profiling.
- if (exit.m_profileValueFormat != InvalidValueFormat) {
- record->locations[1].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
+ if (exit.m_descriptor->m_profileDataFormat != DataFormatNone) {
+ Location::forValueRep(exit.m_valueReps[0]).restoreInto(jit, registerScratch, GPRInfo::regT0);
reboxAccordingToFormat(
- exit.m_profileValueFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
+ exit.m_descriptor->m_profileDataFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
- jit.loadPtr(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureOffset()), GPRInfo::regT1);
- jit.storePtr(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructure());
- jit.load8(MacroAssembler::Address(GPRInfo::regT1, Structure::indexingTypeOffset()), GPRInfo::regT1);
+ jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1);
+ jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID());
+ jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeAndMiscOffset()), GPRInfo::regT1);
jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
}
}
-
- if (!!exit.m_valueProfile)
- jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
+
+ if (exit.m_descriptor->m_valueProfile)
+ exit.m_descriptor->m_valueProfile.emitReportValue(jit, JSValueRegs(GPRInfo::regT0));
}
- // Save all state from wherever the exit data tells us it was, into the appropriate place in
- // the scratch buffer. This doesn't rebox any values yet.
-
- for (unsigned index = exit.m_values.size(); index--;) {
- ExitValue value = exit.m_values[index];
-
- switch (value.kind()) {
- case ExitValueDead:
- jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
- break;
-
- case ExitValueConstant:
- jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
- break;
-
- case ExitValueArgument:
- record->locations[value.exitArgument().argument()].restoreInto(
- jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
- break;
-
- case ExitValueInJSStack:
- case ExitValueInJSStackAsInt32:
- case ExitValueInJSStackAsInt52:
- case ExitValueInJSStackAsDouble:
- jit.load64(AssemblyHelpers::addressFor(value.virtualRegister(), GPRInfo::regT4), GPRInfo::regT0);
- break;
+ // Materialize all objects. Don't materialize an object until all
+ // of the objects it needs have been materialized. We break cycles
+ // by populating objects late - we only consider an object as
+    // needing another object if the latter is needed for the
+ // allocation of the former.
+
+ HashSet<ExitTimeObjectMaterialization*> toMaterialize;
+ for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
+ toMaterialize.add(materialization);
+
+ while (!toMaterialize.isEmpty()) {
+ unsigned previousToMaterializeSize = toMaterialize.size();
+
+ Vector<ExitTimeObjectMaterialization*> worklist;
+ worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
+ for (ExitTimeObjectMaterialization* materialization : worklist) {
+ // Check if we can do anything about this right now.
+ bool allGood = true;
+ for (ExitPropertyValue value : materialization->properties()) {
+ if (!value.value().isObjectMaterialization())
+ continue;
+ if (!value.location().neededForMaterialization())
+ continue;
+ if (toMaterialize.contains(value.value().objectMaterialization())) {
+ // Gotta skip this one, since it needs a
+ // materialization that hasn't been materialized.
+ allGood = false;
+ break;
+ }
+ }
+ if (!allGood)
+ continue;
+
+ // All systems go for materializing the object. First we
+ // recover the values of all of its fields and then we
+ // call a function to actually allocate the beast.
+ // We only recover the fields that are needed for the allocation.
+ for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
+ const ExitPropertyValue& property = materialization->properties()[propertyIndex];
+ if (!property.location().neededForMaterialization())
+ continue;
+
+ recoverValue(property.value());
+ jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
+ }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
+ // This call assumes that we don't pass arguments on the stack.
+ jit.setupArgumentsWithExecState(
+ CCallHelpers::TrustedImmPtr(materialization),
+ CCallHelpers::TrustedImmPtr(materializationArguments));
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
+ jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));
+
+ // Let everyone know that we're done.
+ toMaterialize.remove(materialization);
}
+ // We expect progress! This ensures that we crash rather than looping infinitely if there
+ // is something broken about this fixpoint. Or, this could happen if we ever violate the
+ // "materializations form a DAG" rule.
+ RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
+ }
+
+ // Now that all the objects have been allocated, we populate them
+ // with the correct values. This time we can recover all the
+ // fields, including those that are only needed for the allocation.
+ for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
+ for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
+ recoverValue(materialization->properties()[propertyIndex].value());
+ jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
+ }
+
+ // This call assumes that we don't pass arguments on the stack
+ jit.setupArgumentsWithExecState(
+ CCallHelpers::TrustedImmPtr(materialization),
+ CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)),
+ CCallHelpers::TrustedImmPtr(materializationArguments));
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
+ }
+
+ // Save all state from wherever the exit data tells us it was, into the appropriate place in
+ // the scratch buffer. This also does the reboxing.
+
+ for (unsigned index = exit.m_descriptor->m_values.size(); index--;) {
+ recoverValue(exit.m_descriptor->m_values[index]);
jit.store64(GPRInfo::regT0, scratch + index);
}
- // Now get state out of the scratch buffer and place it back into the stack. This part does
- // all reboxing.
- for (unsigned index = exit.m_values.size(); index--;) {
- int operand = exit.m_values.operandForIndex(index);
- ExitValue value = exit.m_values[index];
-
- jit.load64(scratch + index, GPRInfo::regT0);
- reboxAccordingToFormat(
- value.valueFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
- jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(static_cast<VirtualRegister>(operand), GPRInfo::regT4));
+ // Henceforth we make it look like the exiting function was called through a register
+ // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then
+ // we restore the various things according to either exit.m_descriptor->m_values or by copying from the
+ // old frame, and finally we save the various callee-save registers into where the
+ // restoration thunk would restore them from.
+
+ // Before we start messing with the frame, we need to set aside any registers that the
+ // FTL code was preserving.
+ for (unsigned i = codeBlock->calleeSaveRegisters()->size(); i--;) {
+ RegisterAtOffset entry = codeBlock->calleeSaveRegisters()->at(i);
+ jit.load64(
+ MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()),
+ GPRInfo::regT0);
+ jit.store64(GPRInfo::regT0, unwindScratch + i);
}
- // Restore the old stack pointer and then put regT4 into callFrameRegister. The idea is
- // that the FTL call frame is pushed onto the JS call frame and we can recover the old
- // value of the stack pointer by popping the FTL call frame. We already know what the
- // frame pointer in the JS call frame was because it would have been passed as an argument
- // to the FTL call frame.
- jit.move(MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
- jit.pop(GPRInfo::nonArgGPR0);
- jit.pop(GPRInfo::nonArgGPR0);
- jit.move(GPRInfo::regT4, GPRInfo::callFrameRegister);
+ jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT2);
+
+ // Let's say that the FTL function had failed its arity check. In that case, the stack will
+ // contain some extra stuff.
+ //
+ // We compute the padded stack space:
+ //
+ // paddedStackSpace = roundUp(codeBlock->numParameters - regT2 + 1)
+ //
+ // The stack will have regT2 + CallFrameHeaderSize stuff.
+ // We want to make the stack look like this, from higher addresses down:
+ //
+ // - argument padding
+ // - actual arguments
+ // - call frame header
+
+ // This code assumes that we're dealing with FunctionCode.
+ RELEASE_ASSERT(codeBlock->codeType() == FunctionCode);
+
+ jit.add32(
+ MacroAssembler::TrustedImm32(-codeBlock->numParameters()), GPRInfo::regT2,
+ GPRInfo::regT3);
+ MacroAssembler::Jump arityIntact = jit.branch32(
+ MacroAssembler::GreaterThanOrEqual, GPRInfo::regT3, MacroAssembler::TrustedImm32(0));
+ jit.neg32(GPRInfo::regT3);
+ jit.add32(MacroAssembler::TrustedImm32(1 + stackAlignmentRegisters() - 1), GPRInfo::regT3);
+ jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), GPRInfo::regT3);
+ jit.add32(GPRInfo::regT3, GPRInfo::regT2);
+ arityIntact.link(&jit);
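+    // Worked example of the padding math above, assuming stackAlignmentRegisters() == 2
+    // (16-byte stack alignment with 8-byte registers): if codeBlock->numParameters() is 5
+    // and only 2 arguments (including |this|) were passed, then regT3 = 2 - 5 = -3, which we
+    // negate to 3, add (1 + 2 - 1) to get 5, and mask with -2 to get 4 = roundUp(5 - 2 + 1).
+    // regT2 then becomes 2 + 4 = 6 slots of actual arguments plus padding.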
+
+ CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
+
+ // First set up SP so that our data doesn't get clobbered by signals.
+ unsigned conservativeStackDelta =
+ (exit.m_descriptor->m_values.numberOfLocals() + baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters()) * sizeof(Register) +
+ maxFrameExtentForSlowPathCall;
+ conservativeStackDelta = WTF::roundUpToMultipleOf(
+ stackAlignmentBytes(), conservativeStackDelta);
+ jit.addPtr(
+ MacroAssembler::TrustedImm32(-conservativeStackDelta),
+ MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
+ jit.checkStackPointerAlignment();
+
+ RegisterSet allFTLCalleeSaves = RegisterSet::ftlCalleeSaveRegisters();
+ RegisterAtOffsetList* baselineCalleeSaves = baselineCodeBlock->calleeSaveRegisters();
+ RegisterAtOffsetList* vmCalleeSaves = vm->getAllCalleeSaveRegisterOffsets();
+ RegisterSet vmCalleeSavesToSkip = RegisterSet::stackRegisters();
+ if (exit.isExceptionHandler()) {
+ jit.loadPtr(&vm->topVMEntryFrame, GPRInfo::regT1);
+ jit.addPtr(CCallHelpers::TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), GPRInfo::regT1);
+ }
+
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (!allFTLCalleeSaves.get(reg)) {
+ if (exit.isExceptionHandler())
+ RELEASE_ASSERT(!vmCalleeSaves->find(reg));
+ continue;
+ }
+ unsigned unwindIndex = codeBlock->calleeSaveRegisters()->indexOf(reg);
+ RegisterAtOffset* baselineRegisterOffset = baselineCalleeSaves->find(reg);
+ RegisterAtOffset* vmCalleeSave = nullptr;
+ if (exit.isExceptionHandler())
+ vmCalleeSave = vmCalleeSaves->find(reg);
+
+ if (reg.isGPR()) {
+ GPRReg regToLoad = baselineRegisterOffset ? GPRInfo::regT0 : reg.gpr();
+ RELEASE_ASSERT(regToLoad != GPRInfo::regT1);
+
+ if (unwindIndex == UINT_MAX) {
+ // The FTL compilation didn't preserve this register. This means that it also
+ // didn't use the register. So its value at the beginning of OSR exit should be
+ // preserved by the thunk. Luckily, we saved all registers into the register
+ // scratch buffer, so we can restore them from there.
+ jit.load64(registerScratch + offsetOfReg(reg), regToLoad);
+ } else {
+ // The FTL compilation preserved the register. Its new value is therefore
+ // irrelevant, but we can get the value that was preserved by using the unwind
+ // data. We've already copied all unwind-able preserved registers into the unwind
+ // scratch buffer, so we can get it from there.
+ jit.load64(unwindScratch + unwindIndex, regToLoad);
+ }
+
+ if (baselineRegisterOffset)
+ jit.store64(regToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
+ if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
+ jit.store64(regToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
+ } else {
+ FPRReg fpRegToLoad = baselineRegisterOffset ? FPRInfo::fpRegT0 : reg.fpr();
+
+ if (unwindIndex == UINT_MAX)
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(registerScratch + offsetOfReg(reg)), fpRegToLoad);
+ else
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(unwindScratch + unwindIndex), fpRegToLoad);
+
+ if (baselineRegisterOffset)
+ jit.storeDouble(fpRegToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
+ if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
+ jit.storeDouble(fpRegToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
+ }
+ }
+
+ if (exit.isExceptionHandler()) {
+ RegisterAtOffset* vmCalleeSave = vmCalleeSaves->find(GPRInfo::tagTypeNumberRegister);
+ jit.store64(GPRInfo::tagTypeNumberRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
+
+ vmCalleeSave = vmCalleeSaves->find(GPRInfo::tagMaskRegister);
+ jit.store64(GPRInfo::tagMaskRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
+ }
+
+ size_t baselineVirtualRegistersForCalleeSaves = baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters();
+
+ // Now get state out of the scratch buffer and place it back into the stack. The values are
+ // already reboxed so we just move them.
+ for (unsigned index = exit.m_descriptor->m_values.size(); index--;) {
+ VirtualRegister reg = exit.m_descriptor->m_values.virtualRegisterForIndex(index);
+
+ if (reg.isLocal() && reg.toLocal() < static_cast<int>(baselineVirtualRegistersForCalleeSaves))
+ continue;
+
+ jit.load64(scratch + index, GPRInfo::regT0);
+ jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(reg));
+ }
handleExitCounts(jit, exit);
reifyInlinedCallFrames(jit, exit);
adjustAndJumpToTarget(jit, exit);
- LinkBuffer patchBuffer(*vm, &jit, codeBlock);
+ LinkBuffer patchBuffer(*vm, jit, codeBlock);
exit.m_code = FINALIZE_CODE_IF(
- shouldShowDisassembly(),
+ shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
patchBuffer,
- ("FTL OSR exit #%u (%s, %s) from %s, with operands = %s, and record = %s",
+ ("FTL OSR exit #%u (%s, %s) from %s, with operands = %s",
exitID, toCString(exit.m_codeOrigin).data(),
exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
- toCString(ignoringContext<DumpContext>(exit.m_values)).data(),
- toCString(*record).data()));
+ toCString(ignoringContext<DumpContext>(exit.m_descriptor->m_values)).data())
+ );
}
extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID)
{
- SamplingRegion samplingRegion("FTL OSR Exit Compilation");
+ if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit())
+ dataLog("Compiling OSR exit with exitID = ", exitID, "\n");
+
+ if (exec->vm().callFrameForCatch)
+ RELEASE_ASSERT(exec->vm().callFrameForCatch == exec);
CodeBlock* codeBlock = exec->codeBlock();
@@ -196,12 +554,28 @@ extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID)
JITCode* jitCode = codeBlock->jitCode()->ftl();
OSRExit& exit = jitCode->osrExit[exitID];
+ if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit()) {
+ dataLog(" Owning block: ", pointerDump(codeBlock), "\n");
+ dataLog(" Origin: ", exit.m_codeOrigin, "\n");
+ if (exit.m_codeOriginForExitProfile != exit.m_codeOrigin)
+ dataLog(" Origin for exit profile: ", exit.m_codeOriginForExitProfile, "\n");
+ dataLog(" Current call site index: ", exec->callSiteIndex().bits(), "\n");
+ dataLog(" Exit is exception handler: ", exit.isExceptionHandler(), "\n");
+ dataLog(" Is unwind handler: ", exit.isGenericUnwindHandler(), "\n");
+ dataLog(" Exit values: ", exit.m_descriptor->m_values, "\n");
+ dataLog(" Value reps: ", listDump(exit.m_valueReps), "\n");
+ if (!exit.m_descriptor->m_materializations.isEmpty()) {
+ dataLog(" Materializations:\n");
+ for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
+ dataLog(" ", pointerDump(materialization), "\n");
+ }
+ }
+
prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
compileStub(exitID, jitCode, exit, vm, codeBlock);
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(
+
+ MacroAssembler::repatchJump(
exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
return exit.m_code.code().executableAddress();
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h
index eba9cb859..405b7a5e1 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h
+++ b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLOSRExitCompiler_h
-#define FTLOSRExitCompiler_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -43,6 +40,3 @@ void* JIT_OPERATION compileFTLOSRExit(ExecState*, unsigned exitID) WTF_INTERNAL;
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLOSRExitCompiler_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp
new file mode 100644
index 000000000..11942b766
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLOSRExitHandle.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "FTLOSRExit.h"
+#include "FTLState.h"
+#include "FTLThunks.h"
+#include "LinkBuffer.h"
+#include "ProfilerCompilation.h"
+
+namespace JSC { namespace FTL {
+
+void OSRExitHandle::emitExitThunk(State& state, CCallHelpers& jit)
+{
+ Profiler::Compilation* compilation = state.graph.compilation();
+ CCallHelpers::Label myLabel = jit.label();
+ label = myLabel;
+ jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(index));
+ CCallHelpers::PatchableJump jump = jit.patchableJump();
+ RefPtr<OSRExitHandle> self = this;
+ jit.addLinkTask(
+ [self, jump, myLabel, compilation] (LinkBuffer& linkBuffer) {
+ self->exit.m_patchableJump = CodeLocationJump(linkBuffer.locationOf(jump));
+
+ linkBuffer.link(
+ jump.m_jump,
+ CodeLocationLabel(linkBuffer.vm().getCTIStub(osrExitGenerationThunkGenerator).code()));
+ if (compilation)
+ compilation->addOSRExitSite({ linkBuffer.locationOf(myLabel).executableAddress() });
+ });
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitHandle.h b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.h
new file mode 100644
index 000000000..0763d797c
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "DFGCommon.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "CCallHelpers.h"
+#include <wtf/ThreadSafeRefCounted.h>
+
+namespace JSC { namespace FTL {
+
+class State;
+struct OSRExit;
+
+// This is an object that stores some interesting data about an OSR exit. It's expected that you will
+// scrape this data from this object by the time compilation finishes.
+struct OSRExitHandle : public ThreadSafeRefCounted<OSRExitHandle> {
+ OSRExitHandle(unsigned index, OSRExit& exit)
+ : index(index)
+ , exit(exit)
+ {
+ }
+
+ unsigned index;
+ OSRExit& exit;
+
+ // This is the label at which the OSR exit jump lives. This will get populated once the OSR exit
+    // emits its jump. This happens immediately when you call OSRExitDescriptor::emitOSRExit(). It happens at
+    // some time during late path emission if you do OSRExitDescriptor::emitOSRExitLater().
+ CCallHelpers::Label label;
+
+ // This emits the exit thunk and populates 'label'.
+ void emitExitThunk(State&, CCallHelpers&);
+};
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLOperations.cpp b/Source/JavaScriptCore/ftl/FTLOperations.cpp
new file mode 100644
index 000000000..1d061480f
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLOperations.cpp
@@ -0,0 +1,535 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLOperations.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "ClonedArguments.h"
+#include "DirectArguments.h"
+#include "FTLJITCode.h"
+#include "FTLLazySlowPath.h"
+#include "InlineCallFrame.h"
+#include "JSAsyncFunction.h"
+#include "JSCInlines.h"
+#include "JSFixedArray.h"
+#include "JSGeneratorFunction.h"
+#include "JSLexicalEnvironment.h"
+
+namespace JSC { namespace FTL {
+
+using namespace JSC::DFG;
+
+extern "C" void JIT_OPERATION operationPopulateObjectInOSR(
+ ExecState* exec, ExitTimeObjectMaterialization* materialization,
+ EncodedJSValue* encodedValue, EncodedJSValue* values)
+{
+ VM& vm = exec->vm();
+ CodeBlock* codeBlock = exec->codeBlock();
+
+ // We cannot GC. We've got pointers in evil places.
+ // FIXME: We are not doing anything that can GC here, and this is
+ // probably unnecessary.
+ DeferGCForAWhile deferGC(vm.heap);
+
+ switch (materialization->type()) {
+ case PhantomNewObject: {
+ JSFinalObject* object = jsCast<JSFinalObject*>(JSValue::decode(*encodedValue));
+ Structure* structure = object->structure();
+
+ // Figure out what the heck to populate the object with. Use
+ // getPropertiesConcurrently() because that happens to be
+ // lower-level and more convenient. It doesn't change the
+ // materialization of the property table. We want to have
+ // minimal visible effects on the system. Also, don't mind
+ // that this is O(n^2). It doesn't matter. We only get here
+ // from OSR exit.
+ for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != NamedPropertyPLoc)
+ continue;
+ if (codeBlock->identifier(property.location().info()).impl() != entry.key)
+ continue;
+
+ object->putDirect(vm, entry.offset, JSValue::decode(values[i]));
+ }
+ }
+ break;
+ }
+
+ case PhantomNewFunction:
+ case PhantomNewGeneratorFunction:
+ case PhantomNewAsyncFunction:
+ case PhantomDirectArguments:
+ case PhantomClonedArguments:
+ case PhantomCreateRest:
+ case PhantomSpread:
+ case PhantomNewArrayWithSpread:
+ // Those are completely handled by operationMaterializeObjectInOSR
+ break;
+
+ case PhantomCreateActivation: {
+ JSLexicalEnvironment* activation = jsCast<JSLexicalEnvironment*>(JSValue::decode(*encodedValue));
+
+ // Figure out what to populate the activation with
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != ClosureVarPLoc)
+ continue;
+
+ activation->variableAt(ScopeOffset(property.location().info())).set(exec->vm(), activation, JSValue::decode(values[i]));
+ }
+
+ break;
+ }
+
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+
+ }
+}
+
+extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR(
+ ExecState* exec, ExitTimeObjectMaterialization* materialization, EncodedJSValue* values)
+{
+ VM& vm = exec->vm();
+
+ // We cannot GC. We've got pointers in evil places.
+ DeferGCForAWhile deferGC(vm.heap);
+
+ switch (materialization->type()) {
+ case PhantomNewObject: {
+ // Figure out what the structure is
+ Structure* structure = nullptr;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location() != PromotedLocationDescriptor(StructurePLoc))
+ continue;
+
+ RELEASE_ASSERT(JSValue::decode(values[i]).asCell()->inherits(vm, Structure::info()));
+ structure = jsCast<Structure*>(JSValue::decode(values[i]));
+ break;
+ }
+ RELEASE_ASSERT(structure);
+
+ JSFinalObject* result = JSFinalObject::create(vm, structure);
+
+ // The real values will be put subsequently by
+ // operationPopulateNewObjectInOSR. We can't fill them in
+ // now, because they may not be available yet (typically
+ // because we have a cyclic dependency graph).
+
+ // We put a dummy value here in order to avoid super-subtle
+ // GC-and-OSR-exit crashes in case we have a bug and some
+ // field is, for any reason, not filled later.
+ // We use a random-ish number instead of a sensible value like
+ // undefined to make possible bugs easier to track.
+ for (PropertyMapEntry entry : structure->getPropertiesConcurrently())
+ result->putDirect(vm, entry.offset, jsNumber(19723));
+
+ return result;
+ }
+
+ case PhantomNewFunction:
+ case PhantomNewGeneratorFunction:
+ case PhantomNewAsyncFunction: {
+ // Figure out what the executable and activation are
+ FunctionExecutable* executable = nullptr;
+ JSScope* activation = nullptr;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location() == PromotedLocationDescriptor(FunctionExecutablePLoc)) {
+ RELEASE_ASSERT(JSValue::decode(values[i]).asCell()->inherits(vm, FunctionExecutable::info()));
+ executable = jsCast<FunctionExecutable*>(JSValue::decode(values[i]));
+ }
+ if (property.location() == PromotedLocationDescriptor(FunctionActivationPLoc)) {
+ RELEASE_ASSERT(JSValue::decode(values[i]).asCell()->inherits(vm, JSScope::info()));
+ activation = jsCast<JSScope*>(JSValue::decode(values[i]));
+ }
+ }
+ RELEASE_ASSERT(executable && activation);
+
+ if (materialization->type() == PhantomNewFunction)
+ return JSFunction::createWithInvalidatedReallocationWatchpoint(vm, executable, activation);
+ else if (materialization->type() == PhantomNewGeneratorFunction)
+ return JSGeneratorFunction::createWithInvalidatedReallocationWatchpoint(vm, executable, activation);
+ ASSERT(materialization->type() == PhantomNewAsyncFunction);
+ return JSAsyncFunction::createWithInvalidatedReallocationWatchpoint(vm, executable, activation);
+ }
+
+ case PhantomCreateActivation: {
+ // Figure out what the scope and symbol table are
+ JSScope* scope = nullptr;
+ SymbolTable* table = nullptr;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location() == PromotedLocationDescriptor(ActivationScopePLoc)) {
+ RELEASE_ASSERT(JSValue::decode(values[i]).asCell()->inherits(vm, JSScope::info()));
+ scope = jsCast<JSScope*>(JSValue::decode(values[i]));
+ } else if (property.location() == PromotedLocationDescriptor(ActivationSymbolTablePLoc)) {
+ RELEASE_ASSERT(JSValue::decode(values[i]).asCell()->inherits(vm, SymbolTable::info()));
+ table = jsCast<SymbolTable*>(JSValue::decode(values[i]));
+ }
+ }
+ RELEASE_ASSERT(scope);
+ RELEASE_ASSERT(table);
+
+ CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(
+ materialization->origin(), exec->codeBlock());
+ Structure* structure = codeBlock->globalObject()->activationStructure();
+
+ // It doesn't matter what values we initialize as bottom values inside the activation constructor because
+ // activation sinking will set bottom values for each slot.
+ // FIXME: Slight optimization would be to create a constructor that doesn't initialize all slots.
+ JSLexicalEnvironment* result = JSLexicalEnvironment::create(vm, structure, scope, table, jsUndefined());
+
+ RELEASE_ASSERT(materialization->properties().size() - 2 == table->scopeSize());
+
+ // The real values will be put subsequently by
+        // operationPopulateObjectInOSR. See the PhantomNewObject
+ // case for details.
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != ClosureVarPLoc)
+ continue;
+
+ result->variableAt(ScopeOffset(property.location().info())).set(
+ exec->vm(), result, jsNumber(29834));
+ }
+
+ if (validationEnabled()) {
+ // Validate to make sure every slot in the scope has one value.
+ ConcurrentJSLocker locker(table->m_lock);
+ for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
+ bool found = false;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != ClosureVarPLoc)
+ continue;
+ if (ScopeOffset(property.location().info()) == iter->value.scopeOffset()) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT_UNUSED(found, found);
+ }
+ unsigned numberOfClosureVarPloc = 0;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() == ClosureVarPLoc)
+ numberOfClosureVarPloc++;
+ }
+ ASSERT(numberOfClosureVarPloc == table->scopeSize());
+ }
+
+ return result;
+ }
+
+ case PhantomCreateRest:
+ case PhantomDirectArguments:
+ case PhantomClonedArguments: {
+ if (!materialization->origin().inlineCallFrame) {
+ switch (materialization->type()) {
+ case PhantomDirectArguments:
+ return DirectArguments::createByCopying(exec);
+ case PhantomClonedArguments:
+ return ClonedArguments::createWithMachineFrame(exec, exec, ArgumentsMode::Cloned);
+ case PhantomCreateRest: {
+ CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(
+ materialization->origin(), exec->codeBlock());
+
+ unsigned numberOfArgumentsToSkip = codeBlock->numberOfArgumentsToSkip();
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ Structure* structure = globalObject->restParameterStructure();
+ JSValue* argumentsToCopyRegion = exec->addressOfArgumentsStart() + numberOfArgumentsToSkip;
+ unsigned arraySize = exec->argumentCount() > numberOfArgumentsToSkip ? exec->argumentCount() - numberOfArgumentsToSkip : 0;
+ return constructArray(exec, structure, argumentsToCopyRegion, arraySize);
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+ }
+
+        // We have an inline call frame here (the machine-frame case returned above). First figure out the argument count.
+ unsigned argumentCount = 0;
+ if (materialization->origin().inlineCallFrame->isVarargs()) {
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location() != PromotedLocationDescriptor(ArgumentCountPLoc))
+ continue;
+ argumentCount = JSValue::decode(values[i]).asUInt32();
+ break;
+ }
+ } else
+ argumentCount = materialization->origin().inlineCallFrame->arguments.size();
+ RELEASE_ASSERT(argumentCount);
+
+ JSFunction* callee = nullptr;
+ if (materialization->origin().inlineCallFrame->isClosureCall) {
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location() != PromotedLocationDescriptor(ArgumentsCalleePLoc))
+ continue;
+
+ callee = jsCast<JSFunction*>(JSValue::decode(values[i]));
+ break;
+ }
+ } else
+ callee = materialization->origin().inlineCallFrame->calleeConstant();
+ RELEASE_ASSERT(callee);
+
+ CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(
+ materialization->origin(), exec->codeBlock());
+
+ // We have an inline frame and we have all of the data we need to recreate it.
+ switch (materialization->type()) {
+ case PhantomDirectArguments: {
+ unsigned length = argumentCount - 1;
+ unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
+ DirectArguments* result = DirectArguments::create(
+ vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);
+ result->callee().set(vm, result, callee);
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != ArgumentPLoc)
+ continue;
+
+ unsigned index = property.location().info();
+ if (index >= capacity)
+ continue;
+
+ // We don't want to use setIndexQuickly(), since that's only for the passed-in
+ // arguments but sometimes the number of named arguments is greater. For
+ // example:
+ //
+ // function foo(a, b, c) { ... }
+ // foo();
+ //
+ // setIndexQuickly() would fail for indices 0, 1, 2 - but we need to recover
+ // those here.
+ result->argument(DirectArgumentsOffset(index)).set(
+ vm, result, JSValue::decode(values[i]));
+ }
+ return result;
+ }
+ case PhantomClonedArguments: {
+ unsigned length = argumentCount - 1;
+ ClonedArguments* result = ClonedArguments::createEmpty(
+ vm, codeBlock->globalObject()->clonedArgumentsStructure(), callee, length);
+
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != ArgumentPLoc)
+ continue;
+
+ unsigned index = property.location().info();
+ if (index >= length)
+ continue;
+ result->initializeIndex(vm, index, JSValue::decode(values[i]));
+ }
+
+ return result;
+ }
+ case PhantomCreateRest: {
+ unsigned numberOfArgumentsToSkip = codeBlock->numberOfArgumentsToSkip();
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ Structure* structure = globalObject->restParameterStructure();
+ ASSERT(argumentCount > 0);
+ unsigned arraySize = (argumentCount - 1) > numberOfArgumentsToSkip ? argumentCount - 1 - numberOfArgumentsToSkip : 0;
+
+ // FIXME: we should throw an out of memory error here if tryCreateForInitializationPrivate() fails.
+ // https://bugs.webkit.org/show_bug.cgi?id=169784
+ JSArray* array = JSArray::tryCreateForInitializationPrivate(vm, structure, arraySize);
+ RELEASE_ASSERT(array);
+
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != ArgumentPLoc)
+ continue;
+
+ unsigned argIndex = property.location().info();
+ if (numberOfArgumentsToSkip > argIndex)
+ continue;
+ unsigned arrayIndex = argIndex - numberOfArgumentsToSkip;
+ if (arrayIndex >= arraySize)
+ continue;
+ array->initializeIndex(vm, arrayIndex, JSValue::decode(values[i]));
+ }
+
+#if !ASSERT_DISABLED
+ // We avoid this O(n^2) loop when asserts are disabled, but the condition checked here
+ // must hold to ensure the correctness of the above loop because of how we allocate the array.
+ for (unsigned targetIndex = 0; targetIndex < arraySize; ++targetIndex) {
+ bool found = false;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() != ArgumentPLoc)
+ continue;
+
+ unsigned argIndex = property.location().info();
+ if (numberOfArgumentsToSkip > argIndex)
+ continue;
+ unsigned arrayIndex = argIndex - numberOfArgumentsToSkip;
+ if (arrayIndex >= arraySize)
+ continue;
+ if (arrayIndex == targetIndex) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT(found);
+ }
+#endif
+ return array;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+ }
+
+ case PhantomSpread: {
+ JSArray* array = nullptr;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() == SpreadPLoc) {
+ array = jsCast<JSArray*>(JSValue::decode(values[i]));
+ break;
+ }
+ }
+ RELEASE_ASSERT(array);
+
+ // Note: it is sound for JSFixedArray::createFromArray to call getDirectIndex here
+ // because we're guaranteed we won't be calling any getters. The reason for this is
+ // that we only support PhantomSpread over CreateRest, which is an array we create.
+ // Any attempts to put a getter on any indices on the rest array will escape the array.
+ JSFixedArray* fixedArray = JSFixedArray::createFromArray(exec, vm, array);
+ return fixedArray;
+ }
+
+ case PhantomNewArrayWithSpread: {
+ CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(
+ materialization->origin(), exec->codeBlock());
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous);
+
+ Checked<unsigned, RecordOverflow> checkedArraySize = 0;
+ unsigned numProperties = 0;
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() == NewArrayWithSpreadArgumentPLoc) {
+ ++numProperties;
+ JSValue value = JSValue::decode(values[i]);
+ if (JSFixedArray* fixedArray = jsDynamicCast<JSFixedArray*>(vm, value))
+ checkedArraySize += fixedArray->size();
+ else
+ checkedArraySize += 1;
+ }
+ }
+
+ // FIXME: we should throw an out of memory error here if checkedArraySize has hasOverflowed() or tryCreateForInitializationPrivate() fails.
+ // https://bugs.webkit.org/show_bug.cgi?id=169784
+ unsigned arraySize = checkedArraySize.unsafeGet(); // Crashes if overflowed.
+ JSArray* result = JSArray::tryCreateForInitializationPrivate(vm, structure, arraySize);
+ RELEASE_ASSERT(result);
+
+#if !ASSERT_DISABLED
+ // Ensure we see indices for everything in the range: [0, numProperties)
+ for (unsigned i = 0; i < numProperties; ++i) {
+ bool found = false;
+ for (unsigned j = 0; j < materialization->properties().size(); ++j) {
+ const ExitPropertyValue& property = materialization->properties()[j];
+ if (property.location().kind() == NewArrayWithSpreadArgumentPLoc && property.location().info() == i) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT(found);
+ }
+#endif
+
+ Vector<JSValue, 8> arguments;
+ arguments.grow(numProperties);
+
+ for (unsigned i = materialization->properties().size(); i--;) {
+ const ExitPropertyValue& property = materialization->properties()[i];
+ if (property.location().kind() == NewArrayWithSpreadArgumentPLoc) {
+ JSValue value = JSValue::decode(values[i]);
+ RELEASE_ASSERT(property.location().info() < numProperties);
+ arguments[property.location().info()] = value;
+ }
+ }
+
+ unsigned arrayIndex = 0;
+ for (JSValue value : arguments) {
+ if (JSFixedArray* fixedArray = jsDynamicCast<JSFixedArray*>(vm, value)) {
+ for (unsigned i = 0; i < fixedArray->size(); i++) {
+ ASSERT(fixedArray->get(i));
+ result->initializeIndex(vm, arrayIndex, fixedArray->get(i));
+ ++arrayIndex;
+ }
+ } else {
+ // We are not spreading.
+ result->initializeIndex(vm, arrayIndex, value);
+ ++arrayIndex;
+ }
+ }
+
+ return result;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+}
+
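+// Called from JIT code the first time a given lazy slow path is reached: it
+// generates that slow path's code and returns the stub's entry address for the
+// caller to jump to.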
+extern "C" void* JIT_OPERATION compileFTLLazySlowPath(ExecState* exec, unsigned index)
+{
+ VM& vm = exec->vm();
+
+ // We cannot GC. We've got pointers in evil places.
+ DeferGCForAWhile deferGC(vm.heap);
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ JITCode* jitCode = codeBlock->jitCode()->ftl();
+
+ LazySlowPath& lazySlowPath = *jitCode->lazySlowPaths[index];
+ lazySlowPath.generate(codeBlock);
+
+ return lazySlowPath.stub().code().executableAddress();
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLOperations.h b/Source/JavaScriptCore/ftl/FTLOperations.h
new file mode 100644
index 000000000..031840100
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLOperations.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "DFGOperations.h"
+#include "FTLExitTimeObjectMaterialization.h"
+
+namespace JSC { namespace FTL {
+
+class LazySlowPath;
+
+extern "C" {
+
+JSCell* JIT_OPERATION operationMaterializeObjectInOSR(
+ ExecState*, ExitTimeObjectMaterialization*, EncodedJSValue*) WTF_INTERNAL;
+
+void JIT_OPERATION operationPopulateObjectInOSR(
+ ExecState*, ExitTimeObjectMaterialization*, EncodedJSValue*, EncodedJSValue*) WTF_INTERNAL;
+
+void* JIT_OPERATION compileFTLLazySlowPath(ExecState*, unsigned) WTF_INTERNAL;
+
+} // extern "C"
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLOutput.cpp b/Source/JavaScriptCore/ftl/FTLOutput.cpp
index fb2fc93b1..bd65e64e9 100644
--- a/Source/JavaScriptCore/ftl/FTLOutput.cpp
+++ b/Source/JavaScriptCore/ftl/FTLOutput.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,21 +28,803 @@
#if ENABLE(FTL_JIT)
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Const32Value.h"
+#include "B3ConstPtrValue.h"
+#include "B3FenceValue.h"
+#include "B3MathExtras.h"
+#include "B3MemoryValue.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "SuperSampler.h"
+
namespace JSC { namespace FTL {
-Output::Output(LContext context)
- : IntrinsicRepository(context)
- , m_function(0)
- , m_heaps(0)
- , m_builder(llvm->CreateBuilderInContext(m_context))
- , m_block(0)
- , m_nextBlock(0)
+using namespace B3;
+
+Output::Output(State& state)
+ : m_proc(*state.proc)
{
}
Output::~Output()
{
- llvm->DisposeBuilder(m_builder);
+}
+
+void Output::initialize(AbstractHeapRepository& heaps)
+{
+ m_heaps = &heaps;
+}
+
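+// Blocks are created in a tentative order: a new block is appended to
+// m_blockOrder, or inserted just before m_nextBlock when appendTo(block, nextBlock)
+// has established an insertion point. applyBlockOrder() later commits this order
+// to the B3 procedure.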
+LBasicBlock Output::newBlock()
+{
+ LBasicBlock result = m_proc.addBlock(m_frequency);
+
+ if (!m_nextBlock)
+ m_blockOrder.append(result);
+ else
+ m_blockOrder.insertBefore(m_nextBlock, result);
+
+ return result;
+}
+
+void Output::applyBlockOrder()
+{
+ m_proc.setBlockOrder(m_blockOrder);
+}
+
+LBasicBlock Output::appendTo(LBasicBlock block, LBasicBlock nextBlock)
+{
+ appendTo(block);
+ return insertNewBlocksBefore(nextBlock);
+}
+
+void Output::appendTo(LBasicBlock block)
+{
+ m_block = block;
+}
+
+LValue Output::framePointer()
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::FramePointer, origin());
+}
+
+SlotBaseValue* Output::lockedStackSlot(size_t bytes)
+{
+ return m_block->appendNew<SlotBaseValue>(m_proc, origin(), m_proc.addStackSlot(bytes));
+}
+
+LValue Output::constBool(bool value)
+{
+ if (value)
+ return booleanTrue;
+ return booleanFalse;
+}
+
+LValue Output::constInt32(int32_t value)
+{
+ return m_block->appendNew<B3::Const32Value>(m_proc, origin(), value);
+}
+
+LValue Output::constInt64(int64_t value)
+{
+ return m_block->appendNew<B3::Const64Value>(m_proc, origin(), value);
+}
+
+LValue Output::constDouble(double value)
+{
+ return m_block->appendNew<B3::ConstDoubleValue>(m_proc, origin(), value);
+}
+
+LValue Output::phi(LType type)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Phi, type, origin());
+}
+
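+// Where B3 exposes a *Constant helper (addConstant, shlConstant, equalConstant,
+// and friends), Output folds constant operands eagerly; otherwise it appends the
+// corresponding B3 opcode to the current block.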
+LValue Output::add(LValue left, LValue right)
+{
+ if (Value* result = left->addConstant(m_proc, right)) {
+ m_block->append(result);
+ return result;
+ }
+ return m_block->appendNew<B3::Value>(m_proc, B3::Add, origin(), left, right);
+}
+
+LValue Output::sub(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Sub, origin(), left, right);
+}
+
+LValue Output::mul(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Mul, origin(), left, right);
+}
+
+LValue Output::div(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Div, origin(), left, right);
+}
+
+LValue Output::chillDiv(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, chill(B3::Div), origin(), left, right);
+}
+
+LValue Output::mod(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Mod, origin(), left, right);
+}
+
+LValue Output::chillMod(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, chill(B3::Mod), origin(), left, right);
+}
+
+LValue Output::neg(LValue value)
+{
+ return m_block->appendNew<Value>(m_proc, B3::Neg, origin(), value);
+}
+
+LValue Output::doubleAdd(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Add, origin(), left, right);
+}
+
+LValue Output::doubleSub(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Sub, origin(), left, right);
+}
+
+LValue Output::doubleMul(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Mul, origin(), left, right);
+}
+
+LValue Output::doubleDiv(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Div, origin(), left, right);
+}
+
+LValue Output::doubleMod(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Mod, origin(), left, right);
+}
+
+LValue Output::bitAnd(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::BitAnd, origin(), left, right);
+}
+
+LValue Output::bitOr(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::BitOr, origin(), left, right);
+}
+
+LValue Output::bitXor(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::BitXor, origin(), left, right);
+}
+
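+// Shift amounts are narrowed to Int32 via castToInt32() before the shift is
+// emitted, since B3 shift opcodes take a 32-bit shift amount.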
+LValue Output::shl(LValue left, LValue right)
+{
+ right = castToInt32(right);
+ if (Value* result = left->shlConstant(m_proc, right)) {
+ m_block->append(result);
+ return result;
+ }
+ return m_block->appendNew<B3::Value>(m_proc, B3::Shl, origin(), left, right);
+}
+
+LValue Output::aShr(LValue left, LValue right)
+{
+ right = castToInt32(right);
+ if (Value* result = left->sShrConstant(m_proc, right)) {
+ m_block->append(result);
+ return result;
+ }
+ return m_block->appendNew<B3::Value>(m_proc, B3::SShr, origin(), left, right);
+}
+
+LValue Output::lShr(LValue left, LValue right)
+{
+ right = castToInt32(right);
+ if (Value* result = left->zShrConstant(m_proc, right)) {
+ m_block->append(result);
+ return result;
+ }
+ return m_block->appendNew<B3::Value>(m_proc, B3::ZShr, origin(), left, right);
+}
+
+LValue Output::bitNot(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::BitXor, origin(),
+ value,
+ m_block->appendIntConstant(m_proc, origin(), value->type(), -1));
+}
+
+LValue Output::logicalNot(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), value, int32Zero);
+}
+
+LValue Output::ctlz32(LValue operand)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Clz, origin(), operand);
+}
+
+LValue Output::doubleAbs(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Abs, origin(), value);
+}
+
+LValue Output::doubleCeil(LValue operand)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Ceil, origin(), operand);
+}
+
+LValue Output::doubleFloor(LValue operand)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Floor, origin(), operand);
+}
+
+LValue Output::doubleTrunc(LValue value)
+{
+ if (MacroAssembler::supportsFloatingPointRounding()) {
+ PatchpointValue* result = patchpoint(Double);
+ result->append(value, ValueRep::SomeRegister);
+ result->setGenerator(
+ [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ jit.roundTowardZeroDouble(params[1].fpr(), params[0].fpr());
+ });
+ result->effects = Effects::none();
+ return result;
+ }
+ double (*truncDouble)(double) = trunc;
+ return callWithoutSideEffects(Double, truncDouble, value);
+}
+
+LValue Output::doubleSin(LValue value)
+{
+ double (*sinDouble)(double) = sin;
+ return callWithoutSideEffects(B3::Double, sinDouble, value);
+}
+
+LValue Output::doubleCos(LValue value)
+{
+ double (*cosDouble)(double) = cos;
+ return callWithoutSideEffects(B3::Double, cosDouble, value);
+}
+
+LValue Output::doubleTan(LValue value)
+{
+ double (*tanDouble)(double) = tan;
+ return callWithoutSideEffects(B3::Double, tanDouble, value);
+}
+
+LValue Output::doublePow(LValue xOperand, LValue yOperand)
+{
+ double (*powDouble)(double, double) = pow;
+ return callWithoutSideEffects(B3::Double, powDouble, xOperand, yOperand);
+}
+
+LValue Output::doublePowi(LValue x, LValue y)
+{
+ // FIXME: powDoubleInt32() should be inlined here since Output knows about block layout and
+ // should be involved in any operation that creates blocks.
+ // https://bugs.webkit.org/show_bug.cgi?id=152223
+ auto result = powDoubleInt32(m_proc, m_block, origin(), x, y);
+ m_block = result.first;
+ return result.second;
+}
+
+LValue Output::doubleSqrt(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Sqrt, origin(), value);
+}
+
+LValue Output::doubleLog(LValue value)
+{
+ double (*logDouble)(double) = log;
+ return callWithoutSideEffects(B3::Double, logDouble, value);
+}
+
+LValue Output::doubleToInt(LValue value)
+{
+ PatchpointValue* result = patchpoint(Int32);
+ result->append(value, ValueRep::SomeRegister);
+ result->setGenerator(
+ [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ jit.truncateDoubleToInt32(params[1].fpr(), params[0].gpr());
+ });
+ result->effects = Effects::none();
+ return result;
+}
+
+LValue Output::doubleToUInt(LValue value)
+{
+ PatchpointValue* result = patchpoint(Int32);
+ result->append(value, ValueRep::SomeRegister);
+ result->setGenerator(
+ [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ jit.truncateDoubleToUint32(params[1].fpr(), params[0].gpr());
+ });
+ result->effects = Effects::none();
+ return result;
+}
+
+LValue Output::signExt32To64(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::SExt32, origin(), value);
+}
+
+LValue Output::signExt32ToPtr(LValue value)
+{
+ return signExt32To64(value);
+}
+
+LValue Output::zeroExt(LValue value, LType type)
+{
+ if (value->type() == type)
+ return value;
+ if (value->hasInt32())
+ return m_block->appendIntConstant(m_proc, origin(), Int64, static_cast<uint64_t>(static_cast<uint32_t>(value->asInt32())));
+ return m_block->appendNew<B3::Value>(m_proc, B3::ZExt32, origin(), value);
+}
+
+LValue Output::intToDouble(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::IToD, origin(), value);
+}
+
+LValue Output::unsignedToDouble(LValue value)
+{
+ return intToDouble(zeroExt(value, Int64));
+}
+
+LValue Output::castToInt32(LValue value)
+{
+ if (value->type() == Int32)
+ return value;
+ if (value->hasInt64())
+ return constInt32(static_cast<int32_t>(value->asInt64()));
+ return m_block->appendNew<B3::Value>(m_proc, B3::Trunc, origin(), value);
+}
+
+LValue Output::doubleToFloat(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::DoubleToFloat, origin(), value);
+}
+
+LValue Output::floatToDouble(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::FloatToDouble, origin(), value);
+}
+
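+// Loads and stores are decorated with the pointer's AbstractHeap so that the
+// heap's range can feed B3's alias analysis.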
+LValue Output::load(TypedPointer pointer, LType type)
+{
+ LValue load = m_block->appendNew<MemoryValue>(m_proc, Load, type, origin(), pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), load);
+ return load;
+}
+
+LValue Output::load8SignExt32(TypedPointer pointer)
+{
+ LValue load = m_block->appendNew<MemoryValue>(m_proc, Load8S, Int32, origin(), pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), load);
+ return load;
+}
+
+LValue Output::load8ZeroExt32(TypedPointer pointer)
+{
+ LValue load = m_block->appendNew<MemoryValue>(m_proc, Load8Z, Int32, origin(), pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), load);
+ return load;
+}
+
+LValue Output::load16SignExt32(TypedPointer pointer)
+{
+ LValue load = m_block->appendNew<MemoryValue>(m_proc, Load16S, Int32, origin(), pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), load);
+ return load;
+}
+
+LValue Output::load16ZeroExt32(TypedPointer pointer)
+{
+ LValue load = m_block->appendNew<MemoryValue>(m_proc, Load16Z, Int32, origin(), pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), load);
+ return load;
+}
+
+void Output::store(LValue value, TypedPointer pointer)
+{
+ LValue store = m_block->appendNew<MemoryValue>(m_proc, Store, origin(), value, pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), store);
+}
+
+FenceValue* Output::fence(const AbstractHeap* read, const AbstractHeap* write)
+{
+ FenceValue* result = m_block->appendNew<FenceValue>(m_proc, origin());
+ m_heaps->decorateFenceRead(read, result);
+ m_heaps->decorateFenceWrite(write, result);
+ return result;
+}
+
+void Output::store32As8(LValue value, TypedPointer pointer)
+{
+ LValue store = m_block->appendNew<MemoryValue>(m_proc, Store8, origin(), value, pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), store);
+}
+
+void Output::store32As16(LValue value, TypedPointer pointer)
+{
+ LValue store = m_block->appendNew<MemoryValue>(m_proc, Store16, origin(), value, pointer.value());
+ m_heaps->decorateMemory(pointer.heap(), store);
+}
+
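+// Computes base + (index << log2(scale)) + offset. For example,
+// baseIndex(base, i, ScaleEight, 16) yields base + (i << 3) + 16, the usual
+// pattern for addressing an 8-byte element behind a 16-byte header.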
+LValue Output::baseIndex(LValue base, LValue index, Scale scale, ptrdiff_t offset)
+{
+ LValue accumulatedOffset;
+
+ switch (scale) {
+ case ScaleOne:
+ accumulatedOffset = index;
+ break;
+ case ScaleTwo:
+ accumulatedOffset = shl(index, intPtrOne);
+ break;
+ case ScaleFour:
+ accumulatedOffset = shl(index, intPtrTwo);
+ break;
+ case ScaleEight:
+ case ScalePtr:
+ accumulatedOffset = shl(index, intPtrThree);
+ break;
+ }
+
+ if (offset)
+ accumulatedOffset = add(accumulatedOffset, constIntPtr(offset));
+
+ return add(base, accumulatedOffset);
+}
+
+LValue Output::equal(LValue left, LValue right)
+{
+ TriState result = left->equalConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), left, right);
+}
+
+LValue Output::notEqual(LValue left, LValue right)
+{
+ TriState result = left->notEqualConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, origin(), left, right);
+}
+
+LValue Output::above(LValue left, LValue right)
+{
+ TriState result = left->aboveConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::Above, origin(), left, right);
+}
+
+LValue Output::aboveOrEqual(LValue left, LValue right)
+{
+ TriState result = left->aboveEqualConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::AboveEqual, origin(), left, right);
+}
+
+LValue Output::below(LValue left, LValue right)
+{
+ TriState result = left->belowConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::Below, origin(), left, right);
+}
+
+LValue Output::belowOrEqual(LValue left, LValue right)
+{
+ TriState result = left->belowEqualConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::BelowEqual, origin(), left, right);
+}
+
+LValue Output::greaterThan(LValue left, LValue right)
+{
+ TriState result = left->greaterThanConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::GreaterThan, origin(), left, right);
+}
+
+LValue Output::greaterThanOrEqual(LValue left, LValue right)
+{
+ TriState result = left->greaterEqualConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::GreaterEqual, origin(), left, right);
+}
+
+LValue Output::lessThan(LValue left, LValue right)
+{
+ TriState result = left->lessThanConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::LessThan, origin(), left, right);
+}
+
+LValue Output::lessThanOrEqual(LValue left, LValue right)
+{
+ TriState result = left->lessEqualConstant(right);
+ if (result != MixedTriState)
+ return constBool(result == TrueTriState);
+ return m_block->appendNew<B3::Value>(m_proc, B3::LessEqual, origin(), left, right);
+}
+
+LValue Output::doubleEqual(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), left, right);
+}
+
+LValue Output::doubleEqualOrUnordered(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::EqualOrUnordered, origin(), left, right);
+}
+
+LValue Output::doubleNotEqualOrUnordered(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, origin(), left, right);
+}
+
+LValue Output::doubleLessThan(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::LessThan, origin(), left, right);
+}
+
+LValue Output::doubleLessThanOrEqual(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::LessEqual, origin(), left, right);
+}
+
+LValue Output::doubleGreaterThan(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::GreaterThan, origin(), left, right);
+}
+
+LValue Output::doubleGreaterThanOrEqual(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::GreaterEqual, origin(), left, right);
+}
+
+LValue Output::doubleNotEqualAndOrdered(LValue left, LValue right)
+{
+ return logicalNot(doubleEqualOrUnordered(left, right));
+}
+
+LValue Output::doubleLessThanOrUnordered(LValue left, LValue right)
+{
+ return logicalNot(doubleGreaterThanOrEqual(left, right));
+}
+
+LValue Output::doubleLessThanOrEqualOrUnordered(LValue left, LValue right)
+{
+ return logicalNot(doubleGreaterThan(left, right));
+}
+
+LValue Output::doubleGreaterThanOrUnordered(LValue left, LValue right)
+{
+ return logicalNot(doubleLessThanOrEqual(left, right));
+}
+
+LValue Output::doubleGreaterThanOrEqualOrUnordered(LValue left, LValue right)
+{
+ return logicalNot(doubleLessThan(left, right));
+}
+
+LValue Output::isZero32(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), value, int32Zero);
+}
+
+LValue Output::notZero32(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, origin(), value, int32Zero);
+}
+
+LValue Output::isZero64(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), value, int64Zero);
+}
+
+LValue Output::notZero64(LValue value)
+{
+ return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, origin(), value, int64Zero);
+}
+
+LValue Output::select(LValue value, LValue taken, LValue notTaken)
+{
+ if (value->hasInt32()) {
+ if (value->asInt32())
+ return taken;
+ else
+ return notTaken;
+ }
+ return m_block->appendNew<B3::Value>(m_proc, B3::Select, origin(), value, taken, notTaken);
+}
+
+void Output::jump(LBasicBlock destination)
+{
+ m_block->appendNewControlValue(m_proc, B3::Jump, origin(), B3::FrequentedBlock(destination));
+}
+
+void Output::branch(LValue condition, LBasicBlock taken, Weight takenWeight, LBasicBlock notTaken, Weight notTakenWeight)
+{
+ m_block->appendNewControlValue(
+ m_proc, B3::Branch, origin(), condition,
+ FrequentedBlock(taken, takenWeight.frequencyClass()),
+ FrequentedBlock(notTaken, notTakenWeight.frequencyClass()));
+}
+
+void Output::check(LValue condition, WeightedTarget taken, Weight notTakenWeight)
+{
+ LBasicBlock continuation = newBlock();
+ branch(condition, taken, WeightedTarget(continuation, notTakenWeight));
+ appendTo(continuation);
+}
+
+void Output::check(LValue condition, WeightedTarget taken)
+{
+ check(condition, taken, taken.weight().inverse());
+}
+
+void Output::ret(LValue value)
+{
+ m_block->appendNewControlValue(m_proc, B3::Return, origin(), value);
+}
+
+void Output::unreachable()
+{
+ m_block->appendNewControlValue(m_proc, B3::Oops, origin());
+}
+
+void Output::appendSuccessor(WeightedTarget target)
+{
+ m_block->appendSuccessor(target.frequentedBlock());
+}
+
+CheckValue* Output::speculate(LValue value)
+{
+ return m_block->appendNew<B3::CheckValue>(m_proc, B3::Check, origin(), value);
+}
+
+CheckValue* Output::speculateAdd(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::CheckValue>(m_proc, B3::CheckAdd, origin(), left, right);
+}
+
+CheckValue* Output::speculateSub(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::CheckValue>(m_proc, B3::CheckSub, origin(), left, right);
+}
+
+CheckValue* Output::speculateMul(LValue left, LValue right)
+{
+ return m_block->appendNew<B3::CheckValue>(m_proc, B3::CheckMul, origin(), left, right);
+}
+
+PatchpointValue* Output::patchpoint(LType type)
+{
+ return m_block->appendNew<B3::PatchpointValue>(m_proc, type, origin());
+}
+
+void Output::trap()
+{
+ m_block->appendNewControlValue(m_proc, B3::Oops, origin());
+}
+
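+// anchor() wraps a value in a B3 Upsilon; addIncomingToPhi() later points that
+// Upsilon at a Phi. Together they emulate the phi/addIncoming style of the old
+// LLVM-based Output.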
+ValueFromBlock Output::anchor(LValue value)
+{
+ B3::UpsilonValue* upsilon = m_block->appendNew<B3::UpsilonValue>(m_proc, origin(), value);
+ return ValueFromBlock(upsilon, m_block);
+}
+
+LValue Output::bitCast(LValue value, LType type)
+{
+ ASSERT_UNUSED(type, type == Int64 || type == Double);
+ return m_block->appendNew<B3::Value>(m_proc, B3::BitwiseCast, origin(), value);
+}
+
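+// Rounds to float32 precision by converting to float and back to double,
+// matching the rounding that Math.fround performs.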
+LValue Output::fround(LValue doubleValue)
+{
+ return floatToDouble(doubleToFloat(doubleValue));
+}
+
+LValue Output::load(TypedPointer pointer, LoadType type)
+{
+ switch (type) {
+ case Load8SignExt32:
+ return load8SignExt32(pointer);
+ case Load8ZeroExt32:
+ return load8ZeroExt32(pointer);
+    case Load16SignExt32:
+        return load16SignExt32(pointer);
+    case Load16ZeroExt32:
+        return load16ZeroExt32(pointer);
+ case Load32:
+ return load32(pointer);
+ case Load64:
+ return load64(pointer);
+ case LoadPtr:
+ return loadPtr(pointer);
+ case LoadFloat:
+ return loadFloat(pointer);
+ case LoadDouble:
+ return loadDouble(pointer);
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+}
+
+void Output::store(LValue value, TypedPointer pointer, StoreType type)
+{
+ switch (type) {
+ case Store32As8:
+ store32As8(value, pointer);
+ return;
+ case Store32As16:
+ store32As16(value, pointer);
+ return;
+ case Store32:
+ store32(value, pointer);
+ return;
+ case Store64:
+ store64(value, pointer);
+ return;
+ case StorePtr:
+ storePtr(value, pointer);
+ return;
+ case StoreFloat:
+ storeFloat(value, pointer);
+ return;
+ case StoreDouble:
+ storeDouble(value, pointer);
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+TypedPointer Output::absolute(const void* address)
+{
+ return TypedPointer(m_heaps->absolute[address], constIntPtr(address));
+}
+
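+// Increments/decrements the global SuperSampler counter (g_superSamplerCount),
+// which the SuperSampler uses to attribute time to instrumented regions of
+// generated code.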
+void Output::incrementSuperSamplerCount()
+{
+ TypedPointer counter = absolute(bitwise_cast<void*>(&g_superSamplerCount));
+ store32(add(load32(counter), int32One), counter);
+}
+
+void Output::decrementSuperSamplerCount()
+{
+ TypedPointer counter = absolute(bitwise_cast<void*>(&g_superSamplerCount));
+ store32(sub(load32(counter), int32One), counter);
+}
+
+void Output::addIncomingToPhi(LValue phi, ValueFromBlock value)
+{
+ if (value)
+ value.value()->as<B3::UpsilonValue>()->setPhi(phi);
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLOutput.h b/Source/JavaScriptCore/ftl/FTLOutput.h
index 2ec873503..91e548c78 100644
--- a/Source/JavaScriptCore/ftl/FTLOutput.h
+++ b/Source/JavaScriptCore/ftl/FTLOutput.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,228 +20,260 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLOutput_h
-#define FTLOutput_h
+#pragma once
-#include <wtf/Platform.h>
+#include "DFGCommon.h"
#if ENABLE(FTL_JIT)
-#include "DFGCommon.h"
-#include "FTLAbbreviations.h"
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Compilation.h"
+#include "B3FrequentedBlock.h"
+#include "B3Procedure.h"
+#include "B3SwitchValue.h"
+#include "FTLAbbreviatedTypes.h"
#include "FTLAbstractHeapRepository.h"
#include "FTLCommonValues.h"
-#include "FTLIntrinsicRepository.h"
+#include "FTLState.h"
+#include "FTLSwitchCase.h"
#include "FTLTypedPointer.h"
+#include "FTLValueFromBlock.h"
+#include "FTLWeight.h"
+#include "FTLWeightedTarget.h"
+#include "HeapCell.h"
+#include <wtf/OrderMaker.h>
#include <wtf/StringPrintStream.h>
-namespace JSC { namespace FTL {
-
-// Idiomatic LLVM IR builder specifically designed for FTL. This uses our own lowering
-// terminology, and has some of its own notions:
-//
-// We say that a "reference" is what LLVM considers to be a "pointer". That is, it has
-// an element type and can be passed directly to memory access instructions. Note that
-// broadly speaking the users of FTL::Output should only use references for alloca'd
-// slots for mutable local variables.
-//
-// We say that a "pointer" is what LLVM considers to be a pointer-width integer.
-//
-// We say that a "typed pointer" is a pointer that carries TBAA meta-data (i.e. an
-// AbstractHeap). These should usually not have further computation performed on them
-// prior to access, though there are exceptions (like offsetting into the payload of
-// a typed pointer to a JSValue).
-//
-// We say that "get" and "set" are what LLVM considers to be "load" and "store". Get
-// and set take references.
-//
-// We say that "load" and "store" are operations that take a typed pointer. These
-// operations translate the pointer into a reference (or, a pointer in LLVM-speak),
-// emit get or set on the reference (or, load and store in LLVM-speak), and apply the
-// TBAA meta-data to the get or set.
+// FIXME: remove this once everything can be generated through B3.
+#if COMPILER(GCC_OR_CLANG)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif // COMPILER(GCC_OR_CLANG)
+
+namespace JSC {
+
+namespace DFG {
+struct Node;
+} // namespace DFG
+
+namespace B3 {
+class FenceValue;
+class SlotBaseValue;
+} // namespace B3
+
+namespace FTL {
enum Scale { ScaleOne, ScaleTwo, ScaleFour, ScaleEight, ScalePtr };
-class Output : public IntrinsicRepository {
+class Output : public CommonValues {
public:
- Output(LContext);
+ Output(State&);
~Output();
-
- void initialize(LModule module, LValue function, AbstractHeapRepository& heaps)
+
+ void initialize(AbstractHeapRepository&);
+
+ void setFrequency(double value)
{
- IntrinsicRepository::initialize(module);
- m_function = function;
- m_heaps = &heaps;
+ m_frequency = value;
}
-
+
+ LBasicBlock newBlock();
+
LBasicBlock insertNewBlocksBefore(LBasicBlock nextBlock)
{
LBasicBlock lastNextBlock = m_nextBlock;
m_nextBlock = nextBlock;
return lastNextBlock;
}
-
- LBasicBlock appendTo(LBasicBlock block, LBasicBlock nextBlock)
- {
- appendTo(block);
- return insertNewBlocksBefore(nextBlock);
- }
-
- void appendTo(LBasicBlock block)
+
+ void applyBlockOrder();
+
+ LBasicBlock appendTo(LBasicBlock, LBasicBlock nextBlock);
+ void appendTo(LBasicBlock);
+
+ void setOrigin(DFG::Node* node) { m_origin = node; }
+ B3::Origin origin() { return B3::Origin(m_origin); }
+
+ LValue framePointer();
+
+ B3::SlotBaseValue* lockedStackSlot(size_t bytes);
+
+ LValue constBool(bool value);
+ LValue constInt32(int32_t value);
+
+ LValue weakPointer(DFG::Graph& graph, JSCell* cell)
{
- m_block = block;
-
- llvm->PositionBuilderAtEnd(m_builder, block);
+ ASSERT(graph.m_plan.weakReferences.contains(cell));
+
+ if (sizeof(void*) == 8)
+ return constInt64(bitwise_cast<intptr_t>(cell));
+ return constInt32(bitwise_cast<intptr_t>(cell));
}
- LBasicBlock newBlock(const char* name = "")
+
+ LValue weakPointer(DFG::FrozenValue* value)
{
- if (!m_nextBlock)
- return appendBasicBlock(m_context, m_function, name);
- return insertBasicBlock(m_context, m_nextBlock, name);
+ RELEASE_ASSERT(value->value().isCell());
+
+ if (sizeof(void*) == 8)
+ return constInt64(bitwise_cast<intptr_t>(value->cell()));
+ return constInt32(bitwise_cast<intptr_t>(value->cell()));
}
-
- LValue param(unsigned index) { return getParam(m_function, index); }
- LValue constBool(bool value) { return constInt(boolean, value); }
- LValue constInt8(int8_t value) { return constInt(int8, value); }
- LValue constInt32(int32_t value) { return constInt(int32, value); }
- template<typename T>
- LValue constIntPtr(T* value) { return constInt(intPtr, bitwise_cast<intptr_t>(value)); }
+
template<typename T>
- LValue constIntPtr(T value) { return constInt(intPtr, static_cast<intptr_t>(value)); }
- LValue constInt64(int64_t value) { return constInt(int64, value); }
- LValue constDouble(double value) { return constReal(doubleType, value); }
-
- LValue phi(LType type) { return buildPhi(m_builder, type); }
- LValue phi(LType type, ValueFromBlock value1)
+ LValue constIntPtr(T* value)
{
- return buildPhi(m_builder, type, value1);
+ static_assert(!std::is_base_of<HeapCell, T>::value, "To use a GC pointer, the graph must be aware of it. Use gcPointer instead and make sure the graph is aware of this reference.");
+ if (sizeof(void*) == 8)
+ return constInt64(bitwise_cast<intptr_t>(value));
+ return constInt32(bitwise_cast<intptr_t>(value));
}
- LValue phi(LType type, ValueFromBlock value1, ValueFromBlock value2)
+ template<typename T>
+ LValue constIntPtr(T value)
{
- return buildPhi(m_builder, type, value1, value2);
+ if (sizeof(void*) == 8)
+ return constInt64(static_cast<intptr_t>(value));
+ return constInt32(static_cast<intptr_t>(value));
}
+ LValue constInt64(int64_t value);
+ LValue constDouble(double value);
+
+ LValue phi(LType);
+ template<typename... Params>
+ LValue phi(LType, ValueFromBlock, Params... theRest);
template<typename VectorType>
- LValue phi(LType type, const VectorType& vector)
- {
- LValue result = phi(type);
- for (unsigned i = 0; i < vector.size(); ++i)
- addIncoming(result, vector[i]);
- return result;
- }
-
- LValue add(LValue left, LValue right) { return buildAdd(m_builder, left, right); }
- LValue sub(LValue left, LValue right) { return buildSub(m_builder, left, right); }
- LValue mul(LValue left, LValue right) { return buildMul(m_builder, left, right); }
- LValue div(LValue left, LValue right) { return buildDiv(m_builder, left, right); }
- LValue rem(LValue left, LValue right) { return buildRem(m_builder, left, right); }
- LValue neg(LValue value) { return buildNeg(m_builder, value); }
-
- LValue doubleAdd(LValue left, LValue right) { return buildFAdd(m_builder, left, right); }
- LValue doubleSub(LValue left, LValue right) { return buildFSub(m_builder, left, right); }
- LValue doubleMul(LValue left, LValue right) { return buildFMul(m_builder, left, right); }
- LValue doubleDiv(LValue left, LValue right) { return buildFDiv(m_builder, left, right); }
- LValue doubleRem(LValue left, LValue right) { return buildFRem(m_builder, left, right); }
- LValue doubleNeg(LValue value) { return buildFNeg(m_builder, value); }
-
- LValue bitAnd(LValue left, LValue right) { return buildAnd(m_builder, left, right); }
- LValue bitOr(LValue left, LValue right) { return buildOr(m_builder, left, right); }
- LValue bitXor(LValue left, LValue right) { return buildXor(m_builder, left, right); }
- LValue shl(LValue left, LValue right) { return buildShl(m_builder, left, right); }
- LValue aShr(LValue left, LValue right) { return buildAShr(m_builder, left, right); }
- LValue lShr(LValue left, LValue right) { return buildLShr(m_builder, left, right); }
- LValue bitNot(LValue value) { return buildNot(m_builder, value); }
-
- LValue insertElement(LValue vector, LValue element, LValue index) { return buildInsertElement(m_builder, vector, element, index); }
-
- LValue addWithOverflow32(LValue left, LValue right)
- {
- return call(addWithOverflow32Intrinsic(), left, right);
- }
- LValue subWithOverflow32(LValue left, LValue right)
- {
- return call(subWithOverflow32Intrinsic(), left, right);
- }
- LValue mulWithOverflow32(LValue left, LValue right)
- {
- return call(mulWithOverflow32Intrinsic(), left, right);
- }
- LValue addWithOverflow64(LValue left, LValue right)
- {
- return call(addWithOverflow64Intrinsic(), left, right);
- }
- LValue subWithOverflow64(LValue left, LValue right)
- {
- return call(subWithOverflow64Intrinsic(), left, right);
- }
- LValue mulWithOverflow64(LValue left, LValue right)
+ LValue phi(LType, const VectorType&);
+ void addIncomingToPhi(LValue phi, ValueFromBlock);
+ template<typename... Params>
+ void addIncomingToPhi(LValue phi, ValueFromBlock, Params... theRest);
+
+ LValue add(LValue, LValue);
+ LValue sub(LValue, LValue);
+ LValue mul(LValue, LValue);
+ LValue div(LValue, LValue);
+ LValue chillDiv(LValue, LValue);
+ LValue mod(LValue, LValue);
+ LValue chillMod(LValue, LValue);
+ LValue neg(LValue);
+
+ LValue doubleAdd(LValue, LValue);
+ LValue doubleSub(LValue, LValue);
+ LValue doubleMul(LValue, LValue);
+ LValue doubleDiv(LValue, LValue);
+ LValue doubleMod(LValue, LValue);
+ LValue doubleNeg(LValue value) { return neg(value); }
+
+ LValue bitAnd(LValue, LValue);
+ LValue bitOr(LValue, LValue);
+ LValue bitXor(LValue, LValue);
+ LValue shl(LValue, LValue shiftAmount);
+ LValue aShr(LValue, LValue shiftAmount);
+ LValue lShr(LValue, LValue shiftAmount);
+ LValue bitNot(LValue);
+ LValue logicalNot(LValue);
+
+ LValue ctlz32(LValue);
+ LValue doubleAbs(LValue);
+ LValue doubleCeil(LValue);
+ LValue doubleFloor(LValue);
+ LValue doubleTrunc(LValue);
+
+ LValue doubleSin(LValue);
+ LValue doubleCos(LValue);
+ LValue doubleTan(LValue);
+
+ LValue doublePow(LValue base, LValue exponent);
+ LValue doublePowi(LValue base, LValue exponent);
+
+ LValue doubleSqrt(LValue);
+
+ LValue doubleLog(LValue);
+
+ LValue doubleToInt(LValue);
+ LValue doubleToUInt(LValue);
+
+ LValue signExt32To64(LValue);
+ LValue signExt32ToPtr(LValue);
+ LValue zeroExt(LValue, LType);
+ LValue zeroExtPtr(LValue value) { return zeroExt(value, B3::Int64); }
+ LValue intToDouble(LValue);
+ LValue unsignedToDouble(LValue);
+ LValue castToInt32(LValue);
+ LValue doubleToFloat(LValue);
+ LValue floatToDouble(LValue);
+ LValue bitCast(LValue, LType);
+ LValue fround(LValue);
+
+ LValue load(TypedPointer, LType);
+ void store(LValue, TypedPointer);
+ B3::FenceValue* fence(const AbstractHeap* read, const AbstractHeap* write);
+
+ LValue load8SignExt32(TypedPointer);
+ LValue load8ZeroExt32(TypedPointer);
+ LValue load16SignExt32(TypedPointer);
+ LValue load16ZeroExt32(TypedPointer);
+ LValue load32(TypedPointer pointer) { return load(pointer, B3::Int32); }
+ LValue load64(TypedPointer pointer) { return load(pointer, B3::Int64); }
+ LValue loadPtr(TypedPointer pointer) { return load(pointer, B3::pointerType()); }
+ LValue loadFloat(TypedPointer pointer) { return load(pointer, B3::Float); }
+ LValue loadDouble(TypedPointer pointer) { return load(pointer, B3::Double); }
+ void store32As8(LValue, TypedPointer);
+ void store32As16(LValue, TypedPointer);
+ void store32(LValue value, TypedPointer pointer)
{
- return call(mulWithOverflow64Intrinsic(), left, right);
+ ASSERT(value->type() == B3::Int32);
+ store(value, pointer);
}
- LValue doubleAbs(LValue value)
+ void store64(LValue value, TypedPointer pointer)
{
- return call(doubleAbsIntrinsic(), value);
+ ASSERT(value->type() == B3::Int64);
+ store(value, pointer);
}
-
- static bool hasSensibleDoubleToInt() { return isX86(); }
- LValue sensibleDoubleToInt(LValue value)
+ void storePtr(LValue value, TypedPointer pointer)
{
- RELEASE_ASSERT(isX86());
- return call(
- x86SSE2CvtTSD2SIIntrinsic(),
- insertElement(
- insertElement(getUndef(vectorType(doubleType, 2)), value, int32Zero),
- doubleZero, int32One));
+ ASSERT(value->type() == B3::pointerType());
+ store(value, pointer);
}
-
- LValue signExt(LValue value, LType type) { return buildSExt(m_builder, value, type); }
- LValue zeroExt(LValue value, LType type) { return buildZExt(m_builder, value, type); }
- LValue fpToInt(LValue value, LType type) { return buildFPToSI(m_builder, value, type); }
- LValue fpToUInt(LValue value, LType type) { return buildFPToUI(m_builder, value, type); }
- LValue fpToInt32(LValue value) { return fpToInt(value, int32); }
- LValue fpToUInt32(LValue value) { return fpToUInt(value, int32); }
- LValue intToFP(LValue value, LType type) { return buildSIToFP(m_builder, value, type); }
- LValue intToDouble(LValue value) { return intToFP(value, doubleType); }
- LValue unsignedToFP(LValue value, LType type) { return buildUIToFP(m_builder, value, type); }
- LValue unsignedToDouble(LValue value) { return unsignedToFP(value, doubleType); }
- LValue intCast(LValue value, LType type) { return buildIntCast(m_builder, value, type); }
- LValue castToInt32(LValue value) { return intCast(value, int32); }
- LValue fpCast(LValue value, LType type) { return buildFPCast(m_builder, value, type); }
- LValue intToPtr(LValue value, LType type) { return buildIntToPtr(m_builder, value, type); }
- LValue bitCast(LValue value, LType type) { return buildBitCast(m_builder, value, type); }
-
- LValue alloca(LType type) { return buildAlloca(m_builder, type); }
- LValue get(LValue reference) { return buildLoad(m_builder, reference); }
- LValue set(LValue value, LValue reference) { return buildStore(m_builder, value, reference); }
-
- LValue load(TypedPointer pointer, LType refType)
+ void storeFloat(LValue value, TypedPointer pointer)
{
- LValue result = get(intToPtr(pointer.value(), refType));
- pointer.heap().decorateInstruction(result, *m_heaps);
- return result;
+ ASSERT(value->type() == B3::Float);
+ store(value, pointer);
}
- void store(LValue value, TypedPointer pointer, LType refType)
+ void storeDouble(LValue value, TypedPointer pointer)
{
- LValue result = set(value, intToPtr(pointer.value(), refType));
- pointer.heap().decorateInstruction(result, *m_heaps);
+ ASSERT(value->type() == B3::Double);
+ store(value, pointer);
}
-
- LValue load8(TypedPointer pointer) { return load(pointer, ref8); }
- LValue load16(TypedPointer pointer) { return load(pointer, ref16); }
- LValue load32(TypedPointer pointer) { return load(pointer, ref32); }
- LValue load64(TypedPointer pointer) { return load(pointer, ref64); }
- LValue loadPtr(TypedPointer pointer) { return load(pointer, refPtr); }
- LValue loadFloat(TypedPointer pointer) { return load(pointer, refFloat); }
- LValue loadDouble(TypedPointer pointer) { return load(pointer, refDouble); }
- void store8(LValue value, TypedPointer pointer) { store(value, pointer, ref8); }
- void store16(LValue value, TypedPointer pointer) { store(value, pointer, ref16); }
- void store32(LValue value, TypedPointer pointer) { store(value, pointer, ref32); }
- void store64(LValue value, TypedPointer pointer) { store(value, pointer, ref64); }
- void storePtr(LValue value, TypedPointer pointer) { store(value, pointer, refPtr); }
- void storeFloat(LValue value, TypedPointer pointer) { store(value, pointer, refFloat); }
- void storeDouble(LValue value, TypedPointer pointer) { store(value, pointer, refDouble); }
+
+ enum LoadType {
+ Load8SignExt32,
+ Load8ZeroExt32,
+ Load16SignExt32,
+ Load16ZeroExt32,
+ Load32,
+ Load64,
+ LoadPtr,
+ LoadFloat,
+ LoadDouble
+ };
+
+ LValue load(TypedPointer, LoadType);
+
+ enum StoreType {
+ Store32As8,
+ Store32As16,
+ Store32,
+ Store64,
+ StorePtr,
+ StoreFloat,
+ StoreDouble
+ };
+
+ void store(LValue, TypedPointer, StoreType);
LValue addPtr(LValue value, ptrdiff_t immediate = 0)
{
@@ -249,7 +281,7 @@ public:
return value;
return add(value, constIntPtr(immediate));
}
-
+
// Construct an address by offsetting base by the requested amount and ascribing
// the requested abstract heap to it.
TypedPointer address(const AbstractHeap& heap, LValue base, ptrdiff_t offset = 0)
@@ -259,36 +291,13 @@ public:
// Construct an address by offsetting base by the amount specified by the field,
// and optionally an additional amount (use this with care), and then creating
// a TypedPointer with the given field as the heap.
- TypedPointer address(LValue base, const AbstractField& field, ptrdiff_t offset = 0)
+ TypedPointer address(LValue base, const AbstractHeap& field, ptrdiff_t offset = 0)
{
return address(field, base, offset + field.offset());
}
-
- LValue baseIndex(LValue base, LValue index, Scale scale, ptrdiff_t offset = 0)
- {
- LValue accumulatedOffset;
-
- switch (scale) {
- case ScaleOne:
- accumulatedOffset = index;
- break;
- case ScaleTwo:
- accumulatedOffset = shl(index, intPtrOne);
- break;
- case ScaleFour:
- accumulatedOffset = shl(index, intPtrTwo);
- break;
- case ScaleEight:
- case ScalePtr:
- accumulatedOffset = shl(index, intPtrThree);
- break;
- }
-
- if (offset)
- accumulatedOffset = add(accumulatedOffset, constIntPtr(offset));
-
- return add(base, accumulatedOffset);
- }
+
+ LValue baseIndex(LValue base, LValue index, Scale, ptrdiff_t offset = 0);
+
TypedPointer baseIndex(const AbstractHeap& heap, LValue base, LValue index, Scale scale, ptrdiff_t offset = 0)
{
return TypedPointer(heap, baseIndex(base, index, scale, offset));
@@ -297,132 +306,183 @@ public:
{
return heap.baseIndex(*this, base, index, indexAsConstant, offset);
}
-
- TypedPointer absolute(void* address)
- {
- return TypedPointer(m_heaps->absolute[address], constIntPtr(address));
- }
-
- LValue load8(LValue base, const AbstractField& field) { return load8(address(base, field)); }
- LValue load16(LValue base, const AbstractField& field) { return load16(address(base, field)); }
- LValue load32(LValue base, const AbstractField& field) { return load32(address(base, field)); }
- LValue load64(LValue base, const AbstractField& field) { return load64(address(base, field)); }
- LValue loadPtr(LValue base, const AbstractField& field) { return loadPtr(address(base, field)); }
- LValue loadDouble(LValue base, const AbstractField& field) { return loadDouble(address(base, field)); }
- void store32(LValue value, LValue base, const AbstractField& field) { store32(value, address(base, field)); }
- void store64(LValue value, LValue base, const AbstractField& field) { store64(value, address(base, field)); }
- void storePtr(LValue value, LValue base, const AbstractField& field) { storePtr(value, address(base, field)); }
- void storeDouble(LValue value, LValue base, const AbstractField& field) { storeDouble(value, address(base, field)); }
-
- LValue icmp(LIntPredicate cond, LValue left, LValue right) { return buildICmp(m_builder, cond, left, right); }
- LValue equal(LValue left, LValue right) { return icmp(LLVMIntEQ, left, right); }
- LValue notEqual(LValue left, LValue right) { return icmp(LLVMIntNE, left, right); }
- LValue above(LValue left, LValue right) { return icmp(LLVMIntUGT, left, right); }
- LValue aboveOrEqual(LValue left, LValue right) { return icmp(LLVMIntUGE, left, right); }
- LValue below(LValue left, LValue right) { return icmp(LLVMIntULT, left, right); }
- LValue belowOrEqual(LValue left, LValue right) { return icmp(LLVMIntULE, left, right); }
- LValue greaterThan(LValue left, LValue right) { return icmp(LLVMIntSGT, left, right); }
- LValue greaterThanOrEqual(LValue left, LValue right) { return icmp(LLVMIntSGE, left, right); }
- LValue lessThan(LValue left, LValue right) { return icmp(LLVMIntSLT, left, right); }
- LValue lessThanOrEqual(LValue left, LValue right) { return icmp(LLVMIntSLE, left, right); }
-
- LValue fcmp(LRealPredicate cond, LValue left, LValue right) { return buildFCmp(m_builder, cond, left, right); }
- LValue doubleEqual(LValue left, LValue right) { return fcmp(LLVMRealOEQ, left, right); }
- LValue doubleNotEqualOrUnordered(LValue left, LValue right) { return fcmp(LLVMRealUNE, left, right); }
- LValue doubleLessThan(LValue left, LValue right) { return fcmp(LLVMRealOLT, left, right); }
- LValue doubleLessThanOrEqual(LValue left, LValue right) { return fcmp(LLVMRealOLE, left, right); }
- LValue doubleGreaterThan(LValue left, LValue right) { return fcmp(LLVMRealOGT, left, right); }
- LValue doubleGreaterThanOrEqual(LValue left, LValue right) { return fcmp(LLVMRealOGE, left, right); }
- LValue doubleEqualOrUnordered(LValue left, LValue right) { return fcmp(LLVMRealUEQ, left, right); }
- LValue doubleNotEqual(LValue left, LValue right) { return fcmp(LLVMRealONE, left, right); }
- LValue doubleLessThanOrUnordered(LValue left, LValue right) { return fcmp(LLVMRealULT, left, right); }
- LValue doubleLessThanOrEqualOrUnordered(LValue left, LValue right) { return fcmp(LLVMRealULE, left, right); }
- LValue doubleGreaterThanOrUnordered(LValue left, LValue right) { return fcmp(LLVMRealUGT, left, right); }
- LValue doubleGreaterThanOrEqualOrUnordered(LValue left, LValue right) { return fcmp(LLVMRealUGE, left, right); }
-
- LValue isZero8(LValue value) { return equal(value, int8Zero); }
- LValue notZero8(LValue value) { return notEqual(value, int8Zero); }
- LValue isZero32(LValue value) { return equal(value, int32Zero); }
- LValue notZero32(LValue value) { return notEqual(value, int32Zero); }
- LValue isZero64(LValue value) { return equal(value, int64Zero); }
- LValue notZero64(LValue value) { return notEqual(value, int64Zero); }
- LValue isNull(LValue value) { return equal(value, intPtrZero); }
- LValue notNull(LValue value) { return notEqual(value, intPtrZero); }
-
- LValue testIsZero8(LValue value, LValue mask) { return isZero8(bitAnd(value, mask)); }
- LValue testNonZero8(LValue value, LValue mask) { return notZero8(bitAnd(value, mask)); }
+
+ TypedPointer absolute(const void* address);
+
+ LValue load8SignExt32(LValue base, const AbstractHeap& field) { return load8SignExt32(address(base, field)); }
+ LValue load8ZeroExt32(LValue base, const AbstractHeap& field) { return load8ZeroExt32(address(base, field)); }
+ LValue load16SignExt32(LValue base, const AbstractHeap& field) { return load16SignExt32(address(base, field)); }
+ LValue load16ZeroExt32(LValue base, const AbstractHeap& field) { return load16ZeroExt32(address(base, field)); }
+ LValue load32(LValue base, const AbstractHeap& field) { return load32(address(base, field)); }
+ LValue load64(LValue base, const AbstractHeap& field) { return load64(address(base, field)); }
+ LValue loadPtr(LValue base, const AbstractHeap& field) { return loadPtr(address(base, field)); }
+ LValue loadDouble(LValue base, const AbstractHeap& field) { return loadDouble(address(base, field)); }
+ void store32(LValue value, LValue base, const AbstractHeap& field) { store32(value, address(base, field)); }
+ void store64(LValue value, LValue base, const AbstractHeap& field) { store64(value, address(base, field)); }
+ void storePtr(LValue value, LValue base, const AbstractHeap& field) { storePtr(value, address(base, field)); }
+ void storeDouble(LValue value, LValue base, const AbstractHeap& field) { storeDouble(value, address(base, field)); }
+
+ // FIXME: Explore adding support for value range constraints to B3. Maybe it could be as simple as having
+ // a load instruction that guarantees that its result is non-negative.
+ // https://bugs.webkit.org/show_bug.cgi?id=151458
+ void ascribeRange(LValue, const ValueRange&) { }
+ LValue nonNegative32(LValue loadInstruction) { return loadInstruction; }
+ LValue load32NonNegative(TypedPointer pointer) { return load32(pointer); }
+ LValue load32NonNegative(LValue base, const AbstractHeap& field) { return load32(base, field); }
+
+ LValue equal(LValue, LValue);
+ LValue notEqual(LValue, LValue);
+ LValue above(LValue, LValue);
+ LValue aboveOrEqual(LValue, LValue);
+ LValue below(LValue, LValue);
+ LValue belowOrEqual(LValue, LValue);
+ LValue greaterThan(LValue, LValue);
+ LValue greaterThanOrEqual(LValue, LValue);
+ LValue lessThan(LValue, LValue);
+ LValue lessThanOrEqual(LValue, LValue);
+
+ LValue doubleEqual(LValue, LValue);
+ LValue doubleEqualOrUnordered(LValue, LValue);
+ LValue doubleNotEqualOrUnordered(LValue, LValue);
+ LValue doubleLessThan(LValue, LValue);
+ LValue doubleLessThanOrEqual(LValue, LValue);
+ LValue doubleGreaterThan(LValue, LValue);
+ LValue doubleGreaterThanOrEqual(LValue, LValue);
+ LValue doubleNotEqualAndOrdered(LValue, LValue);
+ LValue doubleLessThanOrUnordered(LValue, LValue);
+ LValue doubleLessThanOrEqualOrUnordered(LValue, LValue);
+ LValue doubleGreaterThanOrUnordered(LValue, LValue);
+ LValue doubleGreaterThanOrEqualOrUnordered(LValue, LValue);
+
+ LValue isZero32(LValue);
+ LValue notZero32(LValue);
+ LValue isZero64(LValue);
+ LValue notZero64(LValue);
+ LValue isNull(LValue value) { return isZero64(value); }
+ LValue notNull(LValue value) { return notZero64(value); }
+
LValue testIsZero32(LValue value, LValue mask) { return isZero32(bitAnd(value, mask)); }
LValue testNonZero32(LValue value, LValue mask) { return notZero32(bitAnd(value, mask)); }
LValue testIsZero64(LValue value, LValue mask) { return isZero64(bitAnd(value, mask)); }
LValue testNonZero64(LValue value, LValue mask) { return notZero64(bitAnd(value, mask)); }
-
- LValue select(LValue value, LValue taken, LValue notTaken) { return buildSelect(m_builder, value, taken, notTaken); }
- LValue extractValue(LValue aggVal, unsigned index) { return buildExtractValue(m_builder, aggVal, index); }
-
- LValue fence(LAtomicOrdering ordering = LLVMAtomicOrderingSequentiallyConsistent, SynchronizationScope scope = CrossThread) { return buildFence(m_builder, ordering, scope); }
- LValue fenceAcqRel() { return fence(LLVMAtomicOrderingAcquireRelease); }
-
- template<typename VectorType>
- LValue call(LValue function, const VectorType& vector) { return buildCall(m_builder, function, vector); }
- LValue call(LValue function) { return buildCall(m_builder, function); }
- LValue call(LValue function, LValue arg1) { return buildCall(m_builder, function, arg1); }
- LValue call(LValue function, LValue arg1, LValue arg2) { return buildCall(m_builder, function, arg1, arg2); }
- LValue call(LValue function, LValue arg1, LValue arg2, LValue arg3) { return buildCall(m_builder, function, arg1, arg2, arg3); }
- LValue call(LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4) { return buildCall(m_builder, function, arg1, arg2, arg3, arg4); }
- LValue call(LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5) { return buildCall(m_builder, function, arg1, arg2, arg3, arg4, arg5); }
- LValue call(LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5, LValue arg6) { return buildCall(m_builder, function, arg1, arg2, arg3, arg4, arg5, arg6); }
- LValue call(LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5, LValue arg6, LValue arg7) { return buildCall(m_builder, function, arg1, arg2, arg3, arg4, arg5, arg6, arg7); }
- LValue call(LValue function, LValue arg1, LValue arg2, LValue arg3, LValue arg4, LValue arg5, LValue arg6, LValue arg7, LValue arg8) { return buildCall(m_builder, function, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); }
-
- template<typename FunctionType>
- LValue operation(FunctionType function)
- {
- return intToPtr(constIntPtr(function), pointerType(operationType(function)));
- }
-
- void jump(LBasicBlock destination) { buildBr(m_builder, destination); }
- void branch(LValue condition, LBasicBlock taken, LBasicBlock notTaken) { buildCondBr(m_builder, condition, taken, notTaken); }
+ LValue testIsZeroPtr(LValue value, LValue mask) { return isNull(bitAnd(value, mask)); }
+ LValue testNonZeroPtr(LValue value, LValue mask) { return notNull(bitAnd(value, mask)); }
+
+ LValue select(LValue value, LValue taken, LValue notTaken);
+
template<typename VectorType>
- void switchInstruction(LValue value, const VectorType& cases, LBasicBlock fallThrough) { buildSwitch(m_builder, value, cases, fallThrough); }
- void ret(LValue value) { buildRet(m_builder, value); }
-
- void unreachable() { buildUnreachable(m_builder); }
-
- void trap()
+ LValue call(LType type, LValue function, const VectorType& vector)
{
- call(trapIntrinsic());
+ B3::CCallValue* result = m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function);
+ result->children().appendVector(vector);
+ return result;
}
-
- void crashNonTerminal()
+ LValue call(LType type, LValue function) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function); }
+ LValue call(LType type, LValue function, LValue arg1) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1); }
+ template<typename... Args>
+ LValue call(LType type, LValue function, LValue arg1, Args... args) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1, args...); }
+
+ template<typename Function, typename... Args>
+ LValue callWithoutSideEffects(B3::Type type, Function function, LValue arg1, Args... args)
{
- call(intToPtr(constIntPtr(abort), pointerType(functionType(voidType))));
+ return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), B3::Effects::none(),
+ constIntPtr(bitwise_cast<void*>(function)), arg1, args...);
}
- void crash()
+
+ template<typename FunctionType>
+ LValue operation(FunctionType function) { return constIntPtr(bitwise_cast<void*>(function)); }
+
+ void jump(LBasicBlock);
+ void branch(LValue condition, LBasicBlock taken, Weight takenWeight, LBasicBlock notTaken, Weight notTakenWeight);
+ void branch(LValue condition, WeightedTarget taken, WeightedTarget notTaken)
{
- crashNonTerminal();
- unreachable();
+ branch(condition, taken.target(), taken.weight(), notTaken.target(), notTaken.weight());
}
+
+ // Branches to an already-created handler if true, "falls through" if false. Fall-through is
+ // simulated by creating a continuation for you.
+ void check(LValue condition, WeightedTarget taken, Weight notTakenWeight);
- ValueFromBlock anchor(LValue value)
+ // Same as check(), but uses Weight::inverse() to compute the notTakenWeight.
+ void check(LValue condition, WeightedTarget taken);
+
+ template<typename VectorType>
+ void switchInstruction(LValue value, const VectorType& cases, LBasicBlock fallThrough, Weight fallThroughWeight)
{
- return ValueFromBlock(value, m_block);
+ B3::SwitchValue* switchValue = m_block->appendNew<B3::SwitchValue>(m_proc, origin(), value);
+ switchValue->setFallThrough(B3::FrequentedBlock(fallThrough));
+ for (const SwitchCase& switchCase : cases) {
+ int64_t value = switchCase.value()->asInt();
+ B3::FrequentedBlock target(switchCase.target(), switchCase.weight().frequencyClass());
+ switchValue->appendCase(B3::SwitchCase(value, target));
+ }
}
+
+ void ret(LValue);
+
+ void unreachable();
- LValue m_function;
+ void appendSuccessor(WeightedTarget);
+
+ B3::CheckValue* speculate(LValue);
+ B3::CheckValue* speculateAdd(LValue, LValue);
+ B3::CheckValue* speculateSub(LValue, LValue);
+ B3::CheckValue* speculateMul(LValue, LValue);
+
+ B3::PatchpointValue* patchpoint(LType);
+
+ void trap();
+
+ ValueFromBlock anchor(LValue);
+
+ void incrementSuperSamplerCount();
+ void decrementSuperSamplerCount();
+
+#if PLATFORM(COCOA)
+#pragma mark - States
+#endif
+ B3::Procedure& m_proc;
+
+ DFG::Node* m_origin { nullptr };
+ LBasicBlock m_block { nullptr };
+ LBasicBlock m_nextBlock { nullptr };
+
AbstractHeapRepository* m_heaps;
- LBuilder m_builder;
- LBasicBlock m_block;
- LBasicBlock m_nextBlock;
+
+ double m_frequency { 1 };
+
+private:
+ OrderMaker<LBasicBlock> m_blockOrder;
};
-#define FTL_NEW_BLOCK(output, nameArguments) \
- (LIKELY(!::JSC::DFG::verboseCompilationEnabled()) \
- ? (output).newBlock() \
- : (output).newBlock((toCString nameArguments).data()))
+template<typename... Params>
+inline LValue Output::phi(LType type, ValueFromBlock value, Params... theRest)
+{
+ LValue phiNode = phi(type);
+ addIncomingToPhi(phiNode, value, theRest...);
+ return phiNode;
+}
-} } // namespace JSC::FTL
+template<typename VectorType>
+inline LValue Output::phi(LType type, const VectorType& vector)
+{
+ LValue phiNode = phi(type);
+ for (const ValueFromBlock& valueFromBlock : vector)
+ addIncomingToPhi(phiNode, valueFromBlock);
+ return phiNode;
+}
-#endif // ENABLE(FTL_JIT)
+template<typename... Params>
+inline void Output::addIncomingToPhi(LValue phi, ValueFromBlock value, Params... theRest)
+{
+ addIncomingToPhi(phi, value);
+ addIncomingToPhi(phi, theRest...);
+}
+
+#if COMPILER(GCC_OR_CLANG)
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC_OR_CLANG)
-#endif // FTLOutput_h
+} } // namespace JSC::FTL
+#endif // ENABLE(FTL_JIT)
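For orientation, here is a minimal sketch of how the branch/anchor/phi API declared above is typically used during lowering. It is not taken from the patch: m_out is assumed to be an Output&, isInt32 an LValue produced earlier, newBlock()/appendTo() assumed block-management helpers declared elsewhere in FTLOutput.h, usually()/rarely() assumed WeightedTarget convenience constructors, and Int32 B3's 32-bit integer type.

    LBasicBlock fastPath = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Weighted two-way branch: the fast path is expected to be hot.
    m_out.branch(isInt32, usually(fastPath), rarely(slowPath));

    m_out.appendTo(fastPath, slowPath);
    ValueFromBlock fastResult = m_out.anchor(m_out.int32Zero);
    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);
    ValueFromBlock slowResult = m_out.anchor(m_out.int32One);
    m_out.jump(continuation);

    m_out.appendTo(continuation);
    // The variadic phi() template defined above merges the two anchored values.
    LValue result = m_out.phi(Int32, fastResult, slowResult);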
diff --git a/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp
new file mode 100644
index 000000000..6dcc5ee87
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLPatchpointExceptionHandle.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "B3StackmapGenerationParams.h"
+#include "FTLExceptionTarget.h"
+#include "FTLOSRExit.h"
+#include "FTLOSRExitHandle.h"
+#include "FTLState.h"
+
+namespace JSC { namespace FTL {
+
+using namespace DFG;
+
+RefPtr<PatchpointExceptionHandle> PatchpointExceptionHandle::create(
+ State& state, OSRExitDescriptor* descriptor, NodeOrigin origin, unsigned offset,
+ const HandlerInfo& handler)
+{
+ return adoptRef(new PatchpointExceptionHandle(state, descriptor, origin, offset, handler));
+}
+
+RefPtr<PatchpointExceptionHandle> PatchpointExceptionHandle::defaultHandle(State& state)
+{
+ if (!state.defaultExceptionHandle) {
+ state.defaultExceptionHandle = adoptRef(
+ new PatchpointExceptionHandle(state, nullptr, NodeOrigin(), 0, HandlerInfo()));
+ }
+ return state.defaultExceptionHandle;
+}
+
+PatchpointExceptionHandle::~PatchpointExceptionHandle()
+{
+}
+
+RefPtr<ExceptionTarget> PatchpointExceptionHandle::scheduleExitCreation(
+ const B3::StackmapGenerationParams& params)
+{
+ if (!m_descriptor) {
+        // NOTE: This object could be a singleton; however, we usually toss the ExceptionHandler
+        // object shortly after creation.
+ bool isDefaultHandler = true;
+ return adoptRef(
+ new ExceptionTarget(isDefaultHandler, m_state.exceptionHandler, nullptr));
+ }
+ bool isDefaultHandler = false;
+ return adoptRef(new ExceptionTarget(isDefaultHandler, { }, createHandle(ExceptionCheck, params)));
+}
+
+void PatchpointExceptionHandle::scheduleExitCreationForUnwind(
+ const B3::StackmapGenerationParams& params, CallSiteIndex callSiteIndex)
+{
+ if (!m_descriptor)
+ return;
+
+ RefPtr<OSRExitHandle> handle = createHandle(GenericUnwind, params);
+
+ handle->exit.m_exceptionHandlerCallSiteIndex = callSiteIndex;
+
+ HandlerInfo handler = m_handler;
+ params.addLatePath(
+ [handle, handler, callSiteIndex] (CCallHelpers& jit) {
+ CodeBlock* codeBlock = jit.codeBlock();
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ HandlerInfo newHandler = handler;
+ newHandler.start = callSiteIndex.bits();
+ newHandler.end = callSiteIndex.bits() + 1;
+ newHandler.nativeCode = linkBuffer.locationOf(handle->label);
+ codeBlock->appendExceptionHandler(newHandler);
+ });
+ });
+}
+
+PatchpointExceptionHandle::PatchpointExceptionHandle(
+ State& state, OSRExitDescriptor* descriptor, NodeOrigin origin, unsigned offset,
+ const HandlerInfo& handler)
+ : m_state(state)
+ , m_descriptor(descriptor)
+ , m_origin(origin)
+ , m_offset(offset)
+ , m_handler(handler)
+{
+}
+
+RefPtr<OSRExitHandle> PatchpointExceptionHandle::createHandle(
+ ExitKind kind, const B3::StackmapGenerationParams& params)
+{
+ return m_descriptor->emitOSRExitLater(
+ m_state, kind, m_origin, params, m_offset);
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h
new file mode 100644
index 000000000..523369ba3
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "DFGCommon.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "DFGNodeOrigin.h"
+#include "ExitKind.h"
+#include "HandlerInfo.h"
+#include <wtf/Ref.h>
+#include <wtf/ThreadSafeRefCounted.h>
+
+namespace JSC {
+
+namespace B3 {
+class StackmapGenerationParams;
+} // namespace B3
+
+namespace FTL {
+
+class ExceptionTarget;
+class State;
+struct OSRExitDescriptor;
+struct OSRExitHandle;
+
+class PatchpointExceptionHandle : public ThreadSafeRefCounted<PatchpointExceptionHandle> {
+public:
+ static RefPtr<PatchpointExceptionHandle> create(
+ State&, OSRExitDescriptor*, DFG::NodeOrigin, unsigned offset, const HandlerInfo&);
+
+ static RefPtr<PatchpointExceptionHandle> defaultHandle(State&);
+
+ ~PatchpointExceptionHandle();
+
+ // Note that you can use this handle to schedule any number of exits. This capability is here for
+ // two reasons:
+ //
+ // - B3 code duplication. B3 could take a patchpoint and turn it into multiple patchpoints if it
+ // duplicates code. Duplicating code is legal since you can do it without changing the behavior
+ // of the program. One example is tail duplication. Another is jump threading. Yet another is
+ // path specialization. You will have one PatchpointExceptionHandle per patchpoint you create
+ // during DFG->B3 lowering, and that patchpoint will have a generator that calls
+ // handle->scheduleBlah(). That generator will be called multiple times if your patchpoint got
+ // duplicated.
+ //
+ // - Combination of unwind and non-unwind exception handlers inside one patchpoint. A GetById may
+ // need both an exception handler that serves as an unwind target and an exception handler that
+ // is branched to directly for operation calls emitted inside the patchpoint. In that case,
+ // you'll call both scheduleExitCreation() and scheduleExitCreationForUnwind() on the same
+ // handle.
+
+ // Schedules the creation of an OSR exit jump destination. You don't know when this will be
+ // created, but it will happen before linking. You can link jumps to it during link time. That's
+ // why this returns an ExceptionTarget. That will contain the jump destination (target->label())
+ // at link time. This function should be used for exceptions from C calls.
+ RefPtr<ExceptionTarget> scheduleExitCreation(const B3::StackmapGenerationParams&);
+
+ // Schedules the creation of an OSR exit jump destination, and ensures that it gets associated
+ // with the handler for some callsite index. This function should be used for exceptions from JS.
+ void scheduleExitCreationForUnwind(const B3::StackmapGenerationParams&, CallSiteIndex);
+
+private:
+ PatchpointExceptionHandle(
+ State&, OSRExitDescriptor*, DFG::NodeOrigin, unsigned offset, const HandlerInfo&);
+
+ RefPtr<OSRExitHandle> createHandle(ExitKind, const B3::StackmapGenerationParams&);
+
+ State& m_state;
+ OSRExitDescriptor* m_descriptor;
+ DFG::NodeOrigin m_origin;
+ unsigned m_offset;
+ HandlerInfo m_handler;
+};
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
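To make the scheduling contract above concrete, here is a hedged sketch of a patchpoint generator using the handle; it is not from the patch. The patchpoint, handle, and surrounding lowering state are assumed to exist, the generator shape follows B3::PatchpointValue::setGenerator(), and the label() call on the returned ExceptionTarget is the one mentioned in the comment above (its exact signature lives in FTLExceptionTarget.h).

    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
            // Request an exit target now; the exit code itself is emitted later, before
            // linking, which is why an ExceptionTarget is returned instead of a label.
            RefPtr<ExceptionTarget> target = handle->scheduleExitCreation(params);

            CCallHelpers::Jump hadException = jit.emitExceptionCheck();
            jit.addLinkTask(
                [=] (LinkBuffer& linkBuffer) {
                    // target->label(linkBuffer) is assumed, per the comment above.
                    linkBuffer.link(hadException, target->label(linkBuffer));
                });
        });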
diff --git a/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp
new file mode 100644
index 000000000..70770dd81
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLRecoveryOpcode.h"
+
+#if ENABLE(FTL_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::FTL::RecoveryOpcode opcode)
+{
+ switch (opcode) {
+ case JSC::FTL::AddRecovery:
+ out.print("Add");
+ return;
+ case JSC::FTL::SubRecovery:
+ out.print("Sub");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h
new file mode 100644
index 000000000..70e7e4ee6
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+namespace JSC { namespace FTL {
+
+enum RecoveryOpcode {
+ AddRecovery,
+ SubRecovery
+};
+
+} } // namespace JSC::FTL
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::FTL::RecoveryOpcode);
+
+} // namespace WTF
+
+#endif // ENABLE(FTL_JIT)
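Because printInternal() is declared above, RecoveryOpcode values can be handed straight to WTF's printing helpers. A small sketch, assuming dataLog() from wtf/DataLog.h:

    dataLog("recovery opcode: ", JSC::FTL::AddRecovery, "\n"); // prints "recovery opcode: Add"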
diff --git a/Source/JavaScriptCore/ftl/FTLSaveRestore.cpp b/Source/JavaScriptCore/ftl/FTLSaveRestore.cpp
new file mode 100644
index 000000000..c752fd22f
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLSaveRestore.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLSaveRestore.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+#include "RegisterSet.h"
+
+namespace JSC { namespace FTL {
+
+static size_t bytesForGPRs()
+{
+ return MacroAssembler::numberOfRegisters() * sizeof(int64_t);
+}
+
+static size_t bytesForFPRs()
+{
+    // FIXME: It might be worthwhile to save the full state of the FP registers at some point.
+    // Right now we don't need this, since we only do the save/restore just prior to OSR exit, and
+    // OSR exit is guaranteed to need only the double portion of the FP registers.
+ return MacroAssembler::numberOfFPRegisters() * sizeof(double);
+}
+
+size_t requiredScratchMemorySizeInBytes()
+{
+ return bytesForGPRs() + bytesForFPRs();
+}
+
+size_t offsetOfGPR(GPRReg reg)
+{
+ return MacroAssembler::registerIndex(reg) * sizeof(int64_t);
+}
+
+size_t offsetOfFPR(FPRReg reg)
+{
+ return bytesForGPRs() + MacroAssembler::fpRegisterIndex(reg) * sizeof(double);
+}
+
+size_t offsetOfReg(Reg reg)
+{
+ if (reg.isGPR())
+ return offsetOfGPR(reg.gpr());
+ return offsetOfFPR(reg.fpr());
+}
+
+namespace {
+
+struct Regs {
+ Regs()
+ {
+ special = RegisterSet::stackRegisters();
+ special.merge(RegisterSet::reservedHardwareRegisters());
+
+ first = MacroAssembler::firstRegister();
+ while (special.get(first))
+ first = MacroAssembler::nextRegister(first);
+ second = MacroAssembler::nextRegister(first);
+ while (special.get(second))
+ second = MacroAssembler::nextRegister(second);
+ }
+
+ RegisterSet special;
+ GPRReg first;
+ GPRReg second;
+};
+
+} // anonymous namespace
+
+void saveAllRegisters(MacroAssembler& jit, char* scratchMemory)
+{
+ Regs regs;
+
+ // Get the first register out of the way, so that we can use it as a pointer.
+ jit.poke64(regs.first, 0);
+ jit.move(MacroAssembler::TrustedImmPtr(scratchMemory), regs.first);
+
+ // Get all of the other GPRs out of the way.
+ for (MacroAssembler::RegisterID reg = regs.second; reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (regs.special.get(reg))
+ continue;
+ jit.store64(reg, MacroAssembler::Address(regs.first, offsetOfGPR(reg)));
+ }
+
+ // Restore the first register into the second one and save it.
+ jit.peek64(regs.second, 0);
+ jit.store64(regs.second, MacroAssembler::Address(regs.first, offsetOfGPR(regs.first)));
+
+    // Finally, save all FPRs.
+ for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (regs.special.get(reg))
+ continue;
+ jit.storeDouble(reg, MacroAssembler::Address(regs.first, offsetOfFPR(reg)));
+ }
+}
+
+void restoreAllRegisters(MacroAssembler& jit, char* scratchMemory)
+{
+ Regs regs;
+
+ // Give ourselves a pointer to the scratch memory.
+ jit.move(MacroAssembler::TrustedImmPtr(scratchMemory), regs.first);
+
+    // Restore all FPRs.
+ for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (regs.special.get(reg))
+ continue;
+ jit.loadDouble(MacroAssembler::Address(regs.first, offsetOfFPR(reg)), reg);
+ }
+
+ for (MacroAssembler::RegisterID reg = regs.second; reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (regs.special.get(reg))
+ continue;
+ jit.load64(MacroAssembler::Address(regs.first, offsetOfGPR(reg)), reg);
+ }
+
+ jit.load64(MacroAssembler::Address(regs.first, offsetOfGPR(regs.first)), regs.first);
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLSaveRestore.h b/Source/JavaScriptCore/ftl/FTLSaveRestore.h
new file mode 100644
index 000000000..c624641c7
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLSaveRestore.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "Reg.h"
+
+namespace JSC {
+
+class MacroAssembler;
+
+namespace FTL {
+
+size_t requiredScratchMemorySizeInBytes();
+
+size_t offsetOfReg(Reg);
+size_t offsetOfGPR(GPRReg);
+size_t offsetOfFPR(FPRReg);
+
+// Assumes that top-of-stack can be used as a pointer-sized scratchpad. Saves all of
+// the registers into the scratch buffer such that RegisterID * sizeof(int64_t) is the
+// offset of every register.
+void saveAllRegisters(MacroAssembler& jit, char* scratchMemory);
+
+void restoreAllRegisters(MacroAssembler& jit, char* scratchMemory);
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
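The offset helpers above define the scratch-buffer layout that saveAllRegisters() writes. As a hedged illustration (not from the patch, and assuming <string.h> plus the headers above), code that holds the same char* buffer could read a saved register back out like this; the helper names are made up for the example:

    static int64_t savedGPRValue(char* buffer, GPRReg reg)
    {
        int64_t value;
        memcpy(&value, buffer + offsetOfGPR(reg), sizeof(value)); // GPRs come first, 8 bytes each
        return value;
    }

    static double savedFPRValue(char* buffer, FPRReg reg)
    {
        double value;
        memcpy(&value, buffer + offsetOfFPR(reg), sizeof(value)); // FPR area follows the GPR area
        return value;
    }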
diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp b/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
new file mode 100644
index 000000000..eeba48274
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLSlowPathCall.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "CCallHelpers.h"
+#include "FTLState.h"
+#include "FTLThunks.h"
+#include "GPRInfo.h"
+#include "JSCInlines.h"
+
+namespace JSC { namespace FTL {
+
+// This code relies on us being 64-bit. FTL is currently always 64-bit.
+static const size_t wordSize = 8;
+
+SlowPathCallContext::SlowPathCallContext(
+ RegisterSet usedRegisters, CCallHelpers& jit, unsigned numArgs, GPRReg returnRegister)
+ : m_jit(jit)
+ , m_numArgs(numArgs)
+ , m_returnRegister(returnRegister)
+{
+ // We don't care that you're using callee-save, stack, or hardware registers.
+ usedRegisters.exclude(RegisterSet::stackRegisters());
+ usedRegisters.exclude(RegisterSet::reservedHardwareRegisters());
+ usedRegisters.exclude(RegisterSet::calleeSaveRegisters());
+
+ // The return register doesn't need to be saved.
+ if (m_returnRegister != InvalidGPRReg)
+ usedRegisters.clear(m_returnRegister);
+
+ size_t stackBytesNeededForReturnAddress = wordSize;
+
+ m_offsetToSavingArea =
+ (std::max(m_numArgs, NUMBER_OF_ARGUMENT_REGISTERS) - NUMBER_OF_ARGUMENT_REGISTERS) * wordSize;
+
+ for (unsigned i = std::min(NUMBER_OF_ARGUMENT_REGISTERS, numArgs); i--;)
+ m_argumentRegisters.set(GPRInfo::toArgumentRegister(i));
+ m_callingConventionRegisters.merge(m_argumentRegisters);
+ if (returnRegister != InvalidGPRReg)
+ m_callingConventionRegisters.set(GPRInfo::returnValueGPR);
+ m_callingConventionRegisters.filter(usedRegisters);
+
+ unsigned numberOfCallingConventionRegisters =
+ m_callingConventionRegisters.numberOfSetRegisters();
+
+ size_t offsetToThunkSavingArea =
+ m_offsetToSavingArea +
+ numberOfCallingConventionRegisters * wordSize;
+
+ m_stackBytesNeeded =
+ offsetToThunkSavingArea +
+ stackBytesNeededForReturnAddress +
+ (usedRegisters.numberOfSetRegisters() - numberOfCallingConventionRegisters) * wordSize;
+
+ m_stackBytesNeeded = (m_stackBytesNeeded + stackAlignmentBytes() - 1) & ~(stackAlignmentBytes() - 1);
+
+ m_jit.subPtr(CCallHelpers::TrustedImm32(m_stackBytesNeeded), CCallHelpers::stackPointerRegister);
+
+ m_thunkSaveSet = usedRegisters;
+
+ // This relies on all calling convention registers also being temp registers.
+ unsigned stackIndex = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ GPRReg reg = GPRInfo::toRegister(i);
+ if (!m_callingConventionRegisters.get(reg))
+ continue;
+ m_jit.storePtr(reg, CCallHelpers::Address(CCallHelpers::stackPointerRegister, m_offsetToSavingArea + (stackIndex++) * wordSize));
+ m_thunkSaveSet.clear(reg);
+ }
+
+ m_offset = offsetToThunkSavingArea;
+}
+
+SlowPathCallContext::~SlowPathCallContext()
+{
+ if (m_returnRegister != InvalidGPRReg)
+ m_jit.move(GPRInfo::returnValueGPR, m_returnRegister);
+
+ unsigned stackIndex = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ GPRReg reg = GPRInfo::toRegister(i);
+ if (!m_callingConventionRegisters.get(reg))
+ continue;
+ m_jit.loadPtr(CCallHelpers::Address(CCallHelpers::stackPointerRegister, m_offsetToSavingArea + (stackIndex++) * wordSize), reg);
+ }
+
+ m_jit.addPtr(CCallHelpers::TrustedImm32(m_stackBytesNeeded), CCallHelpers::stackPointerRegister);
+}
+
+SlowPathCallKey SlowPathCallContext::keyWithTarget(void* callTarget) const
+{
+ return SlowPathCallKey(m_thunkSaveSet, callTarget, m_argumentRegisters, m_offset);
+}
+
+SlowPathCall SlowPathCallContext::makeCall(void* callTarget)
+{
+ SlowPathCall result = SlowPathCall(m_jit.call(), keyWithTarget(callTarget));
+
+ m_jit.addLinkTask(
+ [result] (LinkBuffer& linkBuffer) {
+ VM& vm = linkBuffer.vm();
+
+ MacroAssemblerCodeRef thunk =
+ vm.ftlThunks->getSlowPathCallThunk(vm, result.key());
+
+ linkBuffer.link(result.call(), CodeLocationLabel(thunk.code()));
+ });
+
+ return result;
+}
+
+CallSiteIndex callSiteIndexForCodeOrigin(State& state, CodeOrigin codeOrigin)
+{
+ if (codeOrigin)
+ return state.jitCode->common.addCodeOrigin(codeOrigin);
+ return CallSiteIndex();
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCall.h b/Source/JavaScriptCore/ftl/FTLSlowPathCall.h
new file mode 100644
index 000000000..1d6eb9623
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLSlowPathCall.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "CCallHelpers.h"
+#include "FTLSlowPathCallKey.h"
+#include "JITOperations.h"
+
+namespace JSC { namespace FTL {
+
+class State;
+
+class SlowPathCall {
+public:
+ SlowPathCall() { }
+
+ SlowPathCall(MacroAssembler::Call call, const SlowPathCallKey& key)
+ : m_call(call)
+ , m_key(key)
+ {
+ }
+
+ MacroAssembler::Call call() const { return m_call; }
+ SlowPathCallKey key() const { return m_key; }
+
+private:
+ MacroAssembler::Call m_call;
+ SlowPathCallKey m_key;
+};
+
+// This is an RAII helper that sets up the necessary stack sizes and offsets for the call.
+class SlowPathCallContext {
+public:
+ SlowPathCallContext(RegisterSet usedRegisters, CCallHelpers&, unsigned numArgs, GPRReg returnRegister);
+ ~SlowPathCallContext();
+
+ // NOTE: The call that this returns is already going to be linked by the JIT using addLinkTask(),
+ // so there is no need for you to link it yourself.
+ SlowPathCall makeCall(void* callTarget);
+
+private:
+ SlowPathCallKey keyWithTarget(void* callTarget) const;
+
+ RegisterSet m_argumentRegisters;
+ RegisterSet m_callingConventionRegisters;
+ CCallHelpers& m_jit;
+ unsigned m_numArgs;
+ GPRReg m_returnRegister;
+ size_t m_offsetToSavingArea;
+ size_t m_stackBytesNeeded;
+ RegisterSet m_thunkSaveSet;
+ ptrdiff_t m_offset;
+};
+
+template<typename... ArgumentTypes>
+SlowPathCall callOperation(
+ const RegisterSet& usedRegisters, CCallHelpers& jit, CCallHelpers::JumpList* exceptionTarget,
+ FunctionPtr function, GPRReg resultGPR, ArgumentTypes... arguments)
+{
+ SlowPathCall call;
+ {
+ SlowPathCallContext context(usedRegisters, jit, sizeof...(ArgumentTypes) + 1, resultGPR);
+ jit.setupArgumentsWithExecState(arguments...);
+ call = context.makeCall(function.value());
+ }
+ if (exceptionTarget)
+ exceptionTarget->append(jit.emitExceptionCheck());
+ return call;
+}
+
+template<typename... ArgumentTypes>
+SlowPathCall callOperation(
+ const RegisterSet& usedRegisters, CCallHelpers& jit, CallSiteIndex callSiteIndex,
+ CCallHelpers::JumpList* exceptionTarget, FunctionPtr function, GPRReg resultGPR,
+ ArgumentTypes... arguments)
+{
+ if (callSiteIndex) {
+ jit.store32(
+ CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+ CCallHelpers::tagFor(CallFrameSlot::argumentCount));
+ }
+ return callOperation(usedRegisters, jit, exceptionTarget, function, resultGPR, arguments...);
+}
+
+CallSiteIndex callSiteIndexForCodeOrigin(State&, CodeOrigin);
+
+template<typename... ArgumentTypes>
+SlowPathCall callOperation(
+ State& state, const RegisterSet& usedRegisters, CCallHelpers& jit, CodeOrigin codeOrigin,
+ CCallHelpers::JumpList* exceptionTarget, FunctionPtr function, GPRReg result, ArgumentTypes... arguments)
+{
+ return callOperation(
+ usedRegisters, jit, callSiteIndexForCodeOrigin(state, codeOrigin), exceptionTarget, function,
+ result, arguments...);
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
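A hedged sketch of how an FTL patchpoint or IC slow path might use the callOperation() helpers above; it is not from the patch. operationDoThing is a hypothetical JIT operation, and usedRegisters, jit, exceptions (a CCallHelpers::JumpList), resultGPR, and baseGPR are assumed to be supplied by the surrounding generator:

    SlowPathCall call = callOperation(
        usedRegisters, jit, &exceptions,
        FunctionPtr(operationDoThing), resultGPR, baseGPR);
    // Nothing more to do: makeCall() already queued an addLinkTask() that points the call
    // at the shared register-save/restore thunk identified by call.key().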
diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompilationInfo.h b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp
index 796c9d691..4cc835d82 100644
--- a/Source/JavaScriptCore/ftl/FTLOSRExitCompilationInfo.h
+++ b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp
@@ -23,33 +23,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLOSRExitCompilationInfo_h
-#define FTLOSRExitCompilationInfo_h
-
-#include <wtf/Platform.h>
+#include "config.h"
+#include "FTLSlowPathCallKey.h"
#if ENABLE(FTL_JIT)
-#include "FTLAbbreviations.h"
-#include "MacroAssembler.h"
-
namespace JSC { namespace FTL {
-struct OSRExitCompilationInfo {
- OSRExitCompilationInfo()
- : m_isInvalidationPoint(false)
- {
- }
-
- MacroAssembler::Label m_thunkLabel;
- MacroAssembler::PatchableJump m_thunkJump;
- CodeLocationLabel m_thunkAddress;
- bool m_isInvalidationPoint;
-};
+void SlowPathCallKey::dump(PrintStream& out) const
+{
+ out.print("<usedRegisters = ", m_usedRegisters, ", offset = ", m_offset, ", callTarget = ", RawPointer(m_callTarget), ", argumentRegisters = ", m_argumentRegisters, ">");
+}
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-#endif // FTLOSRExitCompilationInfo_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h
new file mode 100644
index 000000000..f1161fbb2
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "RegisterSet.h"
+
+namespace JSC { namespace FTL {
+
+// This is used for creating some sanity in slow-path calls out of the FTL's inline
+// caches. The idea is that we don't want all of the register save/restore code to
+// be generated at each IC site. Instead, the IC slow path call site saves only
+// the registers needed for the arguments and arranges for there to be enough
+// space on top of the stack to save the remaining registers and the return PC. Then it
+// calls a shared thunk that saves the remaining registers. That thunk needs
+// to know the stack offset at which things get saved, along with the call target.
+
+// Note that the offset does *not* include the return PC that would be pushed on x86.
+
+class SlowPathCallKey {
+public:
+ SlowPathCallKey()
+ : m_callTarget(0)
+ , m_offset(0)
+ {
+ }
+
+ SlowPathCallKey(
+ const RegisterSet& set, void* callTarget, const RegisterSet& argumentRegisters,
+ ptrdiff_t offset)
+ : m_usedRegisters(set)
+ , m_callTarget(callTarget)
+ , m_argumentRegisters(argumentRegisters)
+ , m_offset(offset)
+ {
+ }
+
+ const RegisterSet& usedRegisters() const { return m_usedRegisters; }
+ void* callTarget() const { return m_callTarget; }
+ const RegisterSet& argumentRegisters() const { return m_argumentRegisters; }
+ ptrdiff_t offset() const { return m_offset; }
+
+ SlowPathCallKey withCallTarget(void* callTarget)
+ {
+ return SlowPathCallKey(usedRegisters(), callTarget, argumentRegisters(), offset());
+ }
+
+ void dump(PrintStream&) const;
+
+ enum EmptyValueTag { EmptyValue };
+ enum DeletedValueTag { DeletedValue };
+
+ SlowPathCallKey(EmptyValueTag)
+ : m_usedRegisters(RegisterSet::EmptyValue)
+ , m_callTarget(0)
+ , m_offset(0)
+ {
+ }
+
+ SlowPathCallKey(DeletedValueTag)
+ : m_usedRegisters(RegisterSet::DeletedValue)
+ , m_callTarget(0)
+ , m_offset(0)
+ {
+ }
+
+ bool isEmptyValue() const { return m_usedRegisters.isEmptyValue(); }
+ bool isDeletedValue() const { return m_usedRegisters.isDeletedValue(); }
+
+ bool operator==(const SlowPathCallKey& other) const
+ {
+ return m_usedRegisters == other.m_usedRegisters
+ && m_callTarget == other.m_callTarget
+ && m_offset == other.m_offset;
+ }
+ unsigned hash() const
+ {
+ return m_usedRegisters.hash() + PtrHash<void*>::hash(m_callTarget) + m_offset;
+ }
+
+private:
+ RegisterSet m_usedRegisters;
+ void* m_callTarget;
+ RegisterSet m_argumentRegisters;
+ ptrdiff_t m_offset;
+};
+
+struct SlowPathCallKeyHash {
+ static unsigned hash(const SlowPathCallKey& key) { return key.hash(); }
+ static bool equal(const SlowPathCallKey& a, const SlowPathCallKey& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = false;
+};
+
+} } // namespace JSC::FTL
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::FTL::SlowPathCallKey> {
+ typedef JSC::FTL::SlowPathCallKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::FTL::SlowPathCallKey> : public CustomHashTraits<JSC::FTL::SlowPathCallKey> { };
+
+} // namespace WTF
+
+#endif // ENABLE(FTL_JIT)
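The hash traits above exist so that thunks can be cached per key, one thunk per distinct combination of saved registers, offset, and call target. A hedged sketch of that idea (the real cache lives in FTLThunks; generateSlowPathCallThunk is hypothetical):

    HashMap<SlowPathCallKey, MacroAssemblerCodeRef> slowPathCallThunks;

    MacroAssemblerCodeRef slowPathCallThunkFor(VM& vm, const SlowPathCallKey& key)
    {
        auto result = slowPathCallThunks.add(key, MacroAssemblerCodeRef());
        if (result.isNewEntry)
            result.iterator->value = generateSlowPathCallThunk(vm, key); // hypothetical generator
        return result.iterator->value;
    }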
diff --git a/Source/JavaScriptCore/ftl/FTLExitArgumentList.h b/Source/JavaScriptCore/ftl/FTLStackmapArgumentList.h
index a466edc46..bbbfb641f 100644
--- a/Source/JavaScriptCore/ftl/FTLExitArgumentList.h
+++ b/Source/JavaScriptCore/ftl/FTLStackmapArgumentList.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,23 +23,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLExitArgumentList_h
-#define FTLExitArgumentList_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "FTLAbbreviations.h"
+#include "FTLAbbreviatedTypes.h"
#include <wtf/Vector.h>
namespace JSC { namespace FTL {
-typedef Vector<LValue, 16> ExitArgumentList;
+typedef Vector<LValue, 16> StackmapArgumentList;
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLExitArgumentList_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLState.cpp b/Source/JavaScriptCore/ftl/FTLState.cpp
index c263b264b..d62433700 100644
--- a/Source/JavaScriptCore/ftl/FTLState.cpp
+++ b/Source/JavaScriptCore/ftl/FTLState.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,16 +32,16 @@
#include "FTLForOSREntryJITCode.h"
#include "FTLJITCode.h"
#include "FTLJITFinalizer.h"
+#include "FTLPatchpointExceptionHandle.h"
+#include <stdio.h>
namespace JSC { namespace FTL {
+using namespace B3;
using namespace DFG;
State::State(Graph& graph)
: graph(graph)
- , context(llvm->ContextCreate())
- , module(0)
- , function(0)
, generatedFunction(0)
{
switch (graph.m_plan.mode) {
@@ -51,7 +51,7 @@ State::State(Graph& graph)
}
case FTLForOSREntryMode: {
RefPtr<ForOSREntryJITCode> code = adoptRef(new ForOSREntryJITCode());
- code->initializeEntryBuffer(graph.m_vm, graph.m_profiledBlock->m_numCalleeRegisters);
+ code->initializeEntryBuffer(graph.m_vm, graph.m_profiledBlock->m_numCalleeLocals);
code->setBytecodeIndex(graph.m_plan.osrEntryBytecodeIndex);
jitCode = code;
break;
@@ -60,20 +60,22 @@ State::State(Graph& graph)
RELEASE_ASSERT_NOT_REACHED();
break;
}
-
- finalizer = new JITFinalizer(graph.m_plan);
- graph.m_plan.finalizer = adoptPtr(finalizer);
-}
-State::~State()
-{
- llvm->ContextDispose(context);
+ graph.m_plan.finalizer = std::make_unique<JITFinalizer>(graph.m_plan);
+ finalizer = static_cast<JITFinalizer*>(graph.m_plan.finalizer.get());
+
+ proc = std::make_unique<Procedure>();
+
+ proc->setOriginPrinter(
+ [this] (PrintStream& out, B3::Origin origin) {
+ out.print("DFG:", bitwise_cast<Node*>(origin.data()));
+ });
+
+ proc->setFrontendData(&graph);
}
-void State::dumpState(const char* when)
+State::~State()
{
- dataLog("LLVM IR for ", CodeBlockWithJITType(graph.m_codeBlock, FTL::JITCode::FTLJIT), " ", when, ":\n");
- dumpModule(module);
}
} } // namespace JSC::FTL
diff --git a/Source/JavaScriptCore/ftl/FTLState.h b/Source/JavaScriptCore/ftl/FTLState.h
index c82db6728..bc434304d 100644
--- a/Source/JavaScriptCore/ftl/FTLState.h
+++ b/Source/JavaScriptCore/ftl/FTLState.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,23 +23,40 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLState_h
-#define FTLState_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
+#include "B3Procedure.h"
+#include "DFGCommon.h"
#include "DFGGraph.h"
-#include "FTLAbbreviations.h"
+#include "FTLAbbreviatedTypes.h"
#include "FTLGeneratedFunction.h"
-#include "FTLInlineCacheDescriptor.h"
#include "FTLJITCode.h"
#include "FTLJITFinalizer.h"
-#include "FTLStackMaps.h"
+#include <wtf/Box.h>
#include <wtf/Noncopyable.h>
-namespace JSC { namespace FTL {
+namespace JSC {
+
+namespace B3 {
+class PatchpointValue;
+class StackSlot;
+} // namespace B3
+
+namespace FTL {
+
+class PatchpointExceptionHandle;
+
+inline bool verboseCompilationEnabled()
+{
+ return DFG::verboseCompilationEnabled(DFG::FTLMode);
+}
+
+inline bool shouldDumpDisassembly()
+{
+ return DFG::shouldDumpDisassembly(DFG::FTLMode);
+}
class State {
WTF_MAKE_NONCOPYABLE(State);
@@ -51,24 +68,18 @@ public:
// None of these things is owned by State. It is the responsibility of
// FTL phases to properly manage the lifecycle of the module and function.
DFG::Graph& graph;
- LContext context;
- LModule module;
- LValue function;
+ std::unique_ptr<B3::Procedure> proc;
+ bool allocationFailed { false }; // Throw out the compilation once B3 returns.
RefPtr<JITCode> jitCode;
GeneratedFunction generatedFunction;
JITFinalizer* finalizer;
- SegmentedVector<GetByIdDescriptor> getByIds;
- SegmentedVector<PutByIdDescriptor> putByIds;
- Vector<CString> codeSectionNames;
- Vector<CString> dataSectionNames;
- RefCountedArray<LSectionWord> stackmapsSection;
-
- void dumpState(const char* when);
+ // Top-level exception handler. Jump here if you know that you have to genericUnwind() and there
+ // are no applicable catch blocks anywhere in the Graph.
+ RefPtr<PatchpointExceptionHandle> defaultExceptionHandle;
+ Box<CCallHelpers::Label> exceptionHandler { Box<CCallHelpers::Label>::create() };
+ B3::StackSlot* capturedValue { nullptr };
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLState_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLSwitchCase.h b/Source/JavaScriptCore/ftl/FTLSwitchCase.h
index e9f17d010..948e7462f 100644
--- a/Source/JavaScriptCore/ftl/FTLSwitchCase.h
+++ b/Source/JavaScriptCore/ftl/FTLSwitchCase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,36 +23,40 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLSwitchCase_h
-#define FTLSwitchCase_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
#include "FTLAbbreviatedTypes.h"
+#include "FTLWeight.h"
namespace JSC { namespace FTL {
class SwitchCase {
public:
- SwitchCase(LValue value, LBasicBlock target)
+ SwitchCase()
+ : m_value(nullptr)
+ , m_target(nullptr)
+ {
+ }
+
+ SwitchCase(LValue value, LBasicBlock target, Weight weight = Weight())
: m_value(value)
, m_target(target)
+ , m_weight(weight)
{
}
LValue value() const { return m_value; }
LBasicBlock target() const { return m_target; }
+ Weight weight() const { return m_weight; }
private:
LValue m_value;
LBasicBlock m_target;
+ Weight m_weight;
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLSwitchCase_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLThunks.cpp b/Source/JavaScriptCore/ftl/FTLThunks.cpp
index bf04af02a..cfbc75b7c 100644
--- a/Source/JavaScriptCore/ftl/FTLThunks.cpp
+++ b/Source/JavaScriptCore/ftl/FTLThunks.cpp
@@ -29,8 +29,10 @@
#if ENABLE(FTL_JIT)
#include "AssemblyHelpers.h"
+#include "DFGOSRExitCompilerCommon.h"
#include "FPRInfo.h"
#include "FTLOSRExitCompiler.h"
+#include "FTLOperations.h"
#include "FTLSaveRestore.h"
#include "GPRInfo.h"
#include "LinkBuffer.h"
@@ -39,29 +41,51 @@ namespace JSC { namespace FTL {
using namespace DFG;
-MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM& vm, const Location& location)
+enum class FrameAndStackAdjustmentRequirement {
+ Needed,
+ NotNeeded
+};
+
+static MacroAssemblerCodeRef genericGenerationThunkGenerator(
+ VM* vm, FunctionPtr generationFunction, const char* name, unsigned extraPopsToRestore, FrameAndStackAdjustmentRequirement frameAndStackAdjustmentRequirement)
{
- AssemblyHelpers jit(&vm, 0);
+ AssemblyHelpers jit(vm, 0);
+
+ if (frameAndStackAdjustmentRequirement == FrameAndStackAdjustmentRequirement::Needed) {
+ // This needs to happen before we use the scratch buffer because this function also uses the scratch buffer.
+ adjustFrameAndStackInOSRExitCompilerThunk<FTL::JITCode>(jit, vm, JITCode::FTLJIT);
+ }
- // Note that the "return address" will be the OSR exit ID.
+ // Note that the "return address" will be the ID that we pass to the generation function.
+
+ ptrdiff_t stackMisalignment = MacroAssembler::pushToSaveByteOffset();
// Pretend that we're a C call frame.
- jit.push(MacroAssembler::framePointerRegister);
+ jit.pushToSave(MacroAssembler::framePointerRegister);
jit.move(MacroAssembler::stackPointerRegister, MacroAssembler::framePointerRegister);
- jit.push(GPRInfo::regT0);
- jit.push(GPRInfo::regT0);
+ stackMisalignment += MacroAssembler::pushToSaveByteOffset();
+
+ // Now create ourselves enough stack space to give saveAllRegisters() a scratch slot.
+ unsigned numberOfRequiredPops = 0;
+ do {
+ jit.pushToSave(GPRInfo::regT0);
+ stackMisalignment += MacroAssembler::pushToSaveByteOffset();
+ numberOfRequiredPops++;
+ } while (stackMisalignment % stackAlignmentBytes());
- ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(requiredScratchMemorySizeInBytes());
+ ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(requiredScratchMemorySizeInBytes());
char* buffer = static_cast<char*>(scratchBuffer->dataBuffer());
saveAllRegisters(jit, buffer);
// Tell GC mark phase how much of the scratch buffer is active during call.
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::nonArgGPR1);
- jit.storePtr(MacroAssembler::TrustedImmPtr(requiredScratchMemorySizeInBytes()), GPRInfo::nonArgGPR1);
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::nonArgGPR0);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(requiredScratchMemorySizeInBytes()), GPRInfo::nonArgGPR0);
- location.restoreInto(jit, buffer, GPRInfo::argumentGPR0, 1);
- jit.peek(GPRInfo::argumentGPR1, 3);
+ jit.loadPtr(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ jit.peek(
+ GPRInfo::argumentGPR1,
+ (stackMisalignment - MacroAssembler::pushToSaveByteOffset()) / sizeof(void*));
MacroAssembler::Call functionCall = jit.call();
// At this point we want to make a tail call to what was returned to us in the
@@ -71,24 +95,73 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM& vm, const Location& lo
jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
- // Prepare for tail call.
- jit.pop(GPRInfo::regT1);
- jit.pop(GPRInfo::regT1);
- jit.pop(MacroAssembler::framePointerRegister);
-
- // At this point we're sitting on the return address - so if we did a jump right now, the
- // tail-callee would be happy. Instead we'll stash the callee in the return address and then
- // restore all registers.
+ // Make sure we tell the GC that we're not using the scratch buffer anymore.
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT1);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(0), GPRInfo::regT1);
+ // Prepare for tail call.
+ while (numberOfRequiredPops--)
+ jit.popToRestore(GPRInfo::regT1);
+ jit.popToRestore(MacroAssembler::framePointerRegister);
+
+ // When we came in here, there was an additional thing pushed to the stack. Some clients want it
+ // popped before proceeding.
+ while (extraPopsToRestore--)
+ jit.popToRestore(GPRInfo::regT1);
+
+ // Put the return address wherever the return instruction wants it. On all platforms, this
+ // ensures that the return address is out of the way of register restoration.
jit.restoreReturnAddressBeforeReturn(GPRInfo::regT0);
-
+
restoreAllRegisters(jit, buffer);
jit.ret();
- LinkBuffer patchBuffer(vm, &jit, GLOBAL_THUNK_ID);
- patchBuffer.link(functionCall, compileFTLOSRExit);
- return FINALIZE_CODE(patchBuffer, ("FTL OSR exit generation thunk for callFrame at %s", toCString(location).data()));
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ patchBuffer.link(functionCall, generationFunction);
+ return FINALIZE_CODE(patchBuffer, ("%s", name));
+}
+
+MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
+{
+ unsigned extraPopsToRestore = 0;
+ return genericGenerationThunkGenerator(
+ vm, compileFTLOSRExit, "FTL OSR exit generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::Needed);
+}
+
+MacroAssemblerCodeRef lazySlowPathGenerationThunkGenerator(VM* vm)
+{
+ unsigned extraPopsToRestore = 1;
+ return genericGenerationThunkGenerator(
+ vm, compileFTLLazySlowPath, "FTL lazy slow path generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::NotNeeded);
+}
+
+static void registerClobberCheck(AssemblyHelpers& jit, RegisterSet dontClobber)
+{
+ if (!Options::clobberAllRegsInFTLICSlowPath())
+ return;
+
+ RegisterSet clobber = RegisterSet::allRegisters();
+ clobber.exclude(RegisterSet::reservedHardwareRegisters());
+ clobber.exclude(RegisterSet::stackRegisters());
+ clobber.exclude(RegisterSet::calleeSaveRegisters());
+ clobber.exclude(dontClobber);
+
+ GPRReg someGPR;
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (!clobber.get(reg) || !reg.isGPR())
+ continue;
+
+ jit.move(AssemblyHelpers::TrustedImm32(0x1337beef), reg.gpr());
+ someGPR = reg.gpr();
+ }
+
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (!clobber.get(reg) || !reg.isFPR())
+ continue;
+
+ jit.move64ToDouble(someGPR, reg.fpr());
+ }
}
MacroAssemblerCodeRef slowPathCallThunkGenerator(VM& vm, const SlowPathCallKey& key)
@@ -122,8 +195,10 @@ MacroAssemblerCodeRef slowPathCallThunkGenerator(VM& vm, const SlowPathCallKey&
jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR0);
jit.storePtr(GPRInfo::nonArgGPR0, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()));
- JITCompiler::Call call = jit.call();
+ registerClobberCheck(jit, key.argumentRegisters());
+ AssemblyHelpers::Call call = jit.call();
+
jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()), GPRInfo::nonPreservedNonReturnGPR);
jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
@@ -147,7 +222,7 @@ MacroAssemblerCodeRef slowPathCallThunkGenerator(VM& vm, const SlowPathCallKey&
jit.ret();
- LinkBuffer patchBuffer(vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(vm, jit, GLOBAL_THUNK_ID);
patchBuffer.link(call, FunctionPtr(key.callTarget()));
return FINALIZE_CODE(patchBuffer, ("FTL slow path call thunk for %s", toCString(key).data()));
}
diff --git a/Source/JavaScriptCore/ftl/FTLThunks.h b/Source/JavaScriptCore/ftl/FTLThunks.h
index bbcdbddc9..33e3b021b 100644
--- a/Source/JavaScriptCore/ftl/FTLThunks.h
+++ b/Source/JavaScriptCore/ftl/FTLThunks.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLThunks_h
-#define FTLThunks_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -41,7 +38,8 @@ class VM;
namespace FTL {
-MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM&, const Location&);
+MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM*);
+MacroAssemblerCodeRef lazySlowPathGenerationThunkGenerator(VM*);
MacroAssemblerCodeRef slowPathCallThunkGenerator(VM&, const SlowPathCallKey&);
template<typename KeyTypeArgument>
@@ -78,12 +76,6 @@ typename MapType::KeyType keyForThunk(MapType& map, MacroAssemblerCodePtr ptr)
class Thunks {
public:
- MacroAssemblerCodeRef getOSRExitGenerationThunk(VM& vm, const Location& location)
- {
- return generateIfNecessary(
- vm, m_osrExitThunks, location, osrExitGenerationThunkGenerator);
- }
-
MacroAssemblerCodeRef getSlowPathCallThunk(VM& vm, const SlowPathCallKey& key)
{
return generateIfNecessary(
@@ -96,12 +88,9 @@ public:
}
private:
- ThunkMap<Location> m_osrExitThunks;
ThunkMap<SlowPathCallKey> m_slowPathCallThunks;
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLTHunks_h
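A note on the signature change above: taking VM* rather than VM& plus a Location makes both generation thunks match JSC's ThunkGenerator shape, so a single generic thunk can be cached per VM instead of one thunk per exit location. A hedged usage sketch; the getCTIStub call is an assumption based on how other thunk generators are fetched, and is not shown in this diff:

    MacroAssemblerCodeRef exitThunk = vm->getCTIStub(osrExitGenerationThunkGenerator);
    MacroAssemblerCodeRef lazyThunk = vm->getCTIStub(lazySlowPathGenerationThunkGenerator);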
diff --git a/Source/JavaScriptCore/ftl/FTLTypedPointer.h b/Source/JavaScriptCore/ftl/FTLTypedPointer.h
index fbee5a3ec..8c0cbae5f 100644
--- a/Source/JavaScriptCore/ftl/FTLTypedPointer.h
+++ b/Source/JavaScriptCore/ftl/FTLTypedPointer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,14 +23,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLTypedPointer_h
-#define FTLTypedPointer_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "FTLAbbreviations.h"
+#include "FTLAbbreviatedTypes.h"
#include "FTLAbstractHeap.h"
namespace JSC { namespace FTL {
@@ -49,13 +46,13 @@ public:
{
}
- bool operator!() const
+ explicit operator bool() const
{
ASSERT(!m_heap == !m_value);
- return !m_heap;
+ return !!m_heap;
}
- const AbstractHeap& heap() const { return *m_heap; }
+ const AbstractHeap* heap() const { return m_heap; }
LValue value() const { return m_value; }
private:
@@ -66,6 +63,3 @@ private:
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLTypedPointer_h
-
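The move from operator! to an explicit operator bool above lets null checks read positively while still preventing accidental integer conversions; a small sketch, where heap, base, and emitLoad are placeholder names for illustration only:

    TypedPointer pointer(heap, base);
    if (pointer)                                   // previously spelled !!pointer
        emitLoad(pointer.heap(), pointer.value()); // heap() now returns a pointer, not a reference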
diff --git a/Source/JavaScriptCore/ftl/FTLValueFormat.cpp b/Source/JavaScriptCore/ftl/FTLValueFormat.cpp
deleted file mode 100644
index 5a89d6a49..000000000
--- a/Source/JavaScriptCore/ftl/FTLValueFormat.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "FTLValueFormat.h"
-
-#if ENABLE(FTL_JIT)
-
-#include "AssemblyHelpers.h"
-
-namespace JSC { namespace FTL {
-
-void reboxAccordingToFormat(
- ValueFormat format, AssemblyHelpers& jit, GPRReg value, GPRReg scratch1, GPRReg scratch2)
-{
- switch (format) {
- case ValueFormatInt32: {
- jit.zeroExtend32ToPtr(value, value);
- jit.or64(GPRInfo::tagTypeNumberRegister, value);
- break;
- }
-
- case ValueFormatInt52: {
- jit.rshift64(AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), value);
- jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
- jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
- jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
- break;
- }
-
- case ValueFormatStrictInt52: {
- jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
- jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
- jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
- break;
- }
-
- case ValueFormatBoolean: {
- jit.zeroExtend32ToPtr(value, value);
- jit.or32(MacroAssembler::TrustedImm32(ValueFalse), value);
- break;
- }
-
- case ValueFormatJSValue: {
- // Done already!
- break;
- }
-
- case ValueFormatDouble: {
- jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch1);
- jit.move64ToDouble(value, FPRInfo::fpRegT0);
- jit.boxDouble(FPRInfo::fpRegT0, value);
- jit.move64ToDouble(scratch1, FPRInfo::fpRegT0);
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-}
-
-} } // namespace JSC::FTL
-
-namespace WTF {
-
-using namespace JSC::FTL;
-
-void printInternal(PrintStream& out, ValueFormat format)
-{
- switch (format) {
- case InvalidValueFormat:
- out.print("Invalid");
- return;
- case ValueFormatInt32:
- out.print("Int32");
- return;
- case ValueFormatInt52:
- out.print("Int52");
- return;
- case ValueFormatStrictInt52:
- out.print("StrictInt52");
- return;
- case ValueFormatBoolean:
- out.print("Boolean");
- return;
- case ValueFormatJSValue:
- out.print("JSValue");
- return;
- case ValueFormatDouble:
- out.print("Double");
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace WTF
-
-#endif // ENABLE(FTL_JIT)
-
diff --git a/Source/JavaScriptCore/ftl/FTLValueFromBlock.h b/Source/JavaScriptCore/ftl/FTLValueFromBlock.h
index fec0a1b37..5bd14ca62 100644
--- a/Source/JavaScriptCore/ftl/FTLValueFromBlock.h
+++ b/Source/JavaScriptCore/ftl/FTLValueFromBlock.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLValueFromBlock_h
-#define FTLValueFromBlock_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
@@ -47,6 +44,8 @@ public:
, m_block(block)
{
}
+
+ explicit operator bool() const { return m_value || m_block; }
LValue value() const { return m_value; }
LBasicBlock block() const { return m_block; }
@@ -59,6 +58,3 @@ private:
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLValueFromBlock_h
-
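The added explicit operator bool is handy when one arm of a diamond may not produce a value; for context, a hedged sketch of the anchor/phi idiom this type participates in (m_out, anchor(), phi(), appendTo(), and Int64 are assumed from FTL lowering code, not defined in this header):

    ValueFromBlock fastResult = m_out.anchor(fastValue);
    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);
    ValueFromBlock slowResult = m_out.anchor(slowValue);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    LValue result = m_out.phi(Int64, fastResult, slowResult);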
diff --git a/Source/JavaScriptCore/ftl/FTLValueRange.cpp b/Source/JavaScriptCore/ftl/FTLValueRange.cpp
new file mode 100644
index 000000000..d46ddafc1
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLValueRange.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLValueRange.h"
+
+#if ENABLE(FTL_JIT)
+
+namespace JSC { namespace FTL {
+
+void ValueRange::decorateInstruction(LValue loadInstruction, unsigned rangeKind) const
+{
+ UNUSED_PARAM(loadInstruction);
+ UNUSED_PARAM(rangeKind);
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLExitThunkGenerator.h b/Source/JavaScriptCore/ftl/FTLValueRange.h
index 56936b518..3ae3dcbf9 100644
--- a/Source/JavaScriptCore/ftl/FTLExitThunkGenerator.h
+++ b/Source/JavaScriptCore/ftl/FTLValueRange.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,38 +23,38 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FTLExitThunkGenerator_h
-#define FTLExitThunkGenerator_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(FTL_JIT)
-#include "CCallHelpers.h"
+#include "FTLAbbreviatedTypes.h"
namespace JSC { namespace FTL {
-class State;
-struct OSRExitCompilationInfo;
-
-class ExitThunkGenerator : public CCallHelpers {
+class ValueRange {
public:
- ExitThunkGenerator(State& state);
- ~ExitThunkGenerator();
+ ValueRange()
+ : m_begin(0)
+ , m_end(0)
+ {
+ }
- void emitThunk(unsigned index);
- void emitThunks();
+ ValueRange(LValue begin, LValue end)
+ : m_begin(begin)
+ , m_end(end)
+ {
+ }
+
+ LValue begin() const { return m_begin; }
+ LValue end() const { return m_end; }
+
+ void decorateInstruction(LValue loadInstruction, unsigned rangeKind) const;
- bool didThings() const { return m_didThings; }
-
private:
- State& m_state;
- bool m_didThings;
+ LValue m_begin;
+ LValue m_end;
};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
-#endif // FTLExitThunkGenerator_h
-
diff --git a/Source/JavaScriptCore/ftl/FTLExitThunkGenerator.cpp b/Source/JavaScriptCore/ftl/FTLWeight.h
index 86c872307..cde23070b 100644
--- a/Source/JavaScriptCore/ftl/FTLExitThunkGenerator.cpp
+++ b/Source/JavaScriptCore/ftl/FTLWeight.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,47 +23,49 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "config.h"
-#include "FTLExitThunkGenerator.h"
+#pragma once
#if ENABLE(FTL_JIT)
-#include "FTLOSRExitCompilationInfo.h"
-#include "FTLState.h"
+#include "B3FrequencyClass.h"
+#include <wtf/MathExtras.h>
+#include <wtf/StdLibExtras.h>
namespace JSC { namespace FTL {
-using namespace JSC::DFG;
-
-ExitThunkGenerator::ExitThunkGenerator(State& state)
- : CCallHelpers(&state.graph.m_vm, state.graph.m_codeBlock)
- , m_state(state)
- , m_didThings(false)
-{
-}
-
-ExitThunkGenerator::~ExitThunkGenerator()
-{
-}
-
-void ExitThunkGenerator::emitThunk(unsigned index)
-{
- OSRExitCompilationInfo& info = m_state.finalizer->osrExit[index];
+class Weight {
+public:
+ Weight()
+ : m_value(std::numeric_limits<float>::quiet_NaN())
+ {
+ }
+
+ explicit Weight(float value)
+ : m_value(value)
+ {
+ }
- info.m_thunkLabel = label();
- push(TrustedImm32(index));
- info.m_thunkJump = patchableJump();
+ bool isSet() const { return m_value == m_value; }
+ bool operator!() const { return !isSet(); }
- m_didThings = true;
-}
+ float value() const { return m_value; }
-void ExitThunkGenerator::emitThunks()
-{
- for (unsigned i = 0; i < m_state.finalizer->osrExit.size(); ++i)
- emitThunk(i);
-}
+ B3::FrequencyClass frequencyClass() const { return value() ? B3::FrequencyClass::Normal : B3::FrequencyClass::Rare; }
+
+ // Inverse weight for a two-target branch.
+ Weight inverse() const
+ {
+ if (!isSet())
+ return Weight();
+ if (value())
+ return Weight(0);
+ return Weight(1);
+ }
+
+private:
+ float m_value;
+};
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
-
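For clarity, the NaN encoding above makes an unset Weight compare unequal to itself, which is what isSet() relies on; a small self-contained illustration of the resulting semantics:

    Weight hot(1);     // isSet() == true,  frequencyClass() == B3::FrequencyClass::Normal
    Weight cold(0);    // isSet() == true,  frequencyClass() == B3::FrequencyClass::Rare
    Weight unknown;    // isSet() == false, because NaN != NaN
    Weight other = cold.inverse();  // Weight(1): the complementary edge of a two-target branch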
diff --git a/Source/JavaScriptCore/ftl/FTLWeightedTarget.h b/Source/JavaScriptCore/ftl/FTLWeightedTarget.h
new file mode 100644
index 000000000..5ed2968e4
--- /dev/null
+++ b/Source/JavaScriptCore/ftl/FTLWeightedTarget.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(FTL_JIT)
+
+#include "FTLAbbreviatedTypes.h"
+#include "FTLWeight.h"
+
+namespace JSC { namespace FTL {
+
+class WeightedTarget {
+public:
+ WeightedTarget()
+ : m_target(nullptr)
+ {
+ }
+
+ WeightedTarget(LBasicBlock target, Weight weight)
+ : m_target(target)
+ , m_weight(weight)
+ {
+ }
+
+ WeightedTarget(LBasicBlock target, float weight)
+ : m_target(target)
+ , m_weight(weight)
+ {
+ }
+
+ LBasicBlock target() const { return m_target; }
+ Weight weight() const { return m_weight; }
+
+ B3::FrequentedBlock frequentedBlock() const
+ {
+ return B3::FrequentedBlock(target(), weight().frequencyClass());
+ }
+
+private:
+ LBasicBlock m_target;
+ Weight m_weight;
+};
+
+// Helpers for creating weighted targets for statically known (or unknown) branch
+// profiles.
+
+inline WeightedTarget usually(LBasicBlock block)
+{
+ return WeightedTarget(block, 1);
+}
+
+inline WeightedTarget rarely(LBasicBlock block)
+{
+ return WeightedTarget(block, 0);
+}
+
+// Currently in B3 this is the equivalent of "usually", but we like to make the distinction in
+// case we ever make B3 support proper branch weights. We used to do that in LLVM.
+inline WeightedTarget unsure(LBasicBlock block)
+{
+ return WeightedTarget(block, Weight());
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
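For context, these helpers are what lowering code passes when emitting branches; a hedged sketch of typical use, where the m_out.branch call shape is assumed from FTL lowering code elsewhere in the tree and is not part of this new header:

    // Guard expected to pass almost always: bias toward the fast path.
    m_out.branch(isFastCase, usually(fastPath), rarely(slowPath));

    // Branch with no static expectation either way.
    m_out.branch(condition, unsure(thenBlock), unsure(elseBlock));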