Diffstat (limited to 'Source/JavaScriptCore/jit/JITInlines.h')
-rw-r--r--  Source/JavaScriptCore/jit/JITInlines.h  619
1 file changed, 460 insertions, 159 deletions
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index 9330e773e..5d2d6882e 100644
--- a/Source/JavaScriptCore/jit/JITInlines.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,17 +23,60 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITInlines_h
-#define JITInlines_h
-
+#pragma once
#if ENABLE(JIT)
-#include "CallFrameInlines.h"
+#include "JSCInlines.h"
namespace JSC {
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(int src)
+#if USE(JSVALUE64)
+inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ JumpList slowCases = emitDoubleLoad(instruction, badType);
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
+ return slowCases;
+}
+#else
+inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ JumpList slowCases = emitDoubleLoad(instruction, badType);
+ moveDoubleToInts(fpRegT0, regT0, regT1);
+ return slowCases;
+}
+#endif // USE(JSVALUE64)
+
+ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
+{
+ switch (arrayMode) {
+ case JITInt32:
+ return emitInt32Load(currentInstruction, badType);
+ case JITDouble:
+ return emitDoubleLoad(currentInstruction, badType);
+ case JITContiguous:
+ return emitContiguousLoad(currentInstruction, badType);
+ case JITArrayStorage:
+ return emitArrayStorageLoad(currentInstruction, badType);
+ default:
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return MacroAssembler::JumpList();
+}
+
+inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(Instruction* instruction, PatchableJump& badType, IndexingType expectedShape)
+{
+ return emitContiguousLoad(instruction, badType, expectedShape);
+}
+
+inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ return emitArrayStorageLoad(instruction, badType);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantDouble(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}
@@ -44,7 +87,7 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(int src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, int entry)
{
#if USE(JSVALUE32_64)
store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
@@ -54,26 +97,9 @@ ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::Ca
#endif
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- loadPtr(Address(from, entry * sizeof(Register)), to);
-}
-
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load32(Address(from, entry * sizeof(Register)), to);
-}
-
-#if USE(JSVALUE64)
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load64(Address(from, entry * sizeof(Register)), to);
-}
-#endif
-
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
- failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ failures.append(branchStructure(NotEqual, Address(src, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
failures.append(branchTest32(Zero, dst));
@@ -92,23 +118,34 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
}
+ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr function)
+{
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ Call nakedCall = nearTailCall();
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
+ return nakedCall;
+}
+
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
#if USE(JSVALUE32_64)
- Instruction* instruction = m_codeBlock->instructions().begin() + m_bytecodeOffset + 1;
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
+ Instruction* instruction = m_codeBlock->instructions().begin() + m_bytecodeOffset;
+ uint32_t locationBits = CallSiteIndex(instruction).bits();
#else
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(m_bytecodeOffset + 1);
+ uint32_t locationBits = CallSiteIndex(m_bytecodeOffset).bits();
#endif
- store32(TrustedImm32(locationBits), intTagFor(JSStack::ArgumentCount));
+ store32(TrustedImm32(locationBits), intTagFor(CallFrameSlot::argumentCount));
+
+ // FIXME: It's not clear that this is needed. JITOperations tend to update the top call frame on
+ // the C++ side.
+ // https://bugs.webkit.org/show_bug.cgi?id=155693
storePtr(callFrameRegister, &m_vm->topCallFrame);
}
@@ -120,6 +157,16 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheck(const Funct
return call;
}
+#if OS(WINDOWS) && CPU(X86_64)
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr& function)
+{
+ updateTopCallFrame();
+ MacroAssembler::Call call = appendCallWithSlowPathReturnType(function);
+ exceptionCheck();
+ return call;
+}
+#endif
+
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithCallFrameRollbackOnException(const FunctionPtr& function)
{
updateTopCallFrame(); // The callee is responsible for setting topCallFrame to their caller
@@ -151,12 +198,49 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueRe
return call;
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(Z_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ updateTopCallFrame();
+ return appendCall(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_E operation)
{
setupArgumentsExecState();
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJsc operation, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJscZ operation, GPRReg arg1, int32_t arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, TrustedImmPtr arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EO operation, GPRReg arg)
{
setupArgumentsWithExecState(arg);
@@ -205,6 +289,18 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EC operatio
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscC operation, int dst, GPRReg arg1, JSCell* cell)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), arg2);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EP operation, int dst, void* pointer)
{
setupArgumentsWithExecState(TrustedImmPtr(pointer));
@@ -217,16 +313,22 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(WithProfileTag, J_JITOpera
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EPc operation, int dst, Instruction* bytecodePC)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZ operation, int dst, int32_t arg)
{
setupArgumentsWithExecState(TrustedImm32(arg));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EZ operation, int32_t op)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZZ operation, int dst, int32_t arg1, int32_t arg2)
{
- setupArgumentsWithExecState(TrustedImm32(op));
- return appendCallWithExceptionCheck(operation);
+ setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImm32(arg2));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_ECC operation, RegisterID regOp1, RegisterID regOp2)
@@ -241,6 +343,17 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EOJss opera
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Sprt_JITOperation_EZ operation, int32_t op)
+{
+#if OS(WINDOWS) && CPU(X86_64)
+ setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32(op));
+ return appendCallWithExceptionCheckAndSlowPathReturnType(operation);
+#else
+ setupArgumentsWithExecState(TrustedImm32(op));
+ return appendCallWithExceptionCheck(operation);
+#endif
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_E operation)
{
setupArgumentsExecState();
@@ -259,6 +372,13 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECC operati
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EE operation, RegisterID regOp)
+{
+ setupArgumentsWithExecState(regOp);
+ updateTopCallFrame();
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EPc operation, Instruction* bytecodePC)
{
setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
@@ -289,21 +409,74 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnExce
return appendCallWithCallFrameRollbackOnException(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operation, JSValueRegs result, JSValueRegs arg)
+{
+ setupArgumentsWithExecState(arg);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2)
+{
+ setupArgumentsWithExecState(arg1, arg2);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJArp operation, JSValueRegs result, JSValueRegs operand, ArithProfile* arithProfile)
+{
+ setupArgumentsWithExecState(operand, TrustedImmPtr(arithProfile));
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJArp operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2, ArithProfile* arithProfile)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arithProfile));
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJMic operation, JSValueRegs result, JSValueRegs arg, TrustedImmPtr mathIC)
+{
+ setupArgumentsWithExecState(arg, mathIC);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJMic operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2, TrustedImmPtr mathIC)
+{
+ setupArgumentsWithExecState(arg1, arg2, mathIC);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
#if USE(JSVALUE64)
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1, int32_t arg3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1, int32_t arg2, int32_t arg3)
{
- setupArgumentsWithExecState(arg1, TrustedImm32(arg3));
+ setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3));
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2, int32_t arg3, GPRReg arg4)
{
- setupArgumentsWithExecState(arg1, arg2, arg3);
+ setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1, regOp2, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -315,7 +488,31 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operat
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, GPRReg arg)
+{
+ setupArgumentsWithExecState(TrustedImm32(dst), arg);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
@@ -339,7 +536,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operatio
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc operation, int dst, GPRReg arg1, const Identifier* arg2)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJI operation, int dst, GPRReg arg1, UniquedStringImpl* arg2)
{
setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
@@ -351,6 +548,24 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operati
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1, GPRReg arg2, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1, GPRReg arg2, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+{
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1)
{
setupArgumentsWithExecState(arg1);
@@ -370,15 +585,29 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJI operation, GPRReg arg1, UniquedStringImpl* arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operation, RegisterID regOp1, RegisterID regOp2)
{
setupArgumentsWithExecState(regOp1, regOp2);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2, int32_t op3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
{
- setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2, TrustedImm32(op3));
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
+{
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
return appendCallWithExceptionCheck(operation);
}
@@ -388,9 +617,21 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJIdJJ operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZJJ operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJ operation, RegisterID arg1, RegisterID arg2)
+{
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID regOp1, RegisterID regOp2, int32_t op3, RegisterID regOp4)
{
- setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
+ setupArgumentsWithExecState(regOp1, regOp2, TrustedImm32(op3), regOp4);
return appendCallWithExceptionCheck(operation);
}
@@ -408,23 +649,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operat
#else // USE(JSVALUE32_64)
-// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]).
-// To avoid assemblies from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
-#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
-#define EABI_32BIT_DUMMY_ARG TrustedImm32(0),
-#else
-#define EABI_32BIT_DUMMY_ARG
-#endif
-
-// JSVALUE32_64 is a 64-bit integer that cannot be put half in an argument register and half on stack when using SH4 architecture.
-// To avoid this, let's occupy the 4th argument register (r7) with a dummy argument when necessary. This must only be done when there
-// is no other 32-bit value argument behind this 64-bit JSValue.
-#if CPU(SH4)
-#define SH4_32BIT_DUMMY_ARG TrustedImm32(0),
-#else
-#define SH4_32BIT_DUMMY_ARG
-#endif
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1Tag, GPRReg arg1Payload)
{
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
@@ -432,20 +656,21 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOpera
return appendCall(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
{
-#if CPU(SH4)
- // We have to put arg3 in the 4th argument register (r7) as 64-bit value arg2 will be put on stack for sh4 architecure.
- setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImm32(arg2));
-#else
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2));
-#endif
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2, int32_t arg3)
{
- setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, int32_t arg3, GPRReg arg4)
+{
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
return appendCallWithExceptionCheck(operation);
}
@@ -461,13 +686,19 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operatio
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, const Identifier* arg2)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJI operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* arg2)
{
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(arg2));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
@@ -475,13 +706,25 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc opera
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag, TrustedImmPtr(byValInfo));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
@@ -497,15 +740,33 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJI operation, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* arg2)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operation, RegisterID arg1Tag, RegisterID arg1Payload, RegisterID arg2Tag, RegisterID arg2Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECICC operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZCC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
{
- setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJ operation, RegisterID arg1, RegisterID arg2Tag, RegisterID arg2Payload)
+{
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID arg1, RegisterID arg2Tag, RegisterID arg2Payload, int32_t arg3, RegisterID arg4)
+{
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
return appendCallWithExceptionCheck(operation);
}
@@ -515,13 +776,13 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2Tag, RegisterID regOp2Payload, int32_t op3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int32_t op1, SymbolTable* symbolTable, RegisterID regOp3Tag, RegisterID regOp3Payload)
{
- setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2Payload, regOp2Tag, TrustedImm32(op3));
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -529,7 +790,25 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI ope
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, RegisterID regOp1Tag, RegisterID regOp1Payload)
+{
+ setupArgumentsWithExecState(TrustedImm32(dst), regOp1Payload, regOp1Tag);
return appendCallWithExceptionCheck(operation);
}
@@ -544,15 +823,18 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operat
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
return appendCallWithExceptionCheck(operation);
}
-
-#undef EABI_32BIT_DUMMY_ARG
-#undef SH4_32BIT_DUMMY_ARG
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2Tag, GPRReg arg2Payload)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), EABI_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
#endif // USE(JSVALUE32_64)
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
- return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
+ return branchStructure(NotEqual, Address(reg, JSCell::structureIDOffset()), structure);
}
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
@@ -561,26 +843,32 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
linkSlowCase(iter);
}
+ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, unsigned bytecodeOffset)
+{
+ while (iter != slowCases.end() && iter->to == bytecodeOffset) {
+ iter->from.link(this);
+ ++iter;
+ }
+}
+
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
-ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
+ALWAYS_INLINE void JIT::addSlowCase(const JumpList& jumpList)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- const JumpList::JumpVector& jumpVector = jumpList.jumps();
- size_t size = jumpVector.size();
- for (size_t i = 0; i < size; ++i)
- m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
+ for (const Jump& jump : jumpList.jumps())
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addSlowCase()
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Jump emptyJump; // Doing it this way to make Windows happy.
m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
@@ -588,21 +876,26 @@ ALWAYS_INLINE void JIT::addSlowCase()
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellObject(RegisterID cellReg)
{
- return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ return branch8(AboveOrEqual, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellNotObject(RegisterID cellReg)
+{
+ return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
#if ENABLE(SAMPLING_FLAGS)
@@ -658,28 +951,11 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantChar(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
-template<typename StructureType>
-inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType structure, RegisterID result, RegisterID scratch)
-{
- loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
- addSlowCase(branchTestPtr(Zero, result));
-
- // remove the object from the free list
- loadPtr(Address(result), scratch);
- storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead()));
-
- // initialize the object's structure
- storePtr(structure, Address(result, JSCell::structureOffset()));
-
- // initialize the object's property storage pointer
- storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
-}
-
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
ASSERT(shouldEmitProfiling());
@@ -713,22 +989,19 @@ inline void JIT::emitValueProfilingSite()
emitValueProfilingSite(m_bytecodeOffset);
}
-inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
+inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile)
{
- UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now.
-
- RegisterID structure = structureAndIndexingType;
- RegisterID indexingType = structureAndIndexingType;
-
- if (shouldEmitProfiling())
- storePtr(structure, arrayProfile->addressOfLastSeenStructure());
+ if (shouldEmitProfiling()) {
+ load32(MacroAssembler::Address(cell, JSCell::structureIDOffset()), indexingType);
+ store32(indexingType, arrayProfile->addressOfLastSeenStructureID());
+ }
- load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
+ load8(Address(cell, JSCell::indexingTypeAndMiscOffset()), indexingType);
}
-inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
+inline void JIT::emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex)
{
- emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
+ emitArrayProfilingSiteWithCell(cell, indexingType, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
}
inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
@@ -748,7 +1021,7 @@ static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capabilit
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
- ConcurrentJITLocker locker(m_codeBlock->m_lock);
+ ConcurrentJSLocker locker(m_codeBlock->m_lock);
profile->computeUpdatedPrediction(locker, m_codeBlock);
ArrayModes arrayModes = profile->observedArrayModes(locker);
if (arrayProfileSaw(arrayModes, DoubleShape))
@@ -760,6 +1033,16 @@ inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
return JITContiguous;
}
+ALWAYS_INLINE int32_t JIT::getOperandConstantInt(int src)
+{
+ return getConstantOperand(src).asInt32();
+}
+
+ALWAYS_INLINE double JIT::getOperandConstantDouble(int src)
+{
+ return getConstantOperand(src).asDouble();
+}
+
#if USE(JSVALUE32_64)
inline void JIT::emitLoadTag(int index, RegisterID tag)
@@ -788,6 +1071,16 @@ inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
move(Imm32(v.tag()), tag);
}
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
+{
+ emitLoad(src, dst.tagGPR(), dst.payloadGPR());
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
+{
+ emitStore(dst, from.tagGPR(), from.payloadGPR());
+}
+
inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
RELEASE_ASSERT(tag != payload);
@@ -819,7 +1112,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
+ loadDouble(TrustedImmPtr(&inConstantPool), value);
} else
loadDouble(addressFor(index), value);
}
@@ -904,20 +1197,20 @@ inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterI
}
}
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
-ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant)
+ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t& constant)
{
- if (isOperandConstantImmediateInt(op1)) {
+ if (isOperandConstantInt(op1)) {
constant = getConstantOperand(op1).asInt32();
op = op2;
return true;
}
- if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantInt(op2)) {
constant = getConstantOperand(op2).asInt32();
op = op1;
return true;
@@ -931,7 +1224,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -946,6 +1239,11 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
load64(Address(callFrameRegister, src * sizeof(Register)), dst);
}
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
+{
+ emitGetVirtualRegister(src, dst.payloadGPR());
+}
+
ALWAYS_INLINE void JIT::emitGetVirtualRegister(VirtualRegister src, RegisterID dst)
{
emitGetVirtualRegister(src.offset(), dst);
@@ -962,12 +1260,7 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegisters(VirtualRegister src1, RegisterID
emitGetVirtualRegisters(src1.offset(), dst1, src2.offset(), dst2);
}
-ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(int src)
-{
- return getConstantOperand(src).asInt32();
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
@@ -977,6 +1270,11 @@ ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
store64(from, Address(callFrameRegister, dst * sizeof(Register)));
}
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
+{
+ emitPutVirtualRegister(dst, from.payloadGPR());
+}
+
ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
{
emitPutVirtualRegister(dst.offset(), from);
@@ -1019,7 +1317,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
+ loadDouble(TrustedImmPtr(&inConstantPool), value);
} else
loadDouble(addressFor(index), value);
}
@@ -1027,52 +1325,58 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- ASSERT(isOperandConstantImmediateInt(index));
+ ASSERT(isOperandConstantInt(index));
convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
} else
convertInt32ToDouble(addressFor(index), value);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfInt(RegisterID reg)
{
return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg)
{
return branch64(Below, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotInt(RegisterID reg)
+{
+ return patchableBranch64(Below, reg, tagTypeNumberRegister);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
move(reg1, scratch);
and64(reg2, scratch);
- return emitJumpIfNotImmediateInteger(scratch);
+ return emitJumpIfNotInt(scratch);
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg)
{
- addSlowCase(emitJumpIfNotImmediateInteger(reg));
+ addSlowCase(emitJumpIfNotInt(reg));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
- addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
+ addSlowCase(emitJumpIfNotInt(reg1, reg2, scratch));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg)
{
- addSlowCase(emitJumpIfNotImmediateNumber(reg));
+ addSlowCase(emitJumpIfNotNumber(reg));
}
-ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
+ALWAYS_INLINE void JIT::emitTagBool(RegisterID reg)
{
- emitFastArithIntToImmNoCheck(src, dest);
+ or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}
-ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
+inline Instruction* JIT::copiedInstruction(Instruction* inst)
{
- or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
+ ASSERT(inst >= m_codeBlock->instructions().begin() && inst < m_codeBlock->instructions().end());
+ return m_instructions.begin() + (inst - m_codeBlock->instructions().begin());
}
#endif // USE(JSVALUE32_64)
@@ -1080,6 +1384,3 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JITInlines_h
-