author    Simon Hausmann <simon.hausmann@digia.com>    2012-10-22 15:40:17 +0200
committer Simon Hausmann <simon.hausmann@digia.com>    2012-10-22 15:40:17 +0200
commit    43a42f108af6bcbd91f2672731c3047c26213af1 (patch)
tree      7fa092e5f5d873c72f2486a70e26be26f7a38bec /Source/JavaScriptCore/jit
parent    d9cf437c840c6eb7417bdd97e6c40979255d3158 (diff)
download  qtwebkit-43a42f108af6bcbd91f2672731c3047c26213af1.tar.gz
Imported WebKit commit 302e7806bff028bd1167a1ec7c86a1ee00ecfb49 (http://svn.webkit.org/repository/webkit/trunk@132067)
New snapshot that fixes build without QtWidgets
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp                     |  55
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h                       |  38
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp           |  84
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp                 |  22
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineMethods.h          | 195
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp              | 650
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp         | 143
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp       | 127
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp  |  75
-rw-r--r--  Source/JavaScriptCore/jit/JITStubCall.h               |  32
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp                |  53
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h                  |   9
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h            |  20
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h       |   8
14 files changed, 790 insertions, 721 deletions
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 4eab17661..2d2991b5f 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -270,10 +270,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_get_by_val)
DEFINE_OP(op_get_argument_by_val)
DEFINE_OP(op_get_by_pname)
- DEFINE_OP(op_get_global_var_watchable)
- DEFINE_OP(op_get_global_var)
DEFINE_OP(op_get_pnames)
- DEFINE_OP(op_get_scoped_var)
DEFINE_OP(op_check_has_instance)
DEFINE_OP(op_instanceof)
DEFINE_OP(op_is_undefined)
@@ -339,17 +336,26 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_put_by_index)
DEFINE_OP(op_put_by_val)
DEFINE_OP(op_put_getter_setter)
- case op_init_global_const:
- DEFINE_OP(op_put_global_var)
- case op_init_global_const_check:
- DEFINE_OP(op_put_global_var_check)
- DEFINE_OP(op_put_scoped_var)
+ DEFINE_OP(op_init_global_const)
+ DEFINE_OP(op_init_global_const_check)
+
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
DEFINE_OP(op_resolve)
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
DEFINE_OP(op_resolve_base)
+
+ case op_put_to_base_variable:
+ DEFINE_OP(op_put_to_base)
+
DEFINE_OP(op_ensure_property_exists)
- DEFINE_OP(op_resolve_global)
- DEFINE_OP(op_resolve_global_dynamic)
- DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
DEFINE_OP(op_resolve_with_this)
DEFINE_OP(op_ret)
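Note: the hunk above collapses the specialized resolve opcodes into three generic emitters (op_resolve, op_resolve_base, op_put_to_base). The variant opcode IDs keep no case body of their own and fall through into the shared DEFINE_OP; the per-variant behavior is carried at run time by the ResolveOperations declared in JIT.h below. A minimal runnable analogue of the fall-through trick, with hypothetical opcode names:

    #include <cstdio>

    enum Op { ResolveGlobalVar, ResolveScopedVar, Resolve };

    void dispatch(Op op) {
        switch (op) {
        case ResolveGlobalVar:   // specialized IDs: no body of their own...
        case ResolveScopedVar:   // ...so control falls through...
        case Resolve:            // ...into the one shared emitter.
            std::puts("emit_op_resolve");
            break;
        }
    }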
@@ -488,7 +494,6 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_mul)
DEFINE_SLOWCASE_OP(op_negate)
DEFINE_SLOWCASE_OP(op_neq)
- DEFINE_SLOWCASE_OP(op_new_array)
DEFINE_SLOWCASE_OP(op_new_object)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
@@ -503,16 +508,32 @@ void JIT::privateCompileSlowCases()
case op_put_by_id_transition_normal_out_of_line:
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
- case op_init_global_const_check:
- DEFINE_SLOWCASE_OP(op_put_global_var_check);
- DEFINE_SLOWCASE_OP(op_resolve_global)
- DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
+ DEFINE_SLOWCASE_OP(op_init_global_const_check);
DEFINE_SLOWCASE_OP(op_rshift)
DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
DEFINE_SLOWCASE_OP(op_to_jsnumber)
DEFINE_SLOWCASE_OP(op_to_primitive)
+
+ case op_resolve_global_property:
+ case op_resolve_global_var:
+ case op_resolve_scoped_var:
+ case op_resolve_scoped_var_on_top_scope:
+ case op_resolve_scoped_var_with_top_scope_check:
+ DEFINE_SLOWCASE_OP(op_resolve)
+
+ case op_resolve_base_to_global:
+ case op_resolve_base_to_global_dynamic:
+ case op_resolve_base_to_scope:
+ case op_resolve_base_to_scope_with_top_scope_check:
+ DEFINE_SLOWCASE_OP(op_resolve_base)
+ DEFINE_SLOWCASE_OP(op_resolve_with_base)
+ DEFINE_SLOWCASE_OP(op_resolve_with_this)
+
+ case op_put_to_base_variable:
+ DEFINE_SLOWCASE_OP(op_put_to_base)
+
default:
ASSERT_NOT_REACHED();
}
@@ -645,7 +666,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
continue;
int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
- loadPtr(Address(callFrameRegister, offset), regT0);
+ load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
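Note: the split loads above follow directly from the two value representations' storage layout; a sketch of the union involved (field names as in JSC's EncodedValueDescriptor, little-endian layout shown, exact shape an assumption):

    #include <cstdint>

    union EncodedValueDescriptor {
        int64_t asInt64;        // JSVALUE64: the whole NaN-boxed value, one load64
        struct {
            int32_t payload;    // JSVALUE32_64: first load32 (value/pointer half)
            int32_t tag;        // JSVALUE32_64: second load32 (type tag half)
        } asBits;
    };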
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 054718573..cd832c21f 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -475,9 +475,7 @@ namespace JSC {
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
template<typename ClassType, MarkedBlock::DestructorType, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
- void emitAllocateBasicStorage(size_t, ptrdiff_t offsetFromBase, RegisterID result);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
- void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr, RegisterID scratch);
#if ENABLE(VALUE_PROFILER)
// This assumes that the value to profile is in regT0 and that regT3 is available for
@@ -592,6 +590,9 @@ namespace JSC {
#endif
#else // USE(JSVALUE32_64)
+ /* This function is deprecated. */
+ void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
void emitGetVirtualRegister(int src, RegisterID dst);
void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
@@ -676,9 +677,6 @@ namespace JSC {
void emit_op_get_by_val(Instruction*);
void emit_op_get_argument_by_val(Instruction*);
void emit_op_get_by_pname(Instruction*);
- void emit_op_get_global_var(Instruction*);
- void emit_op_get_global_var_watchable(Instruction* instruction) { emit_op_get_global_var(instruction); }
- void emit_op_get_scoped_var(Instruction*);
void emit_op_init_lazy_reg(Instruction*);
void emit_op_check_has_instance(Instruction*);
void emit_op_instanceof(Instruction*);
@@ -741,17 +739,16 @@ namespace JSC {
void emit_op_put_by_index(Instruction*);
void emit_op_put_by_val(Instruction*);
void emit_op_put_getter_setter(Instruction*);
- void emit_op_put_global_var(Instruction*);
- void emit_op_put_global_var_check(Instruction*);
- void emit_op_put_scoped_var(Instruction*);
+ void emit_op_init_global_const(Instruction*);
+ void emit_op_init_global_const_check(Instruction*);
+ void emit_resolve_operations(ResolveOperations*, const int* base, const int* value);
+ void emitSlow_link_resolve_operations(ResolveOperations*, Vector<SlowCaseEntry>::iterator&);
void emit_op_resolve(Instruction*);
void emit_op_resolve_base(Instruction*);
void emit_op_ensure_property_exists(Instruction*);
- void emit_op_resolve_global(Instruction*, bool dynamic = false);
- void emit_op_resolve_global_dynamic(Instruction*);
- void emit_op_resolve_skip(Instruction*);
void emit_op_resolve_with_base(Instruction*);
void emit_op_resolve_with_this(Instruction*);
+ void emit_op_put_to_base(Instruction*);
void emit_op_ret(Instruction*);
void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
@@ -820,23 +817,23 @@ namespace JSC {
void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_global_var_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_init_global_const_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_new_array(Instruction*, Vector<SlowCaseEntry>::iterator&);
-
+
+ void emitSlow_op_resolve(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_with_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_to_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
void emitRightShift(Instruction*, bool isUnsigned);
void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
- /* This function is deprecated. */
- void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
-
void emitInitRegister(unsigned dst);
void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
@@ -845,6 +842,9 @@ namespace JSC {
void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#if USE(JSVALUE64)
+ void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#endif
JSValue getConstantOperand(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index b66e2cd07..21d59bc33 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -209,8 +209,8 @@ void JIT::emit_op_negate(Instruction* currentInstruction)
srcNotInt.link(this);
emitJumpSlowCaseIfNotImmediateNumber(regT0);
- move(TrustedImmPtr(reinterpret_cast<void*>(0x8000000000000000ull)), regT1);
- xorPtr(regT1, regT0);
+ move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
+ xor64(regT1, regT0);
end.link(this);
emitPutVirtualRegister(dst);
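Note: the negate fast path flips only IEEE 754 bit 63, and it can do so on the still-boxed value: the JSVALUE64 double encoding is an additive 2^48 offset, and carries out of bits 48..62 do not depend on bit 63, so the sign flip commutes with the boxing offset. A one-function sketch:

    #include <cstdint>

    // What move(TrustedImm64(0x8000000000000000), regT1); xor64(regT1, regT0) computes:
    uint64_t negateBoxedDouble(uint64_t encoded) {
        return encoded ^ 0x8000000000000000ull;   // flip the sign bit in place
    }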
@@ -224,7 +224,7 @@ void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter); // double check
JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(regT0);
stubCall.call(dst);
}
@@ -279,8 +279,8 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
// supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
addSlowCase(emitJumpIfNotImmediateNumber(regT0));
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
lhsIsInt.link(this);
emitJumpSlowCaseIfNotImmediateInteger(regT2);
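Note: the add64(tagTypeNumberRegister, ...)/move64ToDouble pairs recurring through this file (and their moveDoubleTo64/sub64 inverses) implement double unboxing and boxing. In this era's JSVALUE64 scheme a double is stored as its bit pattern plus 2^48, and tagTypeNumberRegister holds TagTypeNumber = 0xFFFF000000000000, which equals -(2^48) mod 2^64, so one add strips the offset and one sub restores it. A standalone sketch (constant assumed from the JSValue encoding of this period):

    #include <cstdint>
    #include <cstring>

    static const uint64_t TagTypeNumber = 0xFFFF000000000000ull; // == -(1ull << 48) mod 2^64

    // add64(tagTypeNumberRegister, reg); move64ToDouble(reg, fpReg);
    double unboxDouble(uint64_t encoded) {
        uint64_t bits = encoded + TagTypeNumber;   // subtracts the 2^48 offset
        double d;
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    // moveDoubleTo64(fpReg, reg); sub64(tagTypeNumberRegister, reg);
    uint64_t boxDouble(double d) {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits - TagTypeNumber;               // adds the 2^48 offset back
    }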
@@ -377,8 +377,8 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
if (supportsFloatingPointTruncate()) {
JumpList failures;
failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
if (shift)
urshift32(Imm32(shift & 0x1f), regT0);
@@ -399,8 +399,8 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
if (supportsFloatingPointTruncate()) {
JumpList failures;
failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
emitFastArithImmToInt(regT1);
@@ -499,8 +499,8 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
if (supportsFloatingPoint()) {
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
int32_t op2imm = getConstantOperand(op2).asInt32();
@@ -525,8 +525,8 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
if (supportsFloatingPoint()) {
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
int32_t op1imm = getConstantOperand(op1).asInt32();
@@ -552,10 +552,10 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
- addPtr(tagTypeNumberRegister, regT0);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT0, fpRegT0);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT0);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT0, fpRegT0);
+ move64ToDouble(regT1, fpRegT1);
emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
@@ -585,19 +585,19 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
int32_t imm = getConstantOperandImmediateInt(op1);
- andPtr(Imm32(imm), regT0);
+ and64(Imm32(imm), regT0);
if (imm >= 0)
emitFastArithIntToImmNoCheck(regT0, regT0);
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
int32_t imm = getConstantOperandImmediateInt(op2);
- andPtr(Imm32(imm), regT0);
+ and64(Imm32(imm), regT0);
if (imm >= 0)
emitFastArithIntToImmNoCheck(regT0, regT0);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
- andPtr(regT1, regT0);
+ and64(regT1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
}
emitPutVirtualRegister(result);
@@ -887,16 +887,16 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
emitGetVirtualRegister(op1, regT1);
convertInt32ToDouble(regT1, fpRegT1);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT2);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT2);
} else if (op2HasImmediateIntFastCase) {
notImm1.link(this);
if (!types.first().definitelyIsNumber())
emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
emitGetVirtualRegister(op2, regT1);
convertInt32ToDouble(regT1, fpRegT1);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT2);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT2);
} else {
// if we get here, eax is not an int32, edx not yet checked.
notImm1.link(this);
@@ -904,8 +904,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
if (!types.second().definitelyIsNumber())
emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT1);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT1);
Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
convertInt32ToDouble(regT1, fpRegT2);
Jump op2wasInteger = jump();
@@ -916,8 +916,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
convertInt32ToDouble(regT0, fpRegT1);
op2isDouble.link(this);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT2);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT2);
op2wasInteger.link(this);
}
@@ -931,8 +931,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
ASSERT(opcodeID == op_div);
divDouble(fpRegT2, fpRegT1);
}
- moveDoubleToPtr(fpRegT1, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(fpRegT1, regT0);
+ sub64(tagTypeNumberRegister, regT0);
emitPutVirtualRegister(result, regT0);
end.link(this);
@@ -1041,8 +1041,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
if (isOperandConstantImmediateDouble(op1)) {
emitGetVirtualRegister(op1, regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
} else if (isOperandConstantImmediateInt(op1)) {
emitLoadInt32ToDouble(op1, fpRegT0);
} else {
@@ -1053,15 +1053,15 @@ void JIT::emit_op_div(Instruction* currentInstruction)
convertInt32ToDouble(regT0, fpRegT0);
Jump skipDoubleLoad = jump();
notInt.link(this);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
skipDoubleLoad.link(this);
}
if (isOperandConstantImmediateDouble(op2)) {
emitGetVirtualRegister(op2, regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoadInt32ToDouble(op2, fpRegT1);
} else {
@@ -1072,8 +1072,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
convertInt32ToDouble(regT1, fpRegT1);
Jump skipDoubleLoad = jump();
notInt.link(this);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
skipDoubleLoad.link(this);
}
divDouble(fpRegT1, fpRegT0);
@@ -1100,13 +1100,13 @@ void JIT::emit_op_div(Instruction* currentInstruction)
Jump isInteger = jump();
notInteger.link(this);
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
- moveDoubleToPtr(fpRegT0, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
isInteger.link(this);
#else
// Double result.
- moveDoubleToPtr(fpRegT0, regT0);
- subPtr(tagTypeNumberRegister, regT0);
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
#endif
emitPutVirtualRegister(dst, regT0);
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 1de877daa..074bf7f97 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -72,7 +72,7 @@ void JIT::compileLoadVarargs(Instruction* instruction)
if (canOptimize) {
emitGetVirtualRegister(arguments, regT0);
- slowCase.append(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
+ slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));
emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
@@ -91,18 +91,18 @@ void JIT::compileLoadVarargs(Instruction* instruction)
// Initialize 'this'.
emitGetVirtualRegister(thisValue, regT2);
- storePtr(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+ store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
// Copy arguments.
neg32(regT0);
signExtend32ToPtr(regT0, regT0);
- end.append(branchAddPtr(Zero, TrustedImm32(1), regT0));
+ end.append(branchAdd64(Zero, TrustedImm32(1), regT0));
// regT0: -argumentCount
Label copyLoop = label();
- loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
- storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
- branchAddPtr(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
+ load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
+ store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+ branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
end.append(jump());
}
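Note: the copy loop above counts a negated index up toward zero so one branchAdd64 serves as both induction step and exit test, and both frames are addressed relative to the 'this' slot. A plain-C rendering of the loop shape only (offsets elided, names hypothetical; dst and src stand for the two frames based at their 'this' slots):

    #include <cstdint>

    void copyArgumentsShape(int64_t* dst, const int64_t* src, int64_t countIncludingThis) {
        int64_t i = -countIncludingThis;
        if (++i == 0)
            return;             // branchAdd64(Zero, TrustedImm32(1), regT0): only 'this', stored above
        do
            dst[i] = src[i];    // load64/store64 at BaseIndex(frame, regT0, TimesEight, thisOffset)
        while (++i != 0);       // branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this)
    }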
@@ -124,7 +124,7 @@ void JIT::compileCallEval()
{
JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
stubCall.call();
- addSlowCase(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
+ addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue()))));
emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
sampleCodeBlock(m_codeBlock);
@@ -134,7 +134,7 @@ void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+ emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
sampleCodeBlock(m_codeBlock);
@@ -179,8 +179,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
- storePtr(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
- storePtr(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
+ store64(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
+ store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
move(regT1, callFrameRegister);
if (opcodeID == op_call_eval) {
@@ -190,7 +190,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
DataLabelPtr addressOfLinkedFunctionCheck;
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
- Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));
+ Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
addSlowCase(slowCase);
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index a7aecd3e8..410bdf710 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -31,14 +31,6 @@
namespace JSC {
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- peek(dst, argumentStackOffset);
-}
-
ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
@@ -50,23 +42,33 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
-{
- storePtr(from, payloadFor(entry, callFrameRegister));
-}
-
ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
+ store32(from, payloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
#endif
- storePtr(from, payloadFor(entry, callFrameRegister));
}
ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
+#if USE(JSVALUE32_64)
store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
store32(from, intPayloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+{
+#if USE(JSVALUE32_64)
+ storePtr(from, payloadFor(entry, callFrameRegister));
+#else
+ store64(from, addressFor(entry, callFrameRegister));
+#endif
}
ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
@@ -82,6 +84,22 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEn
#endif
}
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load32(Address(from, entry * sizeof(Register)), to);
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+}
+
+#if USE(JSVALUE64)
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load64(Address(from, entry * sizeof(Register)), to);
+ killLastResultRegister();
+}
+#endif
+
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
@@ -101,14 +119,6 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
cont8Bit.link(this);
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load32(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-}
-
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
@@ -434,56 +444,6 @@ template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, Re
emitAllocateBasicJSObject<JSFinalObject, MarkedBlock::None, T>(structure, result, scratch);
}
-inline void JIT::emitAllocateBasicStorage(size_t size, ptrdiff_t offsetFromBase, RegisterID result)
-{
- CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
-
- loadPtr(&allocator->m_currentRemaining, result);
- addSlowCase(branchSubPtr(Signed, TrustedImm32(size), result));
- storePtr(result, &allocator->m_currentRemaining);
- negPtr(result);
- addPtr(AbsoluteAddress(&allocator->m_currentPayloadEnd), result);
- subPtr(TrustedImm32(size - offsetFromBase), result);
-}
-
-inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr, RegisterID scratch)
-{
- unsigned initialLength = std::max(length, BASE_VECTOR_LEN);
- size_t initialStorage = Butterfly::totalSize(0, 0, true, initialLength * sizeof(EncodedJSValue));
-
- loadPtr(m_codeBlock->globalObject()->addressOfArrayStructure(), scratch);
- load8(Address(scratch, Structure::indexingTypeOffset()), storagePtr);
- and32(TrustedImm32(IndexingShapeMask), storagePtr);
- addSlowCase(branch32(NotEqual, storagePtr, TrustedImm32(ContiguousShape)));
-
- // We allocate the backing store first to ensure that garbage collection
- // doesn't happen during JSArray initialization.
- emitAllocateBasicStorage(initialStorage, sizeof(IndexingHeader), storageResult);
-
- // Allocate the cell for the array.
- emitAllocateBasicJSObject<JSArray, MarkedBlock::None>(scratch, cellResult, storagePtr);
-
- // Store all the necessary info in the indexing header.
- store32(Imm32(length), Address(storageResult, Butterfly::offsetOfPublicLength()));
- store32(Imm32(initialLength), Address(storageResult, Butterfly::offsetOfVectorLength()));
-
- // Store the newly allocated ArrayStorage.
- storePtr(storageResult, Address(cellResult, JSObject::butterflyOffset()));
-
- // Store the values we have.
- for (unsigned i = 0; i < length; i++) {
-#if USE(JSVALUE64)
- loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
- storePtr(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i));
-#else
- load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
- store32(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i));
- load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
- store32(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
-#endif
- }
-}
-
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
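Note: the deleted emitAllocateBasicStorage/emitAllocateJSArray pair removes inline JSArray allocation from the baseline JIT, matching the op_new_array slow case dropped in JIT.cpp above. For reference, the bump-allocation scheme the removed code implemented, rendered as C (field names follow the deleted code; the struct is a minimal stand-in):

    #include <cstddef>

    struct CopiedAllocator { size_t m_currentRemaining; char* m_currentPayloadEnd; };

    // m_currentRemaining counts free bytes down; a signed underflow on the
    // subtraction is the out-of-space signal (the JIT's addSlowCase branch).
    char* tryAllocateBasicStorage(CopiedAllocator* a, size_t size, ptrdiff_t offsetFromBase) {
        ptrdiff_t newRemaining = (ptrdiff_t)a->m_currentRemaining - (ptrdiff_t)size;
        if (newRemaining < 0)
            return 0;                                                  // slow case
        char* base = a->m_currentPayloadEnd - a->m_currentRemaining;   // start of free space
        a->m_currentRemaining = (size_t)newRemaining;
        return base + offsetFromBase;
    }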
@@ -500,7 +460,7 @@ inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
// We're in a simple configuration: only one bucket, so we can just do a direct
// store.
#if USE(JSVALUE64)
- storePtr(value, valueProfile->m_buckets);
+ store64(value, valueProfile->m_buckets);
#else
EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
store32(value, &descriptor->asBits.payload);
@@ -516,7 +476,7 @@ inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
- storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
+ store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
@@ -538,17 +498,15 @@ inline void JIT::emitValueProfilingSite()
inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
{
+ UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now.
+
RegisterID structure = structureAndIndexingType;
RegisterID indexingType = structureAndIndexingType;
- if (canBeOptimized()) {
+ if (canBeOptimized())
storePtr(structure, arrayProfile->addressOfLastSeenStructure());
- load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
- move(TrustedImm32(1), scratch);
- lshift32(indexingType, scratch);
- or32(scratch, AbsoluteAddress(arrayProfile->addressOfArrayModes()));
- } else
- load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
+
+ load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
}
inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
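Note: emitArrayProfilingSite now records only the last-seen structure; the shift-and-or update of the array-modes bitmap is gone and both paths just reload the indexing type. What the removed lines computed, as a sketch (ArrayProfile here is a minimal stand-in):

    #include <cstdint>

    struct ArrayProfile {
        uint32_t m_arrayModes;
        uint32_t* addressOfArrayModes() { return &m_arrayModes; }
    };

    // move(TrustedImm32(1), scratch); lshift32(indexingType, scratch);
    // or32(scratch, AbsoluteAddress(arrayProfile->addressOfArrayModes()));
    void recordArrayMode(ArrayProfile* profile, uint8_t indexingShape) {
        *profile->addressOfArrayModes() |= 1u << indexingShape;
    }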
@@ -860,6 +818,14 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
#else // USE(JSVALUE32_64)
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
+{
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
+ peek64(dst, argumentStackOffset);
+}
+
ALWAYS_INLINE void JIT::killLastResultRegister()
{
m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
@@ -874,9 +840,9 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue value = m_codeBlock->getConstant(src);
if (!value.isNumber())
- move(TrustedImmPtr(JSValue::encode(value)), dst);
+ move(TrustedImm64(JSValue::encode(value)), dst);
else
- move(ImmPtr(JSValue::encode(value)), dst);
+ move(Imm64(JSValue::encode(value)), dst);
killLastResultRegister();
return;
}
@@ -889,7 +855,7 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
return;
}
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+ load64(Address(callFrameRegister, src * sizeof(Register)), dst);
killLastResultRegister();
}
@@ -916,28 +882,24 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
- storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+ store64(from, Address(callFrameRegister, dst * sizeof(Register)));
m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
- storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+ store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchTestPtr(Zero, reg, tagMaskRegister);
-#else
- return branchTest32(Zero, reg, TrustedImm32(TagMask));
-#endif
+ return branchTest64(Zero, reg, tagMaskRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
move(reg1, scratch);
- orPtr(reg2, scratch);
+ or64(reg2, scratch);
return emitJumpIfJSCell(scratch);
}
@@ -948,11 +910,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchTestPtr(NonZero, reg, tagMaskRegister);
-#else
- return branchTest32(NonZero, reg, TrustedImm32(TagMask));
-#endif
+ return branchTest64(NonZero, reg, tagMaskRegister);
}
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
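Note: the collapsed cell and integer tests in this region all key off the two tag constants pinned in reserved registers. Roughly, with the exact constant values an assumption based on this era's JSValue encoding:

    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xFFFF000000000000ull;  // tagTypeNumberRegister
    static const uint64_t TagMask       = TagTypeNumber | 0x2;    // tagMaskRegister

    bool isCell(uint64_t v)   { return (v & TagMask) == 0; }       // branchTest64(Zero, reg, tagMaskRegister)
    bool isInt32(uint64_t v)  { return v >= TagTypeNumber; }       // branch64(AboveOrEqual, reg, tagTypeNumberRegister)
    bool isNumber(uint64_t v) { return (v & TagTypeNumber) != 0; } // test64(NonZero, reg, tagTypeNumberRegister)
    uint64_t boxInt32(uint32_t i) { return TagTypeNumber | i; }    // or64(tagTypeNumberRegister, dest)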
@@ -966,8 +924,6 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
emitJumpSlowCaseIfNotJSCell(reg);
}
-#if USE(JSVALUE64)
-
inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
@@ -985,30 +941,21 @@ inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
} else
convertInt32ToDouble(addressFor(index), value);
}
-#endif
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
-#else
- return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
-#endif
+ return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
-#if USE(JSVALUE64)
- return branchPtr(Below, reg, tagTypeNumberRegister);
-#else
- return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
-#endif
+ return branch64(Below, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
move(reg1, scratch);
- andPtr(reg2, scratch);
+ and64(reg2, scratch);
return emitJumpIfNotImmediateInteger(scratch);
}
@@ -1027,41 +974,17 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
-#if USE(JSVALUE32_64)
-ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
-{
- subPtr(TrustedImm32(TagTypeNumber), reg);
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
-{
- return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
-}
-#endif
-
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
-#if USE(JSVALUE64)
emitFastArithIntToImmNoCheck(src, dest);
-#else
- if (src != dest)
- move(src, dest);
- addPtr(TrustedImm32(TagTypeNumber), dest);
-#endif
}
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
-#if USE(JSVALUE64)
if (src != dest)
move(src, dest);
- orPtr(tagTypeNumberRegister, dest);
-#else
- signExtend32ToPtr(src, dest);
- addPtr(dest, dest);
- emitFastArithReTagImmediate(dest, dest);
-#endif
+ or64(tagTypeNumberRegister, dest);
}
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index c187e4725..07c8ace2a 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -248,14 +248,14 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::edi);
- subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+ subPtr(TrustedImm32(16 - sizeof(int64_t)), stackPointerRegister); // Align stack after call.
emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(X86Registers::r9, executableOffsetToFunction));
- addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+ addPtr(TrustedImm32(16 - sizeof(int64_t)), stackPointerRegister);
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
@@ -316,8 +316,8 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#endif
// Check for an exception
- loadPtr(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+ load64(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTest64(NonZero, regT2);
// Return.
ret();
@@ -360,9 +360,9 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
} else {
if (m_codeBlock->isConstantRegisterIndex(src)) {
if (!getConstantOperand(src).isNumber())
- storePtr(TrustedImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ store64(TrustedImm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
else
- storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ store64(Imm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
if (dst == m_lastResultBytecodeRegister)
killLastResultRegister();
} else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
@@ -372,8 +372,8 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
} else {
// Perform the copy via regT1; do not disturb any mapping in regT0.
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
- storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+ load64(Address(callFrameRegister, src * sizeof(Register)), regT1);
+ store64(regT1, Address(callFrameRegister, dst * sizeof(Register)));
}
}
}
@@ -441,18 +441,18 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
// As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(TrustedImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
Label loop(this);
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ load64(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
// We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(TrustedImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+ move(TrustedImm64(JSValue::encode(jsBoolean(false))), regT0);
// isInstance jumps right down to here, to skip setting the result to false (it has already set true).
isInstance.link(this);
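Note: the instanceof loop walks the prototype chain in registers, with the 64-bit prototype slot now loaded via an explicit load64. Its control flow, rendered as pseudo-C (the helpers are hypothetical stand-ins for the loads shown above):

    #include <cstdint>

    uint64_t structureOf(uint64_t cell);      // loadPtr(Address(regT2, JSCell::structureOffset()), regT2)
    uint64_t prototypeOf(uint64_t structure); // load64(Address(regT2, Structure::prototypeOffset()), regT2)
    bool isCell(uint64_t v);                  // tag test, see JITInlineMethods.h below

    bool instanceOfShape(uint64_t value, uint64_t proto) {
        uint64_t cur = value;                 // result register optimistically holds encode(true)
        do {
            cur = prototypeOf(structureOf(cur));
            if (cur == proto)
                return true;                  // isInstance: skip the overwrite below
        } while (isCell(cur));                // a non-cell prototype ends the chain
        return false;                         // dropped out: result set to encode(false)
    }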
@@ -467,7 +467,7 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT0);
Jump isCell = emitJumpIfJSCell(regT0);
- comparePtr(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
+ compare64(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
Jump done = jump();
isCell.link(this);
@@ -493,8 +493,8 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction)
unsigned value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
- testPtr(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
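Note: the is_boolean test works because false and true encode as adjacent small constants (assumed 0x6 and 0x7 here), so XOR with ValueFalse maps them to 0 and 1 while every other value keeps some higher bit set:

    #include <cstdint>

    static const uint64_t ValueFalse = 0x6;   // assumed encoding of jsBoolean(false)

    // xor64(TrustedImm32(ValueFalse), regT0); test64(Zero, regT0, TrustedImm32(~1), regT0);
    bool isBoolean(uint64_t v) {
        return ((v ^ ValueFalse) & ~1ull) == 0;
    }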
@@ -505,7 +505,7 @@ void JIT::emit_op_is_number(Instruction* currentInstruction)
unsigned value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
- testPtr(NonZero, regT0, tagTypeNumberRegister, regT0);
+ test64(NonZero, regT0, tagTypeNumberRegister, regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
@@ -553,7 +553,7 @@ void JIT::emit_op_construct(Instruction* currentInstruction)
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
int activation = currentInstruction[1].u.operand;
- Jump activationNotCreated = branchTestPtr(Zero, addressFor(activation));
+ Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
JITStubCall stubCall(this, cti_op_tear_off_activation);
stubCall.addArgument(activation, regT2);
stubCall.call();
@@ -565,7 +565,7 @@ void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
int arguments = currentInstruction[1].u.operand;
int activation = currentInstruction[2].u.operand;
- Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments))));
+ Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments))));
JITStubCall stubCall(this, cti_op_tear_off_arguments);
stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
stubCall.addArgument(activation, regT2);
@@ -631,13 +631,6 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
ret();
}
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -662,13 +655,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
@@ -677,50 +663,6 @@ void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
-{
- // Fast case
- void* globalObject = m_codeBlock->globalObject();
- unsigned currentIndex = m_globalResolveInfoIndex++;
- GlobalResolveInfo* resolveInfoAddress = &(m_codeBlock->globalResolveInfo(currentIndex));
-
- // Check Structure of global object
- move(TrustedImmPtr(globalObject), regT0);
- move(TrustedImmPtr(resolveInfoAddress), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset()))); // Structures don't match
-
- // Load cached property
- // Assume that the global object always uses external storage.
- load32(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT1);
- compileGetDirectOffset(regT0, regT0, regT1, regT0, KnownNotFinal);
- emitValueProfilingSite();
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(TrustedImm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.callWithValueProfiling(dst);
-}
-
void JIT::emit_op_not(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
@@ -728,9 +670,9 @@ void JIT::emit_op_not(Instruction* currentInstruction)
// Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
// clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
// Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -740,11 +682,11 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0)))), target);
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target);
Jump isNonZero = emitJumpIfImmediateInteger(regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))));
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target);
+ addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))));
isNonZero.link(this);
}
@@ -766,8 +708,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);
isNotMasqueradesAsUndefined.link(this);
masqueradesGlobalObjectIsForeign.link(this);
@@ -789,8 +731,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);
wasNotImmediate.link(this);
}
@@ -814,32 +756,16 @@ void JIT::emit_op_eq(Instruction* currentInstruction)
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_this);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump isZero = branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0))));
+ Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0))));
addJump(emitJumpIfImmediateInteger(regT0), target);
- addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))));
+ addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target);
+ addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))));
isZero.link(this);
}
@@ -859,7 +785,7 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xorPtr(regT1, regT0);
+ xor64(regT1, regT0);
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -868,7 +794,7 @@ void JIT::emit_op_bitor(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- orPtr(regT1, regT0);
+ or64(regT1, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -910,7 +836,7 @@ void JIT::emit_op_get_pnames(Instruction* currentInstruction)
getPnamesStubCall.addArgument(regT0);
getPnamesStubCall.call(dst);
load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- storePtr(tagTypeNumberRegister, payloadFor(i));
+ store64(tagTypeNumberRegister, addressFor(i));
store32(TrustedImm32(Int32Tag), intTagFor(size));
store32(regT3, intPayloadFor(size));
Jump end = jump();
@@ -947,7 +873,7 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
loadPtr(addressFor(it), regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
+ load64(BaseIndex(regT2, regT0, TimesEight), regT2);
emitPutVirtualRegister(dst, regT2);
@@ -968,7 +894,7 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
addJump(branchTestPtr(Zero, Address(regT3)), target);
Label checkPrototype(this);
- loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ load64(Address(regT2, Structure::prototypeOffset()), regT2);
callHasProperty.append(emitJumpIfNotJSCell(regT2));
loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
@@ -1016,7 +942,7 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// Jump slow if both are cells (to cover strings).
move(regT0, regT2);
- orPtr(regT1, regT2);
+ or64(regT1, regT2);
addSlowCase(emitJumpIfJSCell(regT2));
// Jump slow if either is a double. First test if it's an integer, which is fine, and then test
@@ -1029,9 +955,9 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
rightOK.link(this);
if (type == OpStrictEq)
- comparePtr(Equal, regT1, regT0, regT0);
+ compare64(Equal, regT1, regT0, regT0);
else
- comparePtr(NotEqual, regT1, regT0, regT0);
+ compare64(NotEqual, regT1, regT0, regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
@@ -1077,8 +1003,8 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
move(regT0, callFrameRegister);
peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
- loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
- storePtr(TrustedImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
+ load64(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
+ store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -1147,9 +1073,9 @@ void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_throw_reference_error);
if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber())
- stubCall.addArgument(TrustedImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.addArgument(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
else
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.addArgument(Imm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
stubCall.call();
}
@@ -1189,8 +1115,8 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- comparePtr(Equal, regT0, TrustedImm32(ValueNull), regT0);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ compare64(Equal, regT0, TrustedImm32(ValueNull), regT0);
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
@@ -1221,8 +1147,8 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
isImmediate.link(this);
- andPtr(TrustedImm32(~TagBitUndefined), regT0);
- comparePtr(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
+ and64(TrustedImm32(~TagBitUndefined), regT0);
+ compare64(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
@@ -1247,7 +1173,7 @@ void JIT::emit_op_create_activation(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- Jump activationCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
emitPutVirtualRegister(dst);
activationCreated.link(this);
@@ -1257,7 +1183,7 @@ void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
JITStubCall(this, cti_op_create_arguments).call();
emitPutVirtualRegister(dst);
emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
@@ -1268,7 +1194,7 @@ void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- storePtr(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
+ store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
}
void JIT::emit_op_convert_this(Instruction* currentInstruction)
@@ -1327,16 +1253,16 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
linkSlowCase(iter);
if (shouldEmitProfiling())
- move(TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(jsUndefined()))), regT0);
- Jump isNotUndefined = branchPtr(NotEqual, regT1, TrustedImmPtr(JSValue::encode(jsUndefined())));
+ move(TrustedImm64((JSValue::encode(jsUndefined()))), regT0);
+ Jump isNotUndefined = branch64(NotEqual, regT1, TrustedImm64(JSValue::encode(jsUndefined())));
emitValueProfilingSite();
- move(TrustedImmPtr(globalThis), regT0);
+ move(TrustedImm64(JSValue::encode(JSValue(static_cast<JSCell*>(globalThis)))), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
linkSlowCase(iter);
if (shouldEmitProfiling())
- move(TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(m_globalData->stringStructure.get()))), regT0);
+ move(TrustedImm64(JSValue::encode(m_globalData->stringStructure.get())), regT0);
isNotUndefined.link(this);
emitValueProfilingSite();
JITStubCall stubCall(this, cti_op_convert_this);
@@ -1356,7 +1282,7 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
JITStubCall stubCall(this, cti_op_not);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
@@ -1508,7 +1434,7 @@ void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
sub32(TrustedImm32(1), regT0);
emitFastArithReTagImmediate(regT0, regT0);
@@ -1534,7 +1460,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
emitGetVirtualRegister(property, regT1);
addSlowCase(emitJumpIfNotImmediateInteger(regT1));
add32(TrustedImm32(1), regT1);
@@ -1544,7 +1470,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
neg32(regT1);
signExtend32ToPtr(regT1, regT1);
- loadPtr(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+ load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
emitValueProfilingSite();
emitPutVirtualRegister(dst, regT0);
}
@@ -1571,51 +1497,413 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
stubCall.callWithValueProfiling(dst);
}
+void JIT::emit_op_put_to_base(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int id = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ PutToBaseOperation* operation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+ switch (operation->m_kind) {
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
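+ // Fall through: once the watchpoint byte has been checked, the store proceeds as an ordinary global variable put.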
+ case PutToBaseOperation::GlobalVariablePut: {
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ if (operation->m_isDynamic) {
+ emitGetVirtualRegister(base, regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(globalObject)));
+ }
+ emitGetVirtualRegister(value, regT0);
+ store64(regT0, operation->m_registerAddress);
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ return;
+ }
+ case PutToBaseOperation::VariablePut: {
+ emitGetVirtualRegisters(base, regT0, value, regT1);
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT2);
+ store64(regT1, Address(regT2, operation->m_offset * sizeof(Register)));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ return;
+ }
+
+ case PutToBaseOperation::GlobalPropertyPut: {
+ emitGetVirtualRegisters(base, regT0, value, regT1);
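+ // Guard that the base still has the Structure recorded when this operation was linked; a mismatch takes the slow case.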
+ loadPtr(&operation->m_structure, regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2));
+ ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ load32(&operation->m_offsetInButterfly, regT3);
+ signExtend32ToPtr(regT3, regT3);
+ store64(regT1, BaseIndex(regT2, regT3, TimesEight));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ return;
+ }
+
+ case PutToBaseOperation::Uninitialised:
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ JITStubCall stubCall(this, cti_op_put_to_base);
+
+ stubCall.addArgument(TrustedImm32(base));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+ stubCall.addArgument(TrustedImm32(value));
+ stubCall.addArgument(TrustedImmPtr(operation));
+ stubCall.call();
+ return;
+ }
+}
+
#endif // USE(JSVALUE64)
-void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
+void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const int* baseVR, const int* valueVR)
{
- int skip = currentInstruction[5].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
-
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- addSlowCase(checkStructure(regT0, m_codeBlock->globalObject()->activationStructure()));
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- activationNotCreated.link(this);
+
+#if USE(JSVALUE32_64)
+ unmap();
+#else
+ killLastResultRegister();
+#endif
+
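+ // No operations have been computed for this resolve yet: jump unconditionally to the slow case.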
+ if (resolveOperations->isEmpty()) {
+ addSlowCase(jump());
+ return;
}
- while (skip--) {
- addSlowCase(checkStructure(regT0, m_codeBlock->globalObject()->activationStructure()));
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+
+ const RegisterID value = regT0;
+#if USE(JSVALUE32_64)
+ const RegisterID valueTag = regT1;
+#endif
+ const RegisterID scope = regT2;
+ const RegisterID scratch = regT3;
+
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ ResolveOperation* pc = resolveOperations->data();
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, scope);
+ bool setBase = false;
+ bool resolvingBase = true;
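+ // First walk the operations that establish the base (scope skips and checks); the remaining operation fetches the value.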
+ while (resolvingBase) {
+ switch (pc->m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ move(TrustedImmPtr(globalObject), value);
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::CellTag), valueTag);
+#endif
+ emitValueProfilingSite();
+ emitStoreCell(*baseVR, value);
+ return;
+ case ResolveOperation::SetBaseToGlobal:
+ ASSERT(baseVR);
+ setBase = true;
+ move(TrustedImmPtr(globalObject), scratch);
+ emitStoreCell(*baseVR, scratch);
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::SetBaseToUndefined: {
+ ASSERT(baseVR);
+ setBase = true;
+#if USE(JSVALUE64)
+ move(TrustedImm64(JSValue::encode(jsUndefined())), scratch);
+ emitPutVirtualRegister(*baseVR, scratch);
+#else
+ emitStore(*baseVR, jsUndefined());
+#endif
+ resolvingBase = false;
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SetBaseToScope:
+ ASSERT(baseVR);
+ setBase = true;
+ emitStoreCell(*baseVR, scope);
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::ReturnScopeAsBase:
+ emitStoreCell(*baseVR, scope);
+ ASSERT(!value);
+ move(scope, value);
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::CellTag), valueTag);
+#endif
+ emitValueProfilingSite();
+ return;
+ case ResolveOperation::SkipTopScopeNode: {
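+ // If the activation has not been created, the top scope node is not on the chain, so there is nothing to skip.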
+#if USE(JSVALUE32_64)
+ Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+#else
+ Jump activationNotCreated = branchTest64(Zero, addressFor(m_codeBlock->activationRegister()));
+#endif
+ loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
+ activationNotCreated.link(this);
+ ++pc;
+ break;
+ }
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
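+ // Walk to the top of the scope chain, taking the slow case if any intervening scope is neither an activation nor a name scope.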
+ move(scope, regT3);
+ loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
+ Jump atTopOfScope = branchTestPtr(Zero, regT1);
+ Label loopStart = label();
+ loadPtr(Address(regT3, JSCell::structureOffset()), regT2);
+ Jump isActivation = branchPtr(Equal, regT2, TrustedImmPtr(globalObject->activationStructure()));
+ addSlowCase(branchPtr(NotEqual, regT2, TrustedImmPtr(globalObject->nameScopeStructure())));
+ isActivation.link(this);
+ move(regT1, regT3);
+ loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
+ branchTestPtr(NonZero, regT1, loopStart);
+ atTopOfScope.link(this);
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SkipScopes: {
+ for (int i = 0; i < pc->m_scopesToSkip; i++)
+ loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
+ ++pc;
+ break;
+ }
+ case ResolveOperation::Fail:
+ addSlowCase(jump());
+ return;
+ default:
+ resolvingBase = false;
+ }
}
- emit_op_resolve_global(currentInstruction, true);
+ if (baseVR && !setBase)
+ emitStoreCell(*baseVR, scope);
+
+ ASSERT(valueVR);
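+ // The base (if any) is resolved; the remaining operation describes how to load the value.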
+ ResolveOperation* resolveValueOperation = pc;
+ switch (resolveValueOperation->m_operation) {
+ case ResolveOperation::GetAndReturnGlobalProperty: {
+ // Verify structure.
+ move(TrustedImmPtr(globalObject), regT2);
+ move(TrustedImmPtr(resolveValueOperation), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_structure)), regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
+
+ // Load property.
+ load32(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_offset)), regT3);
+
+ // regT2: GlobalObject
+ // regT3: offset
+#if USE(JSVALUE32_64)
+ compileGetDirectOffset(regT2, valueTag, value, regT3, KnownNotFinal);
+#else
+ compileGetDirectOffset(regT2, value, regT3, regT1, KnownNotFinal);
+#endif
+ break;
+ }
+ case ResolveOperation::GetAndReturnGlobalVarWatchable:
+ case ResolveOperation::GetAndReturnGlobalVar: {
+#if USE(JSVALUE32_64)
+ load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), valueTag);
+ load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), value);
+#else
+ load64(reinterpret_cast<char*>(pc->m_registerAddress), value);
+#endif
+ break;
+ }
+ case ResolveOperation::GetAndReturnScopedVar: {
+ loadPtr(Address(scope, JSVariableObject::offsetOfRegisters()), scope);
+#if USE(JSVALUE32_64)
+ load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTag);
+ load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+#else
+ load64(Address(scope, pc->m_offset * sizeof(Register)), value);
+#endif
+ break;
+ }
+ default:
+ CRASH();
+ return;
+ }
+
+#if USE(JSVALUE32_64)
+ emitStore(*valueVR, valueTag, value);
+#else
+ emitPutVirtualRegister(*valueVR, value);
+#endif
+ emitValueProfilingSite();
}
-void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_link_resolve_operations(ResolveOperations* resolveOperations, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
- int skip = currentInstruction[5].u.operand;
- while (skip--)
+ if (resolveOperations->isEmpty()) {
linkSlowCase(iter);
- JITStubCall resolveStubCall(this, cti_op_resolve);
- resolveStubCall.addArgument(TrustedImmPtr(ident));
- resolveStubCall.call(dst);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(TrustedImm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.callWithValueProfiling(dst);
+ return;
+ }
+
+ ResolveOperation* pc = resolveOperations->data();
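+ // Mirror the fast path above, linking one slow case for each addSlowCase() it emitted, in the same order.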
+ bool resolvingBase = true;
+ while (resolvingBase) {
+ switch (pc->m_operation) {
+ case ResolveOperation::ReturnGlobalObjectAsBase:
+ return;
+ case ResolveOperation::SetBaseToGlobal:
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::SetBaseToUndefined: {
+ resolvingBase = false;
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SetBaseToScope:
+ resolvingBase = false;
+ ++pc;
+ break;
+ case ResolveOperation::ReturnScopeAsBase:
+ return;
+ case ResolveOperation::SkipTopScopeNode: {
+ ++pc;
+ break;
+ }
+ case ResolveOperation::SkipScopes:
+ ++pc;
+ break;
+ case ResolveOperation::Fail:
+ linkSlowCase(iter);
+ return;
+ case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
+ linkSlowCase(iter);
+ ++pc;
+ break;
+ }
+ default:
+ resolvingBase = false;
+ }
+ }
+ ResolveOperation* resolveValueOperation = pc;
+ switch (resolveValueOperation->m_operation) {
+ case ResolveOperation::GetAndReturnGlobalProperty: {
+ linkSlowCase(iter);
+ break;
+ }
+ case ResolveOperation::GetAndReturnGlobalVarWatchable:
+ case ResolveOperation::GetAndReturnGlobalVar:
+ break;
+ case ResolveOperation::GetAndReturnScopedVar:
+ break;
+ default:
+ CRASH();
+ return;
+ }
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
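+ // Operands: 1 = dst, 2 = identifier index, 3 = ResolveOperations index.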
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[3].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ emit_resolve_operations(operations, 0, &dst);
+}
+
+void JIT::emitSlow_op_resolve(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[3].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(operations));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ emit_resolve_operations(operations, &dst, 0);
+}
+
+void JIT::emitSlow_op_resolve_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(operations));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(currentInstruction[5].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ emit_resolve_operations(operations, &base, &value);
+}
+
+void JIT::emitSlow_op_resolve_with_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(operations));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->putToBaseOperation(currentInstruction[5].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ emit_resolve_operations(operations, &base, &value);
+}
+
+void JIT::emitSlow_op_resolve_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveOperations* operations = m_codeBlock->resolveOperations(currentInstruction[4].u.operand);
+ emitSlow_link_resolve_operations(operations, iter);
+ JITStubCall stubCall(this, cti_op_resolve_with_this);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(operations));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+}
+
+void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int id = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+ int operation = currentInstruction[4].u.operand;
+
+ PutToBaseOperation* putToBaseOperation = m_codeBlock->putToBaseOperation(operation);
+ switch (putToBaseOperation->m_kind) {
+ case PutToBaseOperation::VariablePut:
+ return;
+
+ case PutToBaseOperation::GlobalVariablePut:
+ if (!putToBaseOperation->m_isDynamic)
+ return;
+ linkSlowCase(iter);
+ break;
+
+ case PutToBaseOperation::Uninitialised:
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ return;
+
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ case PutToBaseOperation::GlobalPropertyPut:
+ linkSlowCase(iter);
+ break;
+ }
+
+ JITStubCall stubCall(this, cti_op_put_to_base);
+
+ stubCall.addArgument(TrustedImm32(base));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+ stubCall.addArgument(TrustedImm32(value));
+ stubCall.addArgument(TrustedImmPtr(putToBaseOperation));
+ stubCall.call();
}
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
@@ -1633,7 +1921,7 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
#if USE(JSVALUE32_64)
lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
#else
- lazyJump = branchTestPtr(NonZero, addressFor(dst));
+ lazyJump = branchTest64(NonZero, addressFor(dst));
#endif
}
@@ -1660,34 +1948,6 @@ void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
- int length = currentInstruction[3].u.operand;
- if (m_codeBlock->globalObject()->isHavingABadTime()
- || CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length)))) {
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
- return;
- }
- int dst = currentInstruction[1].u.operand;
- int values = currentInstruction[2].u.operand;
-
- emitAllocateJSArray(values, length, regT0, regT1, regT2, regT3);
- emitStoreCell(dst, regT0);
-}
-
-void JIT::emitSlow_op_new_array(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- // If the allocation would be oversize, we will already make the proper stub call above in
- // emit_op_new_array.
- int length = currentInstruction[3].u.operand;
- if (m_codeBlock->globalObject()->isHavingABadTime()
- || CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length))))
- return;
- linkSlowCase(iter); // We're having a bad time.
- linkSlowCase(iter); // Not enough space in CopiedSpace for storage.
- linkSlowCase(iter); // Not enough space in MarkedSpace for cell.
-
JITStubCall stubCall(this, cti_op_new_array);
stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index db5365535..44123be19 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -36,6 +36,7 @@
#include "JSCell.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
#include "LinkBuffer.h"
namespace JSC {
@@ -718,13 +719,6 @@ void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
argsNotCreated.link(this);
}
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -760,13 +754,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
@@ -775,53 +762,6 @@ void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
-{
- // FIXME: Optimize to use patching instead of so many memory accesses.
-
- unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = m_codeBlock->globalObject();
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
- GlobalResolveInfo* resolveInfoAddress = &m_codeBlock->globalResolveInfo(currentIndex);
-
-
- // Verify structure.
- move(TrustedImmPtr(globalObject), regT2);
- move(TrustedImmPtr(resolveInfoAddress), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
-
- // Load property.
- load32(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT3);
- compileGetDirectOffset(regT2, regT1, regT0, regT3, KnownNotFinal);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(TrustedImm32(currentIndex));
- stubCall.callWithValueProfiling(dst);
-}
-
void JIT::emit_op_not(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -1214,22 +1154,6 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitStoreBool(dst, regT1);
}
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_this);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_throw(Instruction* currentInstruction)
{
unsigned exception = currentInstruction[1].u.operand;
@@ -1686,6 +1610,71 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
stubCall.callWithValueProfiling(dst);
}
+void JIT::emit_op_put_to_base(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int id = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ PutToBaseOperation* operation = m_codeBlock->putToBaseOperation(currentInstruction[4].u.operand);
+
+ switch (operation->m_kind) {
+ case PutToBaseOperation::GlobalVariablePutChecked:
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
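+ // Fall through: once the watchpoint byte has been checked, the store proceeds as an ordinary global variable put.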
+ case PutToBaseOperation::GlobalVariablePut: {
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ if (operation->m_isDynamic)
+ addSlowCase(branchPtr(NotEqual, payloadFor(base), TrustedImmPtr(globalObject)));
+
+ emitLoad(value, regT1, regT0);
+ storePtr(regT0, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ storePtr(regT1, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ break;
+ }
+ case PutToBaseOperation::VariablePut: {
+ loadPtr(payloadFor(base), regT3);
+ emitLoad(value, regT1, regT0);
+ loadPtr(Address(regT3, JSVariableObject::offsetOfRegisters()), regT2);
+ store32(regT0, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT1, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(regT3, regT1, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ break;
+ }
+
+ case PutToBaseOperation::GlobalPropertyPut: {
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ loadPtr(payloadFor(base), regT3);
+ emitLoad(value, regT1, regT0);
+ loadPtr(&operation->m_structure, regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), regT2));
+ ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
+ loadPtr(Address(regT3, JSObject::butterflyOffset()), regT2);
+ load32(&operation->m_offsetInButterfly, regT3);
+ storePtr(regT0, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ storePtr(regT1, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ if (Heap::isWriteBarrierEnabled())
+ emitWriteBarrier(globalObject, regT1, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ break;
+ }
+
+ case PutToBaseOperation::Uninitialised:
+ case PutToBaseOperation::Readonly:
+ case PutToBaseOperation::Generic:
+ JITStubCall stubCall(this, cti_op_put_to_base);
+
+ stubCall.addArgument(TrustedImm32(base));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
+ stubCall.addArgument(TrustedImm32(value));
+ stubCall.addArgument(TrustedImmPtr(operation));
+ stubCall.call();
+ break;
+ }
+}
+
} // namespace JSC
#endif // USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 8a4017f1d..b7be821f6 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -137,7 +137,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
#if !ASSERT_DISABLED
- Jump resultOK = branchTestPtr(NonZero, regT0);
+ Jump resultOK = branchTest64(NonZero, regT0);
breakpoint();
resultOK.link(this);
#endif
@@ -155,8 +155,8 @@ JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
- loadPtr(BaseIndex(regT2, regT1, ScalePtr), regT0);
- slowCases.append(branchTestPtr(Zero, regT0));
+ load64(BaseIndex(regT2, regT1, TimesEight), regT0);
+ slowCases.append(branchTest64(Zero, regT0));
return slowCases;
}
@@ -171,8 +171,8 @@ JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, ArrayStorage::vectorOffset()), regT0);
- slowCases.append(branchTestPtr(Zero, regT0));
+ load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
+ slowCases.append(branchTest64(Zero, regT0));
return slowCases;
}
@@ -189,7 +189,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // base array check
Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()));
emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
- Jump failed = branchTestPtr(Zero, regT0);
+ Jump failed = branchTest64(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
failed.link(this);
@@ -235,7 +235,7 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID
neg32(offset);
}
signExtend32ToPtr(offset, offset);
- loadPtr(BaseIndex(scratch, offset, ScalePtr, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
+ load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
@@ -248,7 +248,7 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
unsigned i = currentInstruction[6].u.operand;
emitGetVirtualRegister(property, regT0);
- addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
+ addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
emitGetVirtualRegisters(base, regT0, iter, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
@@ -337,7 +337,7 @@ JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, Patch
Label storeResult = label();
emitGetVirtualRegister(value, regT3);
- storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr));
+ store64(regT3, BaseIndex(regT2, regT1, TimesEight));
Jump done = jump();
outOfBounds.link(this);
@@ -367,11 +367,11 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
- Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Label storeResult(this);
emitGetVirtualRegister(value, regT3);
- storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Jump end = jump();
empty.link(this);
@@ -535,7 +535,7 @@ void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
addSlowCase(structureCheck);
ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
+ DataLabelCompact displacementLabel = load64WithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
Label putResult(this);
@@ -602,7 +602,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
+ DataLabel32 displacementLabel = store64WithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
END_UNINTERRUPTED_SEQUENCE(sequencePutById);
@@ -636,35 +636,35 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
if (isInlineOffset(cachedOffset)) {
- storePtr(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
+ store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
return;
}
loadPtr(Address(base, JSObject::butterflyOffset()), base);
- storePtr(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
+ store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
if (isInlineOffset(cachedOffset)) {
- loadPtr(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
+ load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
return;
}
loadPtr(Address(base, JSObject::butterflyOffset()), result);
- loadPtr(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
+ load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
if (isInlineOffset(cachedOffset)) {
- loadPtr(base->locationForOffset(cachedOffset), result);
+ load64(base->locationForOffset(cachedOffset), result);
return;
}
loadPtr(base->butterflyAddress(), result);
- loadPtr(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
+ load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
@@ -1190,90 +1190,35 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
-
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
- loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int skip = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
-
- emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-
- loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
- storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- loadPtr(currentInstruction[2].u.registerPointer, regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
JSGlobalObject* globalObject = m_codeBlock->globalObject();
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-
- storePtr(regT0, currentInstruction[1].u.registerPointer);
+
+ store64(regT0, currentInstruction[1].u.registerPointer);
if (Heap::isWriteBarrierEnabled())
emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
-void JIT::emit_op_put_global_var_check(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-
+
addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));
JSGlobalObject* globalObject = m_codeBlock->globalObject();
-
- storePtr(regT0, currentInstruction[1].u.registerPointer);
+
+ store64(regT0, currentInstruction[1].u.registerPointer);
if (Heap::isWriteBarrierEnabled())
emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
-void JIT::emitSlow_op_put_global_var_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_put_global_var_check);
+
+ JITStubCall stubCall(this, cti_op_init_global_const_check);
stubCall.addArgument(regT0);
stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
stubCall.call();
@@ -1585,8 +1530,8 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
convertInt32ToDouble(resultPayload, fpRegT0);
addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
- moveDoubleToPtr(fpRegT0, resultPayload);
- subPtr(tagTypeNumberRegister, resultPayload);
+ moveDoubleTo64(fpRegT0, resultPayload);
+ sub64(tagTypeNumberRegister, resultPayload);
#else
moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
@@ -1596,7 +1541,7 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
}
#if USE(JSVALUE64)
- orPtr(tagTypeNumberRegister, resultPayload);
+ or64(tagTypeNumberRegister, resultPayload);
#else
move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
@@ -1645,8 +1590,8 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
}
#if USE(JSVALUE64)
- moveDoubleToPtr(fpRegT0, resultPayload);
- subPtr(tagTypeNumberRegister, resultPayload);
+ moveDoubleTo64(fpRegT0, resultPayload);
+ sub64(tagTypeNumberRegister, resultPayload);
#else
moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
@@ -1746,8 +1691,8 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
Jump ready = jump();
doubleCase.link(this);
slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
- addPtr(tagTypeNumberRegister, earlyScratch);
- movePtrToDouble(earlyScratch, fpRegT0);
+ add64(tagTypeNumberRegister, earlyScratch);
+ move64ToDouble(earlyScratch, fpRegT0);
ready.link(this);
#else
emitLoad(value, lateScratch, earlyScratch);
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index a4a547889..5d619b94b 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -1232,72 +1232,7 @@ void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowC
stubCall.call(dst);
}
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
-
- loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int index = currentInstruction[1].u.operand;
- int skip = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
-
- loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
- emitStore(index, regT1, regT0, regT3);
- emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- WriteBarrier<Unknown>* registerPointer = currentInstruction[2].u.registerPointer;
-
- load32(registerPointer->tagPointer(), regT1);
- load32(registerPointer->payloadPointer(), regT0);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
int value = currentInstruction[2].u.operand;
@@ -1314,10 +1249,10 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction)
store32(regT1, registerPointer->tagPointer());
store32(regT0, registerPointer->payloadPointer());
- map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_init_global_const), value, regT1, regT0);
}
-void JIT::emit_op_put_global_var_check(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
{
WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
int value = currentInstruction[2].u.operand;
@@ -1338,11 +1273,11 @@ void JIT::emit_op_put_global_var_check(Instruction* currentInstruction)
unmap();
}
-void JIT::emitSlow_op_put_global_var_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_put_global_var_check);
+ JITStubCall stubCall(this, cti_op_init_global_const_check);
stubCall.addArgument(regT1, regT0);
stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
stubCall.call();
diff --git a/Source/JavaScriptCore/jit/JITStubCall.h b/Source/JavaScriptCore/jit/JITStubCall.h
index 352956559..25755886a 100644
--- a/Source/JavaScriptCore/jit/JITStubCall.h
+++ b/Source/JavaScriptCore/jit/JITStubCall.h
@@ -94,15 +94,15 @@ namespace JSC {
{
}
-#if USE(JSVALUE32_64)
JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(stub)
+#if USE(JSVALUE32_64) || !ASSERT_DISABLED
, m_returnType(Value)
+#endif
, m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
-#endif
// Arguments are added first to last.
@@ -137,7 +137,11 @@ namespace JSC {
void addArgument(JIT::RegisterID argument)
{
+#if USE(JSVALUE32_64)
m_jit->poke(argument, m_stackIndex);
+#else
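+ // On 64-bit a register argument fills a whole stack slot, so all 64 bits are poked.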
+ m_jit->poke64(argument, m_stackIndex);
+#endif
m_stackIndex += stackIndexStep;
}
@@ -148,6 +152,18 @@ namespace JSC {
m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
m_stackIndex += stackIndexStep;
}
+#else
+ void addArgument(JIT::TrustedImm64 argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::Imm64 argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
#endif
void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
@@ -179,9 +195,9 @@ namespace JSC {
void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
{
if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
- addArgument(JIT::ImmPtr(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
+ addArgument(JIT::Imm64(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
else {
- m_jit->loadPtr(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
+ m_jit->load64(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
addArgument(scratchRegister);
}
m_jit->killLastResultRegister();
@@ -242,7 +258,7 @@ namespace JSC {
#else
JIT::Call call(unsigned dst) // dst is a virtual register.
{
- ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
+ ASSERT(m_returnType == Value || m_returnType == Cell);
JIT::Call call = this->call();
m_jit->emitPutVirtualRegister(dst);
return call;
@@ -250,7 +266,7 @@ namespace JSC {
JIT::Call callWithValueProfiling(unsigned dst)
{
- ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
+ ASSERT(m_returnType == Value || m_returnType == Cell);
JIT::Call call = this->call();
ASSERT(JIT::returnValueRegister == JIT::regT0);
m_jit->emitValueProfilingSite();
@@ -261,10 +277,8 @@ namespace JSC {
JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
{
-#if USE(JSVALUE32_64)
+#if USE(JSVALUE32_64) || !ASSERT_DISABLED
ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
-#else
- ASSERT(m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
#endif
JIT::Call call = this->call();
if (dst != JIT::returnValueRegister)
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index f1f2f4c9d..a16b328ad 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -2375,7 +2375,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_array_buffer)
return constructArray(stackFrame.callFrame, stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32());
}
-DEFINE_STUB_FUNCTION(void, op_put_global_var_check)
+DEFINE_STUB_FUNCTION(void, op_init_global_const_check)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2390,11 +2390,22 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolve(callFrame, stackFrame.args[0].identifier());
+ JSValue result = JSScope::resolve(callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].resolveOperations());
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
+DEFINE_STUB_FUNCTION(void, op_put_to_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
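+ // Arguments: [0] base virtual register, [1] identifier, [2] value virtual register, [3] PutToBaseOperation*.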
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue base = callFrame->r(stackFrame.args[0].int32()).jsValue();
+ JSValue value = callFrame->r(stackFrame.args[2].int32()).jsValue();
+ JSScope::resolvePut(callFrame, base, stackFrame.args[1].identifier(), value, stackFrame.args[3].putToBaseOperation());
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2719,14 +2730,14 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return JSValue::encode(JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), false));
+ return JSValue::encode(JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), false, stackFrame.args[1].resolveOperations(), stackFrame.args[2].putToBaseOperation()));
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
{
STUB_INIT_STACK_FRAME(stackFrame);
- if (JSValue result = JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), true))
+ if (JSValue result = JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), true, stackFrame.args[1].resolveOperations(), stackFrame.args[2].putToBaseOperation()))
return JSValue::encode(result);
VM_THROW_EXCEPTION();
}
@@ -2745,36 +2756,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_ensure_property_exists)
return JSValue::encode(base);
}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue result = JSScope::resolveSkip(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].int32());
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[0].identifier();
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned globalResolveInfoIndex = stackFrame.args[1].int32();
- GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfo(globalResolveInfoIndex);
-
- JSValue result = JSScope::resolveGlobal(
- callFrame,
- ident,
- callFrame->lexicalGlobalObject(),
- &globalResolveInfo.structure,
- &globalResolveInfo.offset
- );
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
-}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
{
@@ -3055,7 +3036,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolveWithBase(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()]);
+ JSValue result = JSScope::resolveWithBase(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()], stackFrame.args[2].resolveOperations(), stackFrame.args[3].putToBaseOperation());
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -3065,7 +3046,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_this)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolveWithThis(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()]);
+ JSValue result = JSScope::resolveWithThis(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()], stackFrame.args[2].resolveOperations());
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index 4a3b252d6..6e3141e22 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -35,6 +35,7 @@
#include "LowLevelInterpreter.h"
#include "MacroAssemblerCodeRef.h"
#include "Register.h"
+#include "ResolveOperation.h"
#include "ThunkGenerators.h"
#include <wtf/HashMap.h>
@@ -82,6 +83,8 @@ namespace JSC {
JSString* jsString() { return static_cast<JSString*>(asPointer); }
Structure* structure() { return static_cast<Structure*>(asPointer); }
ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
+ ResolveOperations* resolveOperations() { return static_cast<ResolveOperations*>(asPointer); }
+ PutToBaseOperation* putToBaseOperation() { return static_cast<PutToBaseOperation*>(asPointer); }
};
struct TrampolineStructure {
@@ -398,11 +401,9 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_ensure_property_exists(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_global_dynamic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_resolve_with_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+ void JIT_STUB cti_op_put_to_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
@@ -450,7 +451,7 @@ extern "C" {
void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void JIT_STUB cti_op_put_global_var_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+ void JIT_STUB cti_op_init_global_const_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_throw_reference_error(STUB_ARGS_DECLARATION) WTF_INTERNAL;
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 8d9a0c800..d2a91ba0a 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -270,36 +270,36 @@ namespace JSC {
#if USE(JSVALUE64)
ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
{
- return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+ return branchTest64(NonZero, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
- return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+ return branchTest64(Zero, reg, tagTypeNumberRegister);
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
{
- loadPtr(addressFor(virtualRegisterIndex), dst);
- return branchTestPtr(NonZero, dst, tagMaskRegister);
+ load64(addressFor(virtualRegisterIndex), dst);
+ return branchTest64(NonZero, dst, tagMaskRegister);
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
{
- loadPtr(addressFor(virtualRegisterIndex), dst);
- Jump result = branchPtr(Below, dst, tagTypeNumberRegister);
+ load64(addressFor(virtualRegisterIndex), dst);
+ Jump result = branch64(Below, dst, tagTypeNumberRegister);
zeroExtend32ToPtr(dst, dst);
return result;
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
{
- loadPtr(addressFor(virtualRegisterIndex), scratch);
+ load64(addressFor(virtualRegisterIndex), scratch);
Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
- Jump notInt = branchPtr(Below, scratch, tagTypeNumberRegister);
+ Jump notInt = branch64(Below, scratch, tagTypeNumberRegister);
convertInt32ToDouble(scratch, dst);
Jump done = jump();
notInt.link(this);
- addPtr(tagTypeNumberRegister, scratch);
- movePtrToDouble(scratch, dst);
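+ // Un-box the double: adding tagTypeNumber (mod 2^64) undoes the offset applied when the value was boxed.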
+ add64(tagTypeNumberRegister, scratch);
+ move64ToDouble(scratch, dst);
done.link(this);
return notNumber;
}
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 560f7c833..9c7fbce81 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -90,9 +90,9 @@ namespace JSC {
void returnDouble(FPRegisterID src)
{
#if USE(JSVALUE64)
- moveDoubleToPtr(src, regT0);
- Jump zero = branchTestPtr(Zero, regT0);
- subPtr(tagTypeNumberRegister, regT0);
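+ // Box the double by subtracting tagTypeNumber from the raw bits; an all-zero pattern (+0.0) is returned as the integer 0 below.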
+ moveDoubleTo64(src, regT0);
+ Jump zero = branchTest64(Zero, regT0);
+ sub64(tagTypeNumberRegister, regT0);
Jump done = jump();
zero.link(this);
move(tagTypeNumberRegister, regT0);
@@ -151,7 +151,7 @@ namespace JSC {
void tagReturnAsInt32()
{
#if USE(JSVALUE64)
- orPtr(tagTypeNumberRegister, regT0);
+ or64(tagTypeNumberRegister, regT0);
#else
move(TrustedImm32(JSValue::Int32Tag), regT1);
#endif