Diffstat (limited to 'Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm')
 Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm | 2024 +-
 1 file changed, 1063 insertions(+), 961 deletions(-)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index 9689edf01..d9fed6bcd 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -22,54 +22,6 @@
# THE POSSIBILITY OF SUCH DAMAGE.
-# Crash course on the language that this is written in (which I just call
-# "assembly" even though it's more than that):
-#
-# - Mostly gas-style operand ordering. The last operand tends to be the
-# destination. So "a := b" is written as "mov b, a". But unlike gas,
-# comparisons are in-order, so "if (a < b)" is written as
-# "bilt a, b, ...".
-#
-# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
-# Currently this is just 32-bit so "i" and "p" are interchangeable
-# except when an op supports one but not the other.
-#
-# - In general, valid operands for macro invocations and instructions are
-# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
-# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
-# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
-# macros as operands. Instructions cannot take anonymous macros.
-#
-# - Labels must have names that begin with either "_" or ".". A "." label
-# is local and gets renamed before code gen to minimize namespace
-# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
-# may or may not be removed during code gen depending on whether the asm
-# conventions for C name mangling on the target platform mandate a "_"
-# prefix.
-#
-# - A "macro" is a lambda expression, which may be either anonymous or
-# named. But this has caveats. "macro" can take zero or more arguments,
-# which may be macros or any valid operands, but it can only return
-# code. But you can do Turing-complete things via continuation passing
-# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
-# that, since you'll just crash the assembler.
-#
-# - An "if" is a conditional on settings. Any identifier supplied in the
-# predicate of an "if" is assumed to be a #define that is available
-# during code gen. So you can't use "if" for computation in a macro, but
-# you can use it to select different pieces of code for different
-# platforms.
-#
-# - Arguments to macros follow lexical scoping rather than dynamic scoping.
-# Const's also follow lexical scoping and may override (hide) arguments
-# or other consts. All variables (arguments and constants) can be bound
-# to operands. Additionally, arguments (but not constants) can be bound
-# to macros.
-
-
-# Below we have a bunch of constant declarations. Each constant must have
-# a corresponding ASSERT() in LLIntData.cpp.
-
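
(Illustrative sketch, not part of the patch, of the conventions the removed
crash course describes: gas-style "src, dst" ordering, a local "." label that
is renamed before code gen, an anonymous macro argument, and an "if" on a
build setting. The macro name is made up:)

    macro ifNonZero(value, thenDo)
        btiz value, .done        # fall through only when value is non-zero
        thenDo()                 # invoke the anonymous macro argument
    .done:
    end

    if ARMv7
        ifNonZero(t0, macro () move t0, t1 end)
    end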
# Utilities
macro dispatch(advance)
addp advance * 4, PC
@@ -89,49 +41,47 @@ end
macro dispatchAfterCall()
loadi ArgumentCount + TagOffset[cfr], PC
- loadi 4[PC], t2
- storei t1, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- valueProfile(t1, t0, 28, t3)
- dispatch(8)
+ loadi 4[PC], t3
+ storei r1, TagOffset[cfr, t3, 8]
+ storei r0, PayloadOffset[cfr, t3, 8]
+ valueProfile(r1, r0, 4 * (CallOpCodeSize - 1), t3)
+ dispatch(CallOpCodeSize)
end
-macro cCall2(function, arg1, arg2)
+macro cCall2(function)
if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- move arg1, a0
- move arg2, a1
- call function
- elsif X86
- poke arg1, 0
- poke arg2, 1
call function
- elsif SH4
- setargs arg1, arg2
+ elsif X86 or X86_WIN
+ subp 8, sp
+ push a1
+ push a0
call function
+ addp 16, sp
elsif C_LOOP
- cloopCallSlowPath function, arg1, arg2
+ cloopCallSlowPath function, a0, a1
else
error
end
end
-# This barely works. arg3 and arg4 should probably be immediates.
-macro cCall4(function, arg1, arg2, arg3, arg4)
+macro cCall2Void(function)
+ if C_LOOP
+ cloopCallSlowPathVoid function, a0, a1
+ else
+ cCall2(function)
+ end
+end
+
+macro cCall4(function)
if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- move arg1, a0
- move arg2, a1
- move arg3, a2
- move arg4, a3
- call function
- elsif X86
- poke arg1, 0
- poke arg2, 1
- poke arg3, 2
- poke arg4, 3
call function
- elsif SH4
- setargs arg1, arg2, arg3, arg4
+ elsif X86 or X86_WIN
+ push a3
+ push a2
+ push a1
+ push a0
call function
+ addp 16, sp
elsif C_LOOP
error
else
@@ -140,214 +90,251 @@ macro cCall4(function, arg1, arg2, arg3, arg4)
end
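
(Illustrative, not part of the patch: under the reworked convention the caller
materializes the arguments in a0..a3 itself before invoking the macro, as
traceOperand() does further down. Roughly:)

    move cfr, a0                    # arg 0: the call frame
    move PC, a1                     # arg 1: the interpreter PC
    move 1, a2                      # arg 2: an immediate site id (made up)
    move 2, a3                      # arg 3: an operand index (made up)
    cCall4(_llint_trace_operand)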
macro callSlowPath(slowPath)
- cCall2(slowPath, cfr, PC)
- move t0, PC
- move t1, cfr
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ move r0, PC
end
-macro functionPrologue(extraStackSpace)
- if X86
- push cfr
- move sp, cfr
+macro doVMEntry(makeCall)
+ functionPrologue()
+ pushCalleeSaves()
+
+ # x86 needs to load arguments from the stack
+ if X86 or X86_WIN
+ loadp 16[cfr], a2
+ loadp 12[cfr], a1
+ loadp 8[cfr], a0
end
- pushCalleeSaves
- if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- push cfr
- push lr
+
+ const entry = a0
+ const vm = a1
+ const protoCallFrame = a2
+
+ # We are using t3, t4 and t5 as temporaries through the function.
+ # Since we have the guarantee that tX != aY when X != Y, we are safe from
+ # aliasing problems with our arguments.
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
end
- subp extraStackSpace, sp
-end
-macro functionEpilogue(extraStackSpace)
- addp extraStackSpace, sp
- if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- pop lr
- pop cfr
+ storep vm, VMEntryRecord::m_vm[sp]
+ loadp VM::topCallFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopCallFrame[sp]
+ loadp VM::topVMEntryFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp]
+
+ # Align stack pointer
+ if X86_WIN or MIPS
+ addp CallFrameAlignSlots * SlotSize, sp, t3
+ andp ~StackAlignmentMask, t3
+ subp t3, CallFrameAlignSlots * SlotSize, sp
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+ addp CallFrameAlignSlots * SlotSize, sp, t3
+ clrbp t3, StackAlignmentMask, t3
+ if ARMv7
+ subp t3, CallFrameAlignSlots * SlotSize, t3
+ move t3, sp
+ else
+ subp t3, CallFrameAlignSlots * SlotSize, sp
+ end
end
- popCalleeSaves
- if X86
- pop cfr
+
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4
+ addp CallFrameHeaderSlots, t4, t4
+ lshiftp 3, t4
+ subp sp, t4, t3
+
+ # Ensure that we have enough additional stack capacity for the incoming args,
+ # and the frame for the JS code we're executing. We need to do this check
+ # before we start copying the args from the protoCallFrame below.
+ if C_LOOP
+ bpaeq t3, VM::m_cloopStackLimit[vm], .stackHeightOK
+ else
+ bpaeq t3, VM::m_softStackLimit[vm], .stackHeightOK
end
-end
-macro doCallToJavaScript(makeCall, doReturn)
- if X86
- const entry = t5
- const vmTopCallFrame = t2
- const protoCallFrame = t4
-
- const extraStackSpace = 28
- const previousCFR = t0
- const previousPC = t1
- const temp1 = t0 # Same as previousCFR
- const temp2 = t1 # Same as previousPC
- const temp3 = t2 # same as vmTopCallFrame
- const temp4 = t3
- elsif ARM or ARMv7_TRADITIONAL
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 16
- const previousCFR = t3
- const previousPC = lr
- const temp1 = t3 # Same as previousCFR
- const temp2 = a3 # Same as topOfStack
- const temp3 = t4
- const temp4 = t5
- elsif ARMv7
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 28
- const previousCFR = t3
- const previousPC = lr
- const temp1 = t3 # Same as previousCFR
- const temp2 = a3 # Same as topOfStack
- const temp3 = t4
- const temp4 = t5
- elsif MIPS
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 36
- const previousCFR = t2
- const previousPC = lr
- const temp1 = t3
- const temp2 = t4
- const temp3 = t5
- const temp4 = t6
- elsif SH4
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 20
- const previousCFR = t3
- const previousPC = lr
- const temp1 = t3 # Same as previousCFR
- const temp2 = a3 # Same as topOfStack
- const temp3 = t8
- const temp4 = t9
+ if C_LOOP
+ move entry, t4
+ move vm, t5
+ cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3
+ bpeq t0, 0, .stackCheckFailed
+ move t4, entry
+ move t5, vm
+ jmp .stackHeightOK
+
+.stackCheckFailed:
+ move t4, entry
+ move t5, vm
end
- if X86
- loadp [sp], previousPC
- move cfr, previousCFR
+ subp 8, sp # Align stack for cCall2() to make a call.
+ move vm, a0
+ move protoCallFrame, a1
+ cCall2(_llint_throw_stack_overflow_error)
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
end
- functionPrologue(extraStackSpace)
- if X86
- loadp extraStackSpace+20[sp], entry
- loadp extraStackSpace+24[sp], vmTopCallFrame
- loadp extraStackSpace+28[sp], protoCallFrame
- loadp extraStackSpace+32[sp], cfr
+
+ loadp VMEntryRecord::m_vm[sp], t5
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t4
+ storep t4, VM::topCallFrame[t5]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4
+ storep t4, VM::topVMEntryFrame[t5]
+
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t5
+ move t5, sp
else
- move cfr, previousCFR
- move topOfStack, cfr
+ subp cfr, CalleeRegisterSaveSize, sp
end
- subp (CallFrameHeaderSlots-1)*8, cfr
- storep 0, ArgumentCount+4[cfr]
- storep 0, ArgumentCount[cfr]
- storep 0, Callee+4[cfr]
- storep vmTopCallFrame, Callee[cfr]
- loadp [vmTopCallFrame], temp4
- storep 0, ScopeChain+4[cfr]
- storep temp4, ScopeChain[cfr]
- storep 0, CodeBlock+4[cfr]
- storep 1, CodeBlock[cfr]
- storep previousPC, ReturnPC[cfr]
- storep previousCFR, CallerFrame[cfr]
- move cfr, temp1
-
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
- addp CallFrameHeaderSlots, temp2, temp2
- lshiftp 3, temp2
- subp temp2, cfr
- storep temp1, CallerFrame[cfr]
-
- move 5, temp1
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+.stackHeightOK:
+ move t3, sp
+ move 4, t3
.copyHeaderLoop:
- subi 1, temp1
- loadp [protoCallFrame, temp1, 8], temp3
- storep temp3, CodeBlock[cfr, temp1, 8]
- loadp 4[protoCallFrame, temp1, 8], temp3
- storep temp3, CodeBlock+4[cfr, temp1, 8]
- btinz temp1, .copyHeaderLoop
-
- loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
- subi 1, temp2
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
- subi 1, temp3
-
- bieq temp2, temp3, .copyArgs
- move 0, temp1
- move UndefinedTag, temp4
+ subi 1, t3
+ loadi TagOffset[protoCallFrame, t3, 8], t5
+ storei t5, TagOffset + CodeBlock[sp, t3, 8]
+ loadi PayloadOffset[protoCallFrame, t3, 8], t5
+ storei t5, PayloadOffset + CodeBlock[sp, t3, 8]
+ btinz t3, .copyHeaderLoop
+
+ loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4
+ subi 1, t4
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t5
+ subi 1, t5
+
+ bieq t4, t5, .copyArgs
.fillExtraArgsLoop:
- subi 1, temp3
- storep temp1, ThisArgumentOffset+8+PayloadOffset[cfr, temp3, 8]
- storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp3, 8]
- bineq temp2, temp3, .fillExtraArgsLoop
+ subi 1, t5
+ storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, t5, 8]
+ storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, t5, 8]
+ bineq t4, t5, .fillExtraArgsLoop
.copyArgs:
- loadp ProtoCallFrame::args[protoCallFrame], temp1
+ loadp ProtoCallFrame::args[protoCallFrame], t3
.copyArgsLoop:
- btiz temp2, .copyArgsDone
- subi 1, temp2
- loadp PayloadOffset[temp1, temp2, 8], temp3
- loadp TagOffset[temp1, temp2, 8], temp4
- storep temp3, ThisArgumentOffset+8+PayloadOffset[cfr, temp2, 8]
- storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp2, 8]
+ btiz t4, .copyArgsDone
+ subi 1, t4
+ loadi TagOffset[t3, t4, 8], t5
+ storei t5, ThisArgumentOffset + 8 + TagOffset[sp, t4, 8]
+ loadi PayloadOffset[t3, t4, 8], t5
+ storei t5, ThisArgumentOffset + 8 + PayloadOffset[sp, t4, 8]
jmp .copyArgsLoop
.copyArgsDone:
- if X86
- loadp extraStackSpace+24[sp], vmTopCallFrame
- end
- storep cfr, [vmTopCallFrame]
+ storep sp, VM::topCallFrame[vm]
+ storep cfr, VM::topVMEntryFrame[vm]
- makeCall(entry, temp1)
+ makeCall(entry, t3, t4)
- bpeq CodeBlock[cfr], 1, .calleeFramePopped
- loadp CallerFrame[cfr], cfr
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
-.calleeFramePopped:
- loadp Callee + PayloadOffset[cfr], temp3 # VM.topCallFrame
- loadp ScopeChain + PayloadOffset[cfr], temp4
- storep temp4, [temp3]
+ loadp VMEntryRecord::m_vm[sp], t5
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t4
+ storep t4, VM::topCallFrame[t5]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4
+ storep t4, VM::topVMEntryFrame[t5]
- doReturn(extraStackSpace)
-end
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t5
+ move t5, sp
+ else
+ subp cfr, CalleeRegisterSaveSize, sp
+ end
-macro makeJavaScriptCall(entry, temp)
- call entry
+ popCalleeSaves()
+ functionEpilogue()
+ ret
end
-macro makeHostFunctionCall(entry, temp)
- move entry, temp
- if X86
- # Put cfr on stack as arg0, also put it in ecx for "fastcall" targets
- poke cfr, 0
- move cfr, t2
+macro makeJavaScriptCall(entry, temp, unused)
+ addp CallerFrameAndPCSize, sp
+ checkStackPointerAlignment(temp, 0xbad0dc02)
+ if C_LOOP
+ cloopCallJSFunction entry
else
- move cfr, a0
+ call entry
+ end
+ checkStackPointerAlignment(temp, 0xbad0dc03)
+ subp CallerFrameAndPCSize, sp
+end
+
+macro makeHostFunctionCall(entry, temp1, temp2)
+ move entry, temp1
+ storep cfr, [sp]
+ if C_LOOP
+ move sp, a0
+ storep lr, PtrSize[sp]
+ cloopCallNative temp1
+ elsif X86 or X86_WIN
+ # Put callee frame pointer on stack as arg0, also put it in ecx for "fastcall" targets
+ move 0, temp2
+ move temp2, 4[sp] # put 0 in ReturnPC
+ move sp, a0 # a0 is ecx
+ push temp2 # Push dummy arg1
+ push a0
+ call temp1
+ addp 8, sp
+ else
+ move sp, a0
+ call temp1
end
- call temp
end
-macro doReturnFromJavaScript(extraStackSpace)
-_returnFromJavaScript:
- functionEpilogue(extraStackSpace)
+_handleUncaughtException:
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+
+ loadp CallerFrame[cfr], cfr
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
+
+ loadp VMEntryRecord::m_vm[sp], t3
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t5
+ storep t5, VM::topCallFrame[t3]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t5
+ storep t5, VM::topVMEntryFrame[t3]
+
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t3
+ move t3, sp
+ else
+ subp cfr, CalleeRegisterSaveSize, sp
+ end
+
+ popCalleeSaves()
+ functionEpilogue()
ret
-end
macro doReturnFromHostFunction(extraStackSpace)
functionEpilogue(extraStackSpace)
@@ -359,33 +346,43 @@ end
# debugging from. operand should likewise be an immediate, and should identify the operand
# in the instruction stream you'd like to print out.
macro traceOperand(fromWhere, operand)
- cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_operand)
+ move r0, PC
+ move r1, cfr
end
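
(Illustrative call, not part of the patch: both arguments are immediates, per
the comment above.)

    traceOperand(1, 2)   # dump operand 2 of the current opcode; "1" tags the site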
# Debugging operation if you'd like to print the value of an operand in the instruction
# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
# value.
macro traceValue(fromWhere, operand)
- cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_value)
+ move r0, PC
+ move r1, cfr
end
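
(Illustrative call, not part of the patch: here the second argument is a
register whose contents get printed.)

    traceValue(3, t0)    # dump whatever t0 currently holds; "3" tags the site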
# Call a slowPath for call opcodes.
macro callCallSlowPath(slowPath, action)
storep PC, ArgumentCount + TagOffset[cfr]
- cCall2(slowPath, cfr, PC)
- move t1, cfr
- action(t0)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ action(r0, r1)
end
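
(Illustrative shape of an action, not part of the patch and simplified from
how slowPathForCall() consumes the two results: the action receives r0, the
machine code target to call, and r1, the callee frame to switch to.)

    callCallSlowPath(
        _llint_slow_path_call,
        macro (callee, calleeFramePtr)
            btpz calleeFramePtr, .dontUpdateSP   # null frame: stay put
            move calleeFramePtr, sp
        .dontUpdateSP:
            callTargetFunction(callee)
        end)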
macro callWatchdogTimerHandler(throwHandler)
storei PC, ArgumentCount + TagOffset[cfr]
- cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
- move t1, cfr
- btpnz t0, throwHandler
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_slow_path_handle_watchdog_timer)
+ btpnz r0, throwHandler
loadi ArgumentCount + TagOffset[cfr], PC
end
@@ -394,10 +391,12 @@ macro checkSwitchToJITForLoop()
1,
macro ()
storei PC, ArgumentCount + TagOffset[cfr]
- cCall2(_llint_loop_osr, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- jmp t0
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_loop_osr)
+ btpz r0, .recover
+ move r1, sp
+ jmp r0
.recover:
loadi ArgumentCount + TagOffset[cfr], PC
end)
@@ -492,55 +491,69 @@ macro loadConstantOrVariablePayloadUnchecked(index, payload)
end
macro writeBarrierOnOperand(cellOperand)
- if GGC
- loadisFromInstruction(cellOperand, t1)
- loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
- checkMarkByte(t2, t1, t3,
- macro(marked)
- btbz marked, .writeBarrierDone
- push cfr, PC
- # We make two extra slots because cCall2 will poke.
- subp 8, sp
- cCall2(_llint_write_barrier_slow, cfr, t2)
- addp 8, sp
- pop PC, cfr
- end
- )
- .writeBarrierDone:
- end
+ loadisFromInstruction(cellOperand, t1)
+ loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
+ skipIfIsRememberedOrInEden(
+ t2,
+ macro()
+ push cfr, PC
+ # We make two extra slots because cCall2 will poke.
+ subp 8, sp
+ move t2, a1 # t2 can be a0 on x86
+ move cfr, a0
+ cCall2Void(_llint_write_barrier_slow)
+ addp 8, sp
+ pop PC, cfr
+ end)
+.writeBarrierDone:
end
macro writeBarrierOnOperands(cellOperand, valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- loadConstantOrVariableTag(t1, t0)
- bineq t0, CellTag, .writeBarrierDone
-
- writeBarrierOnOperand(cellOperand)
- .writeBarrierDone:
- end
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableTag(t1, t0)
+ bineq t0, CellTag, .writeBarrierDone
+
+ writeBarrierOnOperand(cellOperand)
+.writeBarrierDone:
+end
+
+macro writeBarrierOnGlobal(valueOperand, loadHelper)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableTag(t1, t0)
+ bineq t0, CellTag, .writeBarrierDone
+
+ loadHelper(t3)
+
+ skipIfIsRememberedOrInEden(
+ t3,
+ macro()
+ push cfr, PC
+ # We make two extra slots because cCall2 will poke.
+ subp 8, sp
+ move cfr, a0
+ move t3, a1
+ cCall2Void(_llint_write_barrier_slow)
+ addp 8, sp
+ pop PC, cfr
+ end)
+.writeBarrierDone:
end
macro writeBarrierOnGlobalObject(valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- bineq t0, CellTag, .writeBarrierDone
-
- loadp CodeBlock[cfr], t3
- loadp CodeBlock::m_globalObject[t3], t3
- checkMarkByte(t3, t1, t2,
- macro(marked)
- btbz marked, .writeBarrierDone
- push cfr, PC
- # We make two extra slots because cCall2 will poke.
- subp 8, sp
- cCall2(_llint_write_barrier_slow, cfr, t3)
- addp 8, sp
- pop PC, cfr
- end
- )
- .writeBarrierDone:
- end
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
+
+macro writeBarrierOnGlobalLexicalEnvironment(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal
+ end)
end
macro valueProfile(tag, payload, operand, scratch)
@@ -553,22 +566,57 @@ end
# Entrypoints into the interpreter
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-macro functionArityCheck(doneLabel, slow_path)
+macro functionArityCheck(doneLabel, slowPath)
loadi PayloadOffset + ArgumentCount[cfr], t0
biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
- cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
- btiz t0, .isArityFixupNeeded
- move t1, cfr # t1 contains caller frame
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error
+ btiz r0, .noError
+ move r1, cfr # r1 contains caller frame
jmp _llint_throw_from_slow_path_trampoline
-.isArityFixupNeeded:
+.noError:
+ # r1 points to ArityCheckData.
+ loadp CommonSlowPaths::ArityCheckData::thunkToCall[r1], t3
+ btpz t3, .proceedInline
+
+ loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], a0
+ call t3
+ if ASSERT_ENABLED
+ loadp ReturnPC[cfr], t0
+ loadp [t0], t0
+ end
+ jmp .continue
+
+.proceedInline:
+ loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
btiz t1, .continue
+ loadi PayloadOffset + ArgumentCount[cfr], t2
+ addi CallFrameHeaderSlots, t2
- // Move frame up "t1" slots
+ // Check if there are some unaligned slots we can use
+ move t1, t3
+ andi StackAlignmentSlots - 1, t3
+ btiz t3, .noExtraSlot
+.fillExtraSlots:
+ move 0, t0
+ storei t0, PayloadOffset[cfr, t2, 8]
+ move UndefinedTag, t0
+ storei t0, TagOffset[cfr, t2, 8]
+ addi 1, t2
+ bsubinz 1, t3, .fillExtraSlots
+ andi ~(StackAlignmentSlots - 1), t1
+ btiz t1, .continue
+
+.noExtraSlot:
+ // Move frame up t1 slots
negi t1
move cfr, t3
- loadi PayloadOffset + ArgumentCount[cfr], t2
- addi CallFrameHeaderSlots, t2
+ move t1, t0
+ lshiftp 3, t0
+ addp t0, cfr
+ addp t0, sp
.copyLoop:
loadi PayloadOffset[t3], t0
storei t0, PayloadOffset[t3, t1, 8]
@@ -587,8 +635,6 @@ macro functionArityCheck(doneLabel, slow_path)
addp 8, t3
baddinz 1, t2, .fillLoop
- lshiftp 3, t1
- addp t1, cfr
.continue:
# Reload CodeBlock and PC, since the slow_path clobbered it.
loadp CodeBlock[cfr], t1
@@ -596,12 +642,11 @@ macro functionArityCheck(doneLabel, slow_path)
jmp doneLabel
end
-
macro branchIfException(label)
- loadp ScopeChain + PayloadOffset[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- bieq VM::m_exception + TagOffset[t3], EmptyValueTag, .noException
+ loadp MarkedBlock::m_vm[t3], t3
+ btiz VM::m_exception[t3], .noException
jmp label
.noException:
end
@@ -611,6 +656,7 @@ end
_llint_op_enter:
traceExecution()
+ checkStackPointerAlignment(t2, 0xdead00e1)
loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
btiz t2, .opEnterDone
@@ -623,98 +669,75 @@ _llint_op_enter:
addi 1, t2
btinz t2, .opEnterLoop
.opEnterDone:
- callSlowPath(_slow_path_enter)
+ callOpcodeSlowPath(_slow_path_enter)
dispatch(1)
-_llint_op_create_activation:
+_llint_op_get_argument:
traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
- callSlowPath(_llint_slow_path_create_activation)
-.opCreateActivationDone:
- dispatch(2)
-
+ loadisFromInstruction(1, t1)
+ loadisFromInstruction(2, t2)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ bilteq t0, t2, .opGetArgumentOutOfBounds
+ loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
+ loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t3
+ storei t0, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ valueProfile(t0, t3, 12, t1)
+ dispatch(4)
-_llint_op_init_lazy_reg:
- traceExecution()
- loadi 4[PC], t0
- storei EmptyValueTag, TagOffset[cfr, t0, 8]
- storei 0, PayloadOffset[cfr, t0, 8]
- dispatch(2)
+.opGetArgumentOutOfBounds:
+ storei UndefinedTag, TagOffset[cfr, t1, 8]
+ storei 0, PayloadOffset[cfr, t1, 8]
+ valueProfile(UndefinedTag, 0, 12, t1)
+ dispatch(4)
-_llint_op_create_arguments:
+_llint_op_argument_count:
traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
- callSlowPath(_slow_path_create_arguments)
-.opCreateArgumentsDone:
+ loadisFromInstruction(1, t2)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ move Int32Tag, t1
+ storei t1, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
dispatch(2)
-_llint_op_create_this:
+_llint_op_get_scope:
traceExecution()
- loadi 8[PC], t0
- loadp PayloadOffset[cfr, t0, 8], t0
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
- btpz t1, .opCreateThisSlow
- allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
- loadi 4[PC], t1
+ loadi Callee + PayloadOffset[cfr], t0
+ loadi JSCallee::m_scope[t0], t0
+ loadisFromInstruction(1, t1)
storei CellTag, TagOffset[cfr, t1, 8]
storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(4)
-
-.opCreateThisSlow:
- callSlowPath(_slow_path_create_this)
- dispatch(4)
+ dispatch(2)
-_llint_op_get_callee:
- traceExecution()
- loadi 4[PC], t0
- loadp PayloadOffset + Callee[cfr], t1
- loadpFromInstruction(2, t2)
- bpneq t1, t2, .opGetCalleeSlow
- storei CellTag, TagOffset[cfr, t0, 8]
- storei t1, PayloadOffset[cfr, t0, 8]
- dispatch(3)
-
-.opGetCalleeSlow:
- callSlowPath(_slow_path_get_callee)
- dispatch(3)
-
_llint_op_to_this:
traceExecution()
loadi 4[PC], t0
bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow
loadi PayloadOffset[cfr, t0, 8], t0
- loadp JSCell::m_structure[t0], t0
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], FinalObjectType, .opToThisSlow
+ bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
loadpFromInstruction(2, t2)
- bpneq t0, t2, .opToThisSlow
- dispatch(3)
+ bpneq JSCell::m_structureID[t0], t2, .opToThisSlow
+ dispatch(4)
.opToThisSlow:
- callSlowPath(_slow_path_to_this)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_to_this)
+ dispatch(4)
-_llint_op_new_object:
+_llint_op_check_tdz:
traceExecution()
- loadpFromInstruction(3, t0)
- loadp ObjectAllocationProfile::m_allocator[t0], t1
- loadp ObjectAllocationProfile::m_structure[t0], t2
- allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(4)
+ loadisFromInstruction(1, t0)
+ loadConstantOrVariableTag(t0, t1)
+ bineq t1, EmptyValueTag, .opNotTDZ
+ callOpcodeSlowPath(_slow_path_throw_tdz_error)
-.opNewObjectSlow:
- callSlowPath(_llint_slow_path_new_object)
- dispatch(4)
+.opNotTDZ:
+ dispatch(2)
_llint_op_mov:
@@ -727,45 +750,6 @@ _llint_op_mov:
dispatch(3)
-macro notifyWrite(set, valueTag, valuePayload, scratch, slow)
- loadb VariableWatchpointSet::m_state[set], scratch
- bieq scratch, IsInvalidated, .done
- bineq scratch, ClearWatchpoint, .overwrite
- storei valueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set]
- storei valuePayload, VariableWatchpointSet::m_inferredValue + PayloadOffset[set]
- storeb IsWatched, VariableWatchpointSet::m_state[set]
- jmp .done
-
-.overwrite:
- bineq valuePayload, VariableWatchpointSet::m_inferredValue + PayloadOffset[set], .definitelyDifferent
- bieq valueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set], .done
-.definitelyDifferent:
- btbnz VariableWatchpointSet::m_setIsNotEmpty[set], slow
- storei EmptyValueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set]
- storei 0, VariableWatchpointSet::m_inferredValue + PayloadOffset[set]
- storeb IsInvalidated, VariableWatchpointSet::m_state[set]
-
-.done:
-end
-
-_llint_op_captured_mov:
- traceExecution()
- loadi 8[PC], t1
- loadConstantOrVariable(t1, t2, t3)
- loadpFromInstruction(3, t0)
- btpz t0, .opCapturedMovReady
- notifyWrite(t0, t2, t3, t1, .opCapturedMovSlow)
-.opCapturedMovReady:
- loadi 4[PC], t0
- storei t2, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(4)
-
-.opCapturedMovSlow:
- callSlowPath(_slow_path_captured_mov)
- dispatch(4)
-
-
_llint_op_not:
traceExecution()
loadi 8[PC], t0
@@ -778,7 +762,7 @@ _llint_op_not:
dispatch(3)
.opNotSlow:
- callSlowPath(_slow_path_not)
+ callOpcodeSlowPath(_slow_path_not)
dispatch(3)
@@ -798,7 +782,7 @@ _llint_op_eq:
dispatch(4)
.opEqSlow:
- callSlowPath(_slow_path_eq)
+ callOpcodeSlowPath(_slow_path_eq)
dispatch(4)
@@ -810,11 +794,11 @@ _llint_op_eq_null:
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .opEqNullImmediate
- loadp JSCell::m_structure[t0], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
move 0, t1
jmp .opEqNullNotImmediate
.opEqNullMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t0], t1
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpeq Structure::m_globalObject[t1], t0, t1
@@ -845,7 +829,7 @@ _llint_op_neq:
dispatch(4)
.opNeqSlow:
- callSlowPath(_slow_path_neq)
+ callOpcodeSlowPath(_slow_path_neq)
dispatch(4)
@@ -857,11 +841,11 @@ _llint_op_neq_null:
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .opNeqNullImmediate
- loadp JSCell::m_structure[t0], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
move 1, t1
jmp .opNeqNullNotImmediate
.opNeqNullMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t0], t1
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpneq Structure::m_globalObject[t1], t0, t1
@@ -883,12 +867,10 @@ macro strictEq(equalityOperation, slowPath)
loadConstantOrVariable2Reg(t0, t2, t0)
bineq t2, t3, .slow
bib t2, LowestTag, .slow
- bineq t2, CellTag, .notString
- loadp JSCell::m_structure[t0], t2
- loadp JSCell::m_structure[t1], t3
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
- bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
-.notString:
+ bineq t2, CellTag, .notStringOrSymbol
+ bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol
+ bbb JSCell::m_type[t1], ObjectType, .slow
+.notStringOrSymbol:
loadi 4[PC], t2
equalityOperation(t0, t1, t0)
storei BooleanTag, TagOffset[cfr, t2, 8]
@@ -896,7 +878,7 @@ macro strictEq(equalityOperation, slowPath)
dispatch(4)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(4)
end
@@ -920,7 +902,7 @@ _llint_op_inc:
dispatch(2)
.opIncSlow:
- callSlowPath(_slow_path_inc)
+ callOpcodeSlowPath(_slow_path_inc)
dispatch(2)
@@ -934,7 +916,7 @@ _llint_op_dec:
dispatch(2)
.opDecSlow:
- callSlowPath(_slow_path_dec)
+ callOpcodeSlowPath(_slow_path_dec)
dispatch(2)
@@ -948,10 +930,28 @@ _llint_op_to_number:
.opToNumberIsInt:
storei t2, TagOffset[cfr, t1, 8]
storei t3, PayloadOffset[cfr, t1, 8]
- dispatch(3)
+ valueProfile(t2, t3, 12, t1)
+ dispatch(4)
.opToNumberSlow:
- callSlowPath(_slow_path_to_number)
+ callOpcodeSlowPath(_slow_path_to_number)
+ dispatch(4)
+
+
+_llint_op_to_string:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ loadConstantOrVariable(t0, t2, t3)
+ bineq t2, CellTag, .opToStringSlow
+ bbneq JSCell::m_type[t3], StringType, .opToStringSlow
+.opToStringIsString:
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opToStringSlow:
+ callOpcodeSlowPath(_slow_path_to_string)
dispatch(3)
@@ -960,22 +960,27 @@ _llint_op_negate:
loadi 8[PC], t0
loadi 4[PC], t3
loadConstantOrVariable(t0, t1, t2)
+ loadisFromInstruction(3, t0)
bineq t1, Int32Tag, .opNegateSrcNotInt
btiz t2, 0x7fffffff, .opNegateSlow
negi t2
+ ori ArithProfileInt, t0
storei Int32Tag, TagOffset[cfr, t3, 8]
+ storeisToInstruction(t0, 3)
storei t2, PayloadOffset[cfr, t3, 8]
- dispatch(3)
+ dispatch(4)
.opNegateSrcNotInt:
bia t1, LowestTag, .opNegateSlow
xori 0x80000000, t1
- storei t1, TagOffset[cfr, t3, 8]
+ ori ArithProfileNumber, t0
storei t2, PayloadOffset[cfr, t3, 8]
- dispatch(3)
+ storeisToInstruction(t0, 3)
+ storei t1, TagOffset[cfr, t3, 8]
+ dispatch(4)
.opNegateSlow:
- callSlowPath(_slow_path_negate)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_negate)
+ dispatch(4)
macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
@@ -985,6 +990,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
loadConstantOrVariable2Reg(t0, t2, t0)
bineq t2, Int32Tag, .op1NotInt
bineq t3, Int32Tag, .op2NotInt
+ loadisFromInstruction(4, t5)
+ ori ArithProfileIntInt, t5
+ storeisToInstruction(t5, 4)
loadi 4[PC], t2
integerOperationAndStore(t3, t1, t0, .slow, t2)
dispatch(5)
@@ -994,10 +1002,16 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
bia t2, LowestTag, .slow
bib t3, LowestTag, .op1NotIntOp2Double
bineq t3, Int32Tag, .slow
+ loadisFromInstruction(4, t5)
+ ori ArithProfileNumberInt, t5
+ storeisToInstruction(t5, 4)
ci2d t1, ft1
jmp .op1NotIntReady
.op1NotIntOp2Double:
fii2d t1, t3, ft1
+ loadisFromInstruction(4, t5)
+ ori ArithProfileNumberNumber, t5
+ storeisToInstruction(t5, 4)
.op1NotIntReady:
loadi 4[PC], t1
fii2d t0, t2, ft0
@@ -1009,6 +1023,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
# First operand is definitely an int, the second operand is definitely not.
loadi 4[PC], t2
bia t3, LowestTag, .slow
+ loadisFromInstruction(4, t5)
+ ori ArithProfileIntNumber, t5
+ storeisToInstruction(t5, 4)
ci2d t0, ft0
fii2d t1, t3, ft1
doubleOperation(ft1, ft0)
@@ -1016,7 +1033,7 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
dispatch(5)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
@@ -1097,7 +1114,7 @@ macro bitOp(operation, slowPath, advance)
dispatch(advance)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(advance)
end
@@ -1135,7 +1152,7 @@ _llint_op_unsigned:
storei Int32Tag, TagOffset[cfr, t0, 8]
dispatch(3)
.opUnsignedSlow:
- callSlowPath(_slow_path_unsigned)
+ callOpcodeSlowPath(_slow_path_unsigned)
dispatch(3)
@@ -1163,48 +1180,52 @@ _llint_op_bitor:
5)
-_llint_op_check_has_instance:
+_llint_op_overrides_has_instance:
traceExecution()
- loadi 12[PC], t1
- loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
- dispatch(5)
-
-.opCheckHasInstanceSlow:
- callSlowPath(_llint_slow_path_check_has_instance)
- dispatch(0)
+ loadisFromInstruction(1, t3)
+ storei BooleanTag, TagOffset[cfr, t3, 8]
-_llint_op_instanceof:
- traceExecution()
- # Actually do the work.
- loadi 12[PC], t0
- loadi 4[PC], t3
- loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
- loadp JSCell::m_structure[t1], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
-
- # Register state: t1 = prototype, t2 = value
- move 1, t0
-.opInstanceofLoop:
- loadp JSCell::m_structure[t2], t2
- loadi Structure::m_prototype + PayloadOffset[t2], t2
- bpeq t2, t1, .opInstanceofDone
- btinz t2, .opInstanceofLoop
+ # First check if hasInstanceValue is the one on Function.prototype[Symbol.hasInstance]
+ loadisFromInstruction(3, t0)
+ loadConstantOrVariablePayload(t0, CellTag, t2, .opOverrideshasInstanceValueNotCell)
+ loadConstantOrVariable(t0, t1, t2)
+ bineq t1, CellTag, .opOverrideshasInstanceValueNotCell
- move 0, t0
-.opInstanceofDone:
- storei BooleanTag, TagOffset[cfr, t3, 8]
+ # We don't need hasInstanceValue's tag register anymore.
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_globalObject[t1], t1
+ loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t1], t1
+ bineq t1, t2, .opOverrideshasInstanceValueNotDefault
+
+ # We know the constructor is a cell.
+ loadisFromInstruction(2, t0)
+ loadConstantOrVariablePayloadUnchecked(t0, t1)
+ tbz JSCell::m_flags[t1], ImplementsDefaultHasInstance, t0
storei t0, PayloadOffset[cfr, t3, 8]
dispatch(4)
-.opInstanceofSlow:
- callSlowPath(_llint_slow_path_instanceof)
+.opOverrideshasInstanceValueNotCell:
+.opOverrideshasInstanceValueNotDefault:
+ storei 1, PayloadOffset[cfr, t3, 8]
dispatch(4)
+_llint_op_instanceof_custom:
+ traceExecution()
+ callOpcodeSlowPath(_llint_slow_path_instanceof_custom)
+ dispatch(5)
+
+
+_llint_op_is_empty:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t0
+ loadConstantOrVariable(t1, t2, t3)
+ cieq t2, EmptyValueTag, t3
+ storei BooleanTag, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ dispatch(3)
+
_llint_op_is_undefined:
traceExecution()
@@ -1217,12 +1238,12 @@ _llint_op_is_undefined:
storei t3, PayloadOffset[cfr, t0, 8]
dispatch(3)
.opIsUndefinedCell:
- loadp JSCell::m_structure[t3], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
move 0, t1
storei t1, PayloadOffset[cfr, t0, 8]
dispatch(3)
.opIsUndefinedMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t3], t1
loadp CodeBlock[cfr], t3
loadp CodeBlock::m_globalObject[t3], t3
cpeq Structure::m_globalObject[t1], t3, t1
@@ -1253,18 +1274,33 @@ _llint_op_is_number:
dispatch(3)
-_llint_op_is_string:
+_llint_op_is_cell_with_type:
traceExecution()
loadi 8[PC], t1
loadi 4[PC], t2
loadConstantOrVariable(t1, t0, t3)
storei BooleanTag, TagOffset[cfr, t2, 8]
- bineq t0, CellTag, .opIsStringNotCell
- loadp JSCell::m_structure[t3], t0
- cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
+ bineq t0, CellTag, .notCellCase
+ loadi 12[PC], t0
+ cbeq JSCell::m_type[t3], t0, t1
+ storei t1, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+.notCellCase:
+ storep 0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+
+_llint_op_is_object:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t2
+ loadConstantOrVariable(t1, t0, t3)
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ bineq t0, CellTag, .opIsObjectNotCell
+ cbaeq JSCell::m_type[t3], ObjectType, t1
storei t1, PayloadOffset[cfr, t2, 8]
dispatch(3)
-.opIsStringNotCell:
+.opIsObjectNotCell:
storep 0, PayloadOffset[cfr, t2, 8]
dispatch(3)
@@ -1302,54 +1338,68 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag,
end
-_llint_op_init_global_const:
- traceExecution()
- writeBarrierOnGlobalObject(2)
- loadi 8[PC], t1
- loadi 4[PC], t0
- loadConstantOrVariable(t1, t2, t3)
- storei t2, TagOffset[t0]
- storei t3, PayloadOffset[t0]
- dispatch(5)
-
-
# We only do monomorphic get_by_id caching for now, and we do not modify the
-# opcode. We do, however, allow for the cache to change anytime if fails, since
-# ping-ponging is free. At best we get lucky and the get_by_id will continue
+# opcode for own properties. We also allow for the cache to change anytime it fails,
+# since ping-ponging is free. At best we get lucky and the get_by_id will continue
# to take fast path on the new cache. At worst we take slow path, which is what
-# we would have been doing anyway.
+# we would have been doing anyway. For prototype/unset properties, we will attempt to
+# convert opcode into a get_by_id_proto_load/get_by_id_unset, respectively, after an
+# execution counter hits zero.
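
(Illustrative, not part of the patch: the fast path below boils down to a
monomorphic structure-ID compare against the id cached in the instruction
stream, roughly:)

    loadi 16[PC], t1                              # cached StructureID
    bineq JSCell::m_structureID[t3], t1, .slow    # miss: take the slow path
    loadPropertyAtVariableOffset(t2, t3, t0, t1)  # hit: load at cached offset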
-macro getById(getPropertyStorage)
+_llint_op_get_by_id:
traceExecution()
loadi 8[PC], t0
loadi 16[PC], t1
loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
loadi 20[PC], t2
- getPropertyStorage(
- t3,
- t0,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
- loadi 4[PC], t1
- loadi TagOffset[propertyStorage, t2], scratch
- loadi PayloadOffset[propertyStorage, t2], t2
- storei scratch, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- valueProfile(scratch, t2, 32, t1)
- dispatch(9)
- end)
+ bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow
+ loadPropertyAtVariableOffset(t2, t3, t0, t1)
+ loadi 4[PC], t2
+ storei t0, TagOffset[cfr, t2, 8]
+ storei t1, PayloadOffset[cfr, t2, 8]
+ valueProfile(t0, t1, 32, t2)
+ dispatch(9)
- .opGetByIdSlow:
- callSlowPath(_llint_slow_path_get_by_id)
- dispatch(9)
-end
+.opGetByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
-_llint_op_get_by_id:
- getById(withInlineStorage)
+_llint_op_get_by_id_proto_load:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdProtoSlow)
+ loadi 20[PC], t2
+ bineq JSCell::m_structureID[t3], t1, .opGetByIdProtoSlow
+ loadpFromInstruction(6, t3)
+ loadPropertyAtVariableOffset(t2, t3, t0, t1)
+ loadi 4[PC], t2
+ storei t0, TagOffset[cfr, t2, 8]
+ storei t1, PayloadOffset[cfr, t2, 8]
+ valueProfile(t0, t1, 32, t2)
+ dispatch(9)
+
+.opGetByIdProtoSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
-_llint_op_get_by_id_out_of_line:
- getById(withOutOfLineStorage)
+
+_llint_op_get_by_id_unset:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdUnsetSlow)
+ bineq JSCell::m_structureID[t3], t1, .opGetByIdUnsetSlow
+ loadi 4[PC], t2
+ storei UndefinedTag, TagOffset[cfr, t2, 8]
+ storei 0, PayloadOffset[cfr, t2, 8]
+ valueProfile(UndefinedTag, 0, 32, t2)
+ dispatch(9)
+
+.opGetByIdUnsetSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
_llint_op_get_array_length:
@@ -1357,7 +1407,7 @@ _llint_op_get_array_length:
loadi 8[PC], t0
loadp 16[PC], t1
loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
- loadp JSCell::m_structure[t3], t2
+ move t3, t2
arrayProfile(t2, t1, t0)
btiz t2, IsArray, .opGetArrayLengthSlow
btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
@@ -1371,123 +1421,152 @@ _llint_op_get_array_length:
dispatch(9)
.opGetArrayLengthSlow:
- callSlowPath(_llint_slow_path_get_by_id)
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
dispatch(9)
-_llint_op_get_arguments_length:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
- loadi ArgumentCount + PayloadOffset[cfr], t2
- subi 1, t2
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- dispatch(4)
-
-.opGetArgumentsLengthSlow:
- callSlowPath(_llint_slow_path_get_arguments_length)
- dispatch(4)
-
-
-macro putById(getPropertyStorage)
+_llint_op_put_by_id:
traceExecution()
writeBarrierOnOperands(1, 3)
loadi 4[PC], t3
- loadi 16[PC], t1
loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- loadi 20[PC], t1
- loadConstantOrVariable2Reg(t2, scratch, t2)
- storei scratch, TagOffset[propertyStorage, t1]
- storei t2, PayloadOffset[propertyStorage, t1]
- dispatch(9)
- end)
-end
+ loadi JSCell::m_structureID[t0], t2
+ bineq t2, 16[PC], .opPutByIdSlow
-_llint_op_put_by_id:
- putById(withInlineStorage)
+ # At this point, we have:
+ # t2 -> currentStructureID
+ # t0 -> object base
+ # We will lose currentStructureID in the shenanigans below.
-.opPutByIdSlow:
- callSlowPath(_llint_slow_path_put_by_id)
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 32[PC], t1
+
+ # At this point, we have:
+ # t0 -> object base
+ # t1 -> put by id flags
+ # t2 -> value tag
+ # t3 -> value payload
+
+ btinz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther
+
+ # We have one of the non-structure type checks. Find out which one.
+ andi PutByIdSecondaryTypeMask, t1
+ bilt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString
+
+ # We are one of the following: String, Symbol, Object, ObjectOrOther, Top
+ bilt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther
+
+ # We are either ObjectOrOther or Top.
+ bieq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes
+
+ # Check if we are ObjectOrOther.
+ bieq t2, CellTag, .opPutByIdTypeCheckObject
+.opPutByIdTypeCheckOther:
+ bieq t2, NullTag, .opPutByIdDoneCheckingTypes
+ bieq t2, UndefinedTag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanObjectOrOther:
+ # We are either String, Symbol or Object.
+ bineq t2, CellTag, .opPutByIdSlow
+ bieq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject
+ bieq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol
+ bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckObject:
+ bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckSymbol:
+ bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanString:
+ # We are one of the following: Bottom, Boolean, Other, Int32, Number.
+ bilt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32
+
+ # We are either Int32 or Number.
+ bieq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber
+
+ bieq t2, Int32Tag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckNumber:
+ bib t2, LowestTag + 1, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanInt32:
+ # We are one of the following: Bottom, Boolean, Other
+ bineq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther
+ bieq t2, BooleanTag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckBottomOrOther:
+ bieq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructureOrOther:
+ bieq t2, CellTag, .opPutByIdTypeCheckObjectWithStructure
+ btinz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructure:
+ andi PutByIdSecondaryTypeMask, t1
+ bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow
+
+.opPutByIdDoneCheckingTypes:
+ loadi 24[PC], t1
+
+ btiz t1, .opPutByIdNotTransition
+
+ # This is the transition case. t1 holds the new Structure*. If we have a chain, we need to
+ # check it. t0 is the base. We may clobber t1 to use it as scratch.
+ loadp 28[PC], t3
+ btpz t3, .opPutByIdTransitionDirect
+
+ loadi 16[PC], t2 # Need old structure again.
+ loadp StructureChain::m_vector[t3], t3
+ assert(macro (ok) btpnz t3, ok end)
+
+ loadp Structure::m_prototype[t2], t2
+ btpz t2, .opPutByIdTransitionChainDone
+.opPutByIdTransitionChainLoop:
+ loadp [t3], t1
+ bpneq t1, JSCell::m_structureID[t2], .opPutByIdSlow
+ addp 4, t3
+ loadp Structure::m_prototype[t1], t2
+ btpnz t2, .opPutByIdTransitionChainLoop
+
+.opPutByIdTransitionChainDone:
+ loadi 24[PC], t1
+
+.opPutByIdTransitionDirect:
+ storei t1, JSCell::m_structureID[t0]
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 20[PC], t1
+ storePropertyAtVariableOffset(t1, t0, t2, t3)
+ writeBarrierOnOperand(1)
dispatch(9)
-
-_llint_op_put_by_id_out_of_line:
- putById(withOutOfLineStorage)
-
-
-macro putByIdTransition(additionalChecks, getPropertyStorage)
- traceExecution()
- writeBarrierOnOperand(1)
- loadi 4[PC], t3
- loadi 16[PC], t1
- loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- additionalChecks(t1, t3)
+.opPutByIdNotTransition:
+ # The only thing live right now is t0, which holds the base.
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
loadi 20[PC], t1
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- addp t1, propertyStorage, t3
- loadConstantOrVariable2Reg(t2, t1, t2)
- storei t1, TagOffset[t3]
- loadi 24[PC], t1
- storei t2, PayloadOffset[t3]
- storep t1, JSCell::m_structure[t0]
- dispatch(9)
- end)
-end
-
-macro noAdditionalChecks(oldStructure, scratch)
-end
-
-macro structureChainChecks(oldStructure, scratch)
- const protoCell = oldStructure # Reusing the oldStructure register for the proto
-
- loadp 28[PC], scratch
- assert(macro (ok) btpnz scratch, ok end)
- loadp StructureChain::m_vector[scratch], scratch
- assert(macro (ok) btpnz scratch, ok end)
- bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
-.loop:
- loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
- loadp JSCell::m_structure[protoCell], oldStructure
- bpneq oldStructure, [scratch], .opPutByIdSlow
- addp 4, scratch
- bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
-.done:
-end
-
-_llint_op_put_by_id_transition_direct:
- putByIdTransition(noAdditionalChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_direct_out_of_line:
- putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
-
-
-_llint_op_put_by_id_transition_normal:
- putByIdTransition(structureChainChecks, withInlineStorage)
-
+ storePropertyAtVariableOffset(t1, t0, t2, t3)
+ dispatch(9)
-_llint_op_put_by_id_transition_normal_out_of_line:
- putByIdTransition(structureChainChecks, withOutOfLineStorage)
+.opPutByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
_llint_op_get_by_val:
traceExecution()
loadi 8[PC], t2
loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
- loadp JSCell::m_structure[t0], t2
+ move t0, t2
loadp 16[PC], t3
arrayProfile(t2, t3, t1)
loadi 12[PC], t3
@@ -1533,65 +1612,10 @@ _llint_op_get_by_val:
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
.opGetByValSlow:
- callSlowPath(_llint_slow_path_get_by_val)
+ callOpcodeSlowPath(_llint_slow_path_get_by_val)
dispatch(6)
-_llint_op_get_argument_by_val:
- # FIXME: At some point we should array profile this. Right now it isn't necessary
- # since the DFG will never turn a get_argument_by_val into a GetByVal.
- traceExecution()
- loadi 8[PC], t0
- loadi 12[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
- loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
- addi 1, t2
- loadi ArgumentCount + PayloadOffset[cfr], t1
- biaeq t2, t1, .opGetArgumentByValSlow
- loadi 4[PC], t3
- loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
- loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
- storei t0, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- valueProfile(t0, t1, 20, t2)
- dispatch(6)
-
-.opGetArgumentByValSlow:
- callSlowPath(_llint_slow_path_get_argument_by_val)
- dispatch(6)
-
-
-_llint_op_get_by_pname:
- traceExecution()
- loadi 12[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
- loadi 16[PC], t0
- bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
- loadi 20[PC], t0
- loadi PayloadOffset[cfr, t0, 8], t3
- loadp JSCell::m_structure[t2], t0
- bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
- loadi 24[PC], t0
- loadi PayloadOffset[cfr, t0, 8], t0
- subi 1, t0
- biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
- bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty
- addi firstOutOfLineOffset, t0
- subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0
-.opGetByPnameInlineProperty:
- loadPropertyAtVariableOffset(t0, t2, t1, t3)
- loadi 4[PC], t0
- storei t1, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(7)
-
-.opGetByPnameSlow:
- callSlowPath(_llint_slow_path_get_by_pname)
- dispatch(7)
-
-
macro contiguousPutByVal(storeCallback)
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
@@ -1608,12 +1632,12 @@ macro contiguousPutByVal(storeCallback)
jmp .storeResult
end
-macro putByVal(holeCheck, slowPath)
+macro putByVal(slowPath)
traceExecution()
writeBarrierOnOperands(1, 3)
loadi 4[PC], t0
loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
- loadp JSCell::m_structure[t1], t2
+ move t1, t2
loadp 16[PC], t3
arrayProfile(t2, t3, t0)
loadi 8[PC], t0
@@ -1659,7 +1683,7 @@ macro putByVal(holeCheck, slowPath)
.opPutByValNotContiguous:
bineq t2, ArrayStorageShape, .opPutByValSlow
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
- holeCheck(ArrayStorage::m_vector + TagOffset[t0, t3, 8], .opPutByValArrayStorageEmpty)
+ bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
loadi 12[PC], t2
loadConstantOrVariable2Reg(t2, t1, t2)
@@ -1680,18 +1704,15 @@ macro putByVal(holeCheck, slowPath)
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
.opPutByValSlow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
_llint_op_put_by_val:
- putByVal(macro(addr, slowPath)
- bieq addr, EmptyValueTag, slowPath
- end, _llint_slow_path_put_by_val)
+ putByVal(_llint_slow_path_put_by_val)
_llint_op_put_by_val_direct:
- putByVal(macro(addr, slowPath)
- end, _llint_slow_path_put_by_val_direct)
+ putByVal(_llint_slow_path_put_by_val_direct)
_llint_op_jmp:
traceExecution()
@@ -1708,7 +1729,7 @@ macro jumpTrueOrFalse(conditionOp, slow)
dispatchBranch(8[PC])
.slow:
- callSlowPath(slow)
+ callOpcodeSlowPath(slow)
dispatch(0)
end
@@ -1719,8 +1740,8 @@ macro equalNull(cellHandler, immediateHandler)
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .immediate
- loadp JSCell::m_structure[t0], t2
- cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ loadp JSCell::m_structureID[t0], t2
+ cellHandler(t2, JSCell::m_flags[t0], .target)
dispatch(3)
.target:
@@ -1767,9 +1788,10 @@ _llint_op_jneq_ptr:
loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
.opJneqPtrBranch:
+ storei 1, 16[PC]
dispatchBranch(12[PC])
.opJneqPtrFallThrough:
- dispatch(4)
+ dispatch(5)
macro compare(integerCompare, doubleCompare, slowPath)
@@ -1806,7 +1828,7 @@ macro compare(integerCompare, doubleCompare, slowPath)
dispatchBranch(12[PC])
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(0)
end
@@ -1835,7 +1857,7 @@ _llint_op_switch_imm:
dispatchBranch(8[PC])
.opSwitchImmSlow:
- callSlowPath(_llint_slow_path_switch_imm)
+ callOpcodeSlowPath(_llint_slow_path_switch_imm)
dispatch(0)
@@ -1850,8 +1872,7 @@ _llint_op_switch_char:
loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
bineq t1, CellTag, .opSwitchCharFallThrough
- loadp JSCell::m_structure[t0], t1
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
+ bbneq JSCell::m_type[t0], StringType, .opSwitchCharFallThrough
bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
loadp JSString::m_value[t0], t0
btpz t0, .opSwitchOnRope
@@ -1873,39 +1894,22 @@ _llint_op_switch_char:
dispatchBranch(8[PC])
.opSwitchOnRope:
- callSlowPath(_llint_slow_path_switch_char)
+ callOpcodeSlowPath(_llint_slow_path_switch_char)
dispatch(0)
-_llint_op_new_func:
- traceExecution()
- btiz 12[PC], .opNewFuncUnchecked
- loadi 4[PC], t1
- bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
-.opNewFuncUnchecked:
- callSlowPath(_llint_slow_path_new_func)
-.opNewFuncDone:
- dispatch(4)
-
-
-_llint_op_new_captured_func:
- traceExecution()
- callSlowPath(_slow_path_new_captured_func)
- dispatch(4)
-
-
macro arrayProfileForCall()
loadi 16[PC], t3
negi t3
bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
- loadp JSCell::m_structure[t0], t0
- loadp 24[PC], t1
- storep t0, ArrayProfile::m_lastSeenStructure[t1]
+ loadp JSCell::m_structureID[t0], t0
+ loadpFromInstruction(CallOpCodeSize - 2, t1)
+ storep t0, ArrayProfile::m_lastSeenStructureID[t1]
.done:
end
-macro doCall(slowPath)
+macro doCall(slowPath, prepareCall)
loadi 8[PC], t0
loadi 20[PC], t1
loadp LLIntCallLinkInfo::callee[t1], t2
@@ -1915,42 +1919,19 @@ macro doCall(slowPath)
lshifti 3, t3
negi t3
addp cfr, t3 # t3 contains the new value of cfr
- loadp JSFunction::m_scope[t2], t0
storei t2, Callee + PayloadOffset[t3]
- storei t0, ScopeChain + PayloadOffset[t3]
loadi 12[PC], t2
storei PC, ArgumentCount + TagOffset[cfr]
- storep cfr, CallerFrame[t3]
storei t2, ArgumentCount + PayloadOffset[t3]
storei CellTag, Callee + TagOffset[t3]
- storei CellTag, ScopeChain + TagOffset[t3]
- move t3, cfr
- callTargetFunction(t1)
+ move t3, sp
+ prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+ callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
.opCallSlow:
- slowPathForCall(slowPath)
+ slowPathForCall(slowPath, prepareCall)
end
-
-_llint_op_tear_off_activation:
- traceExecution()
- loadi 4[PC], t0
- bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
- callSlowPath(_llint_slow_path_tear_off_activation)
-.opTearOffActivationNotCreated:
- dispatch(2)
-
-
-_llint_op_tear_off_arguments:
- traceExecution()
- loadi 4[PC], t0
- addi 1, t0 # Get the unmodifiedArgumentsRegister
- bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
- callSlowPath(_llint_slow_path_tear_off_arguments)
-.opTearOffArgumentsNotCreated:
- dispatch(3)
-
-
_llint_op_ret:
traceExecution()
checkSwitchToJITForEpilogue()
@@ -1959,135 +1940,62 @@ _llint_op_ret:
doReturn()
-_llint_op_ret_object_or_this:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadi 4[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- bineq t1, CellTag, .opRetObjectOrThisNotObject
- loadp JSCell::m_structure[t0], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
- doReturn()
-
-.opRetObjectOrThisNotObject:
- loadi 8[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- doReturn()
-
-
_llint_op_to_primitive:
traceExecution()
loadi 8[PC], t2
loadi 4[PC], t3
loadConstantOrVariable(t2, t1, t0)
bineq t1, CellTag, .opToPrimitiveIsImm
- loadp JSCell::m_structure[t0], t2
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+ bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
storei t1, TagOffset[cfr, t3, 8]
storei t0, PayloadOffset[cfr, t3, 8]
dispatch(3)
.opToPrimitiveSlowCase:
- callSlowPath(_slow_path_to_primitive)
+ callOpcodeSlowPath(_slow_path_to_primitive)
dispatch(3)
-_llint_op_next_pname:
- traceExecution()
- loadi 12[PC], t1
- loadi 16[PC], t2
- loadi PayloadOffset[cfr, t1, 8], t0
- bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
- loadi 20[PC], t2
- loadi PayloadOffset[cfr, t2, 8], t2
- loadp JSPropertyNameIterator::m_jsStrings[t2], t3
- loadi PayloadOffset[t3, t0, 8], t3
- addi 1, t0
- storei t0, PayloadOffset[cfr, t1, 8]
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t1, 8]
- loadi 8[PC], t3
- loadi PayloadOffset[cfr, t3, 8], t3
- loadp JSCell::m_structure[t3], t1
- bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
- loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
- loadp StructureChain::m_vector[t0], t0
- btpz [t0], .opNextPnameTarget
-.opNextPnameCheckPrototypeLoop:
- bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
- loadp Structure::m_prototype + PayloadOffset[t1], t2
- loadp JSCell::m_structure[t2], t1
- bpneq t1, [t0], .opNextPnameSlow
- addp 4, t0
- btpnz [t0], .opNextPnameCheckPrototypeLoop
-.opNextPnameTarget:
- dispatchBranch(24[PC])
-
-.opNextPnameEnd:
- dispatch(7)
-
-.opNextPnameSlow:
- callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
- dispatch(0)
-
-
_llint_op_catch:
# This is where we end up from the JIT's throw trampoline (because the
# machine code return address will be set to _llint_op_catch), and from
# the interpreter's throw trampoline (see _llint_throw_trampoline).
# The throwing code must have known that we were throwing to the interpreter,
# and have set VM::targetInterpreterPCForThrow.
- loadp ScopeChain + PayloadOffset[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- loadp VM::callFrameForThrow[t3], cfr
- loadi VM::targetInterpreterPCForThrow[t3], PC
- loadi VM::m_exception + PayloadOffset[t3], t0
- loadi VM::m_exception + TagOffset[t3], t1
- storei 0, VM::m_exception + PayloadOffset[t3]
- storei EmptyValueTag, VM::m_exception + TagOffset[t3]
- loadi 4[PC], t2
- storei t0, PayloadOffset[cfr, t2, 8]
- storei t1, TagOffset[cfr, t2, 8]
- traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
- dispatch(2)
-
-
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
- loadp ScopeChain + PayloadOffset[cfr], t0
- loadi deBruijinIndexOperand, t2
-
- btiz t2, .done
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+ restoreStackPointerAfterCall()
- loadp CodeBlock[cfr], t1
- bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
- btbz CodeBlock::m_needsActivation[t1], .loop
+ loadi VM::targetInterpreterPCForThrow[t3], PC
- loadi CodeBlock::m_activationRegister[t1], t1
+ callOpcodeSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
+ bpeq r1, 0, .isCatchableException
+ jmp _llint_throw_from_slow_path_trampoline
- # Need to conditionally skip over one scope.
- bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
-.noActivation:
- subi 1, t2
+.isCatchableException:
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
- btiz t2, .done
-.loop:
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
- subi 1, t2
- btinz t2, .loop
+ loadi VM::m_exception[t3], t0
+ storei 0, VM::m_exception[t3]
+ loadi 4[PC], t2
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei CellTag, TagOffset[cfr, t2, 8]
-.done:
+ loadi Exception::m_value + TagOffset[t0], t1
+ loadi Exception::m_value + PayloadOffset[t0], t0
+ loadi 8[PC], t2
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei t1, TagOffset[cfr, t2, 8]
-end
+ traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
+ dispatch(3)
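# Note (editorial): the catch protocol, as far as this code shows it: the
# thrower parks the frame in VM::callFrameForCatch and the bytecode offset in
# VM::targetInterpreterPCForThrow; op_catch restores callee saves and cfr,
# asks the slow path whether the exception is catchable, then hands the
# bytecode both the Exception cell (operand 1) and its unwrapped thrown value
# (operand 2), which is why the opcode now dispatches 3 words. Sketch:
#
#   exception      = vm.m_exception; vm.m_exception = 0
#   r[op1]         = exception               # cell
#   r[op2]         = exception->m_value      # tag/payload pair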
_llint_op_end:
traceExecution()
@@ -2105,8 +2013,10 @@ _llint_throw_from_slow_path_trampoline:
# When throwing from the interpreter (i.e. throwing from LLIntSlowPaths),
# the throw target is not necessarily interpreted code, so we come here.
# This essentially emulates the JIT's throwing protocol.
- loadp CodeBlock[cfr], t1
- loadp CodeBlock::m_vm[t1], t1
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(t1, t2)
jmp VM::targetMachinePCForThrow[t1]
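# Note (editorial): cfr cannot be trusted to carry a CodeBlock at a throw, so
# the VM is recovered from the Callee cell instead: every JS cell lives in a
# MarkedBlock, so masking the cell pointer with MarkedBlockMask yields the
# block header, which stores the owning VM. The recurring idiom:
#
#   loadp Callee[cfr], t1
#   andp MarkedBlockMask, t1           # t1 = MarkedBlock header
#   loadp MarkedBlock::m_vm[t1], t1    # t1 = VM*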
@@ -2116,81 +2026,62 @@ _llint_throw_during_call_trampoline:
macro nativeCallTrampoline(executableOffsetToFunction)
+
+ functionPrologue()
storep 0, CodeBlock[cfr]
- loadp CallerFrame[cfr], t0
- loadi ScopeChain + PayloadOffset[t0], t1
- storei CellTag, ScopeChain + TagOffset[cfr]
- storei t1, ScopeChain + PayloadOffset[cfr]
- if X86
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ loadi Callee + PayloadOffset[cfr], t1
+ // Callee is still in t1 for code below
+ if X86 or X86_WIN
+ subp 8, sp # align stack pointer
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t3
storep cfr, VM::topCallFrame[t3]
- peek 0, t1
- storep t1, ReturnPC[cfr]
- move cfr, t2 # t2 = ecx
- subp 16 - 4, sp
+ move cfr, a0 # a0 = ecx
+ storep a0, [sp]
loadi Callee + PayloadOffset[cfr], t1
loadp JSFunction::m_executable[t1], t1
- move t0, cfr
+ checkStackPointerAlignment(t3, 0xdead0001)
call executableOffsetToFunction[t1]
- addp 16 - 4, sp
- loadp PayloadOffset + ScopeChain[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- storep cfr, VM::topCallFrame[t3]
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
+ loadp MarkedBlock::m_vm[t3], t3
+ addp 8, sp
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP or MIPS
+ subp 8, sp # align stack pointer
+ # t1 already contains the Callee.
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ storep cfr, VM::topCallFrame[t1]
+ move cfr, a0
loadi Callee + PayloadOffset[cfr], t1
loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- if MIPS or SH4
- move t0, a0
+ checkStackPointerAlignment(t3, 0xdead0001)
+ if C_LOOP
+ cloopCallNative executableOffsetToFunction[t1]
+ else
+ call executableOffsetToFunction[t1]
end
- call executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif C_LOOP
- loadp PayloadOffset + ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- storep cfr, VM::topCallFrame[t3]
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadi Callee + PayloadOffset[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- cloopCallNative executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp PayloadOffset + ScopeChain[cfr], t3
+ loadp Callee + PayloadOffset[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ loadp MarkedBlock::m_vm[t3], t3
+ addp 8, sp
else
error
end
- bineq VM::m_exception + TagOffset[t3], EmptyValueTag, .exception
+
+ btinz VM::m_exception[t3], .handleException
+
+ functionEpilogue()
ret
-.exception:
- preserveReturnAddressAfterCall(t1) # This is really only needed on X86
- loadi ArgumentCount + TagOffset[cfr], PC
- callSlowPath(_llint_throw_from_native_call)
+
+.handleException:
+ storep cfr, VM::topCallFrame[t3]
jmp _llint_throw_from_slow_path_trampoline
end
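# Note (editorial): the rewritten trampoline makes a plain C call: align sp,
# pass the ExecState (cfr) as a0, load the host function out of
# JSFunction::m_executable at executableOffsetToFunction, then re-derive the
# VM from the Callee cell and test VM::m_exception on return. Hedged sketch
# of the C-side signature this is calling into:
#
#   EncodedJSValue JSC_HOST_CALL someHostFunction(ExecState* exec);
#
# A non-zero VM::m_exception routes to _llint_throw_from_slow_path_trampoline.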
-macro getGlobalObject(dst)
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
+macro getConstantScope(dst)
+ loadpFromInstruction(6, t0)
loadisFromInstruction(dst, t1)
storei CellTag, TagOffset[cfr, t1, 8]
storei t0, PayloadOffset[cfr, t1, 8]
@@ -2205,14 +2096,10 @@ end
macro resolveScope()
loadp CodeBlock[cfr], t0
- loadisFromInstruction(4, t2)
- btbz CodeBlock::m_needsActivation[t0], .resolveScopeAfterActivationCheck
- loadis CodeBlock::m_activationRegister[t0], t1
- btpz PayloadOffset[cfr, t1, 8], .resolveScopeAfterActivationCheck
- addi 1, t2
+ loadisFromInstruction(5, t2)
-.resolveScopeAfterActivationCheck:
- loadp ScopeChain + PayloadOffset[cfr], t0
+ loadisFromInstruction(2, t0)
+ loadp PayloadOffset[cfr, t0, 8], t0
btiz t2, .resolveScopeLoopEnd
.resolveScopeLoop:
@@ -2229,55 +2116,71 @@ end
_llint_op_resolve_scope:
traceExecution()
- loadisFromInstruction(3, t0)
+ loadisFromInstruction(4, t0)
#rGlobalProperty:
bineq t0, GlobalProperty, .rGlobalVar
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVar:
- bineq t0, GlobalVar, .rClosureVar
- getGlobalObject(1)
- dispatch(6)
+ bineq t0, GlobalVar, .rGlobalLexicalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .rClosureVar
+ getConstantScope(1)
+ dispatch(7)
.rClosureVar:
- bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
+ bineq t0, ClosureVar, .rModuleVar
resolveScope()
- dispatch(6)
+ dispatch(7)
+
+.rModuleVar:
+ bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks
+ getConstantScope(1)
+ dispatch(7)
.rGlobalPropertyWithVarInjectionChecks:
bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
.rClosureVarWithVarInjectionChecks:
bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
varInjectionCheck(.rDynamic)
resolveScope()
- dispatch(6)
+ dispatch(7)
.rDynamic:
- callSlowPath(_llint_slow_path_resolve_scope)
- dispatch(6)
+ callOpcodeSlowPath(_slow_path_resolve_scope)
+ dispatch(7)
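# Note (editorial): op_resolve_scope is a switch on the instruction's
# ResolveType. The Global* and ModuleVar arms resolve to a scope object baked
# into the instruction stream (getConstantScope reads slot 6); the ClosureVar
# arms walk JSScope::m_next from the scope register; the
# *WithVarInjectionChecks variants first test the global's var-injection
# watchpoint. The opcode grew from 6 to 7 words, hence dispatch(7). Rough
# decision table:
#
#   GlobalProperty / GlobalVar / GlobalLexicalVar / ModuleVar -> constant scope
#   ClosureVar variants                                       -> walk scope chain
#   anything else                                             -> slow path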
macro loadWithStructureCheck(operand, slowPath)
loadisFromInstruction(operand, t0)
loadp PayloadOffset[cfr, t0, 8], t0
loadpFromInstruction(5, t1)
- bpneq JSCell::m_structure[t0], t1, slowPath
+ bpneq JSCell::m_structureID[t0], t1, slowPath
end
macro getProperty()
- loadpFromInstruction(6, t3)
+ loadisFromInstruction(6, t3)
loadPropertyAtVariableOffset(t3, t0, t1, t2)
valueProfile(t1, t2, 28, t0)
loadisFromInstruction(1, t0)
@@ -2285,10 +2188,11 @@ macro getProperty()
storei t2, PayloadOffset[cfr, t0, 8]
end
-macro getGlobalVar()
+macro getGlobalVar(tdzCheckIfNecessary)
loadpFromInstruction(6, t0)
loadp TagOffset[t0], t1
loadp PayloadOffset[t0], t2
+ tdzCheckIfNecessary(t1)
valueProfile(t1, t2, 28, t0)
loadisFromInstruction(1, t0)
storei t1, TagOffset[cfr, t0, 8]
@@ -2296,10 +2200,9 @@ macro getGlobalVar()
end
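# Note (editorial): tdzCheckIfNecessary is a macro-valued argument. Plain
# GlobalVar reads pass an empty macro; GlobalLexicalVar reads pass a check
# that the slot's tag is not EmptyValueTag, because an empty slot means the
# let/const binding is still in its temporal dead zone and the slow path must
# throw. The two call shapes, as used below:
#
#   getGlobalVar(macro(t) end)                                       # var
#   getGlobalVar(macro(tag) bieq tag, EmptyValueTag, .gDynamic end)  # let/const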
macro getClosureVar()
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t3)
- loadp TagOffset[t0, t3, 8], t1
- loadp PayloadOffset[t0, t3, 8], t2
+ loadisFromInstruction(6, t3)
+ loadp JSEnvironmentRecord_variables + TagOffset[t0, t3, 8], t1
+ loadp JSEnvironmentRecord_variables + PayloadOffset[t0, t3, 8], t2
valueProfile(t1, t2, 28, t0)
loadisFromInstruction(1, t0)
storei t1, TagOffset[cfr, t0, 8]
@@ -2309,7 +2212,7 @@ end
_llint_op_get_from_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
#gGlobalProperty:
bineq t0, GlobalProperty, .gGlobalVar
@@ -2318,8 +2221,16 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVar:
- bineq t0, GlobalVar, .gClosureVar
- getGlobalVar()
+ bineq t0, GlobalVar, .gGlobalLexicalVar
+ getGlobalVar(macro(t) end)
+ dispatch(8)
+
+.gGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .gClosureVar
+ getGlobalVar(
+ macro(tag)
+ bieq tag, EmptyValueTag, .gDynamic
+ end)
dispatch(8)
.gClosureVar:
@@ -2335,10 +2246,18 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.gDynamic)
- loadVariable(2, t2, t1, t0)
- getGlobalVar()
+ getGlobalVar(macro(t) end)
+ dispatch(8)
+
+.gGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(
+ macro(tag)
+ bieq tag, EmptyValueTag, .gDynamic
+ end)
dispatch(8)
.gClosureVarWithVarInjectionChecks:
@@ -2349,22 +2268,22 @@ _llint_op_get_from_scope:
dispatch(8)
.gDynamic:
- callSlowPath(_llint_slow_path_get_from_scope)
+ callOpcodeSlowPath(_llint_slow_path_get_from_scope)
dispatch(8)
macro putProperty()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2, t3)
- loadpFromInstruction(6, t1)
+ loadisFromInstruction(6, t1)
storePropertyAtVariableOffset(t1, t0, t2, t3)
end
-macro putGlobalVar()
+macro putGlobalVariable()
loadisFromInstruction(3, t0)
loadConstantOrVariable(t0, t1, t2)
loadpFromInstruction(5, t3)
- notifyWrite(t3, t1, t2, t0, .pDynamic)
+ notifyWrite(t3, .pDynamic)
loadpFromInstruction(6, t0)
storei t1, TagOffset[t0]
storei t2, PayloadOffset[t0]
@@ -2373,19 +2292,37 @@ end
macro putClosureVar()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2, t3)
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t1)
- storei t2, TagOffset[t0, t1, 8]
- storei t3, PayloadOffset[t0, t1, 8]
+ loadisFromInstruction(6, t1)
+ storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+ storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
+end
+
+macro putLocalClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadpFromInstruction(5, t5)
+ btpz t5, .noVariableWatchpointSet
+ notifyWrite(t5, .pDynamic)
+.noVariableWatchpointSet:
+ loadisFromInstruction(6, t1)
+ storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+ storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
end
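# Note (editorial): LocalClosureVar stores may carry a watchpoint set in slot
# 5 so the JITs can treat the variable as constant until it is overwritten;
# btpz skips the notification when no set was allocated, and notifyWrite
# bails to the slow path when the write would invalidate the watchpoint.
# Sketch of the intent (names illustrative, not real JSC API):
#
#   if (WatchpointSet* set = instruction[5])
#       if (set->stateNeedsSlowPath()) goto slowPath;
#   record->variables()[offset] = value;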
_llint_op_put_to_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
-#pGlobalProperty:
+#pLocalClosureVar:
+ bineq t0, LocalClosureVar, .pGlobalProperty
+ writeBarrierOnOperands(1, 3)
+ loadVariable(1, t2, t1, t0)
+ putLocalClosureVar()
+ dispatch(7)
+
+.pGlobalProperty:
bineq t0, GlobalProperty, .pGlobalVar
writeBarrierOnOperands(1, 3)
loadWithStructureCheck(1, .pDynamic)
@@ -2393,9 +2330,15 @@ _llint_op_put_to_scope:
dispatch(7)
.pGlobalVar:
- bineq t0, GlobalVar, .pClosureVar
+ bineq t0, GlobalVar, .pGlobalLexicalVar
writeBarrierOnGlobalObject(3)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .pClosureVar
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ putGlobalVariable()
dispatch(7)
.pClosureVar:
@@ -2413,20 +2356,179 @@ _llint_op_put_to_scope:
dispatch(7)
.pGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks
writeBarrierOnGlobalObject(3)
varInjectionCheck(.pDynamic)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ varInjectionCheck(.pDynamic)
+ putGlobalVariable()
dispatch(7)
.pClosureVarWithVarInjectionChecks:
- bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
+ bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar
writeBarrierOnOperands(1, 3)
varInjectionCheck(.pDynamic)
loadVariable(1, t2, t1, t0)
putClosureVar()
dispatch(7)
+.pModuleVar:
+ bineq t0, ModuleVar, .pDynamic
+ callOpcodeSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error)
+ dispatch(7)
+
.pDynamic:
- callSlowPath(_llint_slow_path_put_to_scope)
+ callOpcodeSlowPath(_llint_slow_path_put_to_scope)
dispatch(7)
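# Note (editorial): the new .pModuleVar arm makes every write to an imported
# module binding take the readonly-write error path, since module bindings
# are immutable from the importing side:
#
#   store to ModuleVar -> _slow_path_throw_strict_mode_readonly_property_write_error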
+
+
+_llint_op_get_from_arguments:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadi 12[PC], t1
+ loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2
+ loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3
+ loadisFromInstruction(1, t1)
+ valueProfile(t2, t3, 16, t0)
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(5)
+
+
+_llint_op_put_to_arguments:
+ traceExecution()
+ writeBarrierOnOperands(1, 3)
+ loadisFromInstruction(1, t0)
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 8[PC], t1
+ storei t2, DirectArguments_storage + TagOffset[t0, t1, 8]
+ storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8]
+ dispatch(4)
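# Note (editorial): op_get_from_arguments / op_put_to_arguments index
# straight into a DirectArguments object's inline storage:
# DirectArguments_storage is the byte offset of slot 0 and each slot is an
# 8-byte tag/payload pair, so the effective address is
# base + DirectArguments_storage + index * 8, exactly as the loads above
# spell it:
#
#   loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2
#   loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3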
+
+
+_llint_op_get_parent_scope:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadp PayloadOffset[cfr, t0, 8], t0
+ loadp JSScope::m_next[t0], t0
+ loadisFromInstruction(1, t1)
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+
+_llint_op_profile_type:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ # t1 is holding the pointer to the typeProfilerLog.
+ loadp VM::m_typeProfilerLog[t1], t1
+
+ # t0 holds the payload and t5 holds the tag.
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t2, t5, t0)
+
+ bieq t5, EmptyValueTag, .opProfileTypeDone
+
+ # t2 is holding the pointer to the current log entry.
+ loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
+
+ # Store the JSValue onto the log entry.
+ storei t5, TypeProfilerLog::LogEntry::value + TagOffset[t2]
+ storei t0, TypeProfilerLog::LogEntry::value + PayloadOffset[t2]
+
+ # Store the TypeLocation onto the log entry.
+ loadpFromInstruction(2, t3)
+ storep t3, TypeProfilerLog::LogEntry::location[t2]
+
+ bieq t5, CellTag, .opProfileTypeIsCell
+ storei 0, TypeProfilerLog::LogEntry::structureID[t2]
+ jmp .opProfileTypeSkipIsCell
+.opProfileTypeIsCell:
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, TypeProfilerLog::LogEntry::structureID[t2]
+.opProfileTypeSkipIsCell:
+
+ # Increment the current log entry.
+ addp sizeof TypeProfilerLog::LogEntry, t2
+ storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
+
+ loadp TypeProfilerLog::m_logEndPtr[t1], t1
+ bpneq t2, t1, .opProfileTypeDone
+ callOpcodeSlowPath(_slow_path_profile_type_clear_log)
+
+.opProfileTypeDone:
+ dispatch(6)
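# Note (editorial): the type profiler appends (value, TypeLocation,
# structureID-or-0) entries to a VM-global TypeProfilerLog and flushes via
# _slow_path_profile_type_clear_log when the cursor hits m_logEndPtr. Hedged
# sketch of one LogEntry as this code treats it:
#
#   struct LogEntry { JSValue value; TypeLocation* location; int32_t structureID; };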
+
+
+_llint_op_profile_control_flow:
+ traceExecution()
+ loadpFromInstruction(1, t0)
+ loadi BasicBlockLocation::m_executionCount[t0], t1
+ addi 1, t1
+ bieq t1, 0, .done # We overflowed.
+ storei t1, BasicBlockLocation::m_executionCount[t0]
+.done:
+ dispatch(2)
+
+
+_llint_op_get_rest_length:
+ traceExecution()
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ loadisFromInstruction(2, t1)
+ bilteq t0, t1, .storeZero
+ subi t1, t0
+ jmp .finish
+.storeZero:
+ move 0, t0
+.finish:
+ loadisFromInstruction(1, t1)
+ storei t0, PayloadOffset[cfr, t1, 8]
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ dispatch(3)
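# Note (editorial): the rest length is max(0, argumentCount - numParametersToSkip);
# the "subi 1, t0" drops the implicit 'this' from the ArgumentCount payload.
# Worked example: for function f(a, ...rest) called as f(x, y, z), the payload
# is 4 (this + 3 args), numParametersToSkip is 1, so rest.length = 4 - 1 - 1 = 2.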
+
+
+_llint_op_log_shadow_chicken_prologue:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ loadp CallerFrame[cfr], t1
+ storep t1, ShadowChicken::Packet::callerFrame[t0]
+ loadp Callee + PayloadOffset[cfr], t1
+ storep t1, ShadowChicken::Packet::callee[t0]
+ loadisFromInstruction(1, t1)
+ loadi PayloadOffset[cfr, t1, 8], t1
+ storep t1, ShadowChicken::Packet::scope[t0]
+ dispatch(2)
+.opLogShadowChickenPrologueSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_prologue)
+ dispatch(2)
+
+
+_llint_op_log_shadow_chicken_tail:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenTailSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0]
+ loadVariable(1, t3, t2, t1)
+ storei t2, TagOffset + ShadowChicken::Packet::thisValue[t0]
+ storei t1, PayloadOffset + ShadowChicken::Packet::thisValue[t0]
+ loadisFromInstruction(2, t1)
+ loadi PayloadOffset[cfr, t1, 8], t1
+ storep t1, ShadowChicken::Packet::scope[t0]
+ loadp CodeBlock[cfr], t1
+ storep t1, ShadowChicken::Packet::codeBlock[t0]
+ storei PC, ShadowChicken::Packet::callSiteIndex[t0]
+ dispatch(3)
+.opLogShadowChickenTailSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_tail)
+ dispatch(3)
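# Note (editorial): shadow chicken is the log that lets the inspector
# reconstruct the logical call stack in the presence of proper tail calls:
# prologues record a (frame, callerFrame, callee, scope) packet, and tail
# calls record a tail-marker packet before the frame is reused.
# acquireShadowChickenPacket returns the next free packet in t0 or branches
# to the slow path when the log is full. The two packet shapes, as written
# above:
#
#   prologue: { frame, callerFrame, callee, scope }
#   tail:     { frame, callee = ShadowChickenTailMarker, thisValue, scope,
#               codeBlock, callSiteIndex }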