Diffstat (limited to 'Source/JavaScriptCore/llint/LowLevelInterpreter64.asm')
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm | 1925 ++++++++++---------
1 file changed, 1102 insertions(+), 823 deletions(-)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index abfec65a4..0881d3721 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -46,225 +46,294 @@ macro dispatchAfterCall()
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
loadisFromInstruction(1, t1)
- storeq t0, [cfr, t1, 8]
- valueProfile(t0, 7, t2)
- dispatch(8)
+ storeq r0, [cfr, t1, 8]
+ valueProfile(r0, (CallOpCodeSize - 1), t3)
+ dispatch(CallOpCodeSize)
end
-macro cCall2(function, arg1, arg2)
- if X86_64
- move arg1, t5
- move arg2, t4
+macro cCall2(function)
+ checkStackPointerAlignment(t4, 0xbad0c002)
+ if X86_64 or ARM64
call function
- elsif ARM64
- move arg1, t0
- move arg2, t1
+ elsif X86_64_WIN
+ # Note: this implementation is only correct if the return type size is > 8 bytes.
+ # See macro cCall2Void for an implementation when the return type is <= 8 bytes.
+ # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+ # On entry, rcx (a0) should contain a pointer to this stack space. The other parameters are shifted to the right:
+ # rdx (a1) should contain the first argument, and r8 (a2) should contain the second argument.
+ # On return, rax contains a pointer to this stack value, and we then need to copy the 16-byte return value into rax (r0) and rdx (r1),
+ # since the return value is expected to be split between the two.
+ # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
+ move a1, a2
+ move a0, a1
+ subp 48, sp
+ move sp, a0
+ addp 32, a0
call function
+ addp 48, sp
+ move 8[r0], r1
+ move [r0], r0
elsif C_LOOP
- cloopCallSlowPath function, arg1, arg2
+ cloopCallSlowPath function, a0, a1
else
error
end
end
+macro cCall2Void(function)
+ if C_LOOP
+ cloopCallSlowPathVoid function, a0, a1
+ elsif X86_64_WIN
+ # Note: we cannot use the cCall2 macro for Win64 in this case,
+ # as the Win64 cCall2 implementation is only correct when the return type size is > 8 bytes.
+ # On Win64, rcx and rdx are used for passing the first two parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ subp 32, sp
+ call function
+ addp 32, sp
+ else
+ cCall2(function)
+ end
+end
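
The two macros above encode the Win64 ABI rule their comments describe: aggregates wider than 8 bytes come back through a hidden pointer passed in rcx (shifting the visible arguments right), while anything that fits in 8 bytes comes back directly in rax, so only the wide case needs the argument shift and the rax/rdx copy. A minimal C++ sketch of that distinction (type and function names are illustrative, not WebKit API):

#include <cstdint>

// Wider than 8 bytes: on Win64 the caller passes a hidden result
// pointer in rcx, shifting the visible arguments into rdx/r8 -- the
// same shift cCall2 performs with "move a1, a2 / move a0, a1".
struct WideResult {
    uint64_t first;   // copied back out of [rax] into r0 after the call
    uint64_t second;  // copied back out of 8[rax] into r1
};

WideResult wideReturn(uint64_t x, uint64_t y) {
    return { x + y, x - y };
}

// 8 bytes or less: returned directly in rax, no hidden pointer, so
// cCall2Void only has to reserve the 32-byte shadow space for the
// four parameter registers before calling.
uint64_t narrowReturn(uint64_t x, uint64_t y) {
    return x + y;
}
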
+
# This barely works. arg3 and arg4 should probably be immediates.
-macro cCall4(function, arg1, arg2, arg3, arg4)
- if X86_64
- move arg1, t5
- move arg2, t4
- move arg3, t1
- move arg4, t2
+macro cCall4(function)
+ checkStackPointerAlignment(t4, 0xbad0c004)
+ if X86_64 or ARM64
call function
- elsif ARM64
- move arg1, t0
- move arg2, t1
- move arg3, t2
- move arg4, t3
+ elsif X86_64_WIN
+ # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ subp 64, sp
call function
- elsif C_LOOP
- error
+ addp 64, sp
else
error
end
end
-macro functionPrologue(extraStackSpace)
- if X86_64
- push cfr
- move sp, cfr
- elsif ARM64
- pushLRAndFP
- end
- pushCalleeSaves
- if X86_64
- subp extraStackSpace, sp
- end
-end
+macro doVMEntry(makeCall)
+ functionPrologue()
+ pushCalleeSaves()
-macro functionEpilogue(extraStackSpace)
- if X86_64
- addp extraStackSpace, sp
- end
- popCalleeSaves
- if X86_64
- pop cfr
- elsif ARM64
- popLRAndFP
- end
-end
+ const entry = a0
+ const vm = a1
+ const protoCallFrame = a2
+
+ vmEntryRecord(cfr, sp)
+
+ checkStackPointerAlignment(t4, 0xbad0dc01)
+
+ storep vm, VMEntryRecord::m_vm[sp]
+ loadp VM::topCallFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopCallFrame[sp]
+ loadp VM::topVMEntryFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp]
-macro doCallToJavaScript(makeCall, doReturn)
- if X86_64
- const entry = t5
- const vmTopCallFrame = t4
- const protoCallFrame = t1
- const topOfStack = t2
-
- const extraStackSpace = 8
- const previousCFR = t0
- const previousPC = t6
- const temp1 = t0
- const temp2 = t3
- const temp3 = t6
- elsif ARM64
- const entry = a0
- const vmTopCallFrame = a1
- const protoCallFrame = a2
- const topOfStack = a3
-
- const extraStackSpace = 0
- const previousCFR = t4
- const previousPC = lr
- const temp1 = t3
- const temp2 = t5
- const temp3 = t6
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4
+ addp CallFrameHeaderSlots, t4, t4
+ lshiftp 3, t4
+ subp sp, t4, t3
+
+ # Ensure that we have enough additional stack capacity for the incoming args,
+ # and the frame for the JS code we're executing. We need to do this check
+ # before we start copying the args from the protoCallFrame below.
+ if C_LOOP
+ bpaeq t3, VM::m_cloopStackLimit[vm], .stackHeightOK
+ else
+ bpaeq t3, VM::m_softStackLimit[vm], .stackHeightOK
end
- functionPrologue(extraStackSpace)
-
- move topOfStack, cfr
- subp (CallFrameHeaderSlots-1)*8, cfr
- storep 0, ArgumentCount[cfr]
- storep vmTopCallFrame, Callee[cfr]
- loadp [vmTopCallFrame], temp1
- storep temp1, ScopeChain[cfr]
- storep 1, CodeBlock[cfr]
- if X86_64
- loadp 7*8[sp], previousPC
- loadp 6*8[sp], previousCFR
+ if C_LOOP
+ move entry, t4
+ move vm, t5
+ cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3
+ bpeq t0, 0, .stackCheckFailed
+ move t4, entry
+ move t5, vm
+ jmp .stackHeightOK
+
+.stackCheckFailed:
+ move t4, entry
+ move t5, vm
end
- storep previousPC, ReturnPC[cfr]
- storep previousCFR, CallerFrame[cfr]
- move cfr, temp1
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
- addp CallFrameHeaderSlots, temp2, temp2
- lshiftp 3, temp2
- subp temp2, cfr
- storep temp1, CallerFrame[cfr]
+ move vm, a0
+ move protoCallFrame, a1
+ cCall2(_llint_throw_stack_overflow_error)
+
+ vmEntryRecord(cfr, t4)
- move 5, temp1
+ loadp VMEntryRecord::m_vm[t4], vm
+ loadp VMEntryRecord::m_prevTopCallFrame[t4], extraTempReg
+ storep extraTempReg, VM::topCallFrame[vm]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], extraTempReg
+ storep extraTempReg, VM::topVMEntryFrame[vm]
+
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+.stackHeightOK:
+ move t3, sp
+ move 4, t3
.copyHeaderLoop:
- subi 1, temp1
- loadp [protoCallFrame, temp1, 8], temp3
- storep temp3, CodeBlock[cfr, temp1, 8]
- btinz temp1, .copyHeaderLoop
-
- loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
- subi 1, temp2
- loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
- subi 1, temp3
-
- bieq temp2, temp3, .copyArgs
- move ValueUndefined, temp1
+ # Copy the CodeBlock/Callee/ArgumentCount/|this| from protoCallFrame into the callee frame.
+ subi 1, t3
+ loadq [protoCallFrame, t3, 8], extraTempReg
+ storeq extraTempReg, CodeBlock[sp, t3, 8]
+ btinz t3, .copyHeaderLoop
+
+ loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4
+ subi 1, t4
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], extraTempReg
+ subi 1, extraTempReg
+
+ bieq t4, extraTempReg, .copyArgs
+ move ValueUndefined, t3
.fillExtraArgsLoop:
- subi 1, temp3
- storep temp1, ThisArgumentOffset+8[cfr, temp3, 8]
- bineq temp2, temp3, .fillExtraArgsLoop
+ subi 1, extraTempReg
+ storeq t3, ThisArgumentOffset + 8[sp, extraTempReg, 8]
+ bineq t4, extraTempReg, .fillExtraArgsLoop
.copyArgs:
- loadp ProtoCallFrame::args[protoCallFrame], temp1
+ loadp ProtoCallFrame::args[protoCallFrame], t3
.copyArgsLoop:
- btiz temp2, .copyArgsDone
- subi 1, temp2
- loadp [temp1, temp2, 8], temp3
- storep temp3, ThisArgumentOffset+8[cfr, temp2, 8]
+ btiz t4, .copyArgsDone
+ subi 1, t4
+ loadq [t3, t4, 8], extraTempReg
+ storeq extraTempReg, ThisArgumentOffset + 8[sp, t4, 8]
jmp .copyArgsLoop
.copyArgsDone:
- storep cfr, [vmTopCallFrame]
+ if ARM64
+ move sp, t4
+ storep t4, VM::topCallFrame[vm]
+ else
+ storep sp, VM::topCallFrame[vm]
+ end
+ storep cfr, VM::topVMEntryFrame[vm]
- move 0xffff000000000000, csr1
- addp 2, csr1, csr2
+ checkStackPointerAlignment(extraTempReg, 0xbad0dc02)
- makeCall(entry, temp1)
+ makeCall(entry, t3)
- bpeq CodeBlock[cfr], 1, .calleeFramePopped
- loadp CallerFrame[cfr], cfr
+ # We may have just made a call into a JS function, so we can't rely on sp
+ # for anything but the fact that our own locals (i.e. the VMEntryRecord) are
+ # not below it. It also still has to be aligned, though.
+ checkStackPointerAlignment(t2, 0xbad0dc03)
+
+ vmEntryRecord(cfr, t4)
-.calleeFramePopped:
- loadp Callee[cfr], temp2 # VM.topCallFrame
- loadp ScopeChain[cfr], temp3
- storep temp3, [temp2]
+ loadp VMEntryRecord::m_vm[t4], vm
+ loadp VMEntryRecord::m_prevTopCallFrame[t4], t2
+ storep t2, VM::topCallFrame[vm]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], t2
+ storep t2, VM::topVMEntryFrame[vm]
- doReturn(extraStackSpace)
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
+
+ ret
end
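
Every exit path from doVMEntry above (the normal return, the stack-overflow throw, and _handleUncaughtException further down) performs the same unlink: restore the VM's topCallFrame and topVMEntryFrame from the VMEntryRecord before popping callee saves. A C++ sketch of that bookkeeping, with the types reduced to stand-ins:

struct VM;

// Stand-ins for the real JSC types; field names follow the offsets
// the asm uses (VMEntryRecord::m_vm and friends).
struct VMEntryRecord {
    VM* m_vm;
    void* m_prevTopCallFrame;     // snapshot of VM::topCallFrame at entry
    void* m_prevTopVMEntryFrame;  // snapshot of VM::topVMEntryFrame at entry
};

struct VM {
    void* topCallFrame;
    void* topVMEntryFrame;
};

// Sketch of the restore sequence each exit path performs before
// popping callee saves and returning.
inline void unlinkVMEntryRecord(VMEntryRecord* record) {
    record->m_vm->topCallFrame = record->m_prevTopCallFrame;
    record->m_vm->topVMEntryFrame = record->m_prevTopVMEntryFrame;
}
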
+
macro makeJavaScriptCall(entry, temp)
- call entry
+ addp 16, sp
+ if C_LOOP
+ cloopCallJSFunction entry
+ else
+ call entry
+ end
+ subp 16, sp
end
+
macro makeHostFunctionCall(entry, temp)
move entry, temp
- if X86_64
- move cfr, t5
- elsif ARM64 or C_LOOP
- move cfr, a0
+ storep cfr, [sp]
+ move sp, a0
+ if C_LOOP
+ storep lr, 8[sp]
+ cloopCallNative temp
+ elsif X86_64_WIN
+ # We need to allocate 32 bytes on the stack for the shadow space.
+ subp 32, sp
+ call temp
+ addp 32, sp
+ else
+ call temp
end
- call temp
end
-macro doReturnFromJavaScript(extraStackSpace)
-_returnFromJavaScript:
- functionEpilogue(extraStackSpace)
- ret
-end
-macro doReturnFromHostFunction(extraStackSpace)
- functionEpilogue(extraStackSpace)
+_handleUncaughtException:
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+
+ loadp CallerFrame[cfr], cfr
+ vmEntryRecord(cfr, t2)
+
+ loadp VMEntryRecord::m_vm[t2], t3
+ loadp VMEntryRecord::m_prevTopCallFrame[t2], extraTempReg
+ storep extraTempReg, VM::topCallFrame[t3]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], extraTempReg
+ storep extraTempReg, VM::topVMEntryFrame[t3]
+
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
ret
-end
+
macro prepareStateForCCall()
leap [PB, PC, 8], PC
- move PB, t3
end
macro restoreStateAfterCCall()
- move t0, PC
- move t1, cfr
- move t3, PB
+ move r0, PC
subp PB, PC
rshiftp 3, PC
end
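
prepareStateForCCall and restoreStateAfterCCall convert between the interpreter's two PC representations: a slot index relative to PB while executing bytecode, and an absolute pointer across a C call. Since each instruction slot is 8 bytes (hence the rshiftp 3), the round trip is, in C++ terms:

#include <cstdint>

// prepareStateForCCall: leap [PB, PC, 8], PC
uint64_t* pcToPointer(uint64_t* pb, uintptr_t pcIndex) {
    return pb + pcIndex;
}

// restoreStateAfterCCall: move r0, PC; subp PB, PC; rshiftp 3, PC
uintptr_t pcToIndex(uint64_t* pb, uint64_t* absolutePC) {
    // Pointer difference on uint64_t* already divides by 8,
    // which is what the rshiftp 3 does on raw bytes.
    return static_cast<uintptr_t>(absolutePC - pb);
}
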
macro callSlowPath(slowPath)
prepareStateForCCall()
- cCall2(slowPath, cfr, PC)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
restoreStateAfterCCall()
end
macro traceOperand(fromWhere, operand)
prepareStateForCCall()
- cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_operand)
restoreStateAfterCCall()
end
macro traceValue(fromWhere, operand)
prepareStateForCCall()
- cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_value)
restoreStateAfterCCall()
end
@@ -272,18 +341,19 @@ end
macro callCallSlowPath(slowPath, action)
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(slowPath, cfr, PC)
- move t1, cfr
- action(t0)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ action(r0, r1)
end
macro callWatchdogTimerHandler(throwHandler)
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
- move t1, cfr
- btpnz t0, throwHandler
- move t3, PB
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_slow_path_handle_watchdog_timer)
+ btpnz r0, throwHandler
loadi ArgumentCount + TagOffset[cfr], PC
end
@@ -293,12 +363,13 @@ macro checkSwitchToJITForLoop()
macro()
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(_llint_loop_osr, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- jmp t0
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_loop_osr)
+ btpz r0, .recover
+ move r1, sp
+ jmp r0
.recover:
- move t3, PB
loadi ArgumentCount + TagOffset[cfr], PC
end)
end
@@ -332,50 +403,63 @@ macro loadConstantOrVariableCell(index, value, slow)
end
macro writeBarrierOnOperand(cellOperand)
- if GGC
- loadisFromInstruction(cellOperand, t1)
- loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
- checkMarkByte(t2, t1, t3,
- macro(marked)
- btbz marked, .writeBarrierDone
- push PB, PC
- cCall2(_llint_write_barrier_slow, cfr, t2)
- pop PC, PB
- end
- )
- .writeBarrierDone:
- end
+ loadisFromInstruction(cellOperand, t1)
+ loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
+ skipIfIsRememberedOrInEden(
+ t2,
+ macro()
+ push PB, PC
+ move t2, a1 # t2 can be a0 (not on 64 bits, but better safe than sorry)
+ move cfr, a0
+ cCall2Void(_llint_write_barrier_slow)
+ pop PC, PB
+ end)
+.writeBarrierDone:
end
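
The barrier above is a generational store barrier: it only has to call out when an old, already-scanned cell gains a reference, so cells still in eden or already in the remembered set skip the slow path (that is what skipIfIsRememberedOrInEden tests). A hedged C++ sketch of the fast-path decision; the state encoding is illustrative, not the real Heap definition:

#include <cstdint>

// Illustrative tri-state; the actual values live in the GC's CellState.
enum class CellState : uint8_t {
    NewWhite,   // still in eden: no barrier needed
    OldGrey,    // already in the remembered set: no barrier needed
    OldBlack    // old and scanned: must take _llint_write_barrier_slow
};

bool barrierNeedsSlowPath(CellState state) {
    return state == CellState::OldBlack;
}
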
macro writeBarrierOnOperands(cellOperand, valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- loadConstantOrVariable(t1, t0)
- btpz t0, .writeBarrierDone
-
- writeBarrierOnOperand(cellOperand)
- .writeBarrierDone:
- end
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+ btpz t0, .writeBarrierDone
+
+ writeBarrierOnOperand(cellOperand)
+.writeBarrierDone:
+end
+
+macro writeBarrierOnGlobal(valueOperand, loadHelper)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+ btpz t0, .writeBarrierDone
+
+ loadHelper(t3)
+ skipIfIsRememberedOrInEden(
+ t3,
+ macro()
+ push PB, PC
+ move cfr, a0
+ move t3, a1
+ cCall2Void(_llint_write_barrier_slow)
+ pop PC, PB
+ end
+ )
+.writeBarrierDone:
end
macro writeBarrierOnGlobalObject(valueOperand)
- if GGC
- loadisFromInstruction(valueOperand, t1)
- loadConstantOrVariable(t1, t0)
- btpz t0, .writeBarrierDone
-
- loadp CodeBlock[cfr], t3
- loadp CodeBlock::m_globalObject[t3], t3
- checkMarkByte(t3, t1, t2,
- macro(marked)
- btbz marked, .writeBarrierDone
- push PB, PC
- cCall2(_llint_write_barrier_slow, cfr, t3)
- pop PC, PB
- end
- )
- .writeBarrierDone:
- end
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
+
+macro writeBarrierOnGlobalLexicalEnvironment(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal
+ end)
end
macro valueProfile(value, operand, scratch)
@@ -383,27 +467,68 @@ macro valueProfile(value, operand, scratch)
storeq value, ValueProfile::m_buckets[scratch]
end
+macro structureIDToStructureWithScratch(structureIDThenStructure, scratch)
+ loadp CodeBlock[cfr], scratch
+ loadp CodeBlock::m_vm[scratch], scratch
+ loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch
+ loadp [scratch, structureIDThenStructure, 8], structureIDThenStructure
+end
+
+macro loadStructureWithScratch(cell, structure, scratch)
+ loadi JSCell::m_structureID[cell], structure
+ structureIDToStructureWithScratch(structure, scratch)
+end
+
+macro loadStructureAndClobberFirstArg(cell, structure)
+ loadi JSCell::m_structureID[cell], structure
+ loadp CodeBlock[cfr], cell
+ loadp CodeBlock::m_vm[cell], cell
+ loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[cell], cell
+ loadp [cell, structure, 8], structure
+end
# Entrypoints into the interpreter.
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-macro functionArityCheck(doneLabel, slow_path)
+macro functionArityCheck(doneLabel, slowPath)
loadi PayloadOffset + ArgumentCount[cfr], t0
biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
prepareStateForCCall()
- cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
- btiz t0, .isArityFixupNeeded
- move t1, cfr # t1 contains caller frame
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath) # This slowPath has the protocol: r0 = 0 => no error, r0 != 0 => error
+ btiz r0, .noError
+ move r1, cfr # r1 contains caller frame
jmp _llint_throw_from_slow_path_trampoline
-.isArityFixupNeeded:
+.noError:
+ loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
btiz t1, .continue
+ loadi PayloadOffset + ArgumentCount[cfr], t2
+ addi CallFrameHeaderSlots, t2
- // Move frame up "t1" slots
+ // Check if there are some unaligned slots we can use
+ move t1, t3
+ andi StackAlignmentSlots - 1, t3
+ btiz t3, .noExtraSlot
+ move ValueUndefined, t0
+.fillExtraSlots:
+ storeq t0, [cfr, t2, 8]
+ addi 1, t2
+ bsubinz 1, t3, .fillExtraSlots
+ andi ~(StackAlignmentSlots - 1), t1
+ btiz t1, .continue
+
+.noExtraSlot:
+ // Move frame up t1 slots
negq t1
move cfr, t3
- loadi PayloadOffset + ArgumentCount[cfr], t2
- addi CallFrameHeaderSlots, t2
+ subp CalleeSaveSpaceAsVirtualRegisters * 8, t3
+ addi CalleeSaveSpaceAsVirtualRegisters, t2
+ move t1, t0
+ lshiftp 3, t0
+ addp t0, cfr
+ addp t0, sp
.copyLoop:
loadq [t3], t0
storeq t0, [t3, t1, 8]
@@ -418,9 +543,6 @@ macro functionArityCheck(doneLabel, slow_path)
addp 8, t3
baddinz 1, t2, .fillLoop
- lshiftp 3, t1
- addp t1, cfr
-
.continue:
# Reload CodeBlock and reset PC, since the slow_path clobbered them.
loadp CodeBlock[cfr], t1
@@ -429,11 +551,10 @@ macro functionArityCheck(doneLabel, slow_path)
jmp doneLabel
end
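
The fixup above splits the needed slot count in two so the moved frame stays stack-aligned: the remainder modulo StackAlignmentSlots is satisfied in place by appending undefineds above the existing arguments (.fillExtraSlots), and only the aligned part slides the whole frame up (.copyLoop). A C++ sketch of that split, assuming 2 slots per 16-byte alignment unit:

constexpr unsigned StackAlignmentSlots = 2;  // assumed: 16-byte alignment, 8-byte slots

struct ArityFixupPlan {
    unsigned undefinedsToAppend;  // iterations of .fillExtraSlots
    unsigned slotsToSlide;        // distance the frame moves in .copyLoop
};

ArityFixupPlan planArityFixup(unsigned paddedStackSpace) {
    unsigned remainder = paddedStackSpace & (StackAlignmentSlots - 1);
    unsigned aligned = paddedStackSpace & ~(StackAlignmentSlots - 1);
    return { remainder, aligned };
}
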
-
macro branchIfException(label)
- loadp ScopeChain[cfr], t3
+ loadp Callee[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ loadp MarkedBlock::m_vm[t3], t3
btqz VM::m_exception[t3], .noException
jmp label
.noException:
@@ -441,108 +562,88 @@ end
# Instruction implementations
-
_llint_op_enter:
traceExecution()
+ checkStackPointerAlignment(t2, 0xdead00e1)
loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
+ subq CalleeSaveSpaceAsVirtualRegisters, t2
+ move cfr, t1
+ subq CalleeSaveSpaceAsVirtualRegisters * 8, t1
btiz t2, .opEnterDone
move ValueUndefined, t0
negi t2
sxi2q t2, t2
.opEnterLoop:
- storeq t0, [cfr, t2, 8]
+ storeq t0, [t1, t2, 8]
addq 1, t2
btqnz t2, .opEnterLoop
.opEnterDone:
- callSlowPath(_slow_path_enter)
+ callOpcodeSlowPath(_slow_path_enter)
dispatch(1)
-_llint_op_create_activation:
+_llint_op_get_argument:
traceExecution()
- loadisFromInstruction(1, t0)
- bqneq [cfr, t0, 8], ValueEmpty, .opCreateActivationDone
- callSlowPath(_llint_slow_path_create_activation)
-.opCreateActivationDone:
- dispatch(2)
-
+ loadisFromInstruction(1, t1)
+ loadisFromInstruction(2, t2)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ bilteq t0, t2, .opGetArgumentOutOfBounds
+ loadq ThisArgumentOffset[cfr, t2, 8], t0
+ storeq t0, [cfr, t1, 8]
+ valueProfile(t0, 3, t2)
+ dispatch(4)
-_llint_op_init_lazy_reg:
- traceExecution()
- loadisFromInstruction(1, t0)
- storeq ValueEmpty, [cfr, t0, 8]
- dispatch(2)
+.opGetArgumentOutOfBounds:
+ storeq ValueUndefined, [cfr, t1, 8]
+ valueProfile(ValueUndefined, 3, t2)
+ dispatch(4)
-_llint_op_create_arguments:
+_llint_op_argument_count:
traceExecution()
- loadisFromInstruction(1, t0)
- bqneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone
- callSlowPath(_slow_path_create_arguments)
-.opCreateArgumentsDone:
+ loadisFromInstruction(1, t1)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ orq TagTypeNumber, t0
+ storeq t0, [cfr, t1, 8]
dispatch(2)
-_llint_op_create_this:
+_llint_op_get_scope:
traceExecution()
- loadisFromInstruction(2, t0)
- loadp [cfr, t0, 8], t0
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
- btpz t1, .opCreateThisSlow
- allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
+ loadp Callee[cfr], t0
+ loadp JSCallee::m_scope[t0], t0
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
- dispatch(4)
-
-.opCreateThisSlow:
- callSlowPath(_slow_path_create_this)
- dispatch(4)
-
-
-_llint_op_get_callee:
- traceExecution()
- loadisFromInstruction(1, t0)
- loadp Callee[cfr], t1
- loadpFromInstruction(2, t2)
- bpneq t1, t2, .opGetCalleeSlow
- storep t1, [cfr, t0, 8]
- dispatch(3)
+ dispatch(2)
-.opGetCalleeSlow:
- callSlowPath(_slow_path_get_callee)
- dispatch(3)
_llint_op_to_this:
traceExecution()
loadisFromInstruction(1, t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .opToThisSlow
- loadp JSCell::m_structure[t0], t0
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], FinalObjectType, .opToThisSlow
+ bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
+ loadStructureWithScratch(t0, t1, t2)
loadpFromInstruction(2, t2)
- bpneq t0, t2, .opToThisSlow
- dispatch(3)
+ bpneq t1, t2, .opToThisSlow
+ dispatch(4)
.opToThisSlow:
- callSlowPath(_slow_path_to_this)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_to_this)
+ dispatch(4)
-_llint_op_new_object:
+_llint_op_check_tdz:
traceExecution()
- loadpFromInstruction(3, t0)
- loadp ObjectAllocationProfile::m_allocator[t0], t1
- loadp ObjectAllocationProfile::m_structure[t0], t2
- allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
- loadisFromInstruction(1, t1)
- storeq t0, [cfr, t1, 8]
- dispatch(4)
+ loadisFromInstruction(1, t0)
+ loadConstantOrVariable(t0, t1)
+ bqneq t1, ValueEmpty, .opNotTDZ
+ callOpcodeSlowPath(_slow_path_throw_tdz_error)
-.opNewObjectSlow:
- callSlowPath(_llint_slow_path_new_object)
- dispatch(4)
+.opNotTDZ:
+ dispatch(2)
_llint_op_mov:
@@ -554,40 +655,6 @@ _llint_op_mov:
dispatch(3)
-macro notifyWrite(set, value, scratch, slow)
- loadb VariableWatchpointSet::m_state[set], scratch
- bieq scratch, IsInvalidated, .done
- bineq scratch, ClearWatchpoint, .overwrite
- storeq value, VariableWatchpointSet::m_inferredValue[set]
- storeb IsWatched, VariableWatchpointSet::m_state[set]
- jmp .done
-
-.overwrite:
- bqeq value, VariableWatchpointSet::m_inferredValue[set], .done
- btbnz VariableWatchpointSet::m_setIsNotEmpty[set], slow
- storeq 0, VariableWatchpointSet::m_inferredValue[set]
- storeb IsInvalidated, VariableWatchpointSet::m_state[set]
-
-.done:
-end
-
-_llint_op_captured_mov:
- traceExecution()
- loadisFromInstruction(2, t1)
- loadConstantOrVariable(t1, t2)
- loadpFromInstruction(3, t0)
- btpz t0, .opCapturedMovReady
- notifyWrite(t0, t2, t1, .opCapturedMovSlow)
-.opCapturedMovReady:
- loadisFromInstruction(1, t0)
- storeq t2, [cfr, t0, 8]
- dispatch(4)
-
-.opCapturedMovSlow:
- callSlowPath(_slow_path_captured_mov)
- dispatch(4)
-
-
_llint_op_not:
traceExecution()
loadisFromInstruction(2, t0)
@@ -600,7 +667,7 @@ _llint_op_not:
dispatch(3)
.opNotSlow:
- callSlowPath(_slow_path_not)
+ callOpcodeSlowPath(_slow_path_not)
dispatch(3)
@@ -617,7 +684,7 @@ macro equalityComparison(integerComparison, slowPath)
dispatch(4)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(4)
end
@@ -637,11 +704,11 @@ macro equalNullComparison()
loadisFromInstruction(2, t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .immediate
- loadp JSCell::m_structure[t0], t2
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t2], MasqueradesAsUndefined, .masqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
move 0, t0
jmp .done
.masqueradesAsUndefined:
+ loadStructureWithScratch(t0, t2, t1)
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpeq Structure::m_globalObject[t2], t0, t0
@@ -692,7 +759,7 @@ macro strictEq(equalityOperation, slowPath)
dispatch(4)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(4)
end
@@ -719,7 +786,7 @@ macro preOp(arithmeticOperation, slowPath)
dispatch(2)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(2)
end
@@ -744,10 +811,27 @@ _llint_op_to_number:
btqz t2, tagTypeNumber, .opToNumberSlow
.opToNumberIsImmediate:
storeq t2, [cfr, t1, 8]
- dispatch(3)
+ valueProfile(t2, 3, t0)
+ dispatch(4)
.opToNumberSlow:
- callSlowPath(_slow_path_to_number)
+ callOpcodeSlowPath(_slow_path_to_number)
+ dispatch(4)
+
+
+_llint_op_to_string:
+ traceExecution()
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t0)
+ btqnz t0, tagMask, .opToStringSlow
+ bbneq JSCell::m_type[t0], StringType, .opToStringSlow
+.opToStringIsString:
+ storeq t0, [cfr, t2, 8]
+ dispatch(3)
+
+.opToStringSlow:
+ callOpcodeSlowPath(_slow_path_to_string)
dispatch(3)
@@ -755,22 +839,27 @@ _llint_op_negate:
traceExecution()
loadisFromInstruction(2, t0)
loadisFromInstruction(1, t1)
- loadConstantOrVariable(t0, t2)
- bqb t2, tagTypeNumber, .opNegateNotInt
- btiz t2, 0x7fffffff, .opNegateSlow
- negi t2
- orq tagTypeNumber, t2
- storeq t2, [cfr, t1, 8]
- dispatch(3)
+ loadConstantOrVariable(t0, t3)
+ loadisFromInstruction(3, t2)
+ bqb t3, tagTypeNumber, .opNegateNotInt
+ btiz t3, 0x7fffffff, .opNegateSlow
+ negi t3
+ ori ArithProfileInt, t2
+ orq tagTypeNumber, t3
+ storeisToInstruction(t2, 3)
+ storeq t3, [cfr, t1, 8]
+ dispatch(4)
.opNegateNotInt:
- btqz t2, tagTypeNumber, .opNegateSlow
- xorq 0x8000000000000000, t2
- storeq t2, [cfr, t1, 8]
- dispatch(3)
+ btqz t3, tagTypeNumber, .opNegateSlow
+ xorq 0x8000000000000000, t3
+ ori ArithProfileNumber, t2
+ storeq t3, [cfr, t1, 8]
+ storeisToInstruction(t2, 3)
+ dispatch(4)
.opNegateSlow:
- callSlowPath(_slow_path_negate)
- dispatch(3)
+ callOpcodeSlowPath(_slow_path_negate)
+ dispatch(4)
macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
@@ -782,6 +871,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
bqb t1, tagTypeNumber, .op2NotInt
loadisFromInstruction(1, t2)
integerOperationAndStore(t1, t0, .slow, t2)
+ loadisFromInstruction(4, t1)
+ ori ArithProfileIntInt, t1
+ storeisToInstruction(t1, 4)
dispatch(5)
.op1NotInt:
@@ -791,8 +883,14 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
btqz t1, tagTypeNumber, .slow
addq tagTypeNumber, t1
fq2d t1, ft1
+ loadisFromInstruction(4, t2)
+ ori ArithProfileNumberNumber, t2
+ storeisToInstruction(t2, 4)
jmp .op1NotIntReady
.op1NotIntOp2Int:
+ loadisFromInstruction(4, t2)
+ ori ArithProfileNumberInt, t2
+ storeisToInstruction(t2, 4)
ci2d t1, ft1
.op1NotIntReady:
loadisFromInstruction(1, t2)
@@ -808,6 +906,9 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
# First operand is definitely an int, the second is definitely not.
loadisFromInstruction(1, t2)
btqz t1, tagTypeNumber, .slow
+ loadisFromInstruction(4, t3)
+ ori ArithProfileIntNumber, t3
+ storeisToInstruction(t3, 4)
ci2d t0, ft0
addq tagTypeNumber, t1
fq2d t1, ft1
@@ -818,7 +919,7 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
dispatch(5)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
@@ -868,7 +969,7 @@ _llint_op_sub:
_llint_op_div:
traceExecution()
- if X86_64
+ if X86_64 or X86_64_WIN
binaryOpCustomStore(
macro (left, right, slow, index)
# Assume t3 is scratchable.
@@ -890,7 +991,7 @@ _llint_op_div:
macro (left, right) divd left, right end,
_slow_path_div)
else
- callSlowPath(_slow_path_div)
+ callOpcodeSlowPath(_slow_path_div)
dispatch(5)
end
@@ -909,7 +1010,7 @@ macro bitOp(operation, slowPath, advance)
dispatch(advance)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(advance)
end
@@ -946,7 +1047,7 @@ _llint_op_unsigned:
storeq t2, [cfr, t0, 8]
dispatch(3)
.opUnsignedSlow:
- callSlowPath(_slow_path_unsigned)
+ callOpcodeSlowPath(_slow_path_unsigned)
dispatch(3)
@@ -974,47 +1075,44 @@ _llint_op_bitor:
5)
-_llint_op_check_has_instance:
+_llint_op_overrides_has_instance:
traceExecution()
+ loadisFromInstruction(1, t3)
+
loadisFromInstruction(3, t1)
- loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
- dispatch(5)
+ loadConstantOrVariable(t1, t0)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_globalObject[t2], t2
+ loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t2], t2
+ bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol
-.opCheckHasInstanceSlow:
- callSlowPath(_llint_slow_path_check_has_instance)
- dispatch(0)
+ loadisFromInstruction(2, t1)
+ loadConstantOrVariable(t1, t0)
+ tbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, t1
+ orq ValueFalse, t1
+ storeq t1, [cfr, t3, 8]
+ dispatch(4)
+
+.opOverridesHasInstanceNotDefaultSymbol:
+ storeq ValueTrue, [cfr, t3, 8]
+ dispatch(4)
-_llint_op_instanceof:
+_llint_op_instanceof_custom:
traceExecution()
- # Actually do the work.
- loadisFromInstruction(3, t0)
- loadisFromInstruction(1, t3)
- loadConstantOrVariableCell(t0, t1, .opInstanceofSlow)
- loadp JSCell::m_structure[t1], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
- loadisFromInstruction(2, t0)
- loadConstantOrVariableCell(t0, t2, .opInstanceofSlow)
-
- # Register state: t1 = prototype, t2 = value
- move 1, t0
-.opInstanceofLoop:
- loadp JSCell::m_structure[t2], t2
- loadq Structure::m_prototype[t2], t2
- bqeq t2, t1, .opInstanceofDone
- btqz t2, tagMask, .opInstanceofLoop
+ callOpcodeSlowPath(_llint_slow_path_instanceof_custom)
+ dispatch(5)
- move 0, t0
-.opInstanceofDone:
- orq ValueFalse, t0
- storeq t0, [cfr, t3, 8]
- dispatch(4)
-.opInstanceofSlow:
- callSlowPath(_llint_slow_path_instanceof)
- dispatch(4)
+_llint_op_is_empty:
+ traceExecution()
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t0)
+ cqeq t0, ValueEmpty, t3
+ orq ValueFalse, t3
+ storeq t3, [cfr, t2, 8]
+ dispatch(3)
_llint_op_is_undefined:
@@ -1028,17 +1126,17 @@ _llint_op_is_undefined:
storeq t3, [cfr, t2, 8]
dispatch(3)
.opIsUndefinedCell:
- loadp JSCell::m_structure[t0], t0
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
move ValueFalse, t1
storeq t1, [cfr, t2, 8]
dispatch(3)
.masqueradesAsUndefined:
+ loadStructureWithScratch(t0, t3, t1)
loadp CodeBlock[cfr], t1
loadp CodeBlock::m_globalObject[t1], t1
- cpeq Structure::m_globalObject[t0], t1, t3
- orq ValueFalse, t3
- storeq t3, [cfr, t2, 8]
+ cpeq Structure::m_globalObject[t3], t1, t0
+ orq ValueFalse, t0
+ storeq t0, [cfr, t2, 8]
dispatch(3)
@@ -1065,18 +1163,33 @@ _llint_op_is_number:
dispatch(3)
-_llint_op_is_string:
+_llint_op_is_cell_with_type:
+ traceExecution()
+ loadisFromInstruction(3, t0)
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t3)
+ btqnz t3, tagMask, .notCellCase
+ cbeq JSCell::m_type[t3], t0, t1
+ orq ValueFalse, t1
+ storeq t1, [cfr, t2, 8]
+ dispatch(4)
+.notCellCase:
+ storeq ValueFalse, [cfr, t2, 8]
+ dispatch(4)
+
+
+_llint_op_is_object:
traceExecution()
loadisFromInstruction(2, t1)
loadisFromInstruction(1, t2)
loadConstantOrVariable(t1, t0)
- btqnz t0, tagMask, .opIsStringNotCell
- loadp JSCell::m_structure[t0], t0
- cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
+ btqnz t0, tagMask, .opIsObjectNotCell
+ cbaeq JSCell::m_type[t0], ObjectType, t1
orq ValueFalse, t1
storeq t1, [cfr, t2, 8]
dispatch(3)
-.opIsStringNotCell:
+.opIsObjectNotCell:
storeq ValueFalse, [cfr, t2, 8]
dispatch(3)
@@ -1106,50 +1219,60 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value
storeq value, (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
end
-_llint_op_init_global_const:
+_llint_op_get_by_id:
traceExecution()
- writeBarrierOnGlobalObject(2)
- loadisFromInstruction(2, t1)
- loadpFromInstruction(1, t0)
- loadConstantOrVariable(t1, t2)
- storeq t2, [t0]
- dispatch(5)
+ loadisFromInstruction(2, t0)
+ loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
+ loadi JSCell::m_structureID[t3], t1
+ loadisFromInstruction(4, t2)
+ bineq t2, t1, .opGetByIdSlow
+ loadisFromInstruction(5, t1)
+ loadisFromInstruction(1, t2)
+ loadPropertyAtVariableOffset(t1, t3, t0)
+ storeq t0, [cfr, t2, 8]
+ valueProfile(t0, 8, t1)
+ dispatch(9)
+
+.opGetByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
-macro getById(getPropertyStorage)
+_llint_op_get_by_id_proto_load:
traceExecution()
- # We only do monomorphic get_by_id caching for now, and we do not modify the
- # opcode. We do, however, allow for the cache to change anytime if fails, since
- # ping-ponging is free. At best we get lucky and the get_by_id will continue
- # to take fast path on the new cache. At worst we take slow path, which is what
- # we would have been doing anyway.
loadisFromInstruction(2, t0)
- loadpFromInstruction(4, t1)
- loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
- loadisFromInstruction(5, t2)
- getPropertyStorage(
- t3,
- t0,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
- loadisFromInstruction(1, t1)
- loadq [propertyStorage, t2], scratch
- storeq scratch, [cfr, t1, 8]
- valueProfile(scratch, 8, t1)
- dispatch(9)
- end)
-
- .opGetByIdSlow:
- callSlowPath(_llint_slow_path_get_by_id)
- dispatch(9)
-end
+ loadConstantOrVariableCell(t0, t3, .opGetByIdProtoSlow)
+ loadi JSCell::m_structureID[t3], t1
+ loadisFromInstruction(4, t2)
+ bineq t2, t1, .opGetByIdProtoSlow
+ loadisFromInstruction(5, t1)
+ loadpFromInstruction(6, t3)
+ loadisFromInstruction(1, t2)
+ loadPropertyAtVariableOffset(t1, t3, t0)
+ storeq t0, [cfr, t2, 8]
+ valueProfile(t0, 8, t1)
+ dispatch(9)
+
+.opGetByIdProtoSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
-_llint_op_get_by_id:
- getById(withInlineStorage)
+_llint_op_get_by_id_unset:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadConstantOrVariableCell(t0, t3, .opGetByIdUnsetSlow)
+ loadi JSCell::m_structureID[t3], t1
+ loadisFromInstruction(4, t2)
+ bineq t2, t1, .opGetByIdUnsetSlow
+ loadisFromInstruction(1, t2)
+ storeq ValueUndefined, [cfr, t2, 8]
+ valueProfile(ValueUndefined, 8, t1)
+ dispatch(9)
-_llint_op_get_by_id_out_of_line:
- getById(withOutOfLineStorage)
+.opGetByIdUnsetSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
_llint_op_get_array_length:
@@ -1157,7 +1280,7 @@ _llint_op_get_array_length:
loadisFromInstruction(2, t0)
loadpFromInstruction(4, t1)
loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow)
- loadp JSCell::m_structure[t3], t2
+ move t3, t2
arrayProfile(t2, t1, t0)
btiz t2, IsArray, .opGetArrayLengthSlow
btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
@@ -1171,121 +1294,176 @@ _llint_op_get_array_length:
dispatch(9)
.opGetArrayLengthSlow:
- callSlowPath(_llint_slow_path_get_by_id)
+ callOpcodeSlowPath(_llint_slow_path_get_by_id)
dispatch(9)
-_llint_op_get_arguments_length:
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(1, t1)
- btqnz [cfr, t0, 8], .opGetArgumentsLengthSlow
- loadi ArgumentCount + PayloadOffset[cfr], t2
- subi 1, t2
- orq tagTypeNumber, t2
- storeq t2, [cfr, t1, 8]
- dispatch(4)
-
-.opGetArgumentsLengthSlow:
- callSlowPath(_llint_slow_path_get_arguments_length)
- dispatch(4)
-
-
-macro putById(getPropertyStorage)
+_llint_op_put_by_id:
traceExecution()
- writeBarrierOnOperands(1, 3)
loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
- loadisFromInstruction(3, t2)
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- loadisFromInstruction(5, t1)
- loadConstantOrVariable(t2, scratch)
- storeq scratch, [propertyStorage, t1]
- dispatch(9)
- end)
-end
+ loadisFromInstruction(4, t2)
+ bineq t2, JSCell::m_structureID[t0], .opPutByIdSlow
-_llint_op_put_by_id:
- putById(withInlineStorage)
+ # At this point, we have:
+ # t2 -> current structure ID
+ # t0 -> object base
-.opPutByIdSlow:
- callSlowPath(_llint_slow_path_put_by_id)
- dispatch(9)
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t3)
+
+ loadpFromInstruction(8, t1)
+
+ # At this point, we have:
+ # t0 -> object base
+ # t1 -> put by id flags
+ # t2 -> current structure ID
+ # t3 -> value to put
+
+ btpnz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther
+
+ # We have one of the non-structure type checks. Find out which one.
+ andp PutByIdSecondaryTypeMask, t1
+ bplt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString
+
+ # We are one of the following: String, Symbol, Object, ObjectOrOther, Top
+ bplt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther
+
+ # We are either ObjectOrOther or Top.
+ bpeq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes
+
+ # Check if we are ObjectOrOther.
+ btqz t3, tagMask, .opPutByIdTypeCheckObject
+.opPutByIdTypeCheckOther:
+ andq ~TagBitUndefined, t3
+ bqeq t3, ValueNull, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanObjectOrOther:
+ # We are either String, Symbol or Object.
+ btqnz t3, tagMask, .opPutByIdSlow
+ bpeq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject
+ bpeq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol
+ bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckObject:
+ bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckSymbol:
+ bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanString:
+ # We are one of the following: Bottom, Boolean, Other, Int32, Number
+ bplt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32
+
+ # We are either Int32 or Number.
+ bpeq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber
+
+ bqaeq t3, tagTypeNumber, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckNumber:
+ btqnz t3, tagTypeNumber, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanInt32:
+ # We are one of the following: Bottom, Boolean, Other.
+ bpneq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther
+ xorq ValueFalse, t3
+ btqz t3, ~1, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckBottomOrOther:
+ bpeq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructureOrOther:
+ btqz t3, tagMask, .opPutByIdTypeCheckObjectWithStructure
+ btpnz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructure:
+ urshiftp 3, t1
+ bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow
+
+.opPutByIdDoneCheckingTypes:
+ loadisFromInstruction(6, t1)
+
+ btiz t1, .opPutByIdNotTransition
+ # This is the transition case. t1 holds the new structureID. t2 holds the old structure ID.
+ # If we have a chain, we need to check it. t0 is the base. We may clobber t1 to use it as
+ # scratch.
+ loadpFromInstruction(7, t3)
+ btpz t3, .opPutByIdTransitionDirect
-_llint_op_put_by_id_out_of_line:
- putById(withOutOfLineStorage)
+ loadp StructureChain::m_vector[t3], t3
+ assert(macro (ok) btpnz t3, ok end)
+
+ structureIDToStructureWithScratch(t2, t1)
+ loadq Structure::m_prototype[t2], t2
+ bqeq t2, ValueNull, .opPutByIdTransitionChainDone
+.opPutByIdTransitionChainLoop:
+ # At this point, t2 contains a prototype, and [t3] contains the Structure* that we want that
+ # prototype to have. We don't want to have to load the Structure* for t2. Instead, we load
+ # the Structure* from [t3], and then we compare its id to the id in the header of t2.
+ loadp [t3], t1
+ loadi JSCell::m_structureID[t2], t2
+ # Now, t1 has the Structure* and t2 has the StructureID that we want that Structure* to have.
+ bineq t2, Structure::m_blob + StructureIDBlob::u.fields.structureID[t1], .opPutByIdSlow
+ addp 8, t3
+ loadq Structure::m_prototype[t1], t2
+ bqneq t2, ValueNull, .opPutByIdTransitionChainLoop
+.opPutByIdTransitionChainDone:
+ # Reload the new structure, since we clobbered it above.
+ loadisFromInstruction(6, t1)
-macro putByIdTransition(additionalChecks, getPropertyStorage)
- traceExecution()
+.opPutByIdTransitionDirect:
+ storei t1, JSCell::m_structureID[t0]
writeBarrierOnOperand(1)
- loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
- loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
- loadisFromInstruction(3, t2)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- additionalChecks(t1, t3)
+ # Reload base into t0
+ loadisFromInstruction(1, t1)
+ loadConstantOrVariable(t1, t0)
+
+.opPutByIdNotTransition:
+ # The only thing live right now is t0, which holds the base.
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
loadisFromInstruction(5, t1)
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- addp t1, propertyStorage, t3
- loadConstantOrVariable(t2, t1)
- storeq t1, [t3]
- loadpFromInstruction(6, t1)
- storep t1, JSCell::m_structure[t0]
- dispatch(9)
- end)
-end
+ storePropertyAtVariableOffset(t1, t0, t2)
+ writeBarrierOnOperands(1, 3)
+ dispatch(9)
-macro noAdditionalChecks(oldStructure, scratch)
-end
+.opPutByIdSlow:
+ callOpcodeSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
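
The branch ladder above tests the value against the inferred type recorded in the put_by_id metadata. The primary check separates "object with a known structure (possibly or other)" from all flat types; the secondary types are ordered so a handful of unsigned compares can bisect them. A C++ sketch of the same predicate; the enum ordering is an assumption matching the compares above, not the literal PutByIdFlags values:

// Assumed ordering: everything below String is a non-cell type.
enum class SecondaryType {
    Bottom, Boolean, Other, Int32, Number,
    String, Symbol, Object, ObjectOrOther, Top
};

enum class ValueKind { Int32, Number, Boolean, Other, String, Symbol, Object };

// True when the fast path may store without calling the slow path.
bool valueMatchesInferredType(SecondaryType t, ValueKind v) {
    switch (t) {
    case SecondaryType::Top:           return true;
    case SecondaryType::Bottom:        return false;  // always slow
    case SecondaryType::ObjectOrOther: return v == ValueKind::Object || v == ValueKind::Other;
    case SecondaryType::Object:        return v == ValueKind::Object;
    case SecondaryType::Symbol:        return v == ValueKind::Symbol;
    case SecondaryType::String:        return v == ValueKind::String;
    case SecondaryType::Number:        return v == ValueKind::Number || v == ValueKind::Int32;
    case SecondaryType::Int32:         return v == ValueKind::Int32;
    case SecondaryType::Boolean:       return v == ValueKind::Boolean;
    case SecondaryType::Other:         return v == ValueKind::Other;  // null or undefined
    }
    return false;
}
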
-macro structureChainChecks(oldStructure, scratch)
- const protoCell = oldStructure # Reusing the oldStructure register for the proto
- loadpFromInstruction(7, scratch)
- assert(macro (ok) btpnz scratch, ok end)
- loadp StructureChain::m_vector[scratch], scratch
- assert(macro (ok) btpnz scratch, ok end)
- bqeq Structure::m_prototype[oldStructure], ValueNull, .done
-.loop:
- loadq Structure::m_prototype[oldStructure], protoCell
- loadp JSCell::m_structure[protoCell], oldStructure
- bpneq oldStructure, [scratch], .opPutByIdSlow
- addp 8, scratch
- bqneq Structure::m_prototype[oldStructure], ValueNull, .loop
-.done:
+macro finishGetByVal(result, scratch)
+ loadisFromInstruction(1, scratch)
+ storeq result, [cfr, scratch, 8]
+ valueProfile(result, 5, scratch)
+ dispatch(6)
end
-_llint_op_put_by_id_transition_direct:
- putByIdTransition(noAdditionalChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_direct_out_of_line:
- putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
-
-
-_llint_op_put_by_id_transition_normal:
- putByIdTransition(structureChainChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_normal_out_of_line:
- putByIdTransition(structureChainChecks, withOutOfLineStorage)
+macro finishIntGetByVal(result, scratch)
+ orq tagTypeNumber, result
+ finishGetByVal(result, scratch)
+end
+macro finishDoubleGetByVal(result, scratch1, scratch2)
+ fd2q result, scratch1
+ subq tagTypeNumber, scratch1
+ finishGetByVal(scratch1, scratch2)
+end
_llint_op_get_by_val:
traceExecution()
loadisFromInstruction(2, t2)
loadConstantOrVariableCell(t2, t0, .opGetByValSlow)
- loadp JSCell::m_structure[t0], t2
loadpFromInstruction(4, t3)
+ move t0, t2
arrayProfile(t2, t3, t1)
loadisFromInstruction(3, t3)
loadConstantOrVariableInt32(t3, t1, .opGetByValSlow)
@@ -1314,7 +1492,7 @@ _llint_op_get_by_val:
.opGetByValNotDouble:
subi ArrayStorageShape, t2
- bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
+ bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValNotIndexedStorage
biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
loadisFromInstruction(1, t0)
loadq ArrayStorage::m_vector[t3, t1, 8], t2
@@ -1328,64 +1506,78 @@ _llint_op_get_by_val:
.opGetByValOutOfBounds:
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
-.opGetByValSlow:
- callSlowPath(_llint_slow_path_get_by_val)
- dispatch(6)
+ jmp .opGetByValSlow
+
+.opGetByValNotIndexedStorage:
+ # First, let's check if we even have a typed array. This lets us do some boilerplate up front.
+ loadb JSCell::m_type[t0], t2
+ subi FirstArrayType, t2
+ bia t2, LastArrayType - FirstArrayType, .opGetByValSlow
+
+ # Sweet, now we know that we have a typed array. Do some basic things now.
+ loadp JSArrayBufferView::m_vector[t0], t3
+ biaeq t1, JSArrayBufferView::m_length[t0], .opGetByValSlow
+
+ # Now bisect through the various types. Note that we can treat Uint8ArrayType and
+ # Uint8ClampedArrayType the same.
+ bia t2, Uint8ClampedArrayType - FirstArrayType, .opGetByValAboveUint8ClampedArray
+
+ # We have one of Int8ArrayType .. Uint8ClampedArrayType.
+ bia t2, Int16ArrayType - FirstArrayType, .opGetByValInt32ArrayOrUint8Array
+
+ # We have one of Int8ArrayType or Int16ArrayType
+ bineq t2, Int8ArrayType - FirstArrayType, .opGetByValInt16Array
+
+ # We have Int8ArrayType
+ loadbs [t3, t1], t0
+ finishIntGetByVal(t0, t1)
+.opGetByValInt16Array:
+ loadhs [t3, t1, 2], t0
+ finishIntGetByVal(t0, t1)
-_llint_op_get_argument_by_val:
- # FIXME: At some point we should array profile this. Right now it isn't necessary
- # since the DFG will never turn a get_argument_by_val into a GetByVal.
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(3, t1)
- btqnz [cfr, t0, 8], .opGetArgumentByValSlow
- loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow)
- addi 1, t2
- loadi ArgumentCount + PayloadOffset[cfr], t1
- biaeq t2, t1, .opGetArgumentByValSlow
- loadisFromInstruction(1, t3)
- loadpFromInstruction(5, t1)
- loadq ThisArgumentOffset[cfr, t2, 8], t0
- storeq t0, [cfr, t3, 8]
- valueProfile(t0, 5, t1)
- dispatch(6)
+.opGetByValInt32ArrayOrUint8Array:
+ # We have one of Int32Array, Uint8Array, or Uint8ClampedArray.
+ bieq t2, Int32ArrayType - FirstArrayType, .opGetByValInt32Array
+
+ # We have either Uint8Array or Uint8ClampedArray. They behave the same so that's cool.
+ loadb [t3, t1], t0
+ finishIntGetByVal(t0, t1)
-.opGetArgumentByValSlow:
- callSlowPath(_llint_slow_path_get_argument_by_val)
- dispatch(6)
+.opGetByValInt32Array:
+ loadi [t3, t1, 4], t0
+ finishIntGetByVal(t0, t1)
+.opGetByValAboveUint8ClampedArray:
+ # We have one of Uint16ArrayType .. Float64ArrayType.
+ bia t2, Uint32ArrayType - FirstArrayType, .opGetByValAboveUint32Array
+
+ # We have either Uint16ArrayType or Uint32ArrayType.
+ bieq t2, Uint32ArrayType - FirstArrayType, .opGetByValUint32Array
-_llint_op_get_by_pname:
- traceExecution()
- loadisFromInstruction(3, t1)
- loadConstantOrVariable(t1, t0)
- loadisFromInstruction(4, t1)
- assertNotConstant(t1)
- bqneq t0, [cfr, t1, 8], .opGetByPnameSlow
- loadisFromInstruction(2, t2)
- loadisFromInstruction(5, t3)
- loadConstantOrVariableCell(t2, t0, .opGetByPnameSlow)
- assertNotConstant(t3)
- loadq [cfr, t3, 8], t1
- loadp JSCell::m_structure[t0], t2
- bpneq t2, JSPropertyNameIterator::m_cachedStructure[t1], .opGetByPnameSlow
- loadisFromInstruction(6, t3)
- loadi PayloadOffset[cfr, t3, 8], t3
- subi 1, t3
- biaeq t3, JSPropertyNameIterator::m_numCacheableSlots[t1], .opGetByPnameSlow
- bilt t3, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], .opGetByPnameInlineProperty
- addi firstOutOfLineOffset, t3
- subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], t3
-.opGetByPnameInlineProperty:
- loadPropertyAtVariableOffset(t3, t0, t0)
- loadisFromInstruction(1, t1)
- storeq t0, [cfr, t1, 8]
- dispatch(7)
+ # We have Uint16ArrayType.
+ loadh [t3, t1, 2], t0
+ finishIntGetByVal(t0, t1)
-.opGetByPnameSlow:
- callSlowPath(_llint_slow_path_get_by_pname)
- dispatch(7)
+.opGetByValUint32Array:
+ # This is the hardest part because of large unsigned values.
+ loadi [t3, t1, 4], t0
+ bilt t0, 0, .opGetByValSlow # This case is still awkward to implement in LLInt.
+ finishIntGetByVal(t0, t1)
+
+.opGetByValAboveUint32Array:
+ # We have one of Float32ArrayType or Float64ArrayType. Sadly, we cannot handle Float32Array
+ # inline yet. That would require some offlineasm changes.
+ bieq t2, Float32ArrayType - FirstArrayType, .opGetByValSlow
+
+ # We have Float64ArrayType.
+ loadd [t3, t1, 8], ft0
+ bdnequn ft0, ft0, .opGetByValSlow
+ finishDoubleGetByVal(ft0, t0, t1)
+
+.opGetByValSlow:
+ callOpcodeSlowPath(_llint_slow_path_get_by_val)
+ dispatch(6)
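
The typed-array path above turns JSCell::m_type into an index relative to FirstArrayType and bisects with unsigned range compares rather than a jump table, bailing to the slow path for the cases the LLInt cannot box inline (out-of-range Uint32 values, Float32, NaN doubles). A C++ sketch of the per-type loads; the contiguous type ordering is assumed from the compares above:

#include <cstdint>
#include <cstring>

// Assumed contiguous ordering mirroring FirstArrayType..LastArrayType.
enum ArrayType : uint8_t { Int8, Int16, Int32, Uint8, Uint8Clamped,
                           Uint16, Uint32, Float32, Float64 };

// Returns false where the asm jumps to _llint_slow_path_get_by_val.
bool fastGetByVal(ArrayType type, const void* vector, size_t i, double& out) {
    switch (type) {
    case Int8:         out = static_cast<const int8_t*>(vector)[i];   return true;
    case Int16:        out = static_cast<const int16_t*>(vector)[i];  return true;
    case Int32:        out = static_cast<const int32_t*>(vector)[i];  return true;
    case Uint8:                                   // Uint8 and Uint8Clamped load identically
    case Uint8Clamped: out = static_cast<const uint8_t*>(vector)[i];  return true;
    case Uint16:       out = static_cast<const uint16_t*>(vector)[i]; return true;
    case Uint32: {
        uint32_t v = static_cast<const uint32_t*>(vector)[i];
        if (v > 0x7fffffffu)
            return false;                         // too big to box as int32 inline
        out = v;
        return true;
    }
    case Float32:
        return false;                             // not handled inline yet
    case Float64: {
        double v;
        std::memcpy(&v, static_cast<const double*>(vector) + i, sizeof v);
        if (v != v)
            return false;                         // bdnequn ft0, ft0 -> slow path
        out = v;
        return true;
    }
    }
    return false;
}
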
macro contiguousPutByVal(storeCallback)
@@ -1404,13 +1596,12 @@ macro contiguousPutByVal(storeCallback)
jmp .storeResult
end
-macro putByVal(holeCheck, slowPath)
+macro putByVal(slowPath)
traceExecution()
- writeBarrierOnOperands(1, 3)
loadisFromInstruction(1, t0)
loadConstantOrVariableCell(t0, t1, .opPutByValSlow)
- loadp JSCell::m_structure[t1], t2
loadpFromInstruction(4, t3)
+ move t1, t2
arrayProfile(t2, t3, t0)
loadisFromInstruction(2, t0)
loadConstantOrVariableInt32(t0, t3, .opPutByValSlow)
@@ -1423,6 +1614,7 @@ macro putByVal(holeCheck, slowPath)
loadConstantOrVariable(operand, scratch)
bpb scratch, tagTypeNumber, .opPutByValSlow
storep scratch, address
+ writeBarrierOnOperands(1, 3)
end)
.opPutByValNotInt32:
@@ -1439,6 +1631,7 @@ macro putByVal(holeCheck, slowPath)
bdnequn ft0, ft0, .opPutByValSlow
.ready:
stored ft0, address
+ writeBarrierOnOperands(1, 3)
end)
.opPutByValNotDouble:
@@ -1447,16 +1640,18 @@ macro putByVal(holeCheck, slowPath)
macro (operand, scratch, address)
loadConstantOrVariable(operand, scratch)
storep scratch, address
+ writeBarrierOnOperands(1, 3)
end)
.opPutByValNotContiguous:
bineq t2, ArrayStorageShape, .opPutByValSlow
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
- holeCheck(ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty)
+ btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
loadisFromInstruction(3, t2)
loadConstantOrVariable(t2, t1)
storeq t1, ArrayStorage::m_vector[t0, t3, 8]
+ writeBarrierOnOperands(1, 3)
dispatch(5)
.opPutByValArrayStorageEmpty:
@@ -1472,18 +1667,15 @@ macro putByVal(holeCheck, slowPath)
loadpFromInstruction(4, t0)
storeb 1, ArrayProfile::m_outOfBounds[t0]
.opPutByValSlow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(5)
end
_llint_op_put_by_val:
- putByVal(macro(slot, slowPath)
- btqz slot, slowPath
- end, _llint_slow_path_put_by_val)
+ putByVal(_llint_slow_path_put_by_val)
_llint_op_put_by_val_direct:
- putByVal(macro(slot, slowPath)
- end, _llint_slow_path_put_by_val_direct)
+ putByVal(_llint_slow_path_put_by_val_direct)
_llint_op_jmp:
@@ -1503,7 +1695,7 @@ macro jumpTrueOrFalse(conditionOp, slow)
dispatchIntIndirect(2)
.slow:
- callSlowPath(slow)
+ callOpcodeSlowPath(slow)
dispatch(0)
end
@@ -1513,8 +1705,8 @@ macro equalNull(cellHandler, immediateHandler)
assertNotConstant(t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .immediate
- loadp JSCell::m_structure[t0], t2
- cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ loadStructureWithScratch(t0, t2, t1)
+ cellHandler(t2, JSCell::m_flags[t0], .target)
dispatch(3)
.target:
@@ -1559,9 +1751,10 @@ _llint_op_jneq_ptr:
loadp CodeBlock::m_globalObject[t2], t2
loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1
bpneq t1, [cfr, t0, 8], .opJneqPtrTarget
- dispatch(4)
+ dispatch(5)
.opJneqPtrTarget:
+ storei 1, 32[PB, PC, 8]
dispatchIntIndirect(3)
@@ -1602,7 +1795,7 @@ macro compare(integerCompare, doubleCompare, slowPath)
dispatchIntIndirect(3)
.slow:
- callSlowPath(slowPath)
+ callOpcodeSlowPath(slowPath)
dispatch(0)
end
@@ -1631,7 +1824,7 @@ _llint_op_switch_imm:
dispatchIntIndirect(2)
.opSwitchImmSlow:
- callSlowPath(_llint_slow_path_switch_imm)
+ callOpcodeSlowPath(_llint_slow_path_switch_imm)
dispatch(0)
@@ -1646,8 +1839,7 @@ _llint_op_switch_char:
loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
btqnz t1, tagMask, .opSwitchCharFallThrough
- loadp JSCell::m_structure[t1], t0
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, .opSwitchCharFallThrough
+ bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough
bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough
loadp JSString::m_value[t1], t0
btpz t0, .opSwitchOnRope
@@ -1669,40 +1861,22 @@ _llint_op_switch_char:
dispatchIntIndirect(2)
.opSwitchOnRope:
- callSlowPath(_llint_slow_path_switch_char)
+ callOpcodeSlowPath(_llint_slow_path_switch_char)
dispatch(0)
-_llint_op_new_func:
- traceExecution()
- loadisFromInstruction(3, t2)
- btiz t2, .opNewFuncUnchecked
- loadisFromInstruction(1, t1)
- btqnz [cfr, t1, 8], .opNewFuncDone
-.opNewFuncUnchecked:
- callSlowPath(_llint_slow_path_new_func)
-.opNewFuncDone:
- dispatch(4)
-
-
-_llint_op_new_captured_func:
- traceExecution()
- callSlowPath(_slow_path_new_captured_func)
- dispatch(4)
-
-
macro arrayProfileForCall()
loadisFromInstruction(4, t3)
negp t3
loadq ThisArgumentOffset[cfr, t3, 8], t0
btqnz t0, tagMask, .done
- loadp JSCell::m_structure[t0], t0
- loadpFromInstruction(6, t1)
- storep t0, ArrayProfile::m_lastSeenStructure[t1]
+ loadpFromInstruction((CallOpCodeSize - 2), t1)
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, ArrayProfile::m_lastSeenStructureID[t1]
.done:
end
-macro doCall(slowPath)
+macro doCall(slowPath, prepareCall)
loadisFromInstruction(2, t0)
loadpFromInstruction(5, t1)
loadp LLIntCallLinkInfo::callee[t1], t2
@@ -1712,61 +1886,23 @@ macro doCall(slowPath)
lshifti 3, t3
negp t3
addp cfr, t3
- loadp JSFunction::m_scope[t2], t0
storeq t2, Callee[t3]
- storeq t0, ScopeChain[t3]
loadisFromInstruction(3, t2)
storei PC, ArgumentCount + TagOffset[cfr]
- storeq cfr, CallerFrame[t3]
storei t2, ArgumentCount + PayloadOffset[t3]
- move t3, cfr
- callTargetFunction(t1)
+ move t3, sp
+ prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+ callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
.opCallSlow:
- slowPathForCall(slowPath)
+ slowPathForCall(slowPath, prepareCall)
end
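
doCall carves the callee frame out of the stack below the caller: the instruction's register offset (operand 4) gives the distance in slots from cfr, and the header fields (Callee, ArgumentCount) are written there before sp is repointed and prepareCall/callTargetFunction run. In C++ terms, the address arithmetic is simply:

#include <cstdint>

using Slot = uint64_t;  // one 8-byte stack slot per virtual register

// lshifti 3 / negp / addp cfr from the asm, as pointer arithmetic:
// the new frame sits registerOffset slots below the current one.
Slot* calleeFrameFor(Slot* cfr, int32_t registerOffset) {
    return cfr - registerOffset;
}
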
-
-_llint_op_tear_off_activation:
- traceExecution()
- loadisFromInstruction(1, t0)
- btqz [cfr, t0, 8], .opTearOffActivationNotCreated
- callSlowPath(_llint_slow_path_tear_off_activation)
-.opTearOffActivationNotCreated:
- dispatch(2)
-
-
-_llint_op_tear_off_arguments:
- traceExecution()
- loadisFromInstruction(1, t0)
- addq 1, t0 # Get the unmodifiedArgumentsRegister
- btqz [cfr, t0, 8], .opTearOffArgumentsNotCreated
- callSlowPath(_llint_slow_path_tear_off_arguments)
-.opTearOffArgumentsNotCreated:
- dispatch(3)
-
-
_llint_op_ret:
traceExecution()
checkSwitchToJITForEpilogue()
loadisFromInstruction(1, t2)
- loadConstantOrVariable(t2, t0)
- doReturn()
-
-
-_llint_op_ret_object_or_this:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadisFromInstruction(1, t2)
- loadConstantOrVariable(t2, t0)
- btqnz t0, tagMask, .opRetObjectOrThisNotObject
- loadp JSCell::m_structure[t0], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
- doReturn()
-
-.opRetObjectOrThisNotObject:
- loadisFromInstruction(2, t2)
- loadConstantOrVariable(t2, t0)
+ loadConstantOrVariable(t2, r0)
doReturn()
@@ -1776,81 +1912,56 @@ _llint_op_to_primitive:
loadisFromInstruction(1, t3)
loadConstantOrVariable(t2, t0)
btqnz t0, tagMask, .opToPrimitiveIsImm
- loadp JSCell::m_structure[t0], t2
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+ bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
storeq t0, [cfr, t3, 8]
dispatch(3)
.opToPrimitiveSlowCase:
- callSlowPath(_slow_path_to_primitive)
+ callOpcodeSlowPath(_slow_path_to_primitive)
dispatch(3)
-_llint_op_next_pname:
- traceExecution()
- loadisFromInstruction(3, t1)
- loadisFromInstruction(4, t2)
- assertNotConstant(t1)
- assertNotConstant(t2)
- loadi PayloadOffset[cfr, t1, 8], t0
- bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
- loadisFromInstruction(5, t2)
- assertNotConstant(t2)
- loadp [cfr, t2, 8], t2
- loadp JSPropertyNameIterator::m_jsStrings[t2], t3
- loadq [t3, t0, 8], t3
- addi 1, t0
- storei t0, PayloadOffset[cfr, t1, 8]
- loadisFromInstruction(1, t1)
- storeq t3, [cfr, t1, 8]
- loadisFromInstruction(2, t3)
- assertNotConstant(t3)
- loadq [cfr, t3, 8], t3
- loadp JSCell::m_structure[t3], t1
- bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
- loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
- loadp StructureChain::m_vector[t0], t0
- btpz [t0], .opNextPnameTarget
-.opNextPnameCheckPrototypeLoop:
- bqeq Structure::m_prototype[t1], ValueNull, .opNextPnameSlow
- loadq Structure::m_prototype[t1], t2
- loadp JSCell::m_structure[t2], t1
- bpneq t1, [t0], .opNextPnameSlow
- addp 8, t0
- btpnz [t0], .opNextPnameCheckPrototypeLoop
-.opNextPnameTarget:
- dispatchIntIndirect(6)
-
-.opNextPnameEnd:
- dispatch(7)
-
-.opNextPnameSlow:
- callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
- dispatch(0)
-
-
_llint_op_catch:
# This is where we end up from the JIT's throw trampoline (because the
# machine code return address will be set to _llint_op_catch), and from
# the interpreter's throw trampoline (see _llint_throw_trampoline).
# The throwing code must have known that we were throwing to the interpreter,
# and have set VM::targetInterpreterPCForThrow.
- loadp ScopeChain[cfr], t3
+ loadp Callee[cfr], t3
andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- loadp VM::callFrameForThrow[t3], cfr
+ loadp MarkedBlock::m_vm[t3], t3
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+ restoreStackPointerAfterCall()
+
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
loadp VM::targetInterpreterPCForThrow[t3], PC
subp PB, PC
rshiftp 3, PC
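+ # Reconstruct PC as an instruction index: each instruction slot is 8 bytes
+ # on 64-bit, so PC = (targetInterpreterPCForThrow - m_instructions) >> 3.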
+
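+ # The slow path reports in r1 whether this exception may be caught here;
+ # nonzero (e.g. a termination request) means we must keep unwinding.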
+ callOpcodeSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
+ bpeq r1, 0, .isCatchableException
+ jmp _llint_throw_from_slow_path_trampoline
+
+.isCatchableException:
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+
loadq VM::m_exception[t3], t0
storeq 0, VM::m_exception[t3]
loadisFromInstruction(1, t2)
storeq t0, [cfr, t2, 8]
+
+ loadq Exception::m_value[t0], t3
+ loadisFromInstruction(2, t2)
+ storeq t3, [cfr, t2, 8]
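+ # op_catch has two outputs: operand 1 receives the Exception object itself
+ # and operand 2 the thrown JSValue, hence the dispatch(3) below.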
+
traceExecution()
- dispatch(2)
+ dispatch(3)
_llint_op_end:
@@ -1858,18 +1969,24 @@ _llint_op_end:
checkSwitchToJITForEpilogue()
loadisFromInstruction(1, t0)
assertNotConstant(t0)
- loadq [cfr, t0, 8], t0
+ loadq [cfr, t0, 8], r0
doReturn()
_llint_throw_from_slow_path_trampoline:
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(t1, t2)
+
callSlowPath(_llint_slow_path_handle_exception)
 # When throwing from the interpreter (i.e. from LLIntSlowPaths), the throw
 # target is not necessarily interpreted code, so we come here. This
 # essentially emulates the JIT's throwing protocol.
- loadp CodeBlock[cfr], t1
- loadp CodeBlock::m_vm[t1], t1
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_vm[t1], t1
jmp VM::targetMachinePCForThrow[t1]
@@ -1879,90 +1996,48 @@ _llint_throw_during_call_trampoline:
macro nativeCallTrampoline(executableOffsetToFunction)
+
+ functionPrologue()
storep 0, CodeBlock[cfr]
- if X86_64
- loadp ScopeChain[cfr], t0
- andp MarkedBlockMask, t0
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t0], t0
- storep cfr, VM::topCallFrame[t0]
- loadp CallerFrame[cfr], t0
- loadq ScopeChain[t0], t1
- storeq t1, ScopeChain[cfr]
- peek 0, t1
- storep t1, ReturnPC[cfr]
- move cfr, t5 # t5 = rdi
- subp 16 - 8, sp
- loadp Callee[cfr], t4 # t4 = rsi
- loadp JSFunction::m_executable[t4], t1
- move t0, cfr # Restore cfr to avoid loading from stack
- call executableOffsetToFunction[t1]
- addp 16 - 8, sp
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif ARM64
- loadp ScopeChain[cfr], t0
- andp MarkedBlockMask, t0
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t0], t0
- storep cfr, VM::topCallFrame[t0]
- loadp CallerFrame[cfr], t2
- loadp ScopeChain[t2], t1
- storep t1, ScopeChain[cfr]
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadp Callee[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr # Restore cfr to avoid loading from stack
- call executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- elsif C_LOOP
- loadp CallerFrame[cfr], t0
- loadp ScopeChain[t0], t1
- storep t1, ScopeChain[cfr]
-
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- storep cfr, VM::topCallFrame[t3]
-
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadp Callee[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr
+ loadp Callee[cfr], t0
+ andp MarkedBlockMask, t0, t1
+ loadp MarkedBlock::m_vm[t1], t1
+ storep cfr, VM::topCallFrame[t1]
+ if ARM64 or C_LOOP
+ storep lr, ReturnPC[cfr]
+ end
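+ # Native callees take the call frame (an ExecState*) as their one argument.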
+ move cfr, a0
+ loadp Callee[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ checkStackPointerAlignment(t3, 0xdead0001)
+ if C_LOOP
cloopCallNative executableOffsetToFunction[t1]
-
- restoreReturnAddressBeforeReturn(t3)
- loadp ScopeChain[cfr], t3
- andp MarkedBlockMask, t3
- loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
else
- error
+ if X86_64_WIN
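+ # Win64 requires the caller to reserve 32 bytes of "shadow space" for the
+ # callee to home its four register parameters, even for shorter arg lists.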
+ subp 32, sp
+ end
+ call executableOffsetToFunction[t1]
+ if X86_64_WIN
+ addp 32, sp
+ end
end
- btqnz VM::m_exception[t3], .exception
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_vm[t3], t3
+
+ btqnz VM::m_exception[t3], .handleException
+
+ functionEpilogue()
ret
-.exception:
- preserveReturnAddressAfterCall(t1) # This is really only needed on X86_64
- loadi ArgumentCount + TagOffset[cfr], PC
- loadp CodeBlock[cfr], PB
- loadp CodeBlock::m_vm[PB], t0
- loadp CodeBlock::m_instructions[PB], PB
- storep cfr, VM::topCallFrame[t0]
- callSlowPath(_llint_throw_from_native_call)
+
+.handleException:
+ storep cfr, VM::topCallFrame[t3]
jmp _llint_throw_from_slow_path_trampoline
end
-
-macro getGlobalObject(dst)
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
+macro getConstantScope(dst)
+ loadpFromInstruction(6, t0)
loadisFromInstruction(dst, t1)
storeq t0, [cfr, t1, 8]
end
@@ -1975,15 +2050,9 @@ macro varInjectionCheck(slowPath)
end
macro resolveScope()
- loadp CodeBlock[cfr], t0
- loadisFromInstruction(4, t2)
- btbz CodeBlock::m_needsActivation[t0], .resolveScopeAfterActivationCheck
- loadis CodeBlock::m_activationRegister[t0], t1
- btpz [cfr, t1, 8], .resolveScopeAfterActivationCheck
- addi 1, t2
-
-.resolveScopeAfterActivationCheck:
- loadp ScopeChain[cfr], t0
+ loadisFromInstruction(5, t2)
+ loadisFromInstruction(2, t0)
+ loadp [cfr, t0, 8], t0
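+ # Walk the scope chain: t2 holds the statically known hop count (operand 5)
+ # and t0 the starting scope (operand 2); each hop follows JSScope::m_next.
+ # Roughly: for (i = 0; i < hops; ++i) scope = scope->next;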
btiz t2, .resolveScopeLoopEnd
.resolveScopeLoop:
@@ -1999,73 +2068,90 @@ end
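+# op_resolve_scope dispatches on the ResolveType in operand 4: the Global*
+# and ModuleVar cases resolve to a constant scope baked into operand 6
+# (getConstantScope), ClosureVar walks the scope chain (resolveScope), and the
+# *WithVarInjectionChecks variants first verify that no var injection occurred.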
_llint_op_resolve_scope:
traceExecution()
- loadisFromInstruction(3, t0)
+ loadisFromInstruction(4, t0)
#rGlobalProperty:
bineq t0, GlobalProperty, .rGlobalVar
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVar:
- bineq t0, GlobalVar, .rClosureVar
- getGlobalObject(1)
- dispatch(6)
+ bineq t0, GlobalVar, .rGlobalLexicalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .rClosureVar
+ getConstantScope(1)
+ dispatch(7)
.rClosureVar:
- bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
+ bineq t0, ClosureVar, .rModuleVar
resolveScope()
- dispatch(6)
+ dispatch(7)
+
+.rModuleVar:
+ bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks
+ getConstantScope(1)
+ dispatch(7)
.rGlobalPropertyWithVarInjectionChecks:
bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
.rGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.rDynamic)
- getGlobalObject(1)
- dispatch(6)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
.rClosureVarWithVarInjectionChecks:
bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
varInjectionCheck(.rDynamic)
resolveScope()
- dispatch(6)
+ dispatch(7)
.rDynamic:
- callSlowPath(_llint_slow_path_resolve_scope)
- dispatch(6)
+ callOpcodeSlowPath(_slow_path_resolve_scope)
+ dispatch(7)
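+# loadWithStructureCheck loads the cell named by its operand, rebuilds the
+# cell's Structure pointer (via its structure ID), and bails to slowPath
+# unless it matches the Structure cached in operand 5.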
macro loadWithStructureCheck(operand, slowPath)
loadisFromInstruction(operand, t0)
loadq [cfr, t0, 8], t0
+ loadStructureWithScratch(t0, t2, t1)
loadpFromInstruction(5, t1)
- bpneq JSCell::m_structure[t0], t1, slowPath
+ bpneq t2, t1, slowPath
end
macro getProperty()
- loadpFromInstruction(6, t1)
+ loadisFromInstruction(6, t1)
loadPropertyAtVariableOffset(t1, t0, t2)
valueProfile(t2, 7, t0)
loadisFromInstruction(1, t0)
storeq t2, [cfr, t0, 8]
end
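+# getGlobalVar reads through the pointer in operand 6 (the address of the
+# global's variable slot). For lexical (let/const) globals the caller passes a
+# TDZ check that traps when the slot still holds ValueEmpty, the
+# "uninitialized" sentinel.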
-macro getGlobalVar()
+macro getGlobalVar(tdzCheckIfNecessary)
loadpFromInstruction(6, t0)
loadq [t0], t0
+ tdzCheckIfNecessary(t0)
valueProfile(t0, 7, t1)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
end
macro getClosureVar()
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t1)
- loadq [t0, t1, 8], t0
+ loadisFromInstruction(6, t1)
+ loadq JSEnvironmentRecord_variables[t0, t1, 8], t0
valueProfile(t0, 7, t1)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
@@ -2074,7 +2160,7 @@ end
_llint_op_get_from_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
#gGlobalProperty:
bineq t0, GlobalProperty, .gGlobalVar
@@ -2083,8 +2169,16 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVar:
- bineq t0, GlobalVar, .gClosureVar
- getGlobalVar()
+ bineq t0, GlobalVar, .gGlobalLexicalVar
+ getGlobalVar(macro(v) end)
+ dispatch(8)
+
+.gGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .gClosureVar
+ getGlobalVar(
+ macro (value)
+ bqeq value, ValueEmpty, .gDynamic
+ end)
dispatch(8)
.gClosureVar:
@@ -2100,10 +2194,18 @@ _llint_op_get_from_scope:
dispatch(8)
.gGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks
varInjectionCheck(.gDynamic)
- loadVariable(2, t0)
- getGlobalVar()
+ getGlobalVar(macro(v) end)
+ dispatch(8)
+
+.gGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(
+ macro (value)
+ bqeq value, ValueEmpty, .gDynamic
+ end)
dispatch(8)
.gClosureVarWithVarInjectionChecks:
@@ -2114,82 +2216,259 @@ _llint_op_get_from_scope:
dispatch(8)
.gDynamic:
- callSlowPath(_llint_slow_path_get_from_scope)
+ callOpcodeSlowPath(_llint_slow_path_get_from_scope)
dispatch(8)
macro putProperty()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2)
- loadpFromInstruction(6, t1)
+ loadisFromInstruction(6, t1)
storePropertyAtVariableOffset(t1, t0, t2)
end
-macro putGlobalVar()
+macro putGlobalVariable()
loadisFromInstruction(3, t0)
loadConstantOrVariable(t0, t1)
loadpFromInstruction(5, t2)
- notifyWrite(t2, t1, t0, .pDynamic)
loadpFromInstruction(6, t0)
+ notifyWrite(t2, .pDynamic)
storeq t1, [t0]
end
macro putClosureVar()
loadisFromInstruction(3, t1)
loadConstantOrVariable(t1, t2)
- loadp JSVariableObject::m_registers[t0], t0
- loadpFromInstruction(6, t1)
- storeq t2, [t0, t1, 8]
+ loadisFromInstruction(6, t1)
+ storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
+end
+
+macro putLocalClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
+ loadpFromInstruction(5, t3)
+ btpz t3, .noVariableWatchpointSet
+ notifyWrite(t3, .pDynamic)
+.noVariableWatchpointSet:
+ loadisFromInstruction(6, t1)
+ storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
+end
+
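+# A store to a global lexical binding that is not the binding's initialization
+# must first check that the slot is no longer ValueEmpty, i.e. that the
+# variable is past its temporal dead zone; otherwise we take the slow path,
+# which throws.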
+macro checkTDZInGlobalPutToScopeIfNecessary()
+ loadisFromInstruction(4, t0)
+ andi InitializationModeMask, t0
+ rshifti InitializationModeShift, t0
+ bineq t0, NotInitialization, .noNeedForTDZCheck
+ loadpFromInstruction(6, t0)
+ loadq [t0], t0
+ bqeq t0, ValueEmpty, .pDynamic
+.noNeedForTDZCheck:
end
_llint_op_put_to_scope:
traceExecution()
loadisFromInstruction(4, t0)
- andi ResolveModeMask, t0
+ andi ResolveTypeMask, t0
-#pGlobalProperty:
- bineq t0, GlobalProperty, .pGlobalVar
+#pLocalClosureVar:
+ bineq t0, LocalClosureVar, .pGlobalProperty
+ loadVariable(1, t0)
+ putLocalClosureVar()
writeBarrierOnOperands(1, 3)
+ dispatch(7)
+
+.pGlobalProperty:
+ bineq t0, GlobalProperty, .pGlobalVar
loadWithStructureCheck(1, .pDynamic)
putProperty()
+ writeBarrierOnOperands(1, 3)
dispatch(7)
.pGlobalVar:
- bineq t0, GlobalVar, .pClosureVar
+ bineq t0, GlobalVar, .pGlobalLexicalVar
writeBarrierOnGlobalObject(3)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .pClosureVar
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ checkTDZInGlobalPutToScopeIfNecessary()
+ putGlobalVariable()
dispatch(7)
.pClosureVar:
bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
- writeBarrierOnOperands(1, 3)
loadVariable(1, t0)
putClosureVar()
+ writeBarrierOnOperands(1, 3)
dispatch(7)
.pGlobalPropertyWithVarInjectionChecks:
bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
- writeBarrierOnOperands(1, 3)
loadWithStructureCheck(1, .pDynamic)
putProperty()
+ writeBarrierOnOperands(1, 3)
dispatch(7)
.pGlobalVarWithVarInjectionChecks:
- bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks
writeBarrierOnGlobalObject(3)
varInjectionCheck(.pDynamic)
- putGlobalVar()
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ varInjectionCheck(.pDynamic)
+ checkTDZInGlobalPutToScopeIfNecessary()
+ putGlobalVariable()
dispatch(7)
.pClosureVarWithVarInjectionChecks:
- bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
- writeBarrierOnOperands(1, 3)
+ bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar
varInjectionCheck(.pDynamic)
loadVariable(1, t0)
putClosureVar()
+ writeBarrierOnOperands(1, 3)
+ dispatch(7)
+
+.pModuleVar:
+ bineq t0, ModuleVar, .pDynamic
+ callOpcodeSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error)
dispatch(7)
.pDynamic:
- callSlowPath(_llint_slow_path_put_to_scope)
+ callOpcodeSlowPath(_llint_slow_path_put_to_scope)
dispatch(7)
+
+
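+# op_get_from_arguments / op_put_to_arguments index straight into a
+# DirectArguments object's storage. The raw 24[PB, PC, 8] and 16[PB, PC, 8]
+# loads below are hand-inlined operand reads (operand index * 8 bytes).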
+_llint_op_get_from_arguments:
+ traceExecution()
+ loadVariable(2, t0)
+ loadi 24[PB, PC, 8], t1
+ loadq DirectArguments_storage[t0, t1, 8], t0
+ valueProfile(t0, 4, t1)
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(5)
+
+
+_llint_op_put_to_arguments:
+ traceExecution()
+ loadVariable(1, t0)
+ loadi 16[PB, PC, 8], t1
+ loadisFromInstruction(3, t3)
+ loadConstantOrVariable(t3, t2)
+ storeq t2, DirectArguments_storage[t0, t1, 8]
+ writeBarrierOnOperands(1, 3)
+ dispatch(4)
+
+
+_llint_op_get_parent_scope:
+ traceExecution()
+ loadVariable(2, t0)
+ loadp JSScope::m_next[t0], t0
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(3)
+
+
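+# op_profile_type appends a (value, TypeLocation, structureID) record to the
+# VM's TypeProfilerLog, taking the slow path to flush the log when the cursor
+# reaches m_logEndPtr.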
+_llint_op_profile_type:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ # t1 is holding the pointer to the typeProfilerLog.
+ loadp VM::m_typeProfilerLog[t1], t1
+ # t2 is holding the pointer to the current log entry.
+ loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
+
+ # t0 is holding the JSValue argument.
+ loadisFromInstruction(1, t3)
+ loadConstantOrVariable(t3, t0)
+
+ bqeq t0, ValueEmpty, .opProfileTypeDone
+ # Store the JSValue onto the log entry.
+ storeq t0, TypeProfilerLog::LogEntry::value[t2]
+
+ # Store the TypeLocation onto the log entry.
+ loadpFromInstruction(2, t3)
+ storep t3, TypeProfilerLog::LogEntry::location[t2]
+
+ btqz t0, tagMask, .opProfileTypeIsCell
+ storei 0, TypeProfilerLog::LogEntry::structureID[t2]
+ jmp .opProfileTypeSkipIsCell
+.opProfileTypeIsCell:
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, TypeProfilerLog::LogEntry::structureID[t2]
+.opProfileTypeSkipIsCell:
+
+ # Increment the current log entry.
+ addp sizeof TypeProfilerLog::LogEntry, t2
+ storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
+
+ loadp TypeProfilerLog::m_logEndPtr[t1], t1
+ bpneq t2, t1, .opProfileTypeDone
+ callOpcodeSlowPath(_slow_path_profile_type_clear_log)
+
+.opProfileTypeDone:
+ dispatch(6)
+
+_llint_op_profile_control_flow:
+ traceExecution()
+ loadpFromInstruction(1, t0)
+ addq 1, BasicBlockLocation::m_executionCount[t0]
+ dispatch(2)
+
+
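+# op_get_rest_length computes how many arguments spill into a rest parameter.
+# A sketch of the logic (operand 2 is the number of parameters to skip):
+#     restLength = max(argumentCountIncludingThis - 1 - numParametersToSkip, 0)
+#     result = int32-boxed restLength (the orq of tagTypeNumber below)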
+_llint_op_get_rest_length:
+ traceExecution()
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ loadisFromInstruction(2, t1)
+ bilteq t0, t1, .storeZero
+ subi t1, t0
+ jmp .boxUp
+.storeZero:
+ move 0, t0
+.boxUp:
+ orq tagTypeNumber, t0
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(3)
+
+
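+# The shadow chicken keeps a side log of frame events so the debugger can
+# reconstruct stack frames that tail calls erase. Prologue packets record the
+# real frame, its caller, callee and scope; tail packets record a marker plus
+# enough state (this, scope, CodeBlock, call site) to rebuild the erased frame.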
+_llint_op_log_shadow_chicken_prologue:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ loadp CallerFrame[cfr], t1
+ storep t1, ShadowChicken::Packet::callerFrame[t0]
+ loadp Callee[cfr], t1
+ storep t1, ShadowChicken::Packet::callee[t0]
+ loadVariable(1, t1)
+ storep t1, ShadowChicken::Packet::scope[t0]
+ dispatch(2)
+.opLogShadowChickenPrologueSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_prologue)
+ dispatch(2)
+
+
+_llint_op_log_shadow_chicken_tail:
+ traceExecution()
+ acquireShadowChickenPacket(.opLogShadowChickenTailSlow)
+ storep cfr, ShadowChicken::Packet::frame[t0]
+ storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0]
+ loadVariable(1, t1)
+ storep t1, ShadowChicken::Packet::thisValue[t0]
+ loadVariable(2, t1)
+ storep t1, ShadowChicken::Packet::scope[t0]
+ loadp CodeBlock[cfr], t1
+ storep t1, ShadowChicken::Packet::codeBlock[t0]
+ storei PC, ShadowChicken::Packet::callSiteIndex[t0]
+ dispatch(3)
+.opLogShadowChickenTailSlow:
+ callOpcodeSlowPath(_llint_slow_path_log_shadow_chicken_tail)
+ dispatch(3)