diff options
Diffstat (limited to 'Source/JavaScriptCore/jit/JITOpcodes.cpp')
-rw-r--r-- | Source/JavaScriptCore/jit/JITOpcodes.cpp | 1283 |
1 files changed, 785 insertions, 498 deletions
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp index 2bdae1914..337e0b7c0 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved. * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com> * * Redistribution and use in source and binary forms, with or without @@ -28,18 +28,22 @@ #if ENABLE(JIT) #include "JIT.h" -#include "Arguments.h" -#include "CopiedSpaceInlines.h" -#include "Debugger.h" +#include "BasicBlockLocation.h" +#include "Exception.h" #include "Heap.h" +#include "Interpreter.h" #include "JITInlines.h" #include "JSArray.h" #include "JSCell.h" #include "JSFunction.h" -#include "JSPropertyNameIterator.h" +#include "JSPropertyNameEnumerator.h" #include "LinkBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" #include "SlowPathCall.h" +#include "TypeLocation.h" +#include "TypeProfilerLog.h" #include "VirtualRegister.h" +#include "Watchdog.h" namespace JSC { @@ -59,21 +63,13 @@ void JIT::emit_op_mov(Instruction* currentInstruction) emitPutVirtualRegister(dst); } -void JIT::emit_op_captured_mov(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int src = currentInstruction[2].u.operand; - - emitGetVirtualRegister(src, regT0); - emitNotifyWrite(regT0, regT1, currentInstruction[3].u.watchpointSet); - emitPutVirtualRegister(dst); -} void JIT::emit_op_end(Instruction* currentInstruction) { RELEASE_ASSERT(returnValueGPR != callFrameRegister); emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); - restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset())); + emitRestoreCalleeSaves(); + emitFunctionEpilogue(); ret(); } @@ -87,38 +83,55 @@ void JIT::emit_op_new_object(Instruction* currentInstruction) { Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); - MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize); + MarkedAllocator* allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorFor(allocationSize); RegisterID resultReg = regT0; RegisterID allocatorReg = regT1; RegisterID scratchReg = regT2; move(TrustedImmPtr(allocator), allocatorReg); - emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg); + if (allocator) + addSlowCase(Jump()); + JumpList slowCases; + emitAllocateJSObject(resultReg, allocator, allocatorReg, TrustedImmPtr(structure), TrustedImmPtr(0), scratchReg, slowCases); + emitInitializeInlineStorage(resultReg, structure->inlineCapacity()); + addSlowCase(slowCases); emitPutVirtualRegister(currentInstruction[1].u.operand); } void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { linkSlowCase(iter); + linkSlowCase(iter); int dst = currentInstruction[1].u.operand; Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); callOperation(operationNewObject, structure); emitStoreCell(dst, returnValueGPR); } -void JIT::emit_op_check_has_instance(Instruction* currentInstruction) +void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction) { - int baseVal = currentInstruction[3].u.operand; + int dst = currentInstruction[1].u.operand; + int constructor = currentInstruction[2].u.operand; + int 
hasInstanceValue = currentInstruction[3].u.operand; + + emitGetVirtualRegister(hasInstanceValue, regT0); + + // We don't jump if we know what Symbol.hasInstance would do. + Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction())); + + emitGetVirtualRegister(constructor, regT0); - emitGetVirtualRegister(baseVal, regT0); + // Check that constructor 'ImplementsDefaultHasInstance' i.e. the object is not a C-API user nor a bound function. + test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0); + emitTagBool(regT0); + Jump done = jump(); - // Check that baseVal is a cell. - emitJumpSlowCaseIfNotJSCell(regT0, baseVal); + customhasInstanceValue.link(this); + move(TrustedImm32(ValueTrue), regT0); - // Check that baseVal 'ImplementsHasInstance'. - loadPtr(Address(regT0, JSCell::structureOffset()), regT0); - addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance))); + done.link(this); + emitPutVirtualRegister(dst); } void JIT::emit_op_instanceof(Instruction* currentInstruction) @@ -132,13 +145,12 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) emitGetVirtualRegister(value, regT2); emitGetVirtualRegister(proto, regT1); - // Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance. + // Check that proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance. emitJumpSlowCaseIfNotJSCell(regT2, value); emitJumpSlowCaseIfNotJSCell(regT1, proto); // Check that prototype is an object - loadPtr(Address(regT1, JSCell::structureOffset()), regT3); - addSlowCase(emitJumpIfNotObject(regT3)); + addSlowCase(emitJumpIfCellNotObject(regT1)); // Optimistically load the result true, and start looping. // Initially, regT1 still contains proto and regT2 still contains value. @@ -146,9 +158,11 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); Label loop(this); + addSlowCase(branch8(Equal, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType))); + // Load the prototype of the object in regT2. If this is equal to regT1 - WIN! // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again. - loadPtr(Address(regT2, JSCell::structureOffset()), regT2); + emitLoadStructure(regT2, regT2, regT3); load64(Address(regT2, Structure::prototypeOffset()), regT2); Jump isInstance = branchPtr(Equal, regT2, regT1); emitJumpIfJSCell(regT2).linkTo(loop, this); @@ -161,6 +175,24 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) emitPutVirtualRegister(dst); } +void JIT::emit_op_instanceof_custom(Instruction*) +{ + // This always goes to slow path since we expect it to be rare. 
+ addSlowCase(jump()); +} + +void JIT::emit_op_is_empty(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; + + emitGetVirtualRegister(value, regT0); + compare64(Equal, regT0, TrustedImm32(JSValue::encode(JSValue())), regT0); + + emitTagBool(regT0); + emitPutVirtualRegister(dst); +} + void JIT::emit_op_is_undefined(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; @@ -173,19 +205,19 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction) Jump done = jump(); isCell.link(this); - loadPtr(Address(regT0, JSCell::structureOffset()), regT1); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(0), regT0); Jump notMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); + emitLoadStructure(regT0, regT1, regT2); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1); comparePtr(Equal, regT0, regT1, regT0); notMasqueradesAsUndefined.link(this); done.link(this); - emitTagAsBoolImmediate(regT0); + emitTagBool(regT0); emitPutVirtualRegister(dst); } @@ -197,7 +229,7 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction) emitGetVirtualRegister(value, regT0); xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0); test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0); - emitTagAsBoolImmediate(regT0); + emitTagBool(regT0); emitPutVirtualRegister(dst); } @@ -208,72 +240,50 @@ void JIT::emit_op_is_number(Instruction* currentInstruction) emitGetVirtualRegister(value, regT0); test64(NonZero, regT0, tagTypeNumberRegister, regT0); - emitTagAsBoolImmediate(regT0); + emitTagBool(regT0); emitPutVirtualRegister(dst); } -void JIT::emit_op_is_string(Instruction* currentInstruction) +void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; int value = currentInstruction[2].u.operand; - + int type = currentInstruction[3].u.operand; + emitGetVirtualRegister(value, regT0); Jump isNotCell = emitJumpIfNotJSCell(regT0); - - loadPtr(Address(regT0, JSCell::structureOffset()), regT1); - compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0); - emitTagAsBoolImmediate(regT0); + + compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0); + emitTagBool(regT0); Jump done = jump(); - + isNotCell.link(this); move(TrustedImm32(ValueFalse), regT0); - + done.link(this); emitPutVirtualRegister(dst); } -void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) -{ - int activation = currentInstruction[1].u.operand; - Jump activationNotCreated = branchTest64(Zero, addressFor(activation)); - emitGetVirtualRegister(activation, regT0); - callOperation(operationTearOffActivation, regT0); - activationNotCreated.link(this); -} - -void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction) -{ - int arguments = currentInstruction[1].u.operand; - int activation = currentInstruction[2].u.operand; - - Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset()))); - 
emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0); - emitGetVirtualRegister(activation, regT1); - callOperation(operationTearOffArguments, regT0, regT1); - argsNotCreated.link(this); -} - -void JIT::emit_op_ret(Instruction* currentInstruction) +void JIT::emit_op_is_object(Instruction* currentInstruction) { - ASSERT(callFrameRegister != regT1); - ASSERT(regT1 != returnValueGPR); - ASSERT(returnValueGPR != callFrameRegister); + int dst = currentInstruction[1].u.operand; + int value = currentInstruction[2].u.operand; - // Return the result in %eax. - emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); + emitGetVirtualRegister(value, regT0); + Jump isNotCell = emitJumpIfNotJSCell(regT0); - // Grab the return address. - emitGetReturnPCFromCallFrameHeaderPtr(regT1); + compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0); + emitTagBool(regT0); + Jump done = jump(); - // Restore our caller's "r". - emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); + isNotCell.link(this); + move(TrustedImm32(ValueFalse), regT0); - // Return. - restoreReturnAddressBeforeReturn(regT1); - ret(); + done.link(this); + emitPutVirtualRegister(dst); } -void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction) +void JIT::emit_op_ret(Instruction* currentInstruction) { ASSERT(callFrameRegister != regT1); ASSERT(regT1 != returnValueGPR); @@ -281,33 +291,10 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction) // Return the result in %eax. emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); - Jump notJSCell = emitJumpIfNotJSCell(returnValueGPR); - loadPtr(Address(returnValueGPR, JSCell::structureOffset()), regT2); - Jump notObject = emitJumpIfNotObject(regT2); - - // Grab the return address. - emitGetReturnPCFromCallFrameHeaderPtr(regT1); - - // Restore our caller's "r". - emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); - - // Return. - restoreReturnAddressBeforeReturn(regT1); - ret(); - // Return 'this' in %eax. - notJSCell.link(this); - notObject.link(this); - emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueGPR); - - // Grab the return address. - emitGetReturnPCFromCallFrameHeaderPtr(regT1); - - // Restore our caller's "r". - emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); - - // Return. 
- restoreReturnAddressBeforeReturn(regT1); + checkStackPointerAlignment(); + emitRestoreCalleeSaves(); + emitFunctionEpilogue(); ret(); } @@ -319,7 +306,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction) emitGetVirtualRegister(src, regT0); Jump isImm = emitJumpIfNotJSCell(regT0); - addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); + addSlowCase(emitJumpIfCellObject(regT0)); isImm.link(this); if (dst != src) @@ -327,6 +314,13 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction) } +void JIT::emit_op_set_function_name(Instruction* currentInstruction) +{ + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); + emitGetVirtualRegister(currentInstruction[2].u.operand, regT1); + callOperation(operationSetFunctionName, regT0, regT1); +} + void JIT::emit_op_strcat(Instruction* currentInstruction) { JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat); @@ -350,15 +344,16 @@ void JIT::emit_op_not(Instruction* currentInstruction) void JIT::emit_op_jfalse(Instruction* currentInstruction) { unsigned target = currentInstruction[2].u.operand; - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target); - Jump isNonZero = emitJumpIfImmediateInteger(regT0); + GPRReg value = regT0; + GPRReg result = regT1; + GPRReg scratch = regT2; + bool shouldCheckMasqueradesAsUndefined = true; - addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target); - addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true))))); + emitGetVirtualRegister(currentInstruction[1].u.operand, value); + emitConvertValueToBoolean(JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()); - isNonZero.link(this); + addJump(branchTest32(Zero, result), target); } void JIT::emit_op_jeq_null(Instruction* currentInstruction) @@ -370,8 +365,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction) Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + emitLoadStructure(regT0, regT2, regT1); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target); Jump masqueradesGlobalObjectIsForeign = jump(); @@ -393,8 +388,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction) Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. 
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); + addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); + emitLoadStructure(regT0, regT2, regT1); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target); Jump wasNotImmediate = jump(); @@ -414,179 +409,58 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction) unsigned target = currentInstruction[3].u.operand; emitGetVirtualRegister(src, regT0); - addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target); + CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))); + store32(TrustedImm32(1), ¤tInstruction[4].u.operand); + addJump(jump(), target); + equal.link(this); } void JIT::emit_op_eq(Instruction* currentInstruction) { emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); - emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); + emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); compare32(Equal, regT1, regT0, regT0); - emitTagAsBoolImmediate(regT0); + emitTagBool(regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); } void JIT::emit_op_jtrue(Instruction* currentInstruction) { unsigned target = currentInstruction[2].u.operand; - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - - Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))); - addJump(emitJumpIfImmediateInteger(regT0), target); - - addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target); - addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false))))); - isZero.link(this); + GPRReg value = regT0; + GPRReg result = regT1; + GPRReg scratch = regT2; + bool shouldCheckMasqueradesAsUndefined = true; + emitGetVirtualRegister(currentInstruction[1].u.operand, value); + emitConvertValueToBoolean(JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()); + addJump(branchTest32(NonZero, result), target); } void JIT::emit_op_neq(Instruction* currentInstruction) { emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); - emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); + emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); compare32(NotEqual, regT1, regT0, regT0); - emitTagAsBoolImmediate(regT0); - - emitPutVirtualRegister(currentInstruction[1].u.operand); - -} + emitTagBool(regT0); -void JIT::emit_op_bitxor(Instruction* currentInstruction) -{ - emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); - emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); - xor64(regT1, regT0); - emitFastArithReTagImmediate(regT0, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); -} -void JIT::emit_op_bitor(Instruction* currentInstruction) -{ - emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); - emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); - or64(regT1, regT0); - emitPutVirtualRegister(currentInstruction[1].u.operand); } void JIT::emit_op_throw(Instruction* currentInstruction) { ASSERT(regT0 == returnValueGPR); + 
copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(); emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); callOperationNoExceptionCheck(operationThrow, regT0); jumpToExceptionHandler(); } -void JIT::emit_op_get_pnames(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int breakTarget = currentInstruction[5].u.operand; - - JumpList isNotObject; - - emitGetVirtualRegister(base, regT0); - if (!m_codeBlock->isKnownNotImmediate(base)) - isNotObject.append(emitJumpIfNotJSCell(regT0)); - if (base != m_codeBlock->thisRegister().offset() || m_codeBlock->isStrictMode()) { - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - isNotObject.append(emitJumpIfNotObject(regT2)); - } - - // We could inline the case where you have a valid cache, but - // this call doesn't seem to be hot. - Label isObject(this); - callOperation(operationGetPNames, regT0); - emitStoreCell(dst, returnValueGPR); - load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3); - store64(tagTypeNumberRegister, addressFor(i)); - store32(TrustedImm32(Int32Tag), intTagFor(size)); - store32(regT3, intPayloadFor(size)); - Jump end = jump(); - - isNotObject.link(this); - move(regT0, regT1); - and32(TrustedImm32(~TagBitUndefined), regT1); - addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget); - callOperation(operationToObject, base, regT0); - jump().linkTo(isObject, this); - - end.link(this); -} - -void JIT::emit_op_next_pname(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int i = currentInstruction[3].u.operand; - int size = currentInstruction[4].u.operand; - int it = currentInstruction[5].u.operand; - int target = currentInstruction[6].u.operand; - - JumpList callHasProperty; - - Label begin(this); - load32(intPayloadFor(i), regT0); - Jump end = branch32(Equal, regT0, intPayloadFor(size)); - - // Grab key @ i - loadPtr(addressFor(it), regT1); - loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2); - - load64(BaseIndex(regT2, regT0, TimesEight), regT2); - - emitPutVirtualRegister(dst, regT2); - - // Increment i - add32(TrustedImm32(1), regT0); - store32(regT0, intPayloadFor(i)); - - // Verify that i is valid: - emitGetVirtualRegister(base, regT0); - - // Test base's structure - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))))); - - // Test base's prototype chain - loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3); - loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3); - addJump(branchTestPtr(Zero, Address(regT3)), target); - - Label checkPrototype(this); - load64(Address(regT2, Structure::prototypeOffset()), regT2); - callHasProperty.append(emitJumpIfNotJSCell(regT2)); - loadPtr(Address(regT2, JSCell::structureOffset()), regT2); - callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3))); - addPtr(TrustedImm32(sizeof(Structure*)), regT3); - branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this); - - // Continue loop. - addJump(jump(), target); - - // Slow case: Ask the object if i is valid. 
- callHasProperty.link(this); - emitGetVirtualRegister(dst, regT1); - callOperation(operationHasProperty, regT0, regT1); - - // Test for valid key. - addJump(branchTest32(NonZero, regT0), target); - jump().linkTo(begin, this); - - // End of loop. - end.link(this); -} - void JIT::emit_op_push_with_scope(Instruction* currentInstruction) { - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - callOperation(operationPushWithScope, regT0); -} - -void JIT::emit_op_pop_scope(Instruction*) -{ - callOperation(operationPopScope); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope); + slowPathCall.call(); } void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) @@ -604,18 +478,18 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy // Jump slow if either is a double. First test if it's an integer, which is fine, and then test // if it's a double. - Jump leftOK = emitJumpIfImmediateInteger(regT0); - addSlowCase(emitJumpIfImmediateNumber(regT0)); + Jump leftOK = emitJumpIfInt(regT0); + addSlowCase(emitJumpIfNumber(regT0)); leftOK.link(this); - Jump rightOK = emitJumpIfImmediateInteger(regT1); - addSlowCase(emitJumpIfImmediateNumber(regT1)); + Jump rightOK = emitJumpIfInt(regT1); + addSlowCase(emitJumpIfNumber(regT1)); rightOK.link(this); if (type == OpStrictEq) compare64(Equal, regT1, regT0, regT0); else compare64(NotEqual, regT1, regT0, regT0); - emitTagAsBoolImmediate(regT0); + emitTagBool(regT0); emitPutVirtualRegister(dst); } @@ -632,27 +506,70 @@ void JIT::emit_op_nstricteq(Instruction* currentInstruction) void JIT::emit_op_to_number(Instruction* currentInstruction) { + int dstVReg = currentInstruction[1].u.operand; int srcVReg = currentInstruction[2].u.operand; emitGetVirtualRegister(srcVReg, regT0); - addSlowCase(emitJumpIfNotImmediateNumber(regT0)); + addSlowCase(emitJumpIfNotNumber(regT0)); - emitPutVirtualRegister(currentInstruction[1].u.operand); + emitValueProfilingSite(); + if (srcVReg != dstVReg) + emitPutVirtualRegister(dstVReg); } -void JIT::emit_op_push_name_scope(Instruction* currentInstruction) +void JIT::emit_op_to_string(Instruction* currentInstruction) { - emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); - callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand); + int srcVReg = currentInstruction[2].u.operand; + emitGetVirtualRegister(srcVReg, regT0); + + addSlowCase(emitJumpIfNotJSCell(regT0)); + addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType))); + + emitPutVirtualRegister(currentInstruction[1].u.operand); } void JIT::emit_op_catch(Instruction* currentInstruction) { + restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(); + + move(TrustedImmPtr(m_vm), regT3); + load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister); + storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset())); + + addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister); + + callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler); + Jump isCatchableException = branchTest32(Zero, returnValueGPR); + jumpToExceptionHandler(); + isCatchableException.link(this); + move(TrustedImmPtr(m_vm), regT3); - load64(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister); load64(Address(regT3, VM::exceptionOffset()), regT0); 
store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset())); emitPutVirtualRegister(currentInstruction[1].u.operand); + + load64(Address(regT0, Exception::valueOffset()), regT0); + emitPutVirtualRegister(currentInstruction[2].u.operand); +} + +void JIT::emit_op_assert(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert); + slowPathCall.call(); +} + +void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment); + slowPathCall.call(); +} + +void JIT::emit_op_get_parent_scope(Instruction* currentInstruction) +{ + int currentScope = currentInstruction[2].u.operand; + emitGetVirtualRegister(currentScope, regT0); + loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); + emitStoreCell(currentInstruction[1].u.operand, regT0); } void JIT::emit_op_switch_imm(Instruction* currentInstruction) @@ -664,7 +581,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction) // create jump table for switch destinations, track this switch statement. SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate)); - jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); + jumpTable->ensureCTITable(); emitGetVirtualRegister(scrutinee, regT0); callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex); @@ -680,7 +597,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction) // create jump table for switch destinations, track this switch statement. SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character)); - jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); + jumpTable->ensureCTITable(); emitGetVirtualRegister(scrutinee, regT0); callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex); @@ -702,12 +619,6 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction) jump(returnValueGPR); } -void JIT::emit_op_throw_static_error(Instruction* currentInstruction) -{ - move(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))), regT0); - callOperation(operationThrowStaticError, regT0, currentInstruction[2].u.operand); -} - void JIT::emit_op_debug(Instruction* currentInstruction) { load32(codeBlock()->debuggerRequestsAddress(), regT0); @@ -724,12 +635,12 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = emitJumpIfNotJSCell(regT0); - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(0), regT0); Jump wasNotMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); + emitLoadStructure(regT0, regT2, regT1); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); comparePtr(Equal, regT0, regT2, regT0); @@ -743,7 +654,7 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) wasNotImmediate.link(this); wasNotMasqueradesAsUndefined.link(this); - 
emitTagAsBoolImmediate(regT0); + emitTagBool(regT0); emitPutVirtualRegister(dst); } @@ -756,12 +667,12 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = emitJumpIfNotJSCell(regT0); - loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(1), regT0); Jump wasNotMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); + emitLoadStructure(regT0, regT2, regT1); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); comparePtr(NotEqual, regT0, regT2, regT0); @@ -775,53 +686,30 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) wasNotImmediate.link(this); wasNotMasqueradesAsUndefined.link(this); - emitTagAsBoolImmediate(regT0); + emitTagBool(regT0); emitPutVirtualRegister(dst); } -void JIT::emit_op_enter(Instruction* currentInstruction) +void JIT::emit_op_enter(Instruction*) { - emitEnterOptimizationCheck(); - // Even though CTI doesn't use them, we initialize our constant // registers to zap stale pointers, to avoid unnecessarily prolonging // object lifetime and increasing GC pressure. size_t count = m_codeBlock->m_numVars; - for (size_t j = 0; j < count; ++j) + for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j) emitInitRegister(virtualRegisterForLocal(j).offset()); - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter); - slowPathCall.call(); -} - -void JIT::emit_op_create_activation(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - - Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst)); - callOperation(operationCreateActivation, 0); - emitStoreCell(dst, returnValueGPR); - activationCreated.link(this); -} - -void JIT::emit_op_create_arguments(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; + emitWriteBarrier(m_codeBlock); - Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst)); - - callOperation(operationCreateArguments); - emitStoreCell(dst, returnValueGPR); - emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)), returnValueGPR); - - argsCreated.link(this); + emitEnterOptimizationCheck(); } -void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction) +void JIT::emit_op_get_scope(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - - store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst)); + emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0); + loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); + emitStoreCell(dst, regT0); } void JIT::emit_op_to_this(Instruction* currentInstruction) @@ -830,74 +718,73 @@ void JIT::emit_op_to_this(Instruction* currentInstruction) emitGetVirtualRegister(currentInstruction[1].u.operand, regT1); emitJumpSlowCaseIfNotJSCell(regT1); - loadPtr(Address(regT1, JSCell::structureOffset()), regT0); - addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType))); + addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), 
TrustedImm32(FinalObjectType))); loadPtr(cachedStructure, regT2); - addSlowCase(branchPtr(NotEqual, regT0, regT2)); -} - -void JIT::emit_op_get_callee(Instruction* currentInstruction) -{ - int result = currentInstruction[1].u.operand; - WriteBarrierBase<JSCell>* cachedFunction = ¤tInstruction[2].u.jsCell; - emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0); - - loadPtr(cachedFunction, regT2); - addSlowCase(branchPtr(NotEqual, regT0, regT2)); - - emitPutVirtualRegister(result); -} - -void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee); - slowPathCall.call(); + addSlowCase(branchTestPtr(Zero, regT2)); + load32(Address(regT2, Structure::structureIDOffset()), regT2); + addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2)); } void JIT::emit_op_create_this(Instruction* currentInstruction) { int callee = currentInstruction[2].u.operand; + WriteBarrierBase<JSCell>* cachedFunction = ¤tInstruction[4].u.jsCell; RegisterID calleeReg = regT0; + RegisterID rareDataReg = regT4; RegisterID resultReg = regT0; RegisterID allocatorReg = regT1; RegisterID structureReg = regT2; + RegisterID cachedFunctionReg = regT4; RegisterID scratchReg = regT3; emitGetVirtualRegister(callee, calleeReg); - loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg); - loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg); + addSlowCase(branch8(NotEqual, Address(calleeReg, JSCell::typeInfoTypeOffset()), TrustedImm32(JSFunctionType))); + loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg); + addSlowCase(branchTestPtr(Zero, rareDataReg)); + loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg); + loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg); addSlowCase(branchTestPtr(Zero, allocatorReg)); - emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg); + loadPtr(cachedFunction, cachedFunctionReg); + Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects())); + addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg)); + hasSeenMultipleCallees.link(this); + + JumpList slowCases; + emitAllocateJSObject(resultReg, nullptr, allocatorReg, structureReg, TrustedImmPtr(0), scratchReg, slowCases); + emitGetVirtualRegister(callee, scratchReg); + loadPtr(Address(scratchReg, JSFunction::offsetOfRareData()), scratchReg); + load32(Address(scratchReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), scratchReg); + emitInitializeInlineStorage(resultReg, scratchReg); + addSlowCase(slowCases); emitPutVirtualRegister(currentInstruction[1].u.operand); } void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { + linkSlowCase(iter); // Callee::m_type != JSFunctionType. 
+ linkSlowCase(iter); // doesn't have rare data linkSlowCase(iter); // doesn't have an allocation profile - linkSlowCase(iter); // allocation failed + linkSlowCase(iter); // allocation failed (no allocator) + linkSlowCase(iter); // allocation failed (allocator empty) + linkSlowCase(iter); // cached function didn't match JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this); slowPathCall.call(); } -void JIT::emit_op_profile_will_call(Instruction* currentInstruction) +void JIT::emit_op_check_tdz(Instruction* currentInstruction) { - Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress())); emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - callOperation(operationProfileWillCall, regT0); - profilerDone.link(this); + addSlowCase(branchTest64(Zero, regT0)); } -void JIT::emit_op_profile_did_call(Instruction* currentInstruction) +void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { - Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress())); - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - callOperation(operationProfileDidCall, regT0); - profilerDone.link(this); + linkSlowCase(iter); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error); + slowPathCall.call(); } @@ -908,6 +795,7 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn linkSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); + linkSlowCase(iter); JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this); slowPathCall.call(); @@ -929,39 +817,11 @@ void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry> slowPathCall.call(); } -void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - callOperation(operationConvertJSValueToBoolean, regT0); - emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), currentInstruction[2].u.operand); // inverted! 
-} - -void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - callOperation(operationConvertJSValueToBoolean, regT0); - emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), currentInstruction[2].u.operand); -} - -void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor); - slowPathCall.call(); -} - -void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor); - slowPathCall.call(); -} - void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { linkSlowCase(iter); callOperation(operationCompareEq, regT0, regT1); - emitTagAsBoolImmediate(returnValueGPR); + emitTagBool(returnValueGPR); emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); } @@ -970,7 +830,7 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry> linkSlowCase(iter); callOperation(operationCompareEq, regT0, regT1); xor32(TrustedImm32(0x1), regT0); - emitTagAsBoolImmediate(returnValueGPR); + emitTagBool(returnValueGPR); emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); } @@ -992,33 +852,35 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase slowPathCall.call(); } -void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) +void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { int dst = currentInstruction[1].u.operand; int value = currentInstruction[2].u.operand; - int baseVal = currentInstruction[3].u.operand; + int proto = currentInstruction[3].u.operand; - linkSlowCaseIfNotJSCell(iter, baseVal); + linkSlowCaseIfNotJSCell(iter, value); + linkSlowCaseIfNotJSCell(iter, proto); + linkSlowCase(iter); linkSlowCase(iter); emitGetVirtualRegister(value, regT0); - emitGetVirtualRegister(baseVal, regT1); - callOperation(operationCheckHasInstance, dst, regT0, regT1); - - emitJumpSlowToHot(jump(), currentInstruction[4].u.operand); + emitGetVirtualRegister(proto, regT1); + callOperation(operationInstanceOf, dst, regT0, regT1); } -void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) +void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { int dst = currentInstruction[1].u.operand; int value = currentInstruction[2].u.operand; - int proto = currentInstruction[3].u.operand; + int constructor = currentInstruction[3].u.operand; + int hasInstanceValue = currentInstruction[4].u.operand; - linkSlowCaseIfNotJSCell(iter, value); - linkSlowCaseIfNotJSCell(iter, proto); linkSlowCase(iter); emitGetVirtualRegister(value, regT0); - emitGetVirtualRegister(proto, regT1); - callOperation(operationInstanceOf, dst, regT0, regT1); + emitGetVirtualRegister(constructor, regT1); + emitGetVirtualRegister(hasInstanceValue, regT2); + callOperation(operationInstanceOfCustom, regT0, regT1, regT2); + emitTagBool(returnValueGPR); + emitPutVirtualRegister(dst, returnValueGPR); } void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -1029,125 +891,69 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, 
Vector<SlowCase slowPathCall.call(); } -void JIT::emit_op_get_arguments_length(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int argumentsRegister = currentInstruction[2].u.operand; - addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister))); - emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0); - sub32(TrustedImm32(1), regT0); - emitFastArithReTagImmediate(regT0, regT0); - emitPutVirtualRegister(dst, regT0); -} - -void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - callOperation(operationGetArgumentsLength, dst, base); -} - -void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction) +void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { - int dst = currentInstruction[1].u.operand; - int argumentsRegister = currentInstruction[2].u.operand; - int property = currentInstruction[3].u.operand; - addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister))); - emitGetVirtualRegister(property, regT1); - addSlowCase(emitJumpIfNotImmediateInteger(regT1)); - add32(TrustedImm32(1), regT1); - // regT1 now contains the integer index of the argument we want, including this - emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2); - addSlowCase(branch32(AboveOrEqual, regT1, regT2)); - - signExtend32ToPtr(regT1, regT1); - load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0); - emitValueProfilingSite(); - emitPutVirtualRegister(dst, regT0); -} + linkSlowCase(iter); // Not JSCell. + linkSlowCase(iter); // Not JSString. -void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - int dst = currentInstruction[1].u.operand; - int arguments = currentInstruction[2].u.operand; - int property = currentInstruction[3].u.operand; - - linkSlowCase(iter); - Jump skipArgumentsCreation = jump(); - - linkSlowCase(iter); - linkSlowCase(iter); - callOperation(operationCreateArguments); - emitStoreCell(arguments, returnValueGPR); - emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)), returnValueGPR); - - skipArgumentsCreation.link(this); - emitGetVirtualRegister(arguments, regT0); - emitGetVirtualRegister(property, regT1); - callOperation(WithProfile, operationGetByValGeneric, dst, regT0, regT1); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string); + slowPathCall.call(); } #endif // USE(JSVALUE64) -void JIT::emit_op_touch_entry(Instruction* currentInstruction) -{ - if (m_codeBlock->symbolTable()->m_functionEnteredOnce.hasBeenInvalidated()) - return; - - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_touch_entry); - slowPathCall.call(); -} - void JIT::emit_op_loop_hint(Instruction*) { // Emit the JIT optimization check: if (canBeOptimized()) { - if (Options::enableOSREntryInLoops()) { - addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()), - AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()))); - } else { - // Add with saturation. 
- move(TrustedImmPtr(m_codeBlock->addressOfJITExecuteCounter()), regT3); - load32(regT3, regT2); - Jump dontAdd = branch32( - GreaterThan, regT2, - TrustedImm32(std::numeric_limits<int32_t>::max() - Options::executionCounterIncrementForLoop())); - add32(TrustedImm32(Options::executionCounterIncrementForLoop()), regT2); - store32(regT2, regT3); - dontAdd.link(this); - } + addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()), + AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()))); } - - // Emit the watchdog timer check: - if (m_vm->watchdog.isEnabled()) - addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress()))); } void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter) { #if ENABLE(DFG_JIT) // Emit the slow path for the JIT optimization check: - if (canBeOptimized() && Options::enableOSREntryInLoops()) { + if (canBeOptimized()) { linkSlowCase(iter); - + + copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(); + callOperation(operationOptimize, m_bytecodeOffset); Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR); + if (!ASSERT_DISABLED) { + Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000)))); + abortWithReason(JITUnreasonableLoopHintJumpTarget); + ok.link(this); + } jump(returnValueGPR); noOptimizedEntry.link(this); emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint)); } +#else + UNUSED_PARAM(iter); #endif +} - // Emit the slow path of the watchdog timer check: - if (m_vm->watchdog.isEnabled()) { - linkSlowCase(iter); - callOperation(operationHandleWatchdogTimer); +void JIT::emit_op_throw_static_error(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_static_error); + slowPathCall.call(); +} - emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint)); - } +void JIT::emit_op_watchdog(Instruction*) +{ + ASSERT(m_vm->watchdog()); + addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog()->timerDidFireAddress()))); +} +void JIT::emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator& iter) +{ + ASSERT(m_vm->watchdog()); + linkSlowCase(iter); + callOperation(operationHandleWatchdogTimer); } void JIT::emit_op_new_regexp(Instruction* currentInstruction) @@ -1155,36 +961,88 @@ void JIT::emit_op_new_regexp(Instruction* currentInstruction) callOperation(operationNewRegexp, currentInstruction[1].u.operand, m_codeBlock->regexp(currentInstruction[2].u.operand)); } -void JIT::emit_op_new_func(Instruction* currentInstruction) +void JIT::emitNewFuncCommon(Instruction* currentInstruction) { Jump lazyJump; int dst = currentInstruction[1].u.operand; - if (currentInstruction[3].u.operand) { -#if USE(JSVALUE32_64) - lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag)); + +#if USE(JSVALUE64) + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); #else - lazyJump = branchTest64(NonZero, addressFor(dst)); + emitLoadPayload(currentInstruction[2].u.operand, regT0); #endif + FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand); + + OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode); + if (opcodeID == op_new_func) + callOperation(operationNewFunction, dst, regT0, funcExec); + else if (opcodeID == op_new_generator_func) + callOperation(operationNewGeneratorFunction, dst, regT0, funcExec); + else { + ASSERT(opcodeID == 
op_new_async_func); + callOperation(operationNewAsyncFunction, dst, regT0, funcExec); } +} - FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand); - callOperation(operationNewFunction, dst, funcExec); +void JIT::emit_op_new_func(Instruction* currentInstruction) +{ + emitNewFuncCommon(currentInstruction); +} - if (currentInstruction[3].u.operand) - lazyJump.link(this); +void JIT::emit_op_new_generator_func(Instruction* currentInstruction) +{ + emitNewFuncCommon(currentInstruction); } -void JIT::emit_op_new_captured_func(Instruction* currentInstruction) +void JIT::emit_op_new_async_func(Instruction* currentInstruction) { - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_captured_func); - slowPathCall.call(); + emitNewFuncCommon(currentInstruction); } -void JIT::emit_op_new_func_exp(Instruction* currentInstruction) +void JIT::emitNewFuncExprCommon(Instruction* currentInstruction) { + Jump notUndefinedScope; int dst = currentInstruction[1].u.operand; - FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[2].u.operand); - callOperation(operationNewFunction, dst, funcExpr); +#if USE(JSVALUE64) + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); + notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined()))); + store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst)); +#else + emitLoadPayload(currentInstruction[2].u.operand, regT0); + notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag)); + emitStore(dst, jsUndefined()); +#endif + Jump done = jump(); + notUndefinedScope.link(this); + + FunctionExecutable* function = m_codeBlock->functionExpr(currentInstruction[3].u.operand); + OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode); + + if (opcodeID == op_new_func_exp) + callOperation(operationNewFunction, dst, regT0, function); + else if (opcodeID == op_new_generator_func_exp) + callOperation(operationNewGeneratorFunction, dst, regT0, function); + else { + ASSERT(opcodeID == op_new_async_func_exp); + callOperation(operationNewAsyncFunction, dst, regT0, function); + } + + done.link(this); +} + +void JIT::emit_op_new_func_exp(Instruction* currentInstruction) +{ + emitNewFuncExprCommon(currentInstruction); +} + +void JIT::emit_op_new_generator_func_exp(Instruction* currentInstruction) +{ + emitNewFuncExprCommon(currentInstruction); +} + +void JIT::emit_op_new_async_func_exp(Instruction* currentInstruction) +{ + emitNewFuncExprCommon(currentInstruction); } void JIT::emit_op_new_array(Instruction* currentInstruction) @@ -1221,16 +1079,445 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction) callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size); } -void JIT::emitSlow_op_captured_mov(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) +void JIT::emit_op_new_array_with_spread(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_array_with_spread); + slowPathCall.call(); +} + +void JIT::emit_op_spread(Instruction* currentInstruction) +{ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_spread); + slowPathCall.call(); +} + +#if USE(JSVALUE64) +void JIT::emit_op_has_structure_property(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = 
currentInstruction[2].u.operand; + int enumerator = currentInstruction[4].u.operand; + + emitGetVirtualRegister(base, regT0); + emitGetVirtualRegister(enumerator, regT1); + emitJumpSlowCaseIfNotJSCell(regT0, base); + + load32(Address(regT0, JSCell::structureIDOffset()), regT0); + addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset()))); + + move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); + emitPutVirtualRegister(dst); +} + +void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) +{ + Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex; + + PatchableJump badType; + + // FIXME: Add support for other types like TypedArrays and Arguments. + // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. + JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType); + move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); + Jump done = jump(); + + LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock); + + patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); + patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); + + patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone)); + + byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( + m_codeBlock, patchBuffer, + ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value())); + + MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code())); + MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric)); +} + +void JIT::emit_op_has_indexed_property(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int property = currentInstruction[3].u.operand; + ArrayProfile* profile = currentInstruction[4].u.arrayProfile; + ByValInfo* byValInfo = m_codeBlock->addByValInfo(); + + emitGetVirtualRegisters(base, regT0, property, regT1); + + // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter. + // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if + // number was signed since m_vectorLength is always less than intmax (since the total allocation + // size is always less than 4Gb). As such zero extending will have been correct (and extending the value + // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign + // extending since it makes it easier to re-tag the value in the slow case. + zeroExtend32ToPtr(regT1, regT1); + + emitJumpSlowCaseIfNotJSCell(regT0, base); + emitArrayProfilingSiteWithCell(regT0, regT2, profile); + and32(TrustedImm32(IndexingShapeMask), regT2); + + JITArrayMode mode = chooseArrayMode(profile); + PatchableJump badType; + + // FIXME: Add support for other types like TypedArrays and Arguments. + // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. 
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType); + + move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); + + addSlowCase(badType); + addSlowCase(slowCases); + + Label done = label(); + + emitPutVirtualRegister(dst); + + Label nextHotPath = label(); + + m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath)); +} + +void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int property = currentInstruction[3].u.operand; + ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; + + linkSlowCaseIfNotJSCell(iter, base); // base cell check + linkSlowCase(iter); // base array check + linkSlowCase(iter); // vector length check + linkSlowCase(iter); // empty value + + Label slowPath = label(); + + emitGetVirtualRegister(base, regT0); + emitGetVirtualRegister(property, regT1); + Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo); + + m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; + m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; + m_byValInstructionIndex++; +} + +void JIT::emit_op_get_direct_pname(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int index = currentInstruction[4].u.operand; + int enumerator = currentInstruction[5].u.operand; + + // Check that base is a cell + emitGetVirtualRegister(base, regT0); + emitJumpSlowCaseIfNotJSCell(regT0, base); + + // Check the structure + emitGetVirtualRegister(enumerator, regT2); + load32(Address(regT0, JSCell::structureIDOffset()), regT1); + addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset()))); + + // Compute the offset + emitGetVirtualRegister(index, regT1); + // If index is less than the enumerator's cached inline storage, then it's an inline access + Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset())); + addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0); + signExtend32ToPtr(regT1, regT1); + load64(BaseIndex(regT0, regT1, TimesEight), regT0); + + Jump done = jump(); + + // Otherwise it's out of line + outOfLineAccess.link(this); + loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0); + sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1); + neg32(regT1); + signExtend32ToPtr(regT1, regT1); + int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); + load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0); + + done.link(this); + emitValueProfilingSite(); + emitPutVirtualRegister(dst, regT0); +} + +void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { - VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet; - if (!set || set->state() == IsInvalidated) - return; + int base = currentInstruction[2].u.operand; + linkSlowCaseIfNotJSCell(iter, base); linkSlowCase(iter); - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_captured_mov); + + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname); slowPathCall.call(); } +void 
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int enumerator = currentInstruction[2].u.operand;
+    int index = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegister(index, regT0);
+    emitGetVirtualRegister(enumerator, regT1);
+    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
+
+    move(TrustedImm64(JSValue::encode(jsNull())), regT0);
+
+    Jump done = jump();
+    inBounds.link(this);
+
+    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+    signExtend32ToPtr(regT0, regT0);
+    load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+
+    done.link(this);
+    emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int enumerator = currentInstruction[2].u.operand;
+    int index = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegister(index, regT0);
+    emitGetVirtualRegister(enumerator, regT1);
+    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
+    move(TrustedImm64(JSValue::encode(jsNull())), regT0);
+
+    Jump done = jump();
+    inBounds.link(this);
+
+    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+    signExtend32ToPtr(regT0, regT0);
+    load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+
+    done.link(this);
+    emitPutVirtualRegister(dst);
+}
+
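Note: both pname opcodes above are the same bounds-checked vector read; they differ only in which cached end index they consult. Roughly, in C++ (field names are hypothetical stand-ins for the JSPropertyNameEnumerator accessors):

    #include <cstdint>

    // Hypothetical mirror of the enumerator fields read above.
    struct FakeEnumerator {
        uint32_t endStructurePropertyIndex;
        uint32_t endGenericPropertyIndex;
        const uint64_t* cachedPropertyNamesVector; // encoded JSString* values
    };

    constexpr uint64_t kEncodedNull = 0x02; // JSC's 64-bit null encoding; illustrative here

    // enumerator_structure_pname; the generic variant swaps in endGenericPropertyIndex.
    uint64_t structurePname(const FakeEnumerator& enumerator, uint32_t index)
    {
        if (index >= enumerator.endStructurePropertyIndex)
            return kEncodedNull;                            // out of bounds: yield null
        return enumerator.cachedPropertyNamesVector[index]; // cached name for this slot
    }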
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+    TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+    int valueToProfile = currentInstruction[1].u.operand;
+
+    emitGetVirtualRegister(valueToProfile, regT0);
+
+    JumpList jumpToEnd;
+
+    jumpToEnd.append(branchTest64(Zero, regT0));
+
+    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+    // These typechecks are inlined to match those of the 64-bit JSValue type checks.
+    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+        jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined()))));
+    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+        jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))));
+    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
+        move(regT0, regT1);
+        and64(TrustedImm32(~1), regT1);
+        jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse)));
+    } else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
+        jumpToEnd.append(emitJumpIfInt(regT0));
+    else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
+        jumpToEnd.append(emitJumpIfNumber(regT0));
+    else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+        Jump isNotCell = emitJumpIfNotJSCell(regT0);
+        jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+        isNotCell.link(this);
+    }
+
+    // Load the type profiling log into T2.
+    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
+    // Load the next log entry into T1.
+    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
+
+    // Store the JSValue onto the log entry.
+    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
+
+    // Store the structureID of the cell if T0 is a cell; otherwise store 0 on the log entry.
+    Jump notCell = emitJumpIfNotJSCell(regT0);
+    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+    Jump skipIsCell = jump();
+    notCell.link(this);
+    store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+    skipIsCell.link(this);
+
+    // Store the typeLocation on the log entry.
+    move(TrustedImmPtr(cachedTypeLocation), regT0);
+    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+    // Increment the current log entry.
+    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+    store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+    Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
+    // Clear the log if we're at the end of the log.
+    callOperation(operationProcessTypeProfilerLog);
+    skipClearLog.link(this);
+
+    jumpToEnd.link(this);
+}
+
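Note: the emit_op_profile_type sequence is an unrolled ring-buffer append. In C++ terms it behaves like the sketch below, where processLog() stands in for operationProcessTypeProfilerLog and the LogEntry fields mirror the offsets used above:

    #include <cstdint>

    struct TypeLocationStub; // stands in for JSC's TypeLocation

    struct LogEntry {
        uint64_t value;           // profiled JSValue bits
        TypeLocationStub* location;
        uint32_t structureID;     // 0 when the value is not a cell
    };

    struct Log {
        LogEntry* logBegin;
        LogEntry* currentLogEntry;
        LogEntry* logEnd;
    };

    // Stands in for operationProcessTypeProfilerLog: drain entries, reset the cursor.
    void processLog(Log& log)
    {
        // ... consume [logBegin, currentLogEntry) into the profiler here ...
        log.currentLogEntry = log.logBegin;
    }

    void appendToLog(Log& log, uint64_t value, uint32_t structureID, TypeLocationStub* location)
    {
        LogEntry* entry = log.currentLogEntry;
        entry->value = value;
        entry->structureID = structureID; // the JIT stores 0 here for non-cells
        entry->location = location;
        if (++log.currentLogEntry == log.logEnd)
            processLog(log); // flush when the buffer is full
    }

The predictive type checks earlier in the function exist only to skip this append when the value matches the last seen type, keeping the common case cheap.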
+void JIT::emit_op_log_shadow_chicken_prologue(Instruction* currentInstruction)
+{
+    updateTopCallFrame();
+    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if nonArgGPR0 overlaps with regT0 or regT2.");
+    GPRReg shadowPacketReg = regT0;
+    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
+    GPRReg scratch2Reg = regT2;
+    ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT3);
+    logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
+}
+
+void JIT::emit_op_log_shadow_chicken_tail(Instruction* currentInstruction)
+{
+    updateTopCallFrame();
+    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if nonArgGPR0 overlaps with regT0 or regT2.");
+    GPRReg shadowPacketReg = regT0;
+    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
+    GPRReg scratch2Reg = regT2;
+    ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT2);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT3);
+    logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset));
+}
+
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length);
+    slowPathCall.call();
+}
+
+void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_has_generic_property(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_to_index_string(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_profile_control_flow(Instruction* currentInstruction)
+{
+    BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
+#if USE(JSVALUE64)
+    basicBlockLocation->emitExecuteCode(*this);
+#else
+    basicBlockLocation->emitExecuteCode(*this, regT0);
+#endif
+}
+
+void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_create_cloned_arguments(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_cloned_arguments);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_argument_count(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    load32(payloadFor(CallFrameSlot::argumentCount), regT0);
+    sub32(TrustedImm32(1), regT0);
+    JSValueRegs result = JSValueRegs::withTwoAvailableRegs(regT0, regT1);
+    boxInt32(regT0, result);
+    emitPutVirtualRegister(dst, result);
+}
+
+void JIT::emit_op_create_rest(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_rest);
+    slowPathCall.call();
+}
+
+void JIT::emit_op_get_rest_length(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;
+    load32(payloadFor(CallFrameSlot::argumentCount), regT0);
+    sub32(TrustedImm32(1), regT0);
+    Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
+    sub32(Imm32(numParamsToSkip), regT0);
+#if USE(JSVALUE64)
+    boxInt32(regT0, JSValueRegs(regT0));
+#endif
+    Jump done = jump();
+
+    zeroLength.link(this);
+#if USE(JSVALUE64)
+    move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0);
+#else
+    move(TrustedImm32(0), regT0);
+#endif
+
+    done.link(this);
+#if USE(JSVALUE64)
+    emitPutVirtualRegister(dst, regT0);
+#else
+    move(TrustedImm32(JSValue::Int32Tag), regT1);
+    emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0));
+#endif
+}
+
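Note: emit_op_get_rest_length reduces to saturating arithmetic on the frame's argument count. A direct scalar equivalent (argumentCountIncludingThis models the CallFrameSlot::argumentCount payload):

    #include <cstdint>

    // rest.length for `function f(a, b, ...rest)` uses numParamsToSkip == 2.
    int32_t restLength(int32_t argumentCountIncludingThis, uint32_t numParamsToSkip)
    {
        int32_t argumentCount = argumentCountIncludingThis - 1; // drop `this`
        if (argumentCount <= static_cast<int32_t>(numParamsToSkip))
            return 0; // the zeroLength branch
        return argumentCount - static_cast<int32_t>(numParamsToSkip);
    }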
+void JIT::emit_op_get_argument(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int index = currentInstruction[2].u.operand;
+#if USE(JSVALUE64)
+    JSValueRegs resultRegs(regT0);
+#else
+    JSValueRegs resultRegs(regT1, regT0);
+#endif
+
+    load32(payloadFor(CallFrameSlot::argumentCount), regT2);
+    Jump argumentOutOfBounds = branch32(LessThanOrEqual, regT2, TrustedImm32(index));
+    loadValue(addressFor(CallFrameSlot::thisArgument + index), resultRegs);
+    Jump done = jump();
+
+    argumentOutOfBounds.link(this);
+    moveValue(jsUndefined(), resultRegs);
+
+    done.link(this);
+    emitValueProfilingSite();
+    emitPutVirtualRegister(dst, resultRegs);
+}
+
 } // namespace JSC
 
 #endif // ENABLE(JIT)
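Closing note: emit_op_get_argument's bounds check corresponds to this scalar logic, with std::nullopt standing in for jsUndefined() and `slots` pointing at the frame's thisArgument slot (so bytecode index 1 is the first real argument):

    #include <cstdint>
    #include <optional>

    std::optional<uint64_t> getArgument(const uint64_t* slots, int32_t argumentCountIncludingThis, int32_t index)
    {
        if (argumentCountIncludingThis <= index)
            return std::nullopt; // out of bounds: produce undefined
        return slots[index];     // addressFor(thisArgument + index)
    }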