path: root/Source/JavaScriptCore/jit/Repatch.cpp
author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/jit/Repatch.cpp
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/jit/Repatch.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.cpp  2095
1 file changed, 748 insertions, 1347 deletions
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
index 9c31722e8..bab11b696 100644
--- a/Source/JavaScriptCore/jit/Repatch.cpp
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,55 +28,60 @@
#if ENABLE(JIT)
+#include "BinarySwitch.h"
#include "CCallHelpers.h"
-#include "CallFrameInlines.h"
+#include "CallFrameShuffler.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
+#include "DOMJITGetterSetter.h"
+#include "DirectArguments.h"
#include "FTLThunks.h"
+#include "FunctionCodeBlock.h"
#include "GCAwareJITStubRoutine.h"
+#include "GetterSetter.h"
+#include "GetterSetterAccessCase.h"
+#include "ICStats.h"
+#include "InlineAccess.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JIT.h"
+#include "JITInlines.h"
+#include "JSCInlines.h"
+#include "JSModuleNamespaceObject.h"
+#include "JSWebAssembly.h"
#include "LinkBuffer.h"
-#include "Operations.h"
-#include "PolymorphicPutByIdList.h"
-#include "RepatchBuffer.h"
+#include "ModuleNamespaceAccessCase.h"
+#include "PolymorphicAccess.h"
+#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "StackAlignment.h"
#include "StructureRareDataInlines.h"
#include "StructureStubClearingWatchpoint.h"
+#include "StructureStubInfo.h"
#include "ThunkGenerators.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
-// Beware: in this code, it is not safe to assume anything about the following registers
-// that would ordinarily have well-known values:
-// - tagTypeNumberRegister
-// - tagMaskRegister
-// - callFrameRegister **
-//
-// We currently only use the callFrameRegister for closure call patching, and we're not going to
-// give the FTL closure call patching support until we switch to the C stack - but when we do that,
-// callFrameRegister will disappear.
-
-static FunctionPtr readCallTarget(RepatchBuffer& repatchBuffer, CodeLocationCall call)
+static FunctionPtr readCallTarget(CodeBlock* codeBlock, CodeLocationCall call)
{
FunctionPtr result = MacroAssembler::readCallTarget(call);
#if ENABLE(FTL_JIT)
- CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
return FunctionPtr(codeBlock->vm()->ftlThunks->keyForSlowPathCallThunk(
MacroAssemblerCodePtr::createFromExecutableAddress(
result.executableAddress())).callTarget());
}
#else
- UNUSED_PARAM(repatchBuffer);
+ UNUSED_PARAM(codeBlock);
#endif // ENABLE(FTL_JIT)
return result;
}
-static void repatchCall(RepatchBuffer& repatchBuffer, CodeLocationCall call, FunctionPtr newCalleeFunction)
+void ftlThunkAwareRepatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr newCalleeFunction)
{
#if ENABLE(FTL_JIT)
- CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
VM& vm = *codeBlock->vm();
FTL::Thunks& thunks = *vm.ftlThunks;
@@ -87,655 +92,247 @@ static void repatchCall(RepatchBuffer& repatchBuffer, CodeLocationCall call, Fun
newCalleeFunction = FunctionPtr(
thunks.getSlowPathCallThunk(vm, key).code().executableAddress());
}
+#else // ENABLE(FTL_JIT)
+ UNUSED_PARAM(codeBlock);
#endif // ENABLE(FTL_JIT)
- repatchBuffer.relink(call, newCalleeFunction);
+ MacroAssembler::repatchCall(call, newCalleeFunction);
}
-static void repatchCall(CodeBlock* codeblock, CodeLocationCall call, FunctionPtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchCall(repatchBuffer, call, newCalleeFunction);
-}
+enum InlineCacheAction {
+ GiveUpOnCache,
+ RetryCacheLater,
+ AttemptToCache
+};
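Note: this tri-state replaces the plain bool that the old tryCache*/tryBuild* helpers returned. The following is a rough standalone model of the intended protocol, for orientation only; InlineCacheAction is the only name taken from the patch, every other type and function here is invented for the sketch, and in the real code the demotion goes through ftlThunkAwareRepatchCall() as shown further down.

    #include <cstdio>

    enum InlineCacheAction { GiveUpOnCache, RetryCacheLater, AttemptToCache };

    struct CallSite {
        bool usesGenericSlowPath = false; // once set, caching is never attempted again
    };

    // Stand-in for a tryCache* helper. In the patch, AttemptToCache is what
    // actionForCell() returns to say "go ahead and build a case"; the tryCache*
    // functions themselves end up returning either RetryCacheLater (transient
    // failure, e.g. a freshly flattened dictionary) or GiveUpOnCache.
    static InlineCacheAction tryCacheSomething(int attempt)
    {
        if (attempt == 0)
            return RetryCacheLater;
        return attempt < 3 ? RetryCacheLater : GiveUpOnCache;
    }

    static void repatchSomething(CallSite& site, int attempt)
    {
        if (site.usesGenericSlowPath)
            return; // already demoted to the generic operation
        if (tryCacheSomething(attempt) == GiveUpOnCache) {
            // Plays the role of repatching the slow-path call to the generic,
            // non-optimizing operation: no further caching attempts happen.
            site.usesGenericSlowPath = true;
        }
        // RetryCacheLater leaves the optimizing operation wired in, so a later
        // execution gets another chance to install a cache.
    }

    int main()
    {
        CallSite site;
        for (int attempt = 0; attempt < 5; ++attempt)
            repatchSomething(site, attempt);
        std::printf("demoted to generic slow path: %s\n", site.usesGenericSlowPath ? "yes" : "no");
        return 0;
    }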
-static void repatchByIdSelfAccess(VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, const Identifier& propertyName, PropertyOffset offset,
- const FunctionPtr &slowPathFunction, bool compact)
+static InlineCacheAction actionForCell(VM& vm, JSCell* cell)
{
- if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm.registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
-
- RepatchBuffer repatchBuffer(codeBlock);
+ Structure* structure = cell->structure(vm);
- // Only optimize once!
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, slowPathFunction);
+ TypeInfo typeInfo = structure->typeInfo();
+ if (typeInfo.prohibitsPropertyCaching())
+ return GiveUpOnCache;
- // Patch the structure check & the offset of the load.
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), structure);
- repatchBuffer.setLoadInstructionIsActive(stubInfo.callReturnLocation.convertibleLoadAtOffset(stubInfo.patch.deltaCallToStorageLoad), isOutOfLineOffset(offset));
-#if USE(JSVALUE64)
- if (compact)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
- else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
-#elif USE(JSVALUE32_64)
- if (compact) {
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- } else {
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- }
-#endif
-}
-
-static void addStructureTransitionCheck(
- JSCell* object, Structure* structure, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
- MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
-{
- if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
- structure->addTransitionWatchpoint(stubInfo.addWatchpoint(codeBlock));
-#if !ASSERT_DISABLED
- // If we execute this code, the object must have the structure we expect. Assert
- // this in debug modes.
- jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
- MacroAssembler::Jump ok = jit.branchPtr(
- MacroAssembler::Equal,
- MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure));
- jit.breakpoint();
- ok.link(&jit);
-#endif
- return;
+ if (structure->isUncacheableDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return GiveUpOnCache;
+ // Flattening could have changed the offset, so return early for another try.
+ asObject(cell)->flattenDictionaryObject(vm);
+ return RetryCacheLater;
}
- jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
- failureCases.append(
- jit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure)));
-}
-
-static void addStructureTransitionCheck(
- JSValue prototype, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
- MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
-{
- if (prototype.isNull())
- return;
-
- ASSERT(prototype.isCell());
-
- addStructureTransitionCheck(
- prototype.asCell(), prototype.asCell()->structure(), codeBlock, stubInfo, jit,
- failureCases, scratchGPR);
-}
+ if (!structure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
-static void replaceWithJump(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
-{
- if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(
- stubInfo.callReturnLocation.dataLabelPtrAtOffset(
- -(intptr_t)stubInfo.patch.deltaCheckImmToCall)),
- CodeLocationLabel(target));
- return;
- }
-
- repatchBuffer.relink(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(target));
+ return AttemptToCache;
}
-static void emitRestoreScratch(MacroAssembler& stubJit, bool needToRestoreScratch, GPRReg scratchGPR, MacroAssembler::Jump& success, MacroAssembler::Jump& fail, MacroAssembler::JumpList failureCases)
+static bool forceICFailure(ExecState*)
{
- if (needToRestoreScratch) {
- stubJit.popToRestore(scratchGPR);
-
- success = stubJit.jump();
-
- // link failure cases here, so we can pop scratchGPR, and then jump back.
- failureCases.link(&stubJit);
-
- stubJit.popToRestore(scratchGPR);
-
- fail = stubJit.jump();
- return;
- }
-
- success = stubJit.jump();
+#if CPU(ARM_TRADITIONAL)
+ // FIXME: Remove this workaround once the proper fixes are landed.
+ // [ARM] Disable Inline Caching on ARMv7 traditional until proper fix
+ // https://bugs.webkit.org/show_bug.cgi?id=159759
+ return true;
+#else
+ return Options::forceICFailure();
+#endif
}
-static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases, CodeLocationLabel successLabel, CodeLocationLabel slowCaseBegin)
+inline J_JITOperation_ESsiJI appropriateOptimizingGetByIdFunction(GetByIDKind kind)
{
- patchBuffer.link(success, successLabel);
-
- if (needToRestoreScratch) {
- patchBuffer.link(fail, slowCaseBegin);
- return;
- }
-
- // link failure cases directly back to normal path
- patchBuffer.link(failureCases, slowCaseBegin);
+ if (kind == GetByIDKind::Normal)
+ return operationGetByIdOptimize;
+ return operationTryGetByIdOptimize;
}
-static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, StructureStubInfo& stubInfo, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases)
+inline J_JITOperation_ESsiJI appropriateGenericGetByIdFunction(GetByIDKind kind)
{
- linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ if (kind == GetByIDKind::Normal)
+ return operationGetById;
+ return operationTryGetById;
}
-enum ProtoChainGenerationResult {
- ProtoChainGenerationFailed,
- ProtoChainGenerationSucceeded
-};
-
-static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState*, const PropertySlot&, const Identifier&, StructureStubInfo&, StructureChain*, size_t, PropertyOffset, Structure*, CodeLocationLabel, CodeLocationLabel, RefPtr<JITStubRoutine>&) WARN_UNUSED_RETURN;
-static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState* exec, const PropertySlot& slot, const Identifier& propertyName, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, PropertyOffset offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, RefPtr<JITStubRoutine>& stubRoutine)
+static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo, GetByIDKind kind)
{
- VM* vm = &exec->vm();
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
- bool needToRestoreScratch = scratchGPR == InvalidGPRReg;
- if (needToRestoreScratch && !slot.isCacheableValue())
- return ProtoChainGenerationFailed;
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
- CCallHelpers stubJit(&exec->vm(), exec->codeBlock());
- if (needToRestoreScratch) {
-#if USE(JSVALUE64)
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
-#else
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
-#endif
- stubJit.pushToSave(scratchGPR);
- needToRestoreScratch = true;
- }
-
- MacroAssembler::JumpList failureCases;
-
- failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure)));
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell())
+ return GiveUpOnCache;
CodeBlock* codeBlock = exec->codeBlock();
- if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
-
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(exec));
- Structure* protoStructure = protoObject->structure();
- if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
- addStructureTransitionCheck(
- protoObject, protoStructure, codeBlock, stubInfo, stubJit,
- failureCases, scratchGPR);
- currStructure = it->get();
- }
-
- bool isAccessor = slot.isCacheableGetter() || slot.isCacheableCustom();
- if (isAccessor)
- stubJit.move(baseGPR, scratchGPR);
+ VM& vm = exec->vm();
- if (!slot.isCacheableCustom()) {
- if (isInlineOffset(offset)) {
-#if USE(JSVALUE64)
- stubJit.load64(protoObject->locationForOffset(offset), resultGPR);
-#elif USE(JSVALUE32_64)
- stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
-#endif
- } else {
- stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR);
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
-#elif USE(JSVALUE32_64)
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
-#endif
- }
- }
- MacroAssembler::Call operationCall;
- MacroAssembler::Call handlerCall;
- FunctionPtr operationFunction;
- MacroAssembler::Jump success, fail;
- if (isAccessor) {
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
- if (slot.isCacheableGetter()) {
- stubJit.setupArguments(callFrameRegister, scratchGPR, resultGPR);
- operationFunction = operationCallGetter;
- } else {
- stubJit.move(MacroAssembler::TrustedImmPtr(protoObject), scratchGPR);
- stubJit.setupArguments(callFrameRegister, scratchGPR,
- MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
- MacroAssembler::TrustedImmPtr(propertyName.impl()));
- operationFunction = operationCallCustomGetter;
- }
+ std::unique_ptr<AccessCase> newCase;
- // Need to make sure that whenever this call is made in the future, we remember the
- // place that we made it from. It just so happens to be the place that we are at
- // right now!
- stubJit.store32(MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
- CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
+ if (propertyName == vm.propertyNames->length) {
+ if (isJSArray(baseValue)) {
+ if (stubInfo.cacheType == CacheType::Unset
+ && slot.slotBase() == baseValue
+ && InlineAccess::isCacheableArrayLength(stubInfo, jsCast<JSArray*>(baseValue))) {
- operationCall = stubJit.call();
-#if USE(JSVALUE64)
- stubJit.move(GPRInfo::returnValueGPR, resultGPR);
-#else
- stubJit.setupResults(resultGPR, resultTagGPR);
-#endif
- MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
-
- stubJit.setupArgumentsExecState();
- handlerCall = stubJit.call();
- stubJit.jumpToExceptionHandler();
-
- noException.link(&stubJit);
- }
- emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
-
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-
- linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
- if (isAccessor) {
- patchBuffer.link(operationCall, operationFunction);
- patchBuffer.link(handlerCall, lookupExceptionHandler);
- }
- stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG prototype chain access stub for %s, return point %p",
- toCString(*exec->codeBlock()).data(), successLabel.executableAddress()));
- return ProtoChainGenerationSucceeded;
-}
-
-static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
-{
- // FIXME: Write a test that proves we need to check for recursion here just
- // like the interpreter does, then add a check for recursion.
+ bool generatedCodeInline = InlineAccess::generateArrayLength(*codeBlock->vm(), stubInfo, jsCast<JSArray*>(baseValue));
+ if (generatedCodeInline) {
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
+ stubInfo.initArrayLength();
+ return RetryCacheLater;
+ }
+ }
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
-
- if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
- bool needToRestoreScratch = false;
-
- MacroAssembler stubJit;
-
- if (scratchGPR == InvalidGPRReg) {
-#if USE(JSVALUE64)
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
-#else
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
-#endif
- stubJit.pushToSave(scratchGPR);
- needToRestoreScratch = true;
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::ArrayLength);
+ } else if (isJSString(baseValue))
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::StringLength);
+ else if (DirectArguments* arguments = jsDynamicCast<DirectArguments*>(vm, baseValue)) {
+ // If there were overrides, then we can handle this as a normal property load! Guarding
+ // this with such a check enables us to add an IC case for that load if needed.
+ if (!arguments->overrodeThings())
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::DirectArgumentsLength);
+ } else if (ScopedArguments* arguments = jsDynamicCast<ScopedArguments*>(vm, baseValue)) {
+ // Ditto.
+ if (!arguments->overrodeThings())
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::ScopedArgumentsLength);
}
-
- MacroAssembler::JumpList failureCases;
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSCell::structureOffset()), scratchGPR);
- stubJit.load8(MacroAssembler::Address(scratchGPR, Structure::indexingTypeOffset()), scratchGPR);
- failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray)));
- failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask)));
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
- failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
-
- stubJit.move(scratchGPR, resultGPR);
-#if USE(JSVALUE64)
- stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR);
-#elif USE(JSVALUE32_64)
- stubJit.move(AssemblyHelpers::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
-#endif
-
- MacroAssembler::Jump success, fail;
-
- emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
-
- LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
-
- linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
-
- stubInfo.stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG GetById array length stub for %s, return point %p",
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress()));
-
- RepatchBuffer repatchBuffer(codeBlock);
- replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById);
-
- return true;
}
-
- // FIXME: should support length access for String.
-
- // FIXME: Cache property access for immediates.
- if (!baseValue.isCell())
- return false;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
- if (!slot.isCacheable())
- return false;
- if (!structure->propertyAccessesAreCacheable())
- return false;
-
- // Optimize self access.
- if (slot.slotBase() == baseValue) {
- if (!slot.isCacheableValue()
- || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) {
- repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList);
- return true;
- }
- repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, propertyName, slot.cachedOffset(), operationGetByIdBuildList, true);
- stubInfo.initGetByIdSelf(*vm, codeBlock->ownerExecutable(), structure);
- return true;
+ if (!propertyName.isSymbol() && isJSModuleNamespaceObject(baseValue) && !slot.isUnset()) {
+ if (auto moduleNamespaceSlot = slot.moduleNamespaceSlot())
+ newCase = ModuleNamespaceAccessCase::create(vm, codeBlock, jsCast<JSModuleNamespaceObject*>(baseValue), moduleNamespaceSlot->environment, ScopeOffset(moduleNamespaceSlot->scopeOffset));
}
- if (structure->isDictionary())
- return false;
-
- if (!stubInfo.patch.registersFlushed) {
- // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
- // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
- // if registers were not flushed, don't do non-Value caching.
- if (!slot.isCacheableValue())
- return false;
- }
-
- PropertyOffset offset = slot.cachedOffset();
- size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), propertyName, offset);
- if (count == InvalidPrototypeChain)
- return false;
-
- StructureChain* prototypeChain = structure->prototypeChain(exec);
- if (generateProtoChainAccessStub(exec, slot, propertyName, stubInfo, prototypeChain, count, offset,
- structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), stubInfo.stubRoutine) == ProtoChainGenerationFailed)
- return false;
-
- RepatchBuffer repatchBuffer(codeBlock);
- replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdBuildList);
-
- stubInfo.initGetByIdChain(*vm, codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.isCacheableValue());
- return true;
-}
+ if (!newCase) {
+ if (!slot.isCacheable() && !slot.isUnset())
+ return GiveUpOnCache;
-void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
-{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
-
- bool cached = tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo);
- if (!cached)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
-}
+ ObjectPropertyConditionSet conditionSet;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure(vm);
-static bool getPolymorphicStructureList(
- VM* vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
- PolymorphicAccessStructureList*& polymorphicStructureList, int& listIndex,
- CodeLocationLabel& slowCase)
-{
- slowCase = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase);
-
- if (stubInfo.accessType == access_unset) {
- RELEASE_ASSERT(!stubInfo.stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList();
- stubInfo.initGetByIdSelfList(polymorphicStructureList, 0, false);
- listIndex = 0;
- } else if (stubInfo.accessType == access_get_by_id_self) {
- RELEASE_ASSERT(!stubInfo.stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), JITStubRoutine::createSelfManagedRoutine(slowCase), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
- stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, true);
- listIndex = 1;
- } else if (stubInfo.accessType == access_get_by_id_chain) {
- RELEASE_ASSERT(!!stubInfo.stubRoutine);
- slowCase = CodeLocationLabel(stubInfo.stubRoutine->code().code());
- polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true);
- stubInfo.stubRoutine.clear();
- stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, false);
- listIndex = 1;
- } else {
- RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
- polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
- listIndex = stubInfo.u.getByIdSelfList.listSize;
- slowCase = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
- }
-
- if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
- return false;
-
- RELEASE_ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
- return true;
-}
+ bool loadTargetFromProxy = false;
+ if (baseCell->type() == PureForwardingProxyType) {
+ baseValue = jsCast<JSProxy*>(baseCell)->target();
+ baseCell = baseValue.asCell();
+ structure = baseCell->structure(vm);
+ loadTargetFromProxy = true;
+ }
-static void patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JITStubRoutine* stubRoutine)
-{
- RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
- RepatchBuffer repatchBuffer(codeBlock);
- if (stubInfo.u.getByIdSelfList.didSelfPatching) {
- repatchBuffer.relink(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(stubRoutine->code().code()));
- return;
- }
-
- replaceWithJump(repatchBuffer, stubInfo, stubRoutine->code().code());
-}
+ InlineCacheAction action = actionForCell(vm, baseCell);
+ if (action != AttemptToCache)
+ return action;
+
+ // Optimize self access.
+ if (stubInfo.cacheType == CacheType::Unset
+ && slot.isCacheableValue()
+ && slot.slotBase() == baseValue
+ && !slot.watchpointSet()
+ && !structure->needImpurePropertyWatchpoint()
+ && !loadTargetFromProxy) {
+
+ bool generatedCodeInline = InlineAccess::generateSelfPropertyAccess(*codeBlock->vm(), stubInfo, structure, slot.cachedOffset());
+ if (generatedCodeInline) {
+ LOG_IC((ICEvent::GetByIdSelfPatch, structure->classInfo(), propertyName));
+ structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
+ stubInfo.initGetByIdSelf(codeBlock, structure, slot.cachedOffset());
+ return RetryCacheLater;
+ }
+ }
-static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo)
-{
- if (!baseValue.isCell()
- || !slot.isCacheable()
- || !baseValue.asCell()->structure()->propertyAccessesAreCacheable())
- return false;
+ PropertyOffset offset = slot.isUnset() ? invalidOffset : slot.cachedOffset();
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
-
- if (slot.slotBase() == baseValue) {
- if (!stubInfo.patch.registersFlushed) {
- // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
- // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
- // if registers were not flushed, don't do non-Value caching.
- if (!slot.isCacheableValue())
- return false;
- }
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex;
- CodeLocationLabel slowCase;
+ if (slot.isUnset() || slot.slotBase() != baseValue) {
+ if (structure->typeInfo().prohibitsPropertyCaching())
+ return GiveUpOnCache;
- if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
- return false;
-
- stubInfo.u.getByIdSelfList.listSize++;
-
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
-
- CCallHelpers stubJit(vm, codeBlock);
-
- MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
-
- // The strategy we use for stubs is as follows:
- // 1) Call DFG helper that calls the getter.
- // 2) Check if there was an exception, and if there was, call yet another
- // helper.
-
- bool isDirect = false;
- MacroAssembler::Call operationCall;
- MacroAssembler::Call handlerCall;
- FunctionPtr operationFunction;
- MacroAssembler::Jump success;
-
- if (slot.isCacheableGetter() || slot.isCacheableCustom()) {
- if (slot.isCacheableGetter()) {
- ASSERT(scratchGPR != InvalidGPRReg);
- ASSERT(baseGPR != scratchGPR);
- if (isInlineOffset(slot.cachedOffset())) {
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#else
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#endif
- } else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#else
- stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#endif
- }
- stubJit.setupArguments(callFrameRegister, baseGPR, scratchGPR);
- operationFunction = operationCallGetter;
- } else {
- stubJit.setupArguments(
- callFrameRegister, baseGPR,
- MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
- MacroAssembler::TrustedImmPtr(ident.impl()));
- operationFunction = operationCallCustomGetter;
+ if (structure->isDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return GiveUpOnCache;
+ structure->flattenDictionaryStructure(vm, jsCast<JSObject*>(baseCell));
}
- // Need to make sure that whenever this call is made in the future, we remember the
- // place that we made it from. It just so happens to be the place that we are at
- // right now!
- stubJit.store32(
- MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
- CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
-
- operationCall = stubJit.call();
-#if USE(JSVALUE64)
- stubJit.move(GPRInfo::returnValueGPR, resultGPR);
-#else
- stubJit.setupResults(resultGPR, resultTagGPR);
-#endif
- success = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
-
- stubJit.setupArgumentsExecState();
- handlerCall = stubJit.call();
- stubJit.jumpToExceptionHandler();
- } else {
- if (isInlineOffset(slot.cachedOffset())) {
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
-#else
- if (baseGPR == resultTagGPR) {
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- } else {
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
- }
-#endif
+ if (slot.isUnset() && structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
+ return GiveUpOnCache;
+
+ if (slot.isUnset()) {
+ conditionSet = generateConditionsForPropertyMiss(
+ vm, codeBlock, exec, structure, propertyName.impl());
} else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
-#else
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
-#endif
+ conditionSet = generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.slotBase(),
+ propertyName.impl());
}
- success = stubJit.jump();
- isDirect = true;
+
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+
+ offset = slot.isUnset() ? invalidOffset : conditionSet.slotBaseCondition().offset();
}
- LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
-
- patchBuffer.link(wrongStruct, slowCase);
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
- if (!isDirect) {
- patchBuffer.link(operationCall, operationFunction);
- patchBuffer.link(handlerCall, lookupExceptionHandler);
+ JSFunction* getter = nullptr;
+ if (slot.isCacheableGetter())
+ getter = jsDynamicCast<JSFunction*>(vm, slot.getterSetter()->getter());
+
+ DOMJIT::GetterSetter* domJIT = nullptr;
+ if (slot.isCacheableCustom() && slot.domJIT())
+ domJIT = slot.domJIT();
+
+ if (kind == GetByIDKind::Try) {
+ AccessCase::AccessType type;
+ if (slot.isCacheableValue())
+ type = AccessCase::Load;
+ else if (slot.isUnset())
+ type = AccessCase::Miss;
+ else if (slot.isCacheableGetter())
+ type = AccessCase::GetGetter;
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+
+ newCase = ProxyableAccessCase::create(vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy, slot.watchpointSet());
+ } else if (!loadTargetFromProxy && getter && IntrinsicGetterAccessCase::canEmitIntrinsicGetter(getter, structure))
+ newCase = IntrinsicGetterAccessCase::create(vm, codeBlock, slot.cachedOffset(), structure, conditionSet, getter);
+ else {
+ if (slot.isCacheableValue() || slot.isUnset()) {
+ newCase = ProxyableAccessCase::create(vm, codeBlock, slot.isUnset() ? AccessCase::Miss : AccessCase::Load,
+ offset, structure, conditionSet, loadTargetFromProxy, slot.watchpointSet());
+ } else {
+ AccessCase::AccessType type;
+ if (slot.isCacheableGetter())
+ type = AccessCase::Getter;
+ else if (slot.attributes() & CustomAccessor)
+ type = AccessCase::CustomAccessorGetter;
+ else
+ type = AccessCase::CustomValueGetter;
+
+ newCase = GetterSetterAccessCase::create(
+ vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy,
+ slot.watchpointSet(), slot.isCacheableCustom() ? slot.customGetter() : nullptr,
+ slot.isCacheableCustom() ? slot.slotBase() : nullptr,
+ domJIT);
+ }
}
-
- RefPtr<JITStubRoutine> stubRoutine =
- createJITStubRoutine(
- FINALIZE_DFG_CODE(
- patchBuffer,
- ("DFG GetById polymorphic list access for %s, return point %p",
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress())),
- *vm,
- codeBlock->ownerExecutable(),
- slot.isCacheableGetter() || slot.isCacheableCustom());
-
- polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
-
- patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
- return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
-
- if (baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()
- || baseValue.asCell()->structure()->isDictionary())
- return false;
-
- if (!stubInfo.patch.registersFlushed) {
- // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
- // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
- // if registers were not flushed, don't do non-Value caching.
- if (!slot.isCacheableValue())
- return false;
- }
-
- PropertyOffset offset = slot.cachedOffset();
- size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), ident, offset);
- if (count == InvalidPrototypeChain)
- return false;
+ LOG_IC((ICEvent::GetByIdAddAccessCase, baseValue.classInfoOrNull(vm), propertyName));
- StructureChain* prototypeChain = structure->prototypeChain(exec);
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex;
- CodeLocationLabel slowCase;
- if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
- return false;
-
- stubInfo.u.getByIdProtoList.listSize++;
-
- RefPtr<JITStubRoutine> stubRoutine;
-
- if (generateProtoChainAccessStub(exec, slot, ident, stubInfo, prototypeChain, count, offset, structure,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
- slowCase, stubRoutine) == ProtoChainGenerationFailed)
- return false;
-
- polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, slot.isCacheableValue());
-
- patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
+ AccessGenerationResult result = stubInfo.addAccessCase(codeBlock, propertyName, WTFMove(newCase));
+
+ if (result.generatedSomeCode()) {
+ LOG_IC((ICEvent::GetByIdReplaceWithJump, baseValue.classInfoOrNull(vm), propertyName));
+
+ RELEASE_ASSERT(result.code());
+ InlineAccess::rewireStubAsJump(exec->vm(), stubInfo, CodeLocationLabel(result.code()));
+ }
- return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
+ return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
}
-void buildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo, GetByIDKind kind)
{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
+ SuperSamplerScope superSamplerScope(false);
+ GCSafeConcurrentJSLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- bool dontChangeCall = tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo);
- if (!dontChangeCall)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+ if (tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo, kind) == GiveUpOnCache)
+ ftlThunkAwareRepatchCall(exec->codeBlock(), stubInfo.slowPathCallLocation(), appropriateGenericGetByIdFunction(kind));
}
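For readers unfamiliar with the machinery that stubInfo.addAccessCase() and InlineAccess::rewireStubAsJump() feed in the hunk above, the following is a heavily simplified standalone model of the general idea: a per-call-site list of structure-keyed access cases that is recompiled into one dispatcher, with the fast path rewired to use it. None of the types below are JSC classes; offsets are faked, and watchpoints, condition sets and code generation are omitted entirely.

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <vector>

    using StructureID = uint32_t;
    struct Object { StructureID structureID; int slots[4]; };

    // One "access case": valid only for objects of a particular structure.
    struct AccessCase {
        StructureID structureID;
        int offset; // where the property lives for that structure
    };

    // Per-call-site cache state, loosely playing the role of StructureStubInfo plus
    // the polymorphic access list: cases accumulate until a size limit, then the
    // caller should give up on caching.
    struct StubInfo {
        std::vector<AccessCase> cases;
        std::function<bool(const Object&, int&)> stub; // the "regenerated" dispatcher

        bool addAccessCase(AccessCase newCase, size_t limit = 8)
        {
            if (cases.size() >= limit)
                return false;
            cases.push_back(newCase);
            // Regenerate the dispatcher over all accumulated cases. The real code
            // emits machine code for this and rewires the inline cache to jump to it.
            auto snapshot = cases;
            stub = [snapshot](const Object& o, int& result) {
                for (const AccessCase& c : snapshot) {
                    if (o.structureID == c.structureID) {
                        result = o.slots[c.offset];
                        return true;
                    }
                }
                return false; // no case matched: fall through to the slow path
            };
            return true;
        }
    };

    int main()
    {
        StubInfo stubInfo;
        stubInfo.addAccessCase({ /*structureID*/ 1, /*offset*/ 0 });
        stubInfo.addAccessCase({ /*structureID*/ 2, /*offset*/ 3 });

        Object a { 1, { 42, 0, 0, 0 } };
        Object b { 2, { 0, 0, 0, 7 } };
        int value = 0;
        bool hit = stubInfo.stub(a, value);
        std::printf("a: hit=%d value=%d\n", hit, value);
        value = 0;
        hit = stubInfo.stub(b, value);
        std::printf("b: hit=%d value=%d\n", hit, value);
        return 0;
    }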
static V_JITOperation_ESsiJJI appropriateGenericPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
@@ -750,862 +347,666 @@ static V_JITOperation_ESsiJJI appropriateGenericPutByIdFunction(const PutPropert
return operationPutByIdNonStrict;
}
-static V_JITOperation_ESsiJJI appropriateListBuildingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+static V_JITOperation_ESsiJJI appropriateOptimizingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
{
if (slot.isStrictMode()) {
if (putKind == Direct)
- return operationPutByIdDirectStrictBuildList;
- return operationPutByIdStrictBuildList;
+ return operationPutByIdDirectStrictOptimize;
+ return operationPutByIdStrictOptimize;
}
if (putKind == Direct)
- return operationPutByIdDirectNonStrictBuildList;
- return operationPutByIdNonStrictBuildList;
+ return operationPutByIdDirectNonStrictOptimize;
+ return operationPutByIdNonStrictOptimize;
}
-#if ENABLE(GGC)
-static MacroAssembler::Call storeToWriteBarrierBuffer(CCallHelpers& jit, GPRReg cell, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
+static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
- ASSERT(scratch1 != scratch2);
- WriteBarrierBuffer* writeBarrierBuffer = &jit.vm()->heap.writeBarrierBuffer();
- jit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer), scratch1);
- jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
- MacroAssembler::Jump needToFlush = jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
-
- jit.add32(MacroAssembler::TrustedImm32(1), scratch2);
- jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
-
- jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
- // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
- jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
-
- MacroAssembler::Jump done = jit.jump();
- needToFlush.link(&jit);
-
- ScratchBuffer* scratchBuffer = jit.vm()->scratchBufferForSize(allocator.desiredScratchBufferSize());
- allocator.preserveUsedRegistersToScratchBuffer(jit, scratchBuffer, scratch1);
-
- unsigned bytesFromBase = allocator.numberOfReusedRegisters() * sizeof(void*);
- unsigned bytesToSubtract = 0;
-#if CPU(X86)
- bytesToSubtract += 2 * sizeof(void*);
- bytesFromBase += bytesToSubtract;
-#endif
- unsigned currentAlignment = bytesFromBase % stackAlignmentBytes();
- bytesToSubtract += currentAlignment;
-
- if (bytesToSubtract)
- jit.subPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
-
- jit.setupArguments(callFrameRegister, cell);
- MacroAssembler::Call call = jit.call();
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM& vm = exec->vm();
- if (bytesToSubtract)
- jit.addPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
- allocator.restoreUsedRegistersFromScratchBuffer(jit, scratchBuffer, scratch1);
+ if (!baseValue.isCell())
+ return GiveUpOnCache;
+
+ if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter())
+ return GiveUpOnCache;
- done.link(&jit);
+ if (!structure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
+
+ std::unique_ptr<AccessCase> newCase;
+
+ if (slot.base() == baseValue && slot.isCacheablePut()) {
+ if (slot.type() == PutPropertySlot::ExistingProperty) {
+ structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+
+ if (stubInfo.cacheType == CacheType::Unset
+ && InlineAccess::canGenerateSelfPropertyReplace(stubInfo, slot.cachedOffset())
+ && !structure->needImpurePropertyWatchpoint()
+ && !structure->inferredTypeFor(ident.impl())) {
+
+ bool generatedCodeInline = InlineAccess::generateSelfPropertyReplace(vm, stubInfo, structure, slot.cachedOffset());
+ if (generatedCodeInline) {
+ LOG_IC((ICEvent::PutByIdSelfPatch, structure->classInfo(), ident));
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingPutByIdFunction(slot, putKind));
+ stubInfo.initPutByIdReplace(codeBlock, structure, slot.cachedOffset());
+ return RetryCacheLater;
+ }
+ }
- return call;
-}
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::Replace, slot.cachedOffset(), structure);
+ } else {
+ ASSERT(slot.type() == PutPropertySlot::NewProperty);
-static MacroAssembler::Call writeBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
-{
- ASSERT(owner != scratch1);
- ASSERT(owner != scratch2);
+ if (!structure->isObject())
+ return GiveUpOnCache;
- MacroAssembler::Jump definitelyNotMarked = DFG::SpeculativeJIT::genericWriteBarrier(jit, owner, scratch1, scratch2);
- MacroAssembler::Call call = storeToWriteBarrierBuffer(jit, owner, scratch1, scratch2, callFrameRegister, allocator);
- definitelyNotMarked.link(&jit);
- return call;
-}
-#endif // ENABLE(GGC)
-
-static void emitPutReplaceStub(
- ExecState* exec,
- JSValue,
- const Identifier&,
- const PutPropertySlot& slot,
- StructureStubInfo& stubInfo,
- PutKind,
- Structure* structure,
- CodeLocationLabel failureLabel,
- RefPtr<JITStubRoutine>& stubRoutine)
-{
- VM* vm = &exec->vm();
-#if ENABLE(GGC)
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
-#endif
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
-
- ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
- allocator.lock(baseGPR);
-#if USE(JSVALUE32_64)
- allocator.lock(valueTagGPR);
-#endif
- allocator.lock(valueGPR);
-
- GPRReg scratchGPR1 = allocator.allocateScratchGPR();
-#if ENABLE(GGC)
- GPRReg scratchGPR2 = allocator.allocateScratchGPR();
-#endif
+ if (structure->isDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return GiveUpOnCache;
+ structure->flattenDictionaryStructure(vm, jsCast<JSObject*>(baseValue));
+ }
- CCallHelpers stubJit(vm, exec->codeBlock());
+ PropertyOffset offset;
+ Structure* newStructure =
+ Structure::addPropertyTransitionToExistingStructureConcurrently(
+ structure, ident.impl(), 0, offset);
+ if (!newStructure || !newStructure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
- allocator.preserveReusedRegistersByPushing(stubJit);
+ ASSERT(newStructure->previousID() == structure);
+ ASSERT(!newStructure->isDictionary());
+ ASSERT(newStructure->isObject());
+
+ ObjectPropertyConditionSet conditionSet;
+ if (putKind == NotDirect) {
+ conditionSet =
+ generateConditionsForPropertySetterMiss(
+ vm, codeBlock, exec, newStructure, ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ }
- MacroAssembler::Jump badStructure = stubJit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure));
+ newCase = AccessCase::create(vm, codeBlock, offset, structure, newStructure, conditionSet);
+ }
+ } else if (slot.isCacheableCustom() || slot.isCacheableSetter()) {
+ if (slot.isCacheableCustom()) {
+ ObjectPropertyConditionSet conditionSet;
+
+ if (slot.base() != baseValue) {
+ conditionSet =
+ generateConditionsForPrototypePropertyHitCustom(
+ vm, codeBlock, exec, structure, slot.base(), ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ }
-#if USE(JSVALUE64)
- if (isInlineOffset(slot.cachedOffset()))
- stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
- else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
- }
-#elif USE(JSVALUE32_64)
- if (isInlineOffset(slot.cachedOffset())) {
- stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- } else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ newCase = GetterSetterAccessCase::create(
+ vm, codeBlock, slot.isCustomAccessor() ? AccessCase::CustomAccessorSetter : AccessCase::CustomValueSetter, structure, invalidOffset, conditionSet,
+ slot.customSetter(), slot.base());
+ } else {
+ ObjectPropertyConditionSet conditionSet;
+ PropertyOffset offset;
+
+ if (slot.base() != baseValue) {
+ conditionSet =
+ generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.base(), ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ offset = conditionSet.slotBaseCondition().offset();
+ } else
+ offset = slot.cachedOffset();
+
+ newCase = GetterSetterAccessCase::create(
+ vm, codeBlock, AccessCase::Setter, structure, offset, conditionSet);
+ }
}
-#endif
-
-#if ENABLE(GGC)
- MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
-#endif
+
+ LOG_IC((ICEvent::PutByIdAddAccessCase, structure->classInfo(), ident));
- MacroAssembler::Jump success;
- MacroAssembler::Jump failure;
+ AccessGenerationResult result = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
- if (allocator.didReuseRegisters()) {
- allocator.restoreReusedRegistersByPopping(stubJit);
- success = stubJit.jump();
+ if (result.generatedSomeCode()) {
+ LOG_IC((ICEvent::PutByIdReplaceWithJump, structure->classInfo(), ident));
- badStructure.link(&stubJit);
- allocator.restoreReusedRegistersByPopping(stubJit);
- failure = stubJit.jump();
- } else {
- success = stubJit.jump();
- failure = badStructure;
+ RELEASE_ASSERT(result.code());
+
+ InlineAccess::rewireStubAsJump(vm, stubInfo, CodeLocationLabel(result.code()));
}
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-#if ENABLE(GGC)
- patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
-#endif
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
- patchBuffer.link(failure, failureLabel);
-
- stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG PutById replace stub for %s, return point %p",
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress()));
+ return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
}
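The NewProperty branch above replaces the hand-rolled emitPutTransitionStub() (removed in the hunks that follow) with a transition access case, but the shape of the cached transition itself is unchanged and easy to model in isolation: check the old structure, store the value at the offset chosen for the new structure, and switch the object to the new structure. A standalone sketch with invented types follows; it ignores out-of-line storage reallocation, write barriers and the prototype-chain condition sets that the real code validates.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using StructureID = uint32_t;

    struct Object {
        StructureID structureID;
        std::vector<int> storage; // stand-in for inline/out-of-line property storage
    };

    // A cached "add property" transition: only valid while the object still has
    // oldStructureID and its storage already has room at newOffset.
    struct CachedTransition {
        StructureID oldStructureID;
        StructureID newStructureID;
        std::size_t newOffset;

        bool tryApply(Object& object, int value) const
        {
            if (object.structureID != oldStructureID)
                return false; // structure check failed: take the slow path
            if (object.storage.size() <= newOffset)
                return false; // the real stub may also have to grow the storage
            object.storage[newOffset] = value;   // store the value at the new offset
            object.structureID = newStructureID; // switch the object to the new shape
            return true;
        }
    };

    int main()
    {
        Object o { /*structureID*/ 1, std::vector<int>(4, 0) };
        CachedTransition transition { /*old*/ 1, /*new*/ 2, /*newOffset*/ 2 };

        bool fastPathTaken = transition.tryApply(o, 99);
        std::printf("first put:  fast path=%d, structure=%u, slot[2]=%d\n",
                    fastPathTaken, (unsigned)o.structureID, o.storage[2]);

        // The object now has the new structure, so the same cached transition no
        // longer applies and a repeated add-property put falls back to the slow path.
        fastPathTaken = transition.tryApply(o, 5);
        std::printf("second put: fast path=%d\n", fastPathTaken);
        return 0;
    }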
-static void emitPutTransitionStub(
- ExecState* exec,
- JSValue,
- const Identifier&,
- const PutPropertySlot& slot,
- StructureStubInfo& stubInfo,
- PutKind putKind,
- Structure* structure,
- Structure* oldStructure,
- StructureChain* prototypeChain,
- CodeLocationLabel failureLabel,
- RefPtr<JITStubRoutine>& stubRoutine)
+void repatchPutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
- VM* vm = &exec->vm();
-
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
-
- ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
- allocator.lock(baseGPR);
-#if USE(JSVALUE32_64)
- allocator.lock(valueTagGPR);
-#endif
- allocator.lock(valueGPR);
-
- CCallHelpers stubJit(vm);
-
- bool needThirdScratch = false;
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()
- && oldStructure->outOfLineCapacity()) {
- needThirdScratch = true;
- }
-
- GPRReg scratchGPR1 = allocator.allocateScratchGPR();
- ASSERT(scratchGPR1 != baseGPR);
- ASSERT(scratchGPR1 != valueGPR);
+ SuperSamplerScope superSamplerScope(false);
+ GCSafeConcurrentJSLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- GPRReg scratchGPR2 = allocator.allocateScratchGPR();
- ASSERT(scratchGPR2 != baseGPR);
- ASSERT(scratchGPR2 != valueGPR);
- ASSERT(scratchGPR2 != scratchGPR1);
-
- GPRReg scratchGPR3;
- if (needThirdScratch) {
- scratchGPR3 = allocator.allocateScratchGPR();
- ASSERT(scratchGPR3 != baseGPR);
- ASSERT(scratchGPR3 != valueGPR);
- ASSERT(scratchGPR3 != scratchGPR1);
- ASSERT(scratchGPR3 != scratchGPR2);
- } else
- scratchGPR3 = InvalidGPRReg;
-
- allocator.preserveReusedRegistersByPushing(stubJit);
+ if (tryCachePutByID(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache)
+ ftlThunkAwareRepatchCall(exec->codeBlock(), stubInfo.slowPathCallLocation(), appropriateGenericPutByIdFunction(slot, putKind));
+}
- MacroAssembler::JumpList failureCases;
-
- ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated());
+static InlineCacheAction tryRepatchIn(
+ ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
+ const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
- failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
+ if (!base->structure()->propertyAccessesAreCacheable() || (!wasFound && !base->structure()->propertyAccessesAreCacheableForAbsence()))
+ return GiveUpOnCache;
- addStructureTransitionCheck(
- oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR1);
-
- if (putKind == NotDirect) {
- for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) {
- addStructureTransitionCheck(
- (*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR1);
- }
+ if (wasFound) {
+ if (!slot.isCacheable())
+ return GiveUpOnCache;
}
-
- MacroAssembler::JumpList slowPath;
- bool scratchGPR1HasStorage = false;
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM& vm = exec->vm();
+ Structure* structure = base->structure(vm);
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
- size_t newSize = structure->outOfLineCapacity() * sizeof(JSValue);
- CopiedAllocator* copiedAllocator = &vm->heap.storageAllocator();
-
- if (!oldStructure->outOfLineCapacity()) {
- stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
- slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
- stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
- stubJit.negPtr(scratchGPR1);
- stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
- stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
- } else {
- size_t oldSize = oldStructure->outOfLineCapacity() * sizeof(JSValue);
- ASSERT(newSize > oldSize);
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
- stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
- slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
- stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
- stubJit.negPtr(scratchGPR1);
- stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
- stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
- // We have scratchGPR1 = new storage, scratchGPR3 = old storage, scratchGPR2 = available
- for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
- stubJit.loadPtr(MacroAssembler::Address(scratchGPR3, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
- stubJit.storePtr(scratchGPR2, MacroAssembler::Address(scratchGPR1, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
- }
+ ObjectPropertyConditionSet conditionSet;
+ if (wasFound) {
+ if (slot.slotBase() != base) {
+ conditionSet = generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.slotBase(), ident.impl());
}
-
- stubJit.storePtr(scratchGPR1, MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()));
- scratchGPR1HasStorage = true;
- }
-
- stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
-#if USE(JSVALUE64)
- if (isInlineOffset(slot.cachedOffset()))
- stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
- else {
- if (!scratchGPR1HasStorage)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
- }
-#elif USE(JSVALUE32_64)
- if (isInlineOffset(slot.cachedOffset())) {
- stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
} else {
- if (!scratchGPR1HasStorage)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ conditionSet = generateConditionsForPropertyMiss(
+ vm, codeBlock, exec, structure, ident.impl());
}
-#endif
-
-#if ENABLE(GGC)
- MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
-#endif
-
- MacroAssembler::Jump success;
- MacroAssembler::Jump failure;
-
- if (allocator.didReuseRegisters()) {
- allocator.restoreReusedRegistersByPopping(stubJit);
- success = stubJit.jump();
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
- failureCases.link(&stubJit);
- allocator.restoreReusedRegistersByPopping(stubJit);
- failure = stubJit.jump();
- } else
- success = stubJit.jump();
-
- MacroAssembler::Call operationCall;
- MacroAssembler::Jump successInSlowPath;
+ LOG_IC((ICEvent::InAddAccessCase, structure->classInfo(), ident));
+
+ std::unique_ptr<AccessCase> newCase = AccessCase::create(
+ vm, codeBlock, wasFound ? AccessCase::InHit : AccessCase::InMiss, invalidOffset, structure, conditionSet);
+
+ AccessGenerationResult result = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
- slowPath.link(&stubJit);
+ if (result.generatedSomeCode()) {
+ LOG_IC((ICEvent::InReplaceWithJump, structure->classInfo(), ident));
- allocator.restoreReusedRegistersByPopping(stubJit);
- ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSize());
- allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
-#if USE(JSVALUE64)
- stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
-#else
- stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
-#endif
- operationCall = stubJit.call();
- allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
- successInSlowPath = stubJit.jump();
- }
-
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-#if ENABLE(GGC)
- patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
-#endif
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
- if (allocator.didReuseRegisters())
- patchBuffer.link(failure, failureLabel);
- else
- patchBuffer.link(failureCases, failureLabel);
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
- patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
- patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ RELEASE_ASSERT(result.code());
+
+ MacroAssembler::repatchJump(
+ stubInfo.patchableJumpForIn(),
+ CodeLocationLabel(result.code()));
}
- stubRoutine =
- createJITStubRoutine(
- FINALIZE_DFG_CODE(
- patchBuffer,
- ("DFG PutById %stransition stub (%p -> %p) for %s, return point %p",
- structure->outOfLineCapacity() != oldStructure->outOfLineCapacity() ? "reallocating " : "",
- oldStructure, structure,
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress())),
- *vm,
- exec->codeBlock()->ownerExecutable(),
- structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(),
- structure);
+ return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
}
-static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+void repatchIn(
+ ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
+ const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
-
- if (!baseValue.isCell())
- return false;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
- Structure* oldStructure = structure->previousID();
-
- if (!slot.isCacheable())
- return false;
- if (!structure->propertyAccessesAreCacheable())
- return false;
-
- // Optimize self access.
- if (slot.base() == baseValue) {
- if (slot.type() == PutPropertySlot::NewProperty) {
- if (structure->isDictionary())
- return false;
-
- // Skip optimizing the case where we need a realloc, if we don't have
- // enough registers to make it happen.
- if (GPRInfo::numberOfRegisters < 6
- && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
- && oldStructure->outOfLineCapacity())
- return false;
-
- // Skip optimizing the case where we need realloc, and the structure has
- // indexing storage.
- if (oldStructure->couldHaveIndexingHeader())
- return false;
-
- if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
- return false;
-
- StructureChain* prototypeChain = structure->prototypeChain(exec);
-
- emitPutTransitionStub(
- exec, baseValue, ident, slot, stubInfo, putKind,
- structure, oldStructure, prototypeChain,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase),
- stubInfo.stubRoutine);
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(stubInfo.stubRoutine->code().code()));
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
-
- stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
-
- return true;
- }
-
- if (!MacroAssembler::isPtrAlignedAddressOffset(offsetRelativeToPatchedStorage(slot.cachedOffset())))
- return false;
-
- repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, ident, slot.cachedOffset(), appropriateListBuildingPutByIdFunction(slot, putKind), false);
- stubInfo.initPutByIdReplace(*vm, codeBlock->ownerExecutable(), structure);
- return true;
- }
-
- return false;
+ SuperSamplerScope superSamplerScope(false);
+ if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo) == GiveUpOnCache)
+ ftlThunkAwareRepatchCall(exec->codeBlock(), stubInfo.slowPathCallLocation(), operationIn);
}
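
repatchPutByID and repatchIn above share one shape: ask the try* helper to grow the inline cache, and only when it answers GiveUpOnCache repatch the slow-path call to the generic operation so the site stops trying. The sketch below models that decision flow with toy types; none of the names are real JSC API, and the lambda merely stands in for the machine code that addAccessCase regenerates and repatchJump installs.

#include <cstdio>
#include <functional>
#include <vector>

// Hypothetical stand-ins for the real IC machinery; names are illustrative only.
enum class InlineCacheAction { GiveUpOnCache, RetryCacheLater };

struct ToyAccessCase {
    void* structure; // the Structure this case guards on
    bool wasFound;   // InHit vs. InMiss
};

struct ToyStubInfo {
    std::vector<ToyAccessCase> cases;       // accumulated polymorphic cases
    std::function<bool(void*)> patchedJump; // plays the role of the repatched jump target
};

static InlineCacheAction tryRepatchInSketch(ToyStubInfo& stub, void* structure, bool wasFound)
{
    if (stub.cases.size() >= 8) // arbitrary cap for the sketch, like the real max list size
        return InlineCacheAction::GiveUpOnCache;
    stub.cases.push_back({ structure, wasFound });
    // "Regenerate the stub": install a lookup over the recorded cases. Unknown
    // structures fall back to false here; the real stub would take the slow path.
    stub.patchedJump = [&stub](void* s) {
        for (const ToyAccessCase& c : stub.cases) {
            if (c.structure == s)
                return c.wasFound;
        }
        return false;
    };
    return InlineCacheAction::RetryCacheLater;
}

int main()
{
    ToyStubInfo stub;
    int structureA, structureB;
    if (tryRepatchInSketch(stub, &structureA, true) == InlineCacheAction::GiveUpOnCache)
        std::puts("would repatch the slow call to the generic operation");
    std::printf("cached lookup for A: %d\n", stub.patchedJump(&structureA));
    std::printf("cached lookup for B (not yet cached): %d\n", stub.patchedJump(&structureB));
    return 0;
}
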
-void repatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static void linkSlowFor(VM*, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
-
- bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind);
- if (!cached)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+ MacroAssembler::repatchNearCall(callLinkInfo.callReturnLocation(), CodeLocationLabel(codeRef.code()));
}
-static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator)
{
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
-
- if (!baseValue.isCell())
- return false;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
- Structure* oldStructure = structure->previousID();
-
- if (!slot.isCacheable())
- return false;
- if (!structure->propertyAccessesAreCacheable())
- return false;
-
- // Optimize self access.
- if (slot.base() == baseValue) {
- PolymorphicPutByIdList* list;
- RefPtr<JITStubRoutine> stubRoutine;
-
- if (slot.type() == PutPropertySlot::NewProperty) {
- if (structure->isDictionary())
- return false;
-
- // Skip optimizing the case where we need a realloc, if we don't have
- // enough registers to make it happen.
- if (GPRInfo::numberOfRegisters < 6
- && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
- && oldStructure->outOfLineCapacity())
- return false;
-
- // Skip optimizing the case where we need realloc, and the structure has
- // indexing storage.
- if (oldStructure->couldHaveIndexingHeader())
- return false;
-
- if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
- return false;
-
- StructureChain* prototypeChain = structure->prototypeChain(exec);
-
- // We're now committed to creating the stub. Mogrify the meta-data accordingly.
- list = PolymorphicPutByIdList::from(
- putKind, stubInfo,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
-
- emitPutTransitionStub(
- exec, baseValue, propertyName, slot, stubInfo, putKind,
- structure, oldStructure, prototypeChain,
- CodeLocationLabel(list->currentSlowPathTarget()),
- stubRoutine);
-
- list->addAccess(
- PutByIdAccess::transition(
- *vm, codeBlock->ownerExecutable(),
- oldStructure, structure, prototypeChain,
- stubRoutine));
- } else {
- // We're now committed to creating the stub. Mogrify the meta-data accordingly.
- list = PolymorphicPutByIdList::from(
- putKind, stubInfo,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
-
- emitPutReplaceStub(
- exec, baseValue, propertyName, slot, stubInfo, putKind,
- structure, CodeLocationLabel(list->currentSlowPathTarget()), stubRoutine);
-
- list->addAccess(
- PutByIdAccess::replace(
- *vm, codeBlock->ownerExecutable(),
- structure, stubRoutine));
- }
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
-
- if (list->isFull())
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
-
- return true;
- }
-
- return false;
+ linkSlowFor(vm, callLinkInfo, vm->getCTIStub(generator));
}
-void buildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo)
{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
-
- bool cached = tryBuildPutByIdList(exec, baseValue, propertyName, slot, stubInfo, putKind);
- if (!cached)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
+ linkSlowFor(vm, callLinkInfo, virtualThunk);
+ callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
}
-static bool tryRepatchIn(
- ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
- const PropertySlot& slot, StructureStubInfo& stubInfo)
+static bool isWebAssemblyToJSCallee(VM& vm, JSCell* callee)
{
- if (!base->structure()->propertyAccessesAreCacheable())
- return false;
-
- if (wasFound) {
- if (!slot.isCacheable())
- return false;
- }
-
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
- Structure* structure = base->structure();
-
- PropertyOffset offsetIgnored;
- size_t count = normalizePrototypeChainForChainAccess(exec, base, wasFound ? slot.slotBase() : JSValue(), ident, offsetIgnored);
- if (count == InvalidPrototypeChain)
- return false;
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex;
-
- CodeLocationLabel successLabel = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
- CodeLocationLabel slowCaseLabel;
-
- if (stubInfo.accessType == access_unset) {
- polymorphicStructureList = new PolymorphicAccessStructureList();
- stubInfo.initInList(polymorphicStructureList, 0);
- slowCaseLabel = stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToSlowCase);
- listIndex = 0;
- } else {
- RELEASE_ASSERT(stubInfo.accessType == access_in_list);
- polymorphicStructureList = stubInfo.u.inList.structureList;
- listIndex = stubInfo.u.inList.listSize;
- slowCaseLabel = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
-
- if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
- return false;
- }
-
- StructureChain* chain = structure->prototypeChain(exec);
- RefPtr<JITStubRoutine> stubRoutine;
-
- {
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
-
- CCallHelpers stubJit(vm);
-
- bool needToRestoreScratch;
- if (scratchGPR == InvalidGPRReg) {
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
- stubJit.pushToSave(scratchGPR);
- needToRestoreScratch = true;
- } else
- needToRestoreScratch = false;
-
- MacroAssembler::JumpList failureCases;
- failureCases.append(stubJit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure)));
-
- CodeBlock* codeBlock = exec->codeBlock();
- if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
-
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- for (unsigned i = 0; i < count; ++i, ++it) {
- JSObject* prototype = asObject(currStructure->prototypeForLookup(exec));
- Structure* protoStructure = prototype->structure();
- addStructureTransitionCheck(
- prototype, protoStructure, exec->codeBlock(), stubInfo, stubJit,
- failureCases, scratchGPR);
- if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
- currStructure = it->get();
- }
-
-#if USE(JSVALUE64)
- stubJit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(wasFound))), resultGPR);
+#if ENABLE(WEBASSEMBLY)
+    // The WebAssembly -> JS stub sets its caller frame's callee to a singleton which lives on the VM.
+ return callee == vm.webAssemblyToJSCallee.get();
#else
- stubJit.move(MacroAssembler::TrustedImm32(wasFound), resultGPR);
-#endif
-
- MacroAssembler::Jump success, fail;
-
- emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
-
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-
- linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
-
- stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG In (found = %s) stub for %s, return point %p",
- wasFound ? "yes" : "no", toCString(*exec->codeBlock()).data(),
- successLabel.executableAddress()));
- }
-
- polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, true);
- stubInfo.u.inList.listSize++;
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
-
- return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
+ UNUSED_PARAM(vm);
+ UNUSED_PARAM(callee);
+ return false;
+#endif // ENABLE(WEBASSEMBLY)
}
-void repatchIn(
- ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
- const PropertySlot& slot, StructureStubInfo& stubInfo)
+static JSCell* webAssemblyOwner(VM& vm)
{
- if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo))
- return;
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
+#if ENABLE(WEBASSEMBLY)
+    // Each WebAssembly.Instance shares the stubs from its WebAssembly.Module, which is therefore the appropriate owner.
+ return vm.topJSWebAssemblyInstance->module();
+#else
+ UNUSED_PARAM(vm);
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+#endif // ENABLE(WEBASSEMBLY)
}
-static void linkSlowFor(RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
+void linkFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
+ JSFunction* callee, MacroAssemblerCodePtr codePtr)
{
- if (kind == CodeForCall) {
- repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+ ASSERT(!callLinkInfo.stub());
+
+ CallFrame* callerFrame = exec->callerFrame();
+ VM& vm = callerFrame->vm();
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+
+ // WebAssembly -> JS stubs don't have a valid CodeBlock.
+ JSCell* owner = isWebAssemblyToJSCallee(vm, callerFrame->callee()) ? webAssemblyOwner(vm) : callerCodeBlock;
+ ASSERT(owner);
+
+ ASSERT(!callLinkInfo.isLinked());
+ callLinkInfo.setCallee(vm, owner, callee);
+ callLinkInfo.setLastSeenCallee(vm, owner, callee);
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
+ MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), CodeLocationLabel(codePtr));
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->linkIncomingCall(callerFrame, &callLinkInfo);
+
+ if (callLinkInfo.specializationKind() == CodeForCall && callLinkInfo.allowStubs()) {
+ linkSlowFor(&vm, callLinkInfo, linkPolymorphicCallThunkGenerator);
return;
}
- ASSERT(kind == CodeForConstruct);
- repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
+
+ linkSlowFor(&vm, callLinkInfo);
}
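
linkFor handles the monomorphic case: record the callee on the CallLinkInfo, repatch the hot-path near call to the callee's entrypoint, register with the callee's incoming-call list, and leave the slow path pointing at a thunk that can escalate the site later. The toy below models just the fast-path/slow-path hand-off, with plain function pointers in place of patched machine code; every name in it is illustrative, not JSC API.

#include <cstdio>

// Toy model of a patchable call site: the "machine code" is a function pointer that
// the runtime rewrites, the way repatchNearCall rewrites the hot path above.
struct ToyCallSite;
using Target = int (*)(ToyCallSite&, int);

struct ToyCallSite {
    const void* cachedCallee; // what setCallee / setLastSeenCallee record
    Target hotPath;           // starts at the slow path; patched to the callee once linked
};

static int calleeEntrypoint(ToyCallSite&, int x) { return x * 2; }

// Slow path: link the site to whatever callee showed up, then complete the call.
static int linkingSlowPath(ToyCallSite& site, int x)
{
    static const int calleeCell = 0;  // stands in for the JSFunction*
    site.cachedCallee = &calleeCell;  // remember the callee
    site.hotPath = calleeEntrypoint;  // "repatch" the hot path to its entrypoint
    std::puts("slow path: linked the call site");
    return calleeEntrypoint(site, x);
}

int main()
{
    ToyCallSite site = { nullptr, linkingSlowPath }; // freshly compiled: unlinked
    std::printf("%d\n", site.hotPath(site, 3));      // first call takes the slow path and links
    std::printf("%d\n", site.hotPath(site, 4));      // later calls go straight to the callee
    return 0;
}
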
-void linkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind)
+void linkDirectFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
+ MacroAssemblerCodePtr codePtr)
{
- ASSERT(!callLinkInfo.stub);
-
- // If you're being call-linked from a DFG caller then you obviously didn't get inlined.
- if (calleeCodeBlock)
- calleeCodeBlock->m_shouldAlwaysBeInlined = false;
+ ASSERT(!callLinkInfo.stub());
- CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+ CodeBlock* callerCodeBlock = exec->codeBlock();
+
VM* vm = callerCodeBlock->vm();
- RepatchBuffer repatchBuffer(callerCodeBlock);
-
ASSERT(!callLinkInfo.isLinked());
- callLinkInfo.callee.set(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
- callLinkInfo.lastSeenCallee.set(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee);
- repatchBuffer.relink(callLinkInfo.hotPathOther, codePtr);
+ callLinkInfo.setCodeBlock(*vm, callerCodeBlock, jsCast<FunctionCodeBlock*>(calleeCodeBlock));
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
+ if (callLinkInfo.callType() == CallLinkInfo::DirectTailCall)
+ MacroAssembler::repatchJumpToNop(callLinkInfo.patchableJump());
+ MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), CodeLocationLabel(codePtr));
if (calleeCodeBlock)
- calleeCodeBlock->linkIncomingCall(exec->callerFrame(), &callLinkInfo);
-
- if (kind == CodeForCall) {
- repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
- return;
- }
-
- ASSERT(kind == CodeForConstruct);
- linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForConstruct);
+ calleeCodeBlock->linkIncomingCall(exec, &callLinkInfo);
}
-void linkSlowFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
+void linkSlowFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo)
{
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
- RepatchBuffer repatchBuffer(callerCodeBlock);
+ linkSlowFor(vm, callLinkInfo);
+}
+
+static void revertCall(VM* vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
+{
+ if (callLinkInfo.isDirect()) {
+ callLinkInfo.clearCodeBlock();
+ if (callLinkInfo.callType() == CallLinkInfo::DirectTailCall)
+ MacroAssembler::repatchJump(callLinkInfo.patchableJump(), callLinkInfo.slowPathStart());
+ else
+ MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), callLinkInfo.slowPathStart());
+ } else {
+ MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(
+ MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0);
+ linkSlowFor(vm, callLinkInfo, codeRef);
+ callLinkInfo.clearCallee();
+ }
+ callLinkInfo.clearSeen();
+ callLinkInfo.clearStub();
+ callLinkInfo.clearSlowStub();
+ if (callLinkInfo.isOnList())
+ callLinkInfo.remove();
+}
+
+void unlinkFor(VM& vm, CallLinkInfo& callLinkInfo)
+{
+ if (Options::dumpDisassembly())
+ dataLog("Unlinking call at ", callLinkInfo.hotPathOther(), "\n");
- linkSlowFor(repatchBuffer, vm, callLinkInfo, kind);
+ revertCall(&vm, callLinkInfo, vm.getCTIStub(linkCallThunkGenerator));
+}
+
+void linkVirtualFor(ExecState* exec, CallLinkInfo& callLinkInfo)
+{
+ CallFrame* callerFrame = exec->callerFrame();
+ VM& vm = callerFrame->vm();
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking virtual call at ", *callerCodeBlock, " ", callerFrame->codeOrigin(), "\n");
+
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(&vm, callLinkInfo);
+ revertCall(&vm, callLinkInfo, virtualThunk);
+ callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, vm, nullptr, true));
}
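
Taken together, linkFor, linkPolymorphicCall (below), linkVirtualFor and unlinkFor move a call site along a rough ladder: unlinked, then monomorphic, then a polymorphic stub, then fully virtual, with revertCall dropping it back to unlinked when, say, a callee's code is jettisoned. A deliberately simplified sketch of those states as data; the real state lives in CallLinkInfo, the patched code and the attached stub routines.

#include <cstdio>

enum class CallSiteState { Unlinked, Monomorphic, Polymorphic, Virtual };

static CallSiteState onCall(CallSiteState state, bool sameCalleeAsLastTime, unsigned seenCallees, unsigned maxCases)
{
    switch (state) {
    case CallSiteState::Unlinked:
        return CallSiteState::Monomorphic;           // linkFor
    case CallSiteState::Monomorphic:
        if (sameCalleeAsLastTime)
            return CallSiteState::Monomorphic;       // fast path hit, nothing to do
        return seenCallees <= maxCases ? CallSiteState::Polymorphic  // linkPolymorphicCall
                                       : CallSiteState::Virtual;     // linkVirtualFor
    case CallSiteState::Polymorphic:
        return seenCallees <= maxCases ? CallSiteState::Polymorphic
                                       : CallSiteState::Virtual;
    case CallSiteState::Virtual:
        return CallSiteState::Virtual;               // stays generic
    }
    return state;
}

static CallSiteState unlink(CallSiteState)
{
    return CallSiteState::Unlinked;                  // unlinkFor / revertCall
}

int main()
{
    CallSiteState s = CallSiteState::Unlinked;
    s = onCall(s, false, 1, 4);  // first call: link monomorphically
    s = onCall(s, false, 2, 4);  // new callee: grow a polymorphic stub
    s = onCall(s, false, 9, 4);  // too many callees: go virtual
    s = unlink(s);               // e.g. the cached code was jettisoned
    std::printf("final state: %d\n", static_cast<int>(s));
    return 0;
}
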
-void linkClosureCall(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, Structure* structure, ExecutableBase* executable, MacroAssemblerCodePtr codePtr)
+namespace {
+struct CallToCodePtr {
+ CCallHelpers::Call call;
+ MacroAssemblerCodePtr codePtr;
+};
+} // anonymous namespace
+
+void linkPolymorphicCall(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant)
{
- ASSERT(!callLinkInfo.stub);
+ RELEASE_ASSERT(callLinkInfo.allowStubs());
- CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
- VM* vm = callerCodeBlock->vm();
+ // Currently we can't do anything for non-function callees.
+ // https://bugs.webkit.org/show_bug.cgi?id=140685
+ if (!newVariant || !newVariant.executable()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+
+ CallFrame* callerFrame = exec->callerFrame();
+ VM& vm = callerFrame->vm();
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+ bool isWebAssembly = isWebAssemblyToJSCallee(vm, callerFrame->callee());
+
+ // WebAssembly -> JS stubs don't have a valid CodeBlock.
+ JSCell* owner = isWebAssembly ? webAssemblyOwner(vm) : callerCodeBlock;
+ ASSERT(owner);
+
+ CallVariantList list;
+ if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub())
+ list = stub->variants();
+ else if (JSFunction* oldCallee = callLinkInfo.callee())
+ list = CallVariantList{ CallVariant(oldCallee) };
+
+ list = variantListWithVariant(list, newVariant);
+
+ // If there are any closure calls then it makes sense to treat all of them as closure calls.
+ // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG;
+ // the DFG doesn't really want to deal with a combination of closure and non-closure callees.
+ bool isClosureCall = false;
+ for (CallVariant variant : list) {
+ if (variant.isClosureCall()) {
+ list = despecifiedVariantList(list);
+ isClosureCall = true;
+ break;
+ }
+ }
- GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR);
+ if (isClosureCall)
+ callLinkInfo.setHasSeenClosure();
+
+ Vector<PolymorphicCallCase> callCases;
+
+ // Figure out what our cases are.
+ for (CallVariant variant : list) {
+ CodeBlock* codeBlock;
+ if (isWebAssembly || variant.executable()->isHostFunction())
+ codeBlock = nullptr;
+ else {
+ ExecutableBase* executable = variant.executable();
+ codeBlock = jsCast<FunctionExecutable*>(executable)->codeBlockForCall();
+            // If we cannot handle a callee, either because we don't have a CodeBlock or because of an arity mismatch,
+ // assume that it's better for this whole thing to be a virtual call.
+ if (!codeBlock || exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.isVarargs()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+ }
+
+ callCases.append(PolymorphicCallCase(variant, codeBlock));
+ }
- CCallHelpers stubJit(vm, callerCodeBlock);
+ // If we are over the limit, just use a normal virtual call.
+ unsigned maxPolymorphicCallVariantListSize;
+ if (isWebAssembly)
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForWebAssemblyToJS();
+ else if (callerCodeBlock->jitType() == JITCode::topTierJIT())
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier();
+ else
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize();
+
+ if (list.size() > maxPolymorphicCallVariantListSize) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+
+ GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR());
+
+ CCallHelpers stubJit(&vm, callerCodeBlock);
CCallHelpers::JumpList slowPath;
+ std::unique_ptr<CallFrameShuffler> frameShuffler;
+ if (callLinkInfo.frameShuffleData()) {
+ ASSERT(callLinkInfo.isTailCall());
+ frameShuffler = std::make_unique<CallFrameShuffler>(stubJit, *callLinkInfo.frameShuffleData());
+#if USE(JSVALUE32_64)
+ // We would have already checked that the callee is a cell, and we can
+ // use the additional register this buys us.
+ frameShuffler->assumeCalleeIsCell();
+#endif
+ frameShuffler->lockGPR(calleeGPR);
+ }
+ GPRReg comparisonValueGPR;
+
+ if (isClosureCall) {
+ GPRReg scratchGPR;
+ if (frameShuffler)
+ scratchGPR = frameShuffler->acquireGPR();
+ else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(calleeGPR);
+ // Verify that we have a function and stash the executable in scratchGPR.
+
#if USE(JSVALUE64)
- // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
- // being set. So we do this the hard way.
- GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR);
- stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch);
- slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch));
+ slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, GPRInfo::tagMaskRegister));
#else
- // We would have already checked that the callee is a cell.
+ // We would have already checked that the callee is a cell.
#endif
- slowPath.append(
- stubJit.branchPtr(
- CCallHelpers::NotEqual,
- CCallHelpers::Address(calleeGPR, JSCell::structureOffset()),
- CCallHelpers::TrustedImmPtr(structure)));
+ slowPath.append(
+ stubJit.branch8(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(calleeGPR, JSCell::typeInfoTypeOffset()),
+ CCallHelpers::TrustedImm32(JSFunctionType)));
- slowPath.append(
- stubJit.branchPtr(
- CCallHelpers::NotEqual,
+ stubJit.loadPtr(
CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()),
- CCallHelpers::TrustedImmPtr(executable)));
+ scratchGPR);
+
+ comparisonValueGPR = scratchGPR;
+ } else
+ comparisonValueGPR = calleeGPR;
+
+ Vector<int64_t> caseValues(callCases.size());
+ Vector<CallToCodePtr> calls(callCases.size());
+ std::unique_ptr<uint32_t[]> fastCounts;
+
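+    // fastCounts holds one hit counter per call case: the generated stub below increments
+    // the matching slot, and the counts are handed off to the PolymorphicCallStubRoutine.
+    // They are only gathered for non-WebAssembly callers that are not yet in the top-tier JIT.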
+ if (!isWebAssembly && callerCodeBlock->jitType() != JITCode::topTierJIT())
+ fastCounts = std::make_unique<uint32_t[]>(callCases.size());
+
+ for (size_t i = 0; i < callCases.size(); ++i) {
+ if (fastCounts)
+ fastCounts[i] = 0;
+
+ CallVariant variant = callCases[i].variant();
+ int64_t newCaseValue;
+ if (isClosureCall)
+ newCaseValue = bitwise_cast<intptr_t>(variant.executable());
+ else
+ newCaseValue = bitwise_cast<intptr_t>(variant.function());
+
+ if (!ASSERT_DISABLED) {
+ for (size_t j = 0; j < i; ++j) {
+ if (caseValues[j] != newCaseValue)
+ continue;
+
+ dataLog("ERROR: Attempt to add duplicate case value.\n");
+ dataLog("Existing case values: ");
+ CommaPrinter comma;
+ for (size_t k = 0; k < i; ++k)
+ dataLog(comma, caseValues[k]);
+ dataLog("\n");
+ dataLog("Attempting to add: ", newCaseValue, "\n");
+ dataLog("Variant list: ", listDump(callCases), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ caseValues[i] = newCaseValue;
+ }
- stubJit.loadPtr(
- CCallHelpers::Address(calleeGPR, JSFunction::offsetOfScopeChain()),
- GPRInfo::returnValueGPR);
+ GPRReg fastCountsBaseGPR;
+ if (frameShuffler)
+ fastCountsBaseGPR = frameShuffler->acquireGPR();
+ else {
+ fastCountsBaseGPR =
+ AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
+ }
+ stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR);
+ if (!frameShuffler && callLinkInfo.isTailCall())
+ stubJit.emitRestoreCalleeSaves();
+ BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr);
+ CCallHelpers::JumpList done;
+ while (binarySwitch.advance(stubJit)) {
+ size_t caseIndex = binarySwitch.caseIndex();
+
+ CallVariant variant = callCases[caseIndex].variant();
+
+ ASSERT(variant.executable()->hasJITCodeForCall());
+ MacroAssemblerCodePtr codePtr =
+ variant.executable()->generatedJITCodeForCall()->addressForCall(ArityCheckNotRequired);
+
+ if (fastCounts) {
+ stubJit.add32(
+ CCallHelpers::TrustedImm32(1),
+ CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
+ }
+ if (frameShuffler) {
+ CallFrameShuffler(stubJit, frameShuffler->snapshot()).prepareForTailCall();
+ calls[caseIndex].call = stubJit.nearTailCall();
+ } else if (callLinkInfo.isTailCall()) {
+ stubJit.prepareForTailCallSlow();
+ calls[caseIndex].call = stubJit.nearTailCall();
+ } else
+ calls[caseIndex].call = stubJit.nearCall();
+ calls[caseIndex].codePtr = codePtr;
+ done.append(stubJit.jump());
+ }
+
+ slowPath.link(&stubJit);
+ binarySwitch.fallThrough().link(&stubJit);
-#if USE(JSVALUE64)
- stubJit.store64(
- GPRInfo::returnValueGPR,
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain)));
+ if (frameShuffler) {
+ frameShuffler->releaseGPR(calleeGPR);
+ frameShuffler->releaseGPR(comparisonValueGPR);
+ frameShuffler->releaseGPR(fastCountsBaseGPR);
+#if USE(JSVALUE32_64)
+ frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT1, GPRInfo::regT0));
#else
- stubJit.storePtr(
- GPRInfo::returnValueGPR,
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(
- CCallHelpers::TrustedImm32(JSValue::CellTag),
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
#endif
-
- AssemblyHelpers::Call call = stubJit.nearCall();
- AssemblyHelpers::Jump done = stubJit.jump();
-
- slowPath.link(&stubJit);
- stubJit.move(calleeGPR, GPRInfo::regT0);
+ frameShuffler->prepareForSlowPath();
+ } else {
+ stubJit.move(calleeGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
- stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+ stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
- stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation.executableAddress()), GPRInfo::nonArgGPR2);
- stubJit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR2);
- AssemblyHelpers::Jump slow = stubJit.jump();
-
- LinkBuffer patchBuffer(*vm, &stubJit, callerCodeBlock);
-
- patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
- patchBuffer.link(done, callLinkInfo.callReturnLocation.labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(virtualCallThunkGenerator).code()));
-
- RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
- FINALIZE_DFG_CODE(
- patchBuffer,
- ("DFG closure call stub for %s, return point %p, target %p (%s)",
- toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation.labelAtOffset(0).executableAddress(),
- codePtr.executableAddress(), toCString(pointerDump(calleeCodeBlock)).data())),
- *vm, callerCodeBlock->ownerExecutable(), structure, executable, callLinkInfo.codeOrigin));
+ }
+ stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2);
+ stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4);
- RepatchBuffer repatchBuffer(callerCodeBlock);
+ stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4);
+ AssemblyHelpers::Jump slow = stubJit.jump();
+
+ LinkBuffer patchBuffer(vm, stubJit, owner, JITCompilationCanFail);
+ if (patchBuffer.didFailToAllocate()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin),
+ RELEASE_ASSERT(callCases.size() == calls.size());
+ for (CallToCodePtr callToCodePtr : calls) {
+ // Tail call special-casing ensures proper linking on ARM Thumb2, where a tail call jumps to an address
+ // with a non-decorated bottom bit but a normal call calls an address with a decorated bottom bit.
+ bool isTailCall = callToCodePtr.call.isFlagSet(CCallHelpers::Call::Tail);
+ patchBuffer.link(
+ callToCodePtr.call, FunctionPtr(isTailCall ? callToCodePtr.codePtr.dataLocation() : callToCodePtr.codePtr.executableAddress()));
+ }
+ if (isWebAssembly || JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
+ patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0));
+ else
+ patchBuffer.link(done, callLinkInfo.hotPathOther().labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(vm.getCTIStub(linkPolymorphicCallThunkGenerator).code()));
+
+ auto stubRoutine = adoptRef(*new PolymorphicCallStubRoutine(
+ FINALIZE_CODE_FOR(
+ callerCodeBlock, patchBuffer,
+ ("Polymorphic call stub for %s, return point %p, targets %s",
+ isWebAssembly ? "WebAssembly" : toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(),
+ toCString(listDump(callCases)).data())),
+ vm, owner, exec->callerFrame(), callLinkInfo, callCases,
+ WTFMove(fastCounts)));
+
+ MacroAssembler::replaceWithJump(
+ MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
CodeLocationLabel(stubRoutine->code().code()));
- linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForCall);
+    // The original slow path is unreachable on 64-bit platforms, but still
+    // reachable on 32-bit platforms, since a non-cell callee will always
+    // trigger the slow path.
+ linkSlowFor(&vm, callLinkInfo);
- callLinkInfo.stub = stubRoutine.release();
+ // If there had been a previous stub routine, that one will die as soon as the GC runs and sees
+    // that it's no longer on the stack.
+ callLinkInfo.setStub(WTFMove(stubRoutine));
- ASSERT(!calleeCodeBlock || calleeCodeBlock->isIncomingCallAlreadyLinked(&callLinkInfo));
+ // The call link info no longer has a call cache apart from the jump to the polymorphic call
+ // stub.
+ if (callLinkInfo.isOnList())
+ callLinkInfo.remove();
}
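
In miniature, the stub assembled above does the following at run time: form a comparison key (the executable for closure calls, otherwise the function cell itself), search the sorted case values in the spirit of BinarySwitch, bump that case's fastCount, and jump to the cached entrypoint, falling through to the slow path for anything it has not seen. The self-contained model below uses plain data and std::lower_bound in place of generated code; the types and numbers are invented for illustration.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// One case of the toy polymorphic stub: a key to compare against and a target to run.
struct ToyCallCase {
    int64_t caseValue;  // executable* for closure calls, function* otherwise
    int (*target)(int); // stands in for the callee's JIT entrypoint
};

struct ToyPolymorphicStub {
    std::vector<ToyCallCase> cases;   // kept sorted by caseValue, as a binary switch requires
    std::vector<uint32_t> fastCounts; // one hit counter per case
    int (*slowPath)(int);             // relink/virtual path for unknown callees

    int dispatch(int64_t comparisonValue, int argument)
    {
        // Binary search over the sorted case values, mirroring the emitted switch.
        auto it = std::lower_bound(cases.begin(), cases.end(), comparisonValue,
            [](const ToyCallCase& c, int64_t v) { return c.caseValue < v; });
        if (it == cases.end() || it->caseValue != comparisonValue)
            return slowPath(argument);        // fall through to the slow path
        fastCounts[it - cases.begin()]++;     // profiling count for this case
        return it->target(argument);          // "near call" to the cached code
    }
};

static int calleeA(int x) { return x + 1; }
static int calleeB(int x) { return x * 10; }
static int slowPath(int x) { std::puts("slow path: relink or go virtual"); return x; }

int main()
{
    ToyPolymorphicStub stub = { { { 100, calleeA }, { 200, calleeB } }, { 0, 0 }, slowPath };
    std::printf("%d %d %d\n", stub.dispatch(100, 1), stub.dispatch(200, 2), stub.dispatch(300, 3));
    std::printf("counts: %u %u\n", stub.fastCounts[0], stub.fastCounts[1]);
    return 0;
}
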
-void resetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void resetGetByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo, GetByIDKind kind)
{
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdOptimize);
- CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
- repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
- RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureOffset()),
- reinterpret_cast<void*>(unusedPointer));
- }
- repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
-#if USE(JSVALUE64)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
-#endif
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
+ InlineAccess::rewireStubAsJump(*codeBlock->vm(), stubInfo, stubInfo.slowPathStartLocation());
}
-void resetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void resetPutByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
{
- V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(repatchBuffer, stubInfo.callReturnLocation).executableAddress());
+ V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(codeBlock, stubInfo.slowPathCallLocation()).executableAddress());
V_JITOperation_ESsiJJI optimizedFunction;
- if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictBuildList)
+ if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictOptimize)
optimizedFunction = operationPutByIdStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictBuildList)
+ else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictOptimize)
optimizedFunction = operationPutByIdNonStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictBuildList)
+ else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictOptimize)
optimizedFunction = operationPutByIdDirectStrictOptimize;
else {
- ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictBuildList);
+ ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictOptimize);
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, optimizedFunction);
- CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
- repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
- RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureOffset()),
- reinterpret_cast<void*>(unusedPointer));
- }
- repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
-#if USE(JSVALUE64)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
-#endif
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), optimizedFunction);
+ InlineAccess::rewireStubAsJump(*codeBlock->vm(), stubInfo, stubInfo.slowPathStartLocation());
}
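
resetPutByID has to cope with the slow call currently pointing at either the generic or the already-optimizing variant of any of the four put-by-id flavours, and it always reinstalls the optimizing variant of the same flavour before rewiring the inline access to jump to the slow path. The toy below mirrors only that mapping, with placeholder functions instead of the real operationPutById* entry points.

#include <cassert>
#include <cstdio>

// Placeholder operations standing in for the eight operationPutById* entry points.
using Operation = void (*)();
static void strictGeneric() {}           static void strictOptimize() {}
static void nonStrictGeneric() {}        static void nonStrictOptimize() {}
static void directStrictGeneric() {}     static void directStrictOptimize() {}
static void directNonStrictGeneric() {}  static void directNonStrictOptimize() {}

struct PutFlavour { Operation generic; Operation optimize; };

static const PutFlavour flavours[] = {
    { strictGeneric, strictOptimize },
    { nonStrictGeneric, nonStrictOptimize },
    { directStrictGeneric, directStrictOptimize },
    { directNonStrictGeneric, directNonStrictOptimize },
};

// Mirror of resetPutByID's mapping: whichever variant is currently installed
// (generic or already optimizing), reinstall the optimizing one of the same flavour.
static Operation optimizingVariantFor(Operation installed)
{
    for (const PutFlavour& flavour : flavours) {
        if (installed == flavour.generic || installed == flavour.optimize)
            return flavour.optimize;
    }
    assert(!"unknown put-by-id operation");
    return nullptr;
}

int main()
{
    Operation slowCall = directNonStrictGeneric;  // what readCallTarget would report
    slowCall = optimizingVariantFor(slowCall);    // what gets repatched back in
    std::printf("%s\n", slowCall == directNonStrictOptimize ? "reset to the optimizing variant" : "?");
    return 0;
}
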
-void resetIn(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void resetIn(CodeBlock*, StructureStubInfo& stubInfo)
{
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ MacroAssembler::repatchJump(stubInfo.patchableJumpForIn(), stubInfo.slowPathStartLocation());
}
} // namespace JSC