Diffstat (limited to 'Source/JavaScriptCore/bytecode')
-rw-r--r--  Source/JavaScriptCore/bytecode/AccessCase.cpp  1029
-rw-r--r--  Source/JavaScriptCore/bytecode/AccessCase.h  233
-rw-r--r--  Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp  86
-rw-r--r--  Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h  72
-rw-r--r--  Source/JavaScriptCore/bytecode/ArithProfile.cpp  143
-rw-r--r--  Source/JavaScriptCore/bytecode/ArithProfile.h  241
-rw-r--r--  Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp  4
-rw-r--r--  Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h  8
-rw-r--r--  Source/JavaScriptCore/bytecode/ArrayProfile.cpp  46
-rw-r--r--  Source/JavaScriptCore/bytecode/ArrayProfile.h  93
-rw-r--r--  Source/JavaScriptCore/bytecode/ByValInfo.h  90
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp  170
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h  69
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeConventions.h  10
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp  268
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h  37
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeGraph.h  125
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp  93
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h  108
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeKills.h  177
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeList.json  200
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp  350
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h  51
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h  175
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp  116
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeRewriter.h  235
-rw-r--r--  Source/JavaScriptCore/bytecode/BytecodeUseDef.h  334
-rw-r--r--  Source/JavaScriptCore/bytecode/CallEdge.cpp  37
-rw-r--r--  Source/JavaScriptCore/bytecode/CallEdge.h  67
-rw-r--r--  Source/JavaScriptCore/bytecode/CallLinkInfo.cpp  249
-rw-r--r--  Source/JavaScriptCore/bytecode/CallLinkInfo.h  353
-rw-r--r--  Source/JavaScriptCore/bytecode/CallLinkStatus.cpp  340
-rw-r--r--  Source/JavaScriptCore/bytecode/CallLinkStatus.h  124
-rw-r--r--  Source/JavaScriptCore/bytecode/CallMode.cpp  49
-rw-r--r--  Source/JavaScriptCore/bytecode/CallMode.h  51
-rw-r--r--  Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h  8
-rw-r--r--  Source/JavaScriptCore/bytecode/CallVariant.cpp  97
-rw-r--r--  Source/JavaScriptCore/bytecode/CallVariant.h  214
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.cpp  3587
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.h  974
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlockHash.h  5
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp  12
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h  13
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h  6
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeOrigin.cpp  133
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeOrigin.h  102
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeType.cpp  3
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeType.h  10
-rw-r--r--  Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp  78
-rw-r--r--  Source/JavaScriptCore/bytecode/ComplexGetStatus.h  110
-rw-r--r--  Source/JavaScriptCore/bytecode/DFGExitProfile.cpp  27
-rw-r--r--  Source/JavaScriptCore/bytecode/DFGExitProfile.h  65
-rw-r--r--  Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp  125
-rw-r--r--  Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h  60
-rw-r--r--  Source/JavaScriptCore/bytecode/DataFormat.cpp  39
-rw-r--r--  Source/JavaScriptCore/bytecode/DataFormat.h  17
-rw-r--r--  Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp  38
-rw-r--r--  Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h  19
-rw-r--r--  Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp  66
-rw-r--r--  Source/JavaScriptCore/bytecode/DeferredSourceDump.h (renamed from Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h)  40
-rw-r--r--  Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp  54
-rw-r--r--  Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h  114
-rw-r--r--  Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp  45
-rw-r--r--  Source/JavaScriptCore/bytecode/EvalCodeBlock.h  84
-rw-r--r--  Source/JavaScriptCore/bytecode/EvalCodeCache.h  83
-rw-r--r--  Source/JavaScriptCore/bytecode/ExecutableInfo.h  85
-rw-r--r--  Source/JavaScriptCore/bytecode/ExecutionCounter.cpp  55
-rw-r--r--  Source/JavaScriptCore/bytecode/ExecutionCounter.h  63
-rw-r--r--  Source/JavaScriptCore/bytecode/ExitKind.cpp  47
-rw-r--r--  Source/JavaScriptCore/bytecode/ExitKind.h  41
-rw-r--r--  Source/JavaScriptCore/bytecode/ExitingJITType.cpp  52
-rw-r--r--  Source/JavaScriptCore/bytecode/ExitingJITType.h  58
-rw-r--r--  Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h  12
-rw-r--r--  Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h  29
-rw-r--r--  Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp  45
-rw-r--r--  Source/JavaScriptCore/bytecode/FunctionCodeBlock.h  79
-rw-r--r--  Source/JavaScriptCore/bytecode/GetByIdStatus.cpp  563
-rw-r--r--  Source/JavaScriptCore/bytecode/GetByIdStatus.h  114
-rw-r--r--  Source/JavaScriptCore/bytecode/GetByIdVariant.cpp  153
-rw-r--r--  Source/JavaScriptCore/bytecode/GetByIdVariant.h  91
-rw-r--r--  Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp  238
-rw-r--r--  Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h  84
-rw-r--r--  Source/JavaScriptCore/bytecode/GlobalCodeBlock.h  54
-rw-r--r--  Source/JavaScriptCore/bytecode/HandlerInfo.h  91
-rw-r--r--  Source/JavaScriptCore/bytecode/InlineAccess.cpp  299
-rw-r--r--  Source/JavaScriptCore/bytecode/InlineAccess.h  123
-rw-r--r--  Source/JavaScriptCore/bytecode/InlineCallFrame.cpp  123
-rw-r--r--  Source/JavaScriptCore/bytecode/InlineCallFrame.h  265
-rw-r--r--  Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp  3
-rw-r--r--  Source/JavaScriptCore/bytecode/InlineCallFrameSet.h  13
-rw-r--r--  Source/JavaScriptCore/bytecode/Instruction.h  53
-rw-r--r--  Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h  64
-rw-r--r--  Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp  59
-rw-r--r--  Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h  59
-rw-r--r--  Source/JavaScriptCore/bytecode/JumpTable.cpp  2
-rw-r--r--  Source/JavaScriptCore/bytecode/JumpTable.h  13
-rw-r--r--  Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h  8
-rw-r--r--  Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp  65
-rw-r--r--  Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h  48
-rw-r--r--  Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp  12
-rw-r--r--  Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h  20
-rw-r--r--  Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp  27
-rw-r--r--  Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h  38
-rw-r--r--  Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp  84
-rw-r--r--  Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h  64
-rw-r--r--  Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp  45
-rw-r--r--  Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h  79
-rw-r--r--  Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h  58
-rw-r--r--  Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp  174
-rw-r--r--  Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h  269
-rw-r--r--  Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp  431
-rw-r--r--  Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h  181
-rw-r--r--  Source/JavaScriptCore/bytecode/Opcode.cpp  15
-rw-r--r--  Source/JavaScriptCore/bytecode/Opcode.h  245
-rw-r--r--  Source/JavaScriptCore/bytecode/Operands.h  73
-rw-r--r--  Source/JavaScriptCore/bytecode/OperandsInlines.h  32
-rw-r--r--  Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp  677
-rw-r--r--  Source/JavaScriptCore/bytecode/PolymorphicAccess.h  277
-rw-r--r--  Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h  139
-rw-r--r--  Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp  148
-rw-r--r--  Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h  195
-rw-r--r--  Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp  121
-rw-r--r--  Source/JavaScriptCore/bytecode/PreciseJumpTargets.h  16
-rw-r--r--  Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h  82
-rw-r--r--  Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp  45
-rw-r--r--  Source/JavaScriptCore/bytecode/ProgramCodeBlock.h  79
-rw-r--r--  Source/JavaScriptCore/bytecode/PropertyCondition.cpp  364
-rw-r--r--  Source/JavaScriptCore/bytecode/PropertyCondition.h  334
-rw-r--r--  Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp  66
-rw-r--r--  Source/JavaScriptCore/bytecode/ProxyableAccessCase.h  59
-rw-r--r--  Source/JavaScriptCore/bytecode/PutByIdFlags.cpp  50
-rw-r--r--  Source/JavaScriptCore/bytecode/PutByIdFlags.h  101
-rw-r--r--  Source/JavaScriptCore/bytecode/PutByIdStatus.cpp  461
-rw-r--r--  Source/JavaScriptCore/bytecode/PutByIdStatus.h  100
-rw-r--r--  Source/JavaScriptCore/bytecode/PutByIdVariant.cpp  249
-rw-r--r--  Source/JavaScriptCore/bytecode/PutByIdVariant.h  147
-rw-r--r--  Source/JavaScriptCore/bytecode/PutKind.h  6
-rw-r--r--  Source/JavaScriptCore/bytecode/ReduceWhitespace.h  5
-rw-r--r--  Source/JavaScriptCore/bytecode/SamplingTool.cpp  478
-rw-r--r--  Source/JavaScriptCore/bytecode/SamplingTool.h  347
-rw-r--r--  Source/JavaScriptCore/bytecode/SpecialPointer.cpp  1
-rw-r--r--  Source/JavaScriptCore/bytecode/SpecialPointer.h  11
-rw-r--r--  Source/JavaScriptCore/bytecode/SpeculatedType.cpp  313
-rw-r--r--  Source/JavaScriptCore/bytecode/SpeculatedType.h  256
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureSet.cpp  48
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureSet.h  157
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp  48
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h  50
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureStubInfo.cpp  308
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureStubInfo.h  378
-rw-r--r--  Source/JavaScriptCore/bytecode/SuperSampler.cpp  92
-rw-r--r--  Source/JavaScriptCore/bytecode/SuperSampler.h  58
-rw-r--r--  Source/JavaScriptCore/bytecode/ToThisStatus.cpp  72
-rw-r--r--  Source/JavaScriptCore/bytecode/ToThisStatus.h  46
-rw-r--r--  Source/JavaScriptCore/bytecode/TrackedReferences.cpp (renamed from Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp)  71
-rw-r--r--  Source/JavaScriptCore/bytecode/TrackedReferences.h  52
-rw-r--r--  Source/JavaScriptCore/bytecode/TypeLocation.h  60
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp  368
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h  536
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp  40
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h  71
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp  40
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h  61
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp  234
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h  199
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h  43
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp  84
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h  85
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp  48
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h  95
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp  48
-rw-r--r--  Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h  72
-rw-r--r--  Source/JavaScriptCore/bytecode/ValueProfile.h  17
-rw-r--r--  Source/JavaScriptCore/bytecode/ValueRecovery.cpp  28
-rw-r--r--  Source/JavaScriptCore/bytecode/ValueRecovery.h  206
-rw-r--r--  Source/JavaScriptCore/bytecode/VariableWatchpointSet.h  109
-rw-r--r--  Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp  44
-rw-r--r--  Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h  52
-rw-r--r--  Source/JavaScriptCore/bytecode/VirtualRegister.cpp  65
-rw-r--r--  Source/JavaScriptCore/bytecode/VirtualRegister.h  60
-rw-r--r--  Source/JavaScriptCore/bytecode/Watchpoint.cpp  77
-rw-r--r--  Source/JavaScriptCore/bytecode/Watchpoint.h  215
182 files changed, 19792 insertions, 6731 deletions
diff --git a/Source/JavaScriptCore/bytecode/AccessCase.cpp b/Source/JavaScriptCore/bytecode/AccessCase.cpp
new file mode 100644
index 000000000..658ea0f1e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AccessCase.cpp
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "CallLinkInfo.h"
+#include "DOMJITGetterSetter.h"
+#include "DirectArguments.h"
+#include "GetterSetter.h"
+#include "GetterSetterAccessCase.h"
+#include "HeapInlines.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JSCJSValueInlines.h"
+#include "JSModuleEnvironment.h"
+#include "JSModuleNamespaceObject.h"
+#include "LinkBuffer.h"
+#include "ModuleNamespaceAccessCase.h"
+#include "PolymorphicAccess.h"
+#include "ScopedArguments.h"
+#include "ScratchRegisterAllocator.h"
+#include "SlotVisitorInlines.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+static const bool verbose = false;
+
+AccessCase::AccessCase(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet)
+ : m_type(type)
+ , m_offset(offset)
+{
+ m_structure.setMayBeNull(vm, owner, structure);
+ m_conditionSet = conditionSet;
+}
+
+std::unique_ptr<AccessCase> AccessCase::create(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet)
+{
+ switch (type) {
+ case InHit:
+ case InMiss:
+ case ArrayLength:
+ case StringLength:
+ case DirectArgumentsLength:
+ case ScopedArgumentsLength:
+ case ModuleNamespaceLoad:
+ case Replace:
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+    }
+
+ return std::unique_ptr<AccessCase>(new AccessCase(vm, owner, type, offset, structure, conditionSet));
+}
+
+std::unique_ptr<AccessCase> AccessCase::create(
+ VM& vm, JSCell* owner, PropertyOffset offset, Structure* oldStructure, Structure* newStructure,
+ const ObjectPropertyConditionSet& conditionSet)
+{
+ RELEASE_ASSERT(oldStructure == newStructure->previousID());
+
+    // Skip optimizing the case where we need a realloc if we don't have
+    // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity()) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<AccessCase>(new AccessCase(vm, owner, Transition, offset, newStructure, conditionSet));
+}
+
+AccessCase::~AccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
+ VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
+{
+ switch (stubInfo.cacheType) {
+ case CacheType::GetByIdSelf:
+ return ProxyableAccessCase::create(vm, owner, Load, stubInfo.u.byIdSelf.offset, stubInfo.u.byIdSelf.baseObjectStructure.get());
+
+ case CacheType::PutByIdReplace:
+ return AccessCase::create(vm, owner, Replace, stubInfo.u.byIdSelf.offset, stubInfo.u.byIdSelf.baseObjectStructure.get());
+
+ default:
+ return nullptr;
+ }
+}
+
+std::unique_ptr<AccessCase> AccessCase::clone() const
+{
+ std::unique_ptr<AccessCase> result(new AccessCase(*this));
+ result->resetState();
+ return result;
+}
+
+Vector<WatchpointSet*, 2> AccessCase::commit(VM& vm, const Identifier& ident)
+{
+ // It's fine to commit something that is already committed. That arises when we switch to using
+ // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
+ // because most AccessCases have no extra watchpoints anyway.
+ RELEASE_ASSERT(m_state == Primordial || m_state == Committed);
+
+ Vector<WatchpointSet*, 2> result;
+
+ if ((structure() && structure()->needImpurePropertyWatchpoint())
+ || m_conditionSet.needImpurePropertyWatchpoint())
+ result.append(vm.ensureWatchpointSetForImpureProperty(ident));
+
+ if (additionalSet())
+ result.append(additionalSet());
+
+ m_state = Committed;
+
+ return result;
+}
+
+bool AccessCase::guardedByStructureCheck() const
+{
+ if (viaProxy())
+ return false;
+
+ switch (m_type) {
+ case ArrayLength:
+ case StringLength:
+ case DirectArgumentsLength:
+ case ScopedArgumentsLength:
+ case ModuleNamespaceLoad:
+ return false;
+ default:
+ return true;
+ }
+}
+
+bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
+{
+ switch (type()) {
+ case Getter:
+ case Setter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ case CustomValueSetter:
+ case CustomAccessorSetter:
+ return true;
+ case Transition:
+ if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
+ && structure()->couldHaveIndexingHeader()) {
+ if (cellsToMark)
+ cellsToMark->append(newStructure());
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+bool AccessCase::couldStillSucceed() const
+{
+ return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
+}
+
+bool AccessCase::canReplace(const AccessCase& other) const
+{
+ // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
+ // It's fine for this to return false if it's in doubt.
+
+ switch (type()) {
+ case ArrayLength:
+ case StringLength:
+ case DirectArgumentsLength:
+ case ScopedArgumentsLength:
+ return other.type() == type();
+ case ModuleNamespaceLoad: {
+ if (other.type() != type())
+ return false;
+ auto& thisCase = this->as<ModuleNamespaceAccessCase>();
+        auto& otherCase = other.as<ModuleNamespaceAccessCase>();
+ return thisCase.moduleNamespaceObject() == otherCase.moduleNamespaceObject();
+ }
+ default:
+ if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
+ return false;
+
+ return structure() == other.structure();
+ }
+}
+
+void AccessCase::dump(PrintStream& out) const
+{
+ out.print(m_type, ":(");
+
+ CommaPrinter comma;
+
+ out.print(comma, m_state);
+
+ if (m_type == Transition)
+ out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
+ else if (m_structure)
+ out.print(comma, "structure = ", pointerDump(m_structure.get()));
+
+ if (isValidOffset(m_offset))
+ out.print(comma, "offset = ", m_offset);
+ if (!m_conditionSet.isEmpty())
+ out.print(comma, "conditions = ", m_conditionSet);
+
+ dumpImpl(out, comma);
+ out.print(")");
+}
+
+bool AccessCase::visitWeak(VM& vm) const
+{
+ if (m_structure && !Heap::isMarked(m_structure.get()))
+ return false;
+ if (!m_conditionSet.areStillLive())
+ return false;
+ if (isAccessor()) {
+ auto& accessor = this->as<GetterSetterAccessCase>();
+ if (accessor.callLinkInfo())
+ accessor.callLinkInfo()->visitWeak(vm);
+ if (accessor.customSlotBase() && !Heap::isMarked(accessor.customSlotBase()))
+ return false;
+ } else if (type() == IntrinsicGetter) {
+ auto& intrinsic = this->as<IntrinsicGetterAccessCase>();
+ if (intrinsic.intrinsicFunction() && !Heap::isMarked(intrinsic.intrinsicFunction()))
+ return false;
+ } else if (type() == ModuleNamespaceLoad) {
+ auto& accessCase = this->as<ModuleNamespaceAccessCase>();
+ if (accessCase.moduleNamespaceObject() && !Heap::isMarked(accessCase.moduleNamespaceObject()))
+ return false;
+ if (accessCase.moduleEnvironment() && !Heap::isMarked(accessCase.moduleEnvironment()))
+ return false;
+ }
+
+ return true;
+}
+
+bool AccessCase::propagateTransitions(SlotVisitor& visitor) const
+{
+ bool result = true;
+
+ if (m_structure)
+ result &= m_structure->markIfCheap(visitor);
+
+ switch (m_type) {
+ case Transition:
+ if (Heap::isMarkedConcurrently(m_structure->previousID()))
+ visitor.appendUnbarriered(m_structure.get());
+ else
+ result = false;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+void AccessCase::generateWithGuard(
+ AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ RELEASE_ASSERT(m_state == Committed);
+ m_state = Generated;
+
+ CCallHelpers& jit = *state.jit;
+ VM& vm = *jit.vm();
+ JSValueRegs valueRegs = state.valueRegs;
+ GPRReg baseGPR = state.baseGPR;
+ GPRReg scratchGPR = state.scratchGPR;
+
+ UNUSED_PARAM(vm);
+
+ switch (m_type) {
+ case ArrayLength: {
+ ASSERT(!viaProxy());
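+        // Guard: we fall through to the next case unless the cell is an array
+        // (IsArray bit set) with a non-empty indexing shape (IndexingShapeMask bits),
+        // as checked by the two branches below.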
+ jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeAndMiscOffset()), scratchGPR);
+ fallThrough.append(
+ jit.branchTest32(
+ CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
+ fallThrough.append(
+ jit.branchTest32(
+ CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
+ break;
+ }
+
+ case StringLength: {
+ ASSERT(!viaProxy());
+ fallThrough.append(
+ jit.branch8(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+ CCallHelpers::TrustedImm32(StringType)));
+ break;
+ }
+
+ case DirectArgumentsLength: {
+ ASSERT(!viaProxy());
+ fallThrough.append(
+ jit.branch8(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+ CCallHelpers::TrustedImm32(DirectArgumentsType)));
+
+ fallThrough.append(
+ jit.branchTestPtr(
+ CCallHelpers::NonZero,
+ CCallHelpers::Address(baseGPR, DirectArguments::offsetOfMappedArguments())));
+ jit.load32(
+ CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
+ valueRegs.payloadGPR());
+ jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+ state.succeed();
+ return;
+ }
+
+ case ScopedArgumentsLength: {
+ ASSERT(!viaProxy());
+ fallThrough.append(
+ jit.branch8(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+ CCallHelpers::TrustedImm32(ScopedArgumentsType)));
+
+ fallThrough.append(
+ jit.branchTest8(
+ CCallHelpers::NonZero,
+ CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
+ jit.load32(
+ CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
+ valueRegs.payloadGPR());
+ jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+ state.succeed();
+ return;
+ }
+
+ case ModuleNamespaceLoad: {
+ this->as<ModuleNamespaceAccessCase>().emit(state, fallThrough);
+ return;
+ }
+
+ default: {
+ if (viaProxy()) {
+ fallThrough.append(
+ jit.branch8(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+ CCallHelpers::TrustedImm32(PureForwardingProxyType)));
+
+ jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
+
+ fallThrough.append(
+ jit.branchStructure(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
+ structure()));
+ } else {
+ fallThrough.append(
+ jit.branchStructure(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
+ structure()));
+ }
+ break;
+    }
+    }
+
+ generateImpl(state);
+}
+
+void AccessCase::generate(AccessGenerationState& state)
+{
+ RELEASE_ASSERT(m_state == Committed);
+ m_state = Generated;
+
+ generateImpl(state);
+}
+
+void AccessCase::generateImpl(AccessGenerationState& state)
+{
+ SuperSamplerScope superSamplerScope(false);
+ if (verbose)
+ dataLog("\n\nGenerating code for: ", *this, "\n");
+
+ ASSERT(m_state == Generated); // We rely on the callers setting this for us.
+
+ CCallHelpers& jit = *state.jit;
+ VM& vm = *jit.vm();
+ CodeBlock* codeBlock = jit.codeBlock();
+ StructureStubInfo& stubInfo = *state.stubInfo;
+ const Identifier& ident = *state.ident;
+ JSValueRegs valueRegs = state.valueRegs;
+ GPRReg baseGPR = state.baseGPR;
+ GPRReg scratchGPR = state.scratchGPR;
+
+ ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
+
+ for (const ObjectPropertyCondition& condition : m_conditionSet) {
+ Structure* structure = condition.object()->structure();
+
+ if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
+ structure->addTransitionWatchpoint(state.addWatchpoint(condition));
+ continue;
+ }
+
+ if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
+ // The reason why this cannot happen is that we require that PolymorphicAccess calls
+ // AccessCase::generate() only after it has verified that
+ // AccessCase::couldStillSucceed() returned true.
+
+ dataLog("This condition is no longer met: ", condition, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ // We will emit code that has a weak reference that isn't otherwise listed anywhere.
+ state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
+
+ jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
+ state.failAndRepatch.append(
+ jit.branchStructure(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
+ structure));
+ }
+
+ switch (m_type) {
+ case InHit:
+ case InMiss:
+ jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
+ state.succeed();
+ return;
+
+ case Miss:
+ jit.moveTrustedValue(jsUndefined(), valueRegs);
+ state.succeed();
+ return;
+
+ case Load:
+ case GetGetter:
+ case Getter:
+ case Setter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ case CustomValueSetter:
+ case CustomAccessorSetter: {
+ GPRReg valueRegsPayloadGPR = valueRegs.payloadGPR();
+
+ if (isValidOffset(m_offset)) {
+ Structure* currStructure;
+ if (m_conditionSet.isEmpty())
+ currStructure = structure();
+ else
+ currStructure = m_conditionSet.slotBaseCondition().object()->structure();
+ currStructure->startWatchingPropertyForReplacements(vm, offset());
+ }
+
+ GPRReg baseForGetGPR;
+ if (viaProxy()) {
+            ASSERT(m_type != CustomValueSetter && m_type != CustomAccessorSetter); // Because setters need to not trash valueRegsPayloadGPR.
+ if (m_type == Getter || m_type == Setter)
+ baseForGetGPR = scratchGPR;
+ else
+ baseForGetGPR = valueRegsPayloadGPR;
+
+ ASSERT((m_type != Getter && m_type != Setter) || baseForGetGPR != baseGPR);
+ ASSERT(m_type != Setter || baseForGetGPR != valueRegsPayloadGPR);
+
+ jit.loadPtr(
+ CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
+ baseForGetGPR);
+ } else
+ baseForGetGPR = baseGPR;
+
+ GPRReg baseForAccessGPR;
+ if (!m_conditionSet.isEmpty()) {
+ jit.move(
+ CCallHelpers::TrustedImmPtr(alternateBase()),
+ scratchGPR);
+ baseForAccessGPR = scratchGPR;
+ } else
+ baseForAccessGPR = baseForGetGPR;
+
+ GPRReg loadedValueGPR = InvalidGPRReg;
+ if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
+ if (m_type == Load || m_type == GetGetter)
+ loadedValueGPR = valueRegsPayloadGPR;
+ else
+ loadedValueGPR = scratchGPR;
+
+ ASSERT((m_type != Getter && m_type != Setter) || loadedValueGPR != baseGPR);
+ ASSERT(m_type != Setter || loadedValueGPR != valueRegsPayloadGPR);
+
+ GPRReg storageGPR;
+ if (isInlineOffset(m_offset))
+ storageGPR = baseForAccessGPR;
+ else {
+ jit.loadPtr(
+ CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
+ loadedValueGPR);
+ storageGPR = loadedValueGPR;
+ }
+
+#if USE(JSVALUE64)
+ jit.load64(
+ CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
+#else
+ if (m_type == Load || m_type == GetGetter) {
+ jit.load32(
+ CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
+ valueRegs.tagGPR());
+ }
+ jit.load32(
+ CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
+ loadedValueGPR);
+#endif
+ }
+
+ if (m_type == Load || m_type == GetGetter) {
+ state.succeed();
+ return;
+ }
+
+ if (Options::useDOMJIT() && m_type == CustomAccessorGetter && this->as<GetterSetterAccessCase>().domJIT()) {
+ auto& access = this->as<GetterSetterAccessCase>();
+            // We do not need to emit a CheckDOM operation, since the structure check
+            // ensures that the structure of the given base value is structure(). So all
+            // we need to do is perform the CheckDOM check here, at IC compile time.
+ if (structure()->classInfo()->isSubClassOf(access.domJIT()->thisClassInfo())) {
+ access.emitDOMJITGetter(state, baseForGetGPR);
+ return;
+ }
+ }
+
+ // Stuff for custom getters/setters.
+ CCallHelpers::Call operationCall;
+
+ // Stuff for JS getters/setters.
+ CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
+ CCallHelpers::Call fastPathCall;
+ CCallHelpers::Call slowPathCall;
+
+ // This also does the necessary calculations of whether or not we're an
+ // exception handling call site.
+ AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall();
+
+ auto restoreLiveRegistersFromStackForCall = [&](AccessGenerationState::SpillState& spillState, bool callHasReturnValue) {
+ RegisterSet dontRestore;
+ if (callHasReturnValue) {
+ // This is the result value. We don't want to overwrite the result with what we stored to the stack.
+ // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
+ dontRestore.set(valueRegs);
+ }
+ state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+ };
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+ if (m_type == Getter || m_type == Setter) {
+ auto& access = this->as<GetterSetterAccessCase>();
+ ASSERT(baseGPR != loadedValueGPR);
+ ASSERT(m_type != Setter || (baseGPR != valueRegsPayloadGPR && loadedValueGPR != valueRegsPayloadGPR));
+
+ // Create a JS call using a JS call inline cache. Assume that:
+ //
+ // - SP is aligned and represents the extent of the calling compiler's stack usage.
+ //
+ // - FP is set correctly (i.e. it points to the caller's call frame header).
+ //
+ // - SP - FP is an aligned difference.
+ //
+ // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
+ // code.
+ //
+ // Therefore, we temporarily grow the stack for the purpose of the call and then
+ // shrink it after.
+
+ state.setSpillStateForJSGetterSetter(spillState);
+
+ RELEASE_ASSERT(!access.callLinkInfo());
+ access.m_callLinkInfo = std::make_unique<CallLinkInfo>();
+
+ // FIXME: If we generated a polymorphic call stub that jumped back to the getter
+ // stub, which then jumped back to the main code, then we'd have a reachability
+ // situation that the GC doesn't know about. The GC would ensure that the polymorphic
+ // call stub stayed alive, and it would ensure that the main code stayed alive, but
+ // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
+ // be GC objects, and then we'd be able to say that the polymorphic call stub has a
+ // reference to the getter stub.
+ // https://bugs.webkit.org/show_bug.cgi?id=148914
+ access.callLinkInfo()->disallowStubs();
+
+ access.callLinkInfo()->setUpCall(
+ CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
+
+ CCallHelpers::JumpList done;
+
+ // There is a "this" argument.
+ unsigned numberOfParameters = 1;
+ // ... and a value argument if we're calling a setter.
+ if (m_type == Setter)
+ numberOfParameters++;
+
+ // Get the accessor; if there ain't one then the result is jsUndefined().
+ if (m_type == Setter) {
+ jit.loadPtr(
+ CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
+ loadedValueGPR);
+ } else {
+ jit.loadPtr(
+ CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
+ loadedValueGPR);
+ }
+
+ CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
+ CCallHelpers::Zero, loadedValueGPR);
+
+ unsigned numberOfRegsForCall = CallFrame::headerSizeInRegisters + numberOfParameters;
+ unsigned numberOfBytesForCall = numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
+
+ unsigned alignedNumberOfBytesForCall =
+ WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
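+            // A worked example of the arithmetic above, under assumptions that hold on
+            // typical 64-bit targets (sizeof(Register) == 8, sizeof(CallerFrameAndPC) == 16,
+            // CallFrame::headerSizeInRegisters == 5, 16-byte stack alignment):
+            //   getter: 1 parameter  -> (5 + 1) * 8 - 16 = 32 bytes -> rounds up to 32
+            //   setter: 2 parameters -> (5 + 2) * 8 - 16 = 40 bytes -> rounds up to 48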
+
+ jit.subPtr(
+ CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
+ CCallHelpers::stackPointerRegister);
+
+ CCallHelpers::Address calleeFrame = CCallHelpers::Address(
+ CCallHelpers::stackPointerRegister,
+ -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(numberOfParameters),
+ calleeFrame.withOffset(CallFrameSlot::argumentCount * sizeof(Register) + PayloadOffset));
+
+ jit.storeCell(
+ loadedValueGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register)));
+
+ jit.storeCell(
+ baseGPR,
+ calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
+
+ if (m_type == Setter) {
+ jit.storeValue(
+ valueRegs,
+ calleeFrame.withOffset(
+ virtualRegisterForArgument(1).offset() * sizeof(Register)));
+ }
+
+ CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
+ CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
+ CCallHelpers::TrustedImmPtr(0));
+
+ fastPathCall = jit.nearCall();
+ if (m_type == Getter)
+ jit.setupResults(valueRegs);
+ done.append(jit.jump());
+
+ slowCase.link(&jit);
+ jit.move(loadedValueGPR, GPRInfo::regT0);
+#if USE(JSVALUE32_64)
+ // We *always* know that the getter/setter, if non-null, is a cell.
+ jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+#endif
+ jit.move(CCallHelpers::TrustedImmPtr(access.callLinkInfo()), GPRInfo::regT2);
+ slowPathCall = jit.nearCall();
+ if (m_type == Getter)
+ jit.setupResults(valueRegs);
+ done.append(jit.jump());
+
+ returnUndefined.link(&jit);
+ if (m_type == Getter)
+ jit.moveTrustedValue(jsUndefined(), valueRegs);
+
+ done.link(&jit);
+
+ jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+ bool callHasReturnValue = isGetter();
+ restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
+
+ jit.addLinkTask([=, &vm] (LinkBuffer& linkBuffer) {
+ this->as<GetterSetterAccessCase>().callLinkInfo()->setCallLocations(
+ CodeLocationLabel(linkBuffer.locationOfNearCall(slowPathCall)),
+ CodeLocationLabel(linkBuffer.locationOf(addressOfLinkFunctionCheck)),
+ linkBuffer.locationOfNearCall(fastPathCall));
+
+ linkBuffer.link(
+ slowPathCall,
+ CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
+ });
+ } else {
+ ASSERT(m_type == CustomValueGetter || m_type == CustomAccessorGetter || m_type == CustomValueSetter || m_type == CustomAccessorSetter);
+
+ // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
+ // hard to track if someone did spillage or not, so we just assume that we always need
+ // to make some space here.
+ jit.makeSpaceOnStackForCCall();
+
+ // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
+ // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
+        // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
+        // FIXME: Remove these differences between custom values and custom accessors.
+ // https://bugs.webkit.org/show_bug.cgi?id=158014
+ GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
+#if USE(JSVALUE64)
+ if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
+ jit.setupArgumentsWithExecState(
+ baseForCustomValue,
+ CCallHelpers::TrustedImmPtr(ident.impl()));
+ } else
+ jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
+#else
+ if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
+ jit.setupArgumentsWithExecState(
+ EABI_32BIT_DUMMY_ARG baseForCustomValue,
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ CCallHelpers::TrustedImmPtr(ident.impl()));
+ } else {
+ jit.setupArgumentsWithExecState(
+ EABI_32BIT_DUMMY_ARG baseForCustomValue,
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ valueRegs.payloadGPR(), valueRegs.tagGPR());
+ }
+#endif
+ jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
+
+ operationCall = jit.call();
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(operationCall, FunctionPtr(this->as<GetterSetterAccessCase>().m_customAccessor.opaque));
+ });
+
+ if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
+ jit.setupResults(valueRegs);
+ jit.reclaimSpaceOnStackForCCall();
+
+ CCallHelpers::Jump noException =
+ jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+ state.emitExplicitExceptionHandler();
+
+ noException.link(&jit);
+ bool callHasReturnValue = isGetter();
+ restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
+ }
+ state.succeed();
+ return;
+ }
+
+ case Replace: {
+ if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
+ if (verbose)
+ dataLog("Have type: ", type->descriptor(), "\n");
+ state.failAndRepatch.append(
+ jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
+ } else if (verbose)
+ dataLog("Don't have type.\n");
+
+ if (isInlineOffset(m_offset)) {
+ jit.storeValue(
+ valueRegs,
+ CCallHelpers::Address(
+ baseGPR,
+ JSObject::offsetOfInlineStorage() +
+ offsetInInlineStorage(m_offset) * sizeof(JSValue)));
+ } else {
+ jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ jit.storeValue(
+ valueRegs,
+ CCallHelpers::Address(
+ scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
+ }
+ state.succeed();
+ return;
+ }
+
+ case Transition: {
+ // AccessCase::transition() should have returned null if this wasn't true.
+ RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
+
+ if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
+ if (verbose)
+ dataLog("Have type: ", type->descriptor(), "\n");
+ state.failAndRepatch.append(
+ jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
+ } else if (verbose)
+ dataLog("Don't have type.\n");
+
+ // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
+ // exactly when this would make calls.
+ bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
+ bool reallocating = allocating && structure()->outOfLineCapacity();
+ bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
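+        // Three shapes of transition, matching the booleans above: no allocation
+        // (out-of-line capacity unchanged), inline allocation from the JIT (possible
+        // only when there is no indexing header to worry about), and allocation via a
+        // C++ operation call otherwise. The reallocating flag additionally means the
+        // old out-of-line storage exists and must be copied over.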
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+ allocator.lock(valueRegs);
+ allocator.lock(scratchGPR);
+
+ GPRReg scratchGPR2 = InvalidGPRReg;
+ GPRReg scratchGPR3 = InvalidGPRReg;
+ if (allocatingInline) {
+ scratchGPR2 = allocator.allocateScratchGPR();
+ scratchGPR3 = allocator.allocateScratchGPR();
+ }
+
+ ScratchRegisterAllocator::PreservedState preservedState =
+ allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+
+ CCallHelpers::JumpList slowPath;
+
+ ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
+
+ if (allocating) {
+ size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
+
+ if (allocatingInline) {
+ MarkedAllocator* allocator = vm.auxiliarySpace.allocatorFor(newSize);
+
+ if (!allocator) {
+ // Yuck, this case would suck!
+ slowPath.append(jit.jump());
+ }
+
+ jit.move(CCallHelpers::TrustedImmPtr(allocator), scratchGPR2);
+ jit.emitAllocate(scratchGPR, allocator, scratchGPR2, scratchGPR3, slowPath);
+ jit.addPtr(CCallHelpers::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR);
+
+ size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
+ ASSERT(newSize > oldSize);
+
+ if (reallocating) {
+ // Handle the case where we are reallocating (i.e. the old structure/butterfly
+ // already had out-of-line property storage).
+
+ jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
+
+ // We have scratchGPR = new storage, scratchGPR3 = old storage,
+ // scratchGPR2 = available
+ for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
+ jit.loadPtr(
+ CCallHelpers::Address(
+ scratchGPR3,
+ -static_cast<ptrdiff_t>(
+ offset + sizeof(JSValue) + sizeof(void*))),
+ scratchGPR2);
+ jit.storePtr(
+ scratchGPR2,
+ CCallHelpers::Address(
+ scratchGPR,
+ -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
+ }
+ }
+
+ for (size_t offset = oldSize; offset < newSize; offset += sizeof(void*))
+ jit.storePtr(CCallHelpers::TrustedImmPtr(0), CCallHelpers::Address(scratchGPR, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
+ } else {
+ // Handle the case where we are allocating out-of-line using an operation.
+ RegisterSet extraRegistersToPreserve;
+ extraRegistersToPreserve.set(baseGPR);
+ extraRegistersToPreserve.set(valueRegs);
+ AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(
+ state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+ jit.makeSpaceOnStackForCCall();
+
+ if (!reallocating) {
+ jit.setupArgumentsWithExecState(baseGPR);
+
+ CCallHelpers::Call operationCall = jit.call();
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(
+ operationCall,
+ FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
+ });
+ } else {
+ // Handle the case where we are reallocating (i.e. the old structure/butterfly
+ // already had out-of-line property storage).
+ jit.setupArgumentsWithExecState(
+ baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
+
+ CCallHelpers::Call operationCall = jit.call();
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(
+ operationCall,
+ FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
+ });
+ }
+
+ jit.reclaimSpaceOnStackForCCall();
+ jit.move(GPRInfo::returnValueGPR, scratchGPR);
+
+ CCallHelpers::Jump noException = jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+ state.emitExplicitExceptionHandler();
+
+ noException.link(&jit);
+ state.restoreLiveRegistersFromStackForCall(spillState);
+ }
+ }
+
+ if (isInlineOffset(m_offset)) {
+ jit.storeValue(
+ valueRegs,
+ CCallHelpers::Address(
+ baseGPR,
+ JSObject::offsetOfInlineStorage() +
+ offsetInInlineStorage(m_offset) * sizeof(JSValue)));
+ } else {
+ if (!allocating)
+ jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ jit.storeValue(
+ valueRegs,
+ CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
+ }
+
+ if (allocatingInline) {
+ // We set the new butterfly and the structure last. Doing it this way ensures that
+ // whatever we had done up to this point is forgotten if we choose to branch to slow
+ // path.
+ jit.nukeStructureAndStoreButterfly(scratchGPR, baseGPR);
+ }
+
+ uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
+ jit.store32(
+ CCallHelpers::TrustedImm32(structureBits),
+ CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
+
+ allocator.restoreReusedRegistersByPopping(jit, preservedState);
+ state.succeed();
+
+ // We will have a slow path if we were allocating without the help of an operation.
+ if (allocatingInline) {
+ if (allocator.didReuseRegisters()) {
+ slowPath.link(&jit);
+ allocator.restoreReusedRegistersByPopping(jit, preservedState);
+ state.failAndIgnore.append(jit.jump());
+ } else
+ state.failAndIgnore.append(slowPath);
+ } else
+ RELEASE_ASSERT(slowPath.empty());
+ return;
+ }
+
+ case ArrayLength: {
+ jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
+ state.failAndIgnore.append(
+ jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
+ jit.boxInt32(scratchGPR, valueRegs);
+ state.succeed();
+ return;
+ }
+
+ case StringLength: {
+ jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
+ jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+ state.succeed();
+ return;
+ }
+
+ case IntrinsicGetter: {
+ RELEASE_ASSERT(isValidOffset(offset()));
+
+        // We need to ensure that the getter value does not move out from under us. Note that GetterSetters
+        // are immutable, so we just need to watch the property, not any value inside it.
+ Structure* currStructure;
+ if (m_conditionSet.isEmpty())
+ currStructure = structure();
+ else
+ currStructure = m_conditionSet.slotBaseCondition().object()->structure();
+ currStructure->startWatchingPropertyForReplacements(vm, offset());
+
+ this->as<IntrinsicGetterAccessCase>().emitIntrinsicGetter(state);
+ return;
+ }
+
+ case DirectArgumentsLength:
+ case ScopedArgumentsLength:
+ case ModuleNamespaceLoad:
+ // These need to be handled by generateWithGuard(), since the guard is part of the
+ // algorithm. We can be sure that nobody will call generate() directly for these since they
+ // are not guarded by structure checks.
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/bytecode/AccessCase.h b/Source/JavaScriptCore/bytecode/AccessCase.h
new file mode 100644
index 000000000..9f8a20063
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AccessCase.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "JSFunctionInlines.h"
+#include "ObjectPropertyConditionSet.h"
+
+namespace JSC {
+
+struct AccessGenerationState;
+
+// An AccessCase describes one of the cases of a PolymorphicAccess. A PolymorphicAccess represents a
+// planned (to generate in future) or generated stub for some inline cache. That stub contains fast
+// path code for some finite number of fast cases, each described by an AccessCase object.
+//
+// An AccessCase object has a lifecycle that proceeds through several states. Note that the states
+// of AccessCase have a lot to do with the global effect epoch (we'll say epoch for short). This is
+// a simple way of reasoning about the state of the system outside this AccessCase. Any observable
+// effect - like storing to a property, changing an object's structure, etc. - increments the epoch.
+// The states are:
+//
+// Primordial: This is an AccessCase that was just allocated. It does not correspond to any actual
+// code and it is not owned by any PolymorphicAccess. In this state, the AccessCase
+// assumes that it is in the same epoch as when it was created. This is important
+// because it may make claims about itself ("I represent a valid case so long as you
+// register a watchpoint on this set") that could be contradicted by some outside
+// effects (like firing and deleting the watchpoint set in question). This is also the
+// state that an AccessCase is in when it is cloned (AccessCase::clone()).
+//
+// Committed: This happens as soon as some PolymorphicAccess takes ownership of this AccessCase.
+// In this state, the AccessCase no longer assumes anything about the epoch. To
+// accomplish this, PolymorphicAccess calls AccessCase::commit(). This must be done
+// during the same epoch when the AccessCase was created, either by the client or by
+// clone(). When created by the client, committing during the same epoch works because
+// we can be sure that whatever watchpoint sets they spoke of are still valid. When
+// created by clone(), we can be sure that the set is still valid because the original
+// of the clone still has watchpoints on it.
+//
+// Generated: This is the state when the PolymorphicAccess generates code for this case by
+// calling AccessCase::generate() or AccessCase::generateWithGuard(). At this point
+// the case object will have some extra stuff in it, like possibly the CallLinkInfo
+// object associated with the inline cache.
+// FIXME: Moving into the Generated state should not mutate the AccessCase object or
+// put more stuff into it. If we fix this, then we can get rid of AccessCase::clone().
+// https://bugs.webkit.org/show_bug.cgi?id=156456
+//
+// An AccessCase may be destroyed while in any of these states.
+//
+// We will sometimes buffer committed AccessCases in the PolymorphicAccess object before generating
+// code. This allows us to only regenerate once we've accumulated (hopefully) more than one new
+// AccessCase.
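+//
+// An illustrative sketch of that lifecycle (call sites are simplified; 'owner',
+// 'ident', and 'state' stand in for whatever the real caller has on hand, and in
+// practice commit() and code generation are driven by PolymorphicAccess):
+//
+//     auto accessCase = AccessCase::create(vm, owner, AccessCase::Replace, offset, structure); // Primordial
+//     Vector<WatchpointSet*, 2> sets = accessCase->commit(vm, ident); // Committed; caller must watch these sets
+//     accessCase->generateWithGuard(state, fallThrough);              // Generated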
+class AccessCase {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ enum AccessType : uint8_t {
+ Load,
+ Transition,
+ Replace,
+ Miss,
+ GetGetter,
+ Getter,
+ Setter,
+ CustomValueGetter,
+ CustomAccessorGetter,
+ CustomValueSetter,
+ CustomAccessorSetter,
+ IntrinsicGetter,
+ InHit,
+ InMiss,
+ ArrayLength,
+ StringLength,
+ DirectArgumentsLength,
+ ScopedArgumentsLength,
+ ModuleNamespaceLoad,
+ };
+
+ enum State : uint8_t {
+ Primordial,
+ Committed,
+ Generated
+ };
+
+ template<typename T>
+ T& as() { return *static_cast<T*>(this); }
+
+ template<typename T>
+ const T& as() const { return *static_cast<const T*>(this); }
+
+ template<typename AccessCaseType, typename... Arguments>
+ static std::unique_ptr<AccessCaseType> create(Arguments... arguments)
+ {
+ return std::unique_ptr<AccessCaseType>(new AccessCaseType(arguments...));
+ }
+
+ static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, AccessType, PropertyOffset = invalidOffset,
+ Structure* = nullptr, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+ // This create method should be used for transitions.
+ static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, PropertyOffset, Structure* oldStructure,
+ Structure* newStructure, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+ static std::unique_ptr<AccessCase> fromStructureStubInfo(VM&, JSCell* owner, StructureStubInfo&);
+
+ AccessType type() const { return m_type; }
+ State state() const { return m_state; }
+ PropertyOffset offset() const { return m_offset; }
+
+ Structure* structure() const
+ {
+ if (m_type == Transition)
+ return m_structure->previousID();
+ return m_structure.get();
+ }
+ bool guardedByStructureCheck() const;
+
+ Structure* newStructure() const
+ {
+ ASSERT(m_type == Transition);
+ return m_structure.get();
+ }
+
+ ObjectPropertyConditionSet conditionSet() const { return m_conditionSet; }
+
+ virtual JSObject* alternateBase() const { return conditionSet().slotBaseCondition().object(); }
+ virtual WatchpointSet* additionalSet() const { return nullptr; }
+ virtual bool viaProxy() const { return false; }
+
+ // If you supply the optional vector, this will append the set of cells that this will need to keep alive
+ // past the call.
+ bool doesCalls(Vector<JSCell*>* cellsToMark = nullptr) const;
+
+ bool isGetter() const
+ {
+ switch (type()) {
+ case Getter:
+ case CustomValueGetter:
+ case CustomAccessorGetter:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isAccessor() const { return isGetter() || type() == Setter; }
+
+ // Is it still possible for this case to ever be taken? Must call this as a prerequisite for
+ // calling generate() and friends. If this returns true, then you can call generate(). If
+ // this returns false, then generate() will crash. You must call generate() in the same epoch
+ // as when you called couldStillSucceed().
+ bool couldStillSucceed() const;
+
+ // If this method returns true, then it's a good idea to remove 'other' from the access once 'this'
+ // is added. This method assumes that in case of contradictions, 'this' represents a newer, and so
+    // more useful, truth. This method can be conservative; it will return false when in doubt.
+ bool canReplace(const AccessCase& other) const;
+
+ void dump(PrintStream& out) const;
+ virtual void dumpImpl(PrintStream&, CommaPrinter&) const { }
+
+ virtual ~AccessCase();
+
+protected:
+ AccessCase(VM&, JSCell* owner, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet&);
+ AccessCase(const AccessCase&) = default;
+ AccessCase& operator=(const AccessCase&) = delete;
+ void resetState() { m_state = Primordial; }
+
+private:
+ friend class CodeBlock;
+ friend class PolymorphicAccess;
+
+ bool visitWeak(VM&) const;
+ bool propagateTransitions(SlotVisitor&) const;
+
+ // FIXME: This only exists because of how AccessCase puts post-generation things into itself.
+ // https://bugs.webkit.org/show_bug.cgi?id=156456
+ virtual std::unique_ptr<AccessCase> clone() const;
+
+ // Perform any action that must be performed before the end of the epoch in which the case
+ // was created. Returns a set of watchpoint sets that will need to be watched.
+ Vector<WatchpointSet*, 2> commit(VM&, const Identifier&);
+
+ // Fall through on success. Two kinds of failures are supported: fall-through, which means that we
+ // should try a different case; and failure, which means that this was the right case but it needs
+ // help from the slow path.
+ void generateWithGuard(AccessGenerationState&, MacroAssembler::JumpList& fallThrough);
+
+ // Fall through on success, add a jump to the failure list on failure.
+ void generate(AccessGenerationState&);
+
+ void generateImpl(AccessGenerationState&);
+
+ AccessType m_type;
+ State m_state { Primordial };
+ PropertyOffset m_offset;
+
+ // Usually this is the structure that we expect the base object to have. But, this is the *new*
+ // structure for a transition and we rely on the fact that it has a strong reference to the old
+ // structure. For proxies, this is the structure of the object behind the proxy.
+ WriteBarrier<Structure> m_structure;
+
+ ObjectPropertyConditionSet m_conditionSet;
+};
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
new file mode 100644
index 000000000..2fd703150
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AdaptiveInferredPropertyValueWatchpointBase.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+AdaptiveInferredPropertyValueWatchpointBase::AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition& key)
+ : m_key(key)
+{
+ RELEASE_ASSERT(key.kind() == PropertyCondition::Equivalence);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::install()
+{
+ RELEASE_ASSERT(m_key.isWatchable());
+
+ m_key.object()->structure()->addTransitionWatchpoint(&m_structureWatchpoint);
+
+ PropertyOffset offset = m_key.object()->structure()->getConcurrently(m_key.uid());
+ WatchpointSet* set = m_key.object()->structure()->propertyReplacementWatchpointSet(offset);
+ set->add(&m_propertyWatchpoint);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::fire(const FireDetail& detail)
+{
+    // One of the watchpoints fired, but the other one didn't. Make sure that neither of them is
+    // in any set anymore. This simplifies things by allowing us to reinstall the watchpoints
+    // from scratch.
+ if (m_structureWatchpoint.isOnList())
+ m_structureWatchpoint.remove();
+ if (m_propertyWatchpoint.isOnList())
+ m_propertyWatchpoint.remove();
+
+ if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+ install();
+ return;
+ }
+
+ handleFire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::StructureWatchpoint::fireInternal(const FireDetail& detail)
+{
+ ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_structureWatchpoint);
+
+ AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+
+ parent->fire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::PropertyWatchpoint::fireInternal(const FireDetail& detail)
+{
+ ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_propertyWatchpoint);
+
+ AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+
+ parent->fire(detail);
+}
+
+} // namespace JSC
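Both fireInternal() overrides above recover their owning object with the classic container-of idiom: subtract the member's offset within the parent from the member's own address. A standalone illustration using plain offsetof (the types here are hypothetical, not WebKit's):

    #include <cstddef>
    #include <cstdio>

    struct Member { int payload; };

    struct Parent {
        int before;
        Member member;
    };

    Parent* parentOf(Member* member)
    {
        // Step back from the member's address by its offset within Parent.
        return reinterpret_cast<Parent*>(
            reinterpret_cast<char*>(member) - offsetof(Parent, member));
    }

    int main()
    {
        Parent parent { 1, { 2 } };
        std::printf("%d\n", parentOf(&parent.member)->before); // prints 1
        return 0;
    }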
diff --git a/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
new file mode 100644
index 000000000..410a93fc9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class AdaptiveInferredPropertyValueWatchpointBase {
+ WTF_MAKE_NONCOPYABLE(AdaptiveInferredPropertyValueWatchpointBase);
+ WTF_MAKE_FAST_ALLOCATED;
+
+public:
+ AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition&);
+
+ const ObjectPropertyCondition& key() const { return m_key; }
+
+ void install();
+
+ virtual ~AdaptiveInferredPropertyValueWatchpointBase() = default;
+
+protected:
+ virtual void handleFire(const FireDetail&) = 0;
+
+private:
+ class StructureWatchpoint : public Watchpoint {
+ public:
+ StructureWatchpoint() { }
+ protected:
+ void fireInternal(const FireDetail&) override;
+ };
+ class PropertyWatchpoint : public Watchpoint {
+ public:
+ PropertyWatchpoint() { }
+ protected:
+ void fireInternal(const FireDetail&) override;
+ };
+
+ void fire(const FireDetail&);
+
+ ObjectPropertyCondition m_key;
+ StructureWatchpoint m_structureWatchpoint;
+ PropertyWatchpoint m_propertyWatchpoint;
+};
+
+} // namespace JSC
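A client derives from this base and implements handleFire(); install() wires up both watchpoints. A hedged sketch of a concrete subclass (the body of handleFire() is illustrative; real subclasses, such as the DFG's adaptive watchpoints, jettison a CodeBlock here):

    class ExampleInferredValueWatchpoint final : public AdaptiveInferredPropertyValueWatchpointBase {
    public:
        ExampleInferredValueWatchpoint(const ObjectPropertyCondition& key)
            : AdaptiveInferredPropertyValueWatchpointBase(key)
        {
        }

    private:
        void handleFire(const FireDetail&) override
        {
            // The equivalence condition can no longer be watched; invalidate
            // whatever optimized code assumed the inferred value.
        }
    };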
diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.cpp b/Source/JavaScriptCore/bytecode/ArithProfile.cpp
new file mode 100644
index 000000000..1fa7c7989
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ArithProfile.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ArithProfile.h"
+
+#include "CCallHelpers.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+void ArithProfile::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
+{
+ if (!shouldEmitSetDouble() && !shouldEmitSetNonNumber())
+ return;
+
+ CCallHelpers::Jump isInt32 = jit.branchIfInt32(regs, mode);
+ CCallHelpers::Jump notDouble = jit.branchIfNotDoubleKnownNotInt32(regs, mode);
+ emitSetDouble(jit);
+ CCallHelpers::Jump done = jit.jump();
+ notDouble.link(&jit);
+ emitSetNonNumber(jit);
+ done.link(&jit);
+ isInt32.link(&jit);
+}
+
+bool ArithProfile::shouldEmitSetDouble() const
+{
+ uint32_t mask = ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble;
+ return (m_bits & mask) != mask;
+}
+
+void ArithProfile::emitSetDouble(CCallHelpers& jit) const
+{
+ if (shouldEmitSetDouble())
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(addressOfBits()));
+}
+
+bool ArithProfile::shouldEmitSetNonNumber() const
+{
+ uint32_t mask = ArithProfile::NonNumber;
+ return (m_bits & mask) != mask;
+}
+
+void ArithProfile::emitSetNonNumber(CCallHelpers& jit) const
+{
+ if (shouldEmitSetNonNumber())
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNumber), CCallHelpers::AbsoluteAddress(addressOfBits()));
+}
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, const ArithProfile& profile)
+{
+ const char* separator = "";
+
+ out.print("Result:<");
+ if (!profile.didObserveNonInt32()) {
+ out.print("Int32");
+ separator = "|";
+ } else {
+ if (profile.didObserveNegZeroDouble()) {
+ out.print(separator, "NegZeroDouble");
+ separator = "|";
+ }
+ if (profile.didObserveNonNegZeroDouble()) {
+ out.print(separator, "NonNegZeroDouble");
+ separator = "|";
+ }
+ if (profile.didObserveNonNumber()) {
+ out.print(separator, "NonNumber");
+ separator = "|";
+ }
+ if (profile.didObserveInt32Overflow()) {
+ out.print(separator, "Int32Overflow");
+ separator = "|";
+ }
+ if (profile.didObserveInt52Overflow()) {
+ out.print(separator, "Int52Overflow");
+ separator = "|";
+ }
+ }
+ if (profile.tookSpecialFastPath())
+ out.print(separator, "Took special fast path.");
+ out.print(">");
+
+ out.print(" LHS ObservedType:<");
+ out.print(profile.lhsObservedType());
+ out.print("> RHS ObservedType:<");
+ out.print(profile.rhsObservedType());
+ out.print(">");
+
+ out.print(" LHS ResultType:<", RawPointer(bitwise_cast<void*>(static_cast<uintptr_t>(profile.lhsResultType().bits()))));
+ out.print("> RHS ResultType:<", RawPointer(bitwise_cast<void*>(static_cast<uintptr_t>(profile.rhsResultType().bits()))));
+ out.print(">");
+}
+
+void printInternal(PrintStream& out, const JSC::ObservedType& observedType)
+{
+ const char* separator = "";
+ if (observedType.sawInt32()) {
+ out.print(separator, "Int32");
+ separator = "|";
+ }
+ if (observedType.sawNumber()) {
+ out.print(separator, "Number");
+ separator = "|";
+ }
+ if (observedType.sawNonNumber()) {
+ out.print(separator, "NonNumber");
+ separator = "|";
+ }
+}
+
+} // namespace WTF
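Because these printInternal() overloads live in the WTF namespace, an ArithProfile (or ObservedType) can be passed straight to the usual WTF print helpers. For example (the exact output shown is illustrative):

    ArithProfile profile(ResultType::numberType(), ResultType::numberType());
    dataLog("profile = ", profile, "\n");
    // Prints something like:
    // profile = Result:<Int32> LHS ObservedType:<> RHS ObservedType:<> ...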
diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.h b/Source/JavaScriptCore/bytecode/ArithProfile.h
new file mode 100644
index 000000000..40fad1be3
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ArithProfile.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GPRInfo.h"
+#include "JSCJSValue.h"
+#include "ResultType.h"
+#include "TagRegistersMode.h"
+
+namespace JSC {
+
+class CCallHelpers;
+
+struct ObservedType {
+ ObservedType(uint8_t bits = TypeEmpty)
+ : m_bits(bits)
+ { }
+
+ bool sawInt32() const { return m_bits & TypeInt32; }
+ bool isOnlyInt32() const { return m_bits == TypeInt32; }
+ bool sawNumber() const { return m_bits & TypeNumber; }
+ bool isOnlyNumber() const { return m_bits == TypeNumber; }
+ bool sawNonNumber() const { return m_bits & TypeNonNumber; }
+ bool isOnlyNonNumber() const { return m_bits == TypeNonNumber; }
+ bool isEmpty() const { return !m_bits; }
+ uint8_t bits() const { return m_bits; }
+
+ ObservedType withInt32() const { return ObservedType(m_bits | TypeInt32); }
+ ObservedType withNumber() const { return ObservedType(m_bits | TypeNumber); }
+ ObservedType withNonNumber() const { return ObservedType(m_bits | TypeNonNumber); }
+ ObservedType withoutNonNumber() const { return ObservedType(m_bits & ~TypeNonNumber); }
+
+ bool operator==(const ObservedType& other) const { return m_bits == other.m_bits; }
+
+    static const uint8_t TypeEmpty = 0x0;
+    static const uint8_t TypeInt32 = 0x1;
+    static const uint8_t TypeNumber = 0x2;
+    static const uint8_t TypeNonNumber = 0x4;
+
+ static const uint32_t numBitsNeeded = 3;
+
+private:
+ uint8_t m_bits;
+};
+
+struct ArithProfile {
+private:
+ static const uint32_t numberOfFlagBits = 5;
+ static const uint32_t rhsResultTypeShift = numberOfFlagBits;
+ static const uint32_t lhsResultTypeShift = rhsResultTypeShift + ResultType::numBitsNeeded;
+ static const uint32_t rhsObservedTypeShift = lhsResultTypeShift + ResultType::numBitsNeeded;
+ static const uint32_t lhsObservedTypeShift = rhsObservedTypeShift + ObservedType::numBitsNeeded;
+
+ static_assert(ObservedType::numBitsNeeded == 3, "We make a hard assumption about that here.");
+ static const uint32_t clearRhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << rhsObservedTypeShift) | (1 << (rhsObservedTypeShift + 1)) | (1 << (rhsObservedTypeShift + 2))));
+ static const uint32_t clearLhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << lhsObservedTypeShift) | (1 << (lhsObservedTypeShift + 1)) | (1 << (lhsObservedTypeShift + 2))));
+
+ static const uint32_t resultTypeMask = (1 << ResultType::numBitsNeeded) - 1;
+ static const uint32_t observedTypeMask = (1 << ObservedType::numBitsNeeded) - 1;
+public:
+ static const uint32_t specialFastPathBit = 1 << (lhsObservedTypeShift + ObservedType::numBitsNeeded);
+ static_assert((lhsObservedTypeShift + ObservedType::numBitsNeeded) <= (sizeof(uint32_t) * 8) - 1, "Should fit in a uint32_t.");
+ static_assert(!(specialFastPathBit & ~clearLhsObservedTypeBitMask), "These bits should not intersect.");
+ static_assert(specialFastPathBit & clearLhsObservedTypeBitMask, "These bits should intersect.");
+ static_assert(specialFastPathBit > ~clearLhsObservedTypeBitMask, "These bits should not intersect and specialFastPathBit should be a higher bit.");
+
+ ArithProfile(ResultType arg)
+ {
+ m_bits = (arg.bits() << lhsResultTypeShift);
+ ASSERT(lhsResultType().bits() == arg.bits());
+ ASSERT(lhsObservedType().isEmpty());
+ ASSERT(rhsObservedType().isEmpty());
+ }
+
+ ArithProfile(ResultType lhs, ResultType rhs)
+ {
+ m_bits = (lhs.bits() << lhsResultTypeShift) | (rhs.bits() << rhsResultTypeShift);
+ ASSERT(lhsResultType().bits() == lhs.bits() && rhsResultType().bits() == rhs.bits());
+ ASSERT(lhsObservedType().isEmpty());
+ ASSERT(rhsObservedType().isEmpty());
+ }
+ ArithProfile() = default;
+
+ static ArithProfile fromInt(uint32_t bits)
+ {
+ ArithProfile result;
+ result.m_bits = bits;
+ return result;
+ }
+
+ enum ObservedResults {
+ NonNegZeroDouble = 1 << 0,
+ NegZeroDouble = 1 << 1,
+ NonNumber = 1 << 2,
+ Int32Overflow = 1 << 3,
+ Int52Overflow = 1 << 4,
+ };
+
+ ResultType lhsResultType() const { return ResultType((m_bits >> lhsResultTypeShift) & resultTypeMask); }
+ ResultType rhsResultType() const { return ResultType((m_bits >> rhsResultTypeShift) & resultTypeMask); }
+
+ ObservedType lhsObservedType() const { return ObservedType((m_bits >> lhsObservedTypeShift) & observedTypeMask); }
+ ObservedType rhsObservedType() const { return ObservedType((m_bits >> rhsObservedTypeShift) & observedTypeMask); }
+ void setLhsObservedType(ObservedType type)
+ {
+ uint32_t bits = m_bits;
+ bits &= clearLhsObservedTypeBitMask;
+ bits |= type.bits() << lhsObservedTypeShift;
+ m_bits = bits;
+ ASSERT(lhsObservedType() == type);
+ }
+
+ void setRhsObservedType(ObservedType type)
+ {
+ uint32_t bits = m_bits;
+ bits &= clearRhsObservedTypeBitMask;
+ bits |= type.bits() << rhsObservedTypeShift;
+ m_bits = bits;
+ ASSERT(rhsObservedType() == type);
+ }
+
+ bool tookSpecialFastPath() const { return m_bits & specialFastPathBit; }
+
+ bool didObserveNonInt32() const { return hasBits(NonNegZeroDouble | NegZeroDouble | NonNumber); }
+ bool didObserveDouble() const { return hasBits(NonNegZeroDouble | NegZeroDouble); }
+ bool didObserveNonNegZeroDouble() const { return hasBits(NonNegZeroDouble); }
+ bool didObserveNegZeroDouble() const { return hasBits(NegZeroDouble); }
+ bool didObserveNonNumber() const { return hasBits(NonNumber); }
+ bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); }
+ bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); }
+
+ void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); }
+ void setObservedNegZeroDouble() { setBit(NegZeroDouble); }
+ void setObservedNonNumber() { setBit(NonNumber); }
+ void setObservedInt32Overflow() { setBit(Int32Overflow); }
+ void setObservedInt52Overflow() { setBit(Int52Overflow); }
+
+ const void* addressOfBits() const { return &m_bits; }
+
+ void observeResult(JSValue value)
+ {
+ if (value.isInt32())
+ return;
+ if (value.isNumber()) {
+ m_bits |= Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble;
+ return;
+ }
+ m_bits |= NonNumber;
+ }
+
+ void lhsSawInt32() { setLhsObservedType(lhsObservedType().withInt32()); }
+ void lhsSawNumber() { setLhsObservedType(lhsObservedType().withNumber()); }
+ void lhsSawNonNumber() { setLhsObservedType(lhsObservedType().withNonNumber()); }
+ void rhsSawInt32() { setRhsObservedType(rhsObservedType().withInt32()); }
+ void rhsSawNumber() { setRhsObservedType(rhsObservedType().withNumber()); }
+ void rhsSawNonNumber() { setRhsObservedType(rhsObservedType().withNonNumber()); }
+
+ void observeLHS(JSValue lhs)
+ {
+ ArithProfile newProfile = *this;
+ if (lhs.isNumber()) {
+ if (lhs.isInt32())
+ newProfile.lhsSawInt32();
+ else
+ newProfile.lhsSawNumber();
+ } else
+ newProfile.lhsSawNonNumber();
+
+ m_bits = newProfile.bits();
+ }
+
+ void observeLHSAndRHS(JSValue lhs, JSValue rhs)
+ {
+ observeLHS(lhs);
+
+ ArithProfile newProfile = *this;
+ if (rhs.isNumber()) {
+ if (rhs.isInt32())
+ newProfile.rhsSawInt32();
+ else
+ newProfile.rhsSawNumber();
+ } else
+ newProfile.rhsSawNonNumber();
+
+ m_bits = newProfile.bits();
+ }
+
+#if ENABLE(JIT)
+ // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble) if it sees a
+ // double. Sets NonNumber if it sees a non-number.
+ void emitObserveResult(CCallHelpers&, JSValueRegs, TagRegistersMode = HaveTagRegisters);
+
+ // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble).
+ bool shouldEmitSetDouble() const;
+ void emitSetDouble(CCallHelpers&) const;
+
+ // Sets NonNumber.
+ void emitSetNonNumber(CCallHelpers&) const;
+ bool shouldEmitSetNonNumber() const;
+#endif // ENABLE(JIT)
+
+ uint32_t bits() const { return m_bits; }
+
+private:
+ bool hasBits(int mask) const { return m_bits & mask; }
+ void setBit(int mask) { m_bits |= mask; }
+
+ uint32_t m_bits { 0 }; // We take care to update m_bits only in a single operation. We don't ever store an inconsistent bit representation to it.
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, const JSC::ArithProfile&);
+void printInternal(PrintStream&, const JSC::ObservedType&);
+
+} // namespace WTF
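The packed layout supports a simple observe-then-query workflow. A minimal sketch of how a slow path might feed the profile (a sketch of the API above, not an actual JSC call site):

    ArithProfile profile(ResultType::unknownType(), ResultType::unknownType());

    // Observe the operands and the result of, say, an add.
    profile.observeLHSAndRHS(jsNumber(1), jsNumber(2.5)); // lhs: Int32, rhs: Number
    profile.observeResult(jsNumber(3.5)); // non-int32 number: sets the double flags

    ASSERT(profile.lhsObservedType().isOnlyInt32());
    ASSERT(profile.rhsObservedType().isOnlyNumber());
    ASSERT(profile.didObserveDouble());
    ASSERT(!profile.didObserveNonNumber());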
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
index 4a008e083..905b5bd3c 100644
--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp
@@ -26,7 +26,7 @@
#include "config.h"
#include "ArrayAllocationProfile.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
@@ -49,7 +49,7 @@ void ArrayAllocationProfile::updateIndexingType()
JSArray* lastArray = m_lastArray;
if (!lastArray)
return;
- m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->structure()->indexingType());
+ m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->indexingType());
m_lastArray = 0;
}
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
index f77b92a2f..cf30de6b9 100644
--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ArrayAllocationProfile_h
-#define ArrayAllocationProfile_h
+#pragma once
#include "IndexingType.h"
#include "JSArray.h"
@@ -42,7 +41,7 @@ public:
IndexingType selectIndexingType()
{
JSArray* lastArray = m_lastArray;
- if (lastArray && UNLIKELY(lastArray->structure()->indexingType() != m_currentIndexingType))
+ if (lastArray && UNLIKELY(lastArray->indexingType() != m_currentIndexingType))
updateIndexingType();
return m_currentIndexingType;
}
@@ -76,6 +75,3 @@ private:
};
} // namespace JSC
-
-#endif // ArrayAllocationProfile_h
-
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
index 4c055fea5..3146b18f8 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp
@@ -27,6 +27,7 @@
#include "ArrayProfile.h"
#include "CodeBlock.h"
+#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
@@ -72,37 +73,62 @@ void dumpArrayModes(PrintStream& out, ArrayModes arrayModes)
out.print(comma, "ArrayWithArrayStorage");
if (arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage))
out.print(comma, "ArrayWithSlowPutArrayStorage");
+
+ if (arrayModes & Int8ArrayMode)
+ out.print(comma, "Int8ArrayMode");
+ if (arrayModes & Int16ArrayMode)
+ out.print(comma, "Int16ArrayMode");
+ if (arrayModes & Int32ArrayMode)
+ out.print(comma, "Int32ArrayMode");
+ if (arrayModes & Uint8ArrayMode)
+ out.print(comma, "Uint8ArrayMode");
+ if (arrayModes & Uint8ClampedArrayMode)
+ out.print(comma, "Uint8ClampedArrayMode");
+ if (arrayModes & Uint16ArrayMode)
+ out.print(comma, "Uint16ArrayMode");
+ if (arrayModes & Uint32ArrayMode)
+ out.print(comma, "Uint32ArrayMode");
+ if (arrayModes & Float32ArrayMode)
+ out.print(comma, "Float32ArrayMode");
+ if (arrayModes & Float64ArrayMode)
+ out.print(comma, "Float64ArrayMode");
}
-void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock* codeBlock)
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJSLocker& locker, CodeBlock* codeBlock)
{
- if (!m_lastSeenStructure)
+ if (!m_lastSeenStructureID)
return;
- m_observedArrayModes |= arrayModeFromStructure(m_lastSeenStructure);
+ Structure* lastSeenStructure = codeBlock->heap()->structureIDTable().get(m_lastSeenStructureID);
+ computeUpdatedPrediction(locker, codeBlock, lastSeenStructure);
+ m_lastSeenStructureID = 0;
+}
+
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock* codeBlock, Structure* lastSeenStructure)
+{
+ m_observedArrayModes |= arrayModeFromStructure(lastSeenStructure);
if (!m_didPerformFirstRunPruning
&& hasTwoOrMoreBitsSet(m_observedArrayModes)) {
- m_observedArrayModes = arrayModeFromStructure(m_lastSeenStructure);
+ m_observedArrayModes = arrayModeFromStructure(lastSeenStructure);
m_didPerformFirstRunPruning = true;
}
m_mayInterceptIndexedAccesses |=
- m_lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
+ lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
JSGlobalObject* globalObject = codeBlock->globalObject();
- if (!globalObject->isOriginalArrayStructure(m_lastSeenStructure)
- && !globalObject->isOriginalTypedArrayStructure(m_lastSeenStructure))
+ if (!globalObject->isOriginalArrayStructure(lastSeenStructure)
+ && !globalObject->isOriginalTypedArrayStructure(lastSeenStructure))
m_usesOriginalArrayStructures = false;
- m_lastSeenStructure = 0;
}
-CString ArrayProfile::briefDescription(const ConcurrentJITLocker& locker, CodeBlock* codeBlock)
+CString ArrayProfile::briefDescription(const ConcurrentJSLocker& locker, CodeBlock* codeBlock)
{
computeUpdatedPrediction(locker, codeBlock);
return briefDescriptionWithoutUpdating(locker);
}
-CString ArrayProfile::briefDescriptionWithoutUpdating(const ConcurrentJITLocker&)
+CString ArrayProfile::briefDescriptionWithoutUpdating(const ConcurrentJSLocker&)
{
StringPrintStream out;
diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h
index c23230e06..279906de1 100644
--- a/Source/JavaScriptCore/bytecode/ArrayProfile.h
+++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h
@@ -23,13 +23,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ArrayProfile_h
-#define ArrayProfile_h
+#pragma once
-#include "ConcurrentJITLock.h"
+#include "ConcurrentJSLock.h"
#include "JSArray.h"
#include "Structure.h"
-#include <wtf/HashMap.h>
#include <wtf/SegmentedVector.h>
namespace JSC {
@@ -37,20 +35,44 @@ namespace JSC {
class CodeBlock;
class LLIntOffsetsExtractor;
-// This is a bitfield where each bit represents an IndexingType that we have seen.
-// There are 32 indexing types, so an unsigned is enough.
+// This is a bitfield where each bit represents a type of array access that we have seen.
+// There are 16 indexing types that use the lower bits.
+// There are 9 typed array types taking bits 16 to 24.
typedef unsigned ArrayModes;
+const ArrayModes Int8ArrayMode = 1 << 16;
+const ArrayModes Int16ArrayMode = 1 << 17;
+const ArrayModes Int32ArrayMode = 1 << 18;
+const ArrayModes Uint8ArrayMode = 1 << 19;
+const ArrayModes Uint8ClampedArrayMode = 1 << 20;
+const ArrayModes Uint16ArrayMode = 1 << 21;
+const ArrayModes Uint32ArrayMode = 1 << 22;
+const ArrayModes Float32ArrayMode = 1 << 23;
+const ArrayModes Float64ArrayMode = 1 << 24;
+
#define asArrayModes(type) \
(static_cast<unsigned>(1) << static_cast<unsigned>(type))
+#define ALL_TYPED_ARRAY_MODES \
+ (Int8ArrayMode \
+ | Int16ArrayMode \
+ | Int32ArrayMode \
+ | Uint8ArrayMode \
+ | Uint8ClampedArrayMode \
+ | Uint16ArrayMode \
+ | Uint32ArrayMode \
+ | Float32ArrayMode \
+ | Float64ArrayMode \
+ )
+
#define ALL_NON_ARRAY_ARRAY_MODES \
(asArrayModes(NonArray) \
| asArrayModes(NonArrayWithInt32) \
| asArrayModes(NonArrayWithDouble) \
| asArrayModes(NonArrayWithContiguous) \
| asArrayModes(NonArrayWithArrayStorage) \
- | asArrayModes(NonArrayWithSlowPutArrayStorage))
+ | asArrayModes(NonArrayWithSlowPutArrayStorage) \
+ | ALL_TYPED_ARRAY_MODES)
#define ALL_ARRAY_ARRAY_MODES \
(asArrayModes(ArrayClass) \
@@ -65,6 +87,29 @@ typedef unsigned ArrayModes;
inline ArrayModes arrayModeFromStructure(Structure* structure)
{
+ switch (structure->classInfo()->typedArrayStorageType) {
+ case TypeInt8:
+ return Int8ArrayMode;
+ case TypeUint8:
+ return Uint8ArrayMode;
+ case TypeUint8Clamped:
+ return Uint8ClampedArrayMode;
+ case TypeInt16:
+ return Int16ArrayMode;
+ case TypeUint16:
+ return Uint16ArrayMode;
+ case TypeInt32:
+ return Int32ArrayMode;
+ case TypeUint32:
+ return Uint32ArrayMode;
+ case TypeFloat32:
+ return Float32ArrayMode;
+ case TypeFloat64:
+ return Float64ArrayMode;
+ case TypeDataView:
+ case NotTypedArray:
+ break;
+ }
return asArrayModes(structure->indexingType());
}
@@ -135,7 +180,7 @@ class ArrayProfile {
public:
ArrayProfile()
: m_bytecodeOffset(std::numeric_limits<unsigned>::max())
- , m_lastSeenStructure(0)
+ , m_lastSeenStructureID(0)
, m_mayStoreToHole(false)
, m_outOfBounds(false)
, m_mayInterceptIndexedAccesses(false)
@@ -147,7 +192,7 @@ public:
ArrayProfile(unsigned bytecodeOffset)
: m_bytecodeOffset(bytecodeOffset)
- , m_lastSeenStructure(0)
+ , m_lastSeenStructureID(0)
, m_mayStoreToHole(false)
, m_outOfBounds(false)
, m_mayInterceptIndexedAccesses(false)
@@ -159,28 +204,31 @@ public:
unsigned bytecodeOffset() const { return m_bytecodeOffset; }
- Structure** addressOfLastSeenStructure() { return &m_lastSeenStructure; }
+ StructureID* addressOfLastSeenStructureID() { return &m_lastSeenStructureID; }
ArrayModes* addressOfArrayModes() { return &m_observedArrayModes; }
bool* addressOfMayStoreToHole() { return &m_mayStoreToHole; }
+
+ void setOutOfBounds() { m_outOfBounds = true; }
bool* addressOfOutOfBounds() { return &m_outOfBounds; }
void observeStructure(Structure* structure)
{
- m_lastSeenStructure = structure;
+ m_lastSeenStructureID = structure->id();
}
- void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*);
+ void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*);
+ void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*, Structure* lastSeenStructure);
- ArrayModes observedArrayModes(const ConcurrentJITLocker&) const { return m_observedArrayModes; }
- bool mayInterceptIndexedAccesses(const ConcurrentJITLocker&) const { return m_mayInterceptIndexedAccesses; }
+ ArrayModes observedArrayModes(const ConcurrentJSLocker&) const { return m_observedArrayModes; }
+ bool mayInterceptIndexedAccesses(const ConcurrentJSLocker&) const { return m_mayInterceptIndexedAccesses; }
- bool mayStoreToHole(const ConcurrentJITLocker&) const { return m_mayStoreToHole; }
- bool outOfBounds(const ConcurrentJITLocker&) const { return m_outOfBounds; }
+ bool mayStoreToHole(const ConcurrentJSLocker&) const { return m_mayStoreToHole; }
+ bool outOfBounds(const ConcurrentJSLocker&) const { return m_outOfBounds; }
- bool usesOriginalArrayStructures(const ConcurrentJITLocker&) const { return m_usesOriginalArrayStructures; }
+ bool usesOriginalArrayStructures(const ConcurrentJSLocker&) const { return m_usesOriginalArrayStructures; }
- CString briefDescription(const ConcurrentJITLocker&, CodeBlock*);
- CString briefDescriptionWithoutUpdating(const ConcurrentJITLocker&);
+ CString briefDescription(const ConcurrentJSLocker&, CodeBlock*);
+ CString briefDescriptionWithoutUpdating(const ConcurrentJSLocker&);
private:
friend class LLIntOffsetsExtractor;
@@ -188,7 +236,7 @@ private:
static Structure* polymorphicStructure() { return static_cast<Structure*>(reinterpret_cast<void*>(1)); }
unsigned m_bytecodeOffset;
- Structure* m_lastSeenStructure;
+ StructureID m_lastSeenStructureID;
bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies.
bool m_outOfBounds;
bool m_mayInterceptIndexedAccesses : 1;
@@ -197,9 +245,6 @@ private:
ArrayModes m_observedArrayModes;
};
-typedef SegmentedVector<ArrayProfile, 4, 0> ArrayProfileVector;
+typedef SegmentedVector<ArrayProfile, 4> ArrayProfileVector;
} // namespace JSC
-
-#endif // ArrayProfile_h
-
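With the typed-array bits folded into ArrayModes, consumers can test the bitfield directly. Two illustrative predicates (a sketch; the DFG's ArrayMode machinery does the real interpretation):

    // Did the profile only ever see contiguous-shape JSArrays?
    inline bool sawOnlyArrayWithContiguous(ArrayModes modes)
    {
        return modes && !(modes & ~asArrayModes(ArrayWithContiguous));
    }

    // Did it see any typed array at all (bits 16 to 24)?
    inline bool sawAnyTypedArray(ArrayModes modes)
    {
        return !!(modes & ALL_TYPED_ARRAY_MODES);
    }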
diff --git a/Source/JavaScriptCore/bytecode/ByValInfo.h b/Source/JavaScriptCore/bytecode/ByValInfo.h
index 35fae0c60..e5fa70858 100644
--- a/Source/JavaScriptCore/bytecode/ByValInfo.h
+++ b/Source/JavaScriptCore/bytecode/ByValInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,26 +23,30 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ByValInfo_h
-#define ByValInfo_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(JIT)
+#pragma once
#include "ClassInfo.h"
#include "CodeLocation.h"
+#include "CodeOrigin.h"
#include "IndexingType.h"
#include "JITStubRoutine.h"
#include "Structure.h"
namespace JSC {
+class Symbol;
+
+#if ENABLE(JIT)
+
+class StructureStubInfo;
+
enum JITArrayMode {
JITInt32,
JITDouble,
JITContiguous,
JITArrayStorage,
+ JITDirectArguments,
+ JITScopedArguments,
JITInt8Array,
JITInt16Array,
JITInt32Array,
@@ -67,6 +71,17 @@ inline bool isOptimizableIndexingType(IndexingType indexingType)
}
}
+inline bool hasOptimizableIndexingForJSType(JSType type)
+{
+ switch (type) {
+ case DirectArgumentsType:
+ case ScopedArgumentsType:
+ return true;
+ default:
+ return false;
+ }
+}
+
inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo)
{
return isTypedView(classInfo->typedArrayStorageType);
@@ -75,6 +90,7 @@ inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo)
inline bool hasOptimizableIndexing(Structure* structure)
{
return isOptimizableIndexingType(structure->indexingType())
+ || hasOptimizableIndexingForJSType(structure->typeInfo().type())
|| hasOptimizableIndexingForClassInfo(structure->classInfo());
}
@@ -95,6 +111,19 @@ inline JITArrayMode jitArrayModeForIndexingType(IndexingType indexingType)
}
}
+inline JITArrayMode jitArrayModeForJSType(JSType type)
+{
+ switch (type) {
+ case DirectArgumentsType:
+ return JITDirectArguments;
+ case ScopedArgumentsType:
+ return JITScopedArguments;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return JITContiguous;
+ }
+}
+
inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo)
{
switch (classInfo->typedArrayStorageType) {
@@ -122,6 +151,19 @@ inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo)
}
}
+inline bool jitArrayModePermitsPut(JITArrayMode mode)
+{
+ switch (mode) {
+ case JITDirectArguments:
+ case JITScopedArguments:
+ // We could support put_by_val on these at some point, but it's just not that profitable
+ // at the moment.
+ return false;
+ default:
+ return true;
+ }
+}
+
inline TypedArrayType typedArrayTypeForJITArrayMode(JITArrayMode mode)
{
switch (mode) {
@@ -154,30 +196,49 @@ inline JITArrayMode jitArrayModeForStructure(Structure* structure)
if (isOptimizableIndexingType(structure->indexingType()))
return jitArrayModeForIndexingType(structure->indexingType());
+ if (hasOptimizableIndexingForJSType(structure->typeInfo().type()))
+ return jitArrayModeForJSType(structure->typeInfo().type());
+
ASSERT(hasOptimizableIndexingForClassInfo(structure->classInfo()));
return jitArrayModeForClassInfo(structure->classInfo());
}
struct ByValInfo {
ByValInfo() { }
-
- ByValInfo(unsigned bytecodeIndex, CodeLocationJump badTypeJump, JITArrayMode arrayMode, int16_t badTypeJumpToDone, int16_t returnAddressToSlowPath)
+
+ ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, CodeLocationLabel exceptionHandler, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath)
: bytecodeIndex(bytecodeIndex)
+ , notIndexJump(notIndexJump)
, badTypeJump(badTypeJump)
+ , exceptionHandler(exceptionHandler)
, arrayMode(arrayMode)
+ , arrayProfile(arrayProfile)
, badTypeJumpToDone(badTypeJumpToDone)
+ , badTypeJumpToNextHotPath(badTypeJumpToNextHotPath)
, returnAddressToSlowPath(returnAddressToSlowPath)
, slowPathCount(0)
+ , stubInfo(nullptr)
+ , tookSlowPath(false)
+ , seen(false)
{
}
-
+
unsigned bytecodeIndex;
+ CodeLocationJump notIndexJump;
CodeLocationJump badTypeJump;
+ CodeLocationLabel exceptionHandler;
JITArrayMode arrayMode; // The array mode that was baked into the inline JIT code.
+ ArrayProfile* arrayProfile;
int16_t badTypeJumpToDone;
+ int16_t badTypeJumpToNextHotPath;
int16_t returnAddressToSlowPath;
unsigned slowPathCount;
RefPtr<JITStubRoutine> stubRoutine;
+ Identifier cachedId;
+ WriteBarrier<Symbol> cachedSymbol;
+ StructureStubInfo* stubInfo;
+ bool tookSlowPath : 1;
+ bool seen : 1;
};
inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
@@ -185,9 +246,12 @@ inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
return info->bytecodeIndex;
}
-} // namespace JSC
+typedef HashMap<CodeOrigin, ByValInfo*, CodeOriginApproximateHash> ByValInfoMap;
-#endif // ENABLE(JIT)
+#else // ENABLE(JIT)
+
+typedef HashMap<int, void*> ByValInfoMap;
-#endif // ByValInfo_h
+#endif // ENABLE(JIT)
+} // namespace JSC
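The new JSType path means an arguments object now gets its own JIT array mode even though its indexing type is not optimizable. Illustrative sanity checks over the helpers above:

    // DirectArguments/ScopedArguments route through jitArrayModeForJSType().
    ASSERT(jitArrayModeForJSType(DirectArgumentsType) == JITDirectArguments);
    ASSERT(jitArrayModeForJSType(ScopedArgumentsType) == JITScopedArguments);

    // Loads are supported, but stores deliberately stay on the slow path.
    ASSERT(!jitArrayModePermitsPut(JITDirectArguments));
    ASSERT(!jitArrayModePermitsPut(JITScopedArguments));
    ASSERT(jitArrayModePermitsPut(JITContiguous));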
diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
index d7489d31a..47c481d5d 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,120 +27,67 @@
#include "BytecodeBasicBlock.h"
#include "CodeBlock.h"
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
#include "PreciseJumpTargets.h"
namespace JSC {
-static bool isBranch(OpcodeID opcodeID)
+void BytecodeBasicBlock::shrinkToFit()
{
- switch (opcodeID) {
- case op_jmp:
- case op_jtrue:
- case op_jfalse:
- case op_jeq_null:
- case op_jneq_null:
- case op_jneq_ptr:
- case op_jless:
- case op_jlesseq:
- case op_jgreater:
- case op_jgreatereq:
- case op_jnless:
- case op_jnlesseq:
- case op_jngreater:
- case op_jngreatereq:
- case op_switch_imm:
- case op_switch_char:
- case op_switch_string:
- case op_get_pnames:
- case op_next_pname:
- case op_check_has_instance:
- return true;
- default:
- return false;
- }
+ m_offsets.shrinkToFit();
+ m_successors.shrinkToFit();
}
-static bool isUnconditionalBranch(OpcodeID opcodeID)
+static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset)
{
- switch (opcodeID) {
- case op_jmp:
+ if (opcodeID == op_catch)
return true;
- default:
- return false;
- }
-}
-static bool isTerminal(OpcodeID opcodeID)
-{
- switch (opcodeID) {
- case op_ret:
- case op_ret_object_or_this:
- case op_end:
- return true;
- default:
- return false;
- }
+ return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset);
}
-static bool isThrow(OpcodeID opcodeID)
+template<typename Block, typename Instruction>
+void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
{
- switch (opcodeID) {
- case op_throw:
- case op_throw_static_error:
- return true;
- default:
- return false;
- }
-}
+ Vector<unsigned, 32> jumpTargets;
+ computePreciseJumpTargets(codeBlock, instructionsBegin, instructionCount, jumpTargets);
-static bool isJumpTarget(OpcodeID opcodeID, Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset)
-{
- if (opcodeID == op_catch)
- return true;
+ auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) {
+ block->m_index = basicBlocks.size();
+ basicBlocks.append(WTFMove(block));
+ };
- for (unsigned i = 0; i < jumpTargets.size(); i++) {
- if (bytecodeOffset == jumpTargets[i])
- return true;
- }
- return false;
-}
+ auto linkBlocks = [&] (BytecodeBasicBlock* from, BytecodeBasicBlock* to) {
+ from->addSuccessor(to);
+ };
-static void linkBlocks(BytecodeBasicBlock* predecessor, BytecodeBasicBlock* successor)
-{
- predecessor->addSuccessor(successor);
- successor->addPredecessor(predecessor);
-}
+ // Create the entry and exit basic blocks.
+ basicBlocks.reserveCapacity(jumpTargets.size() + 2);
-void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-{
- Vector<unsigned, 32> jumpTargets;
- computePreciseJumpTargets(codeBlock, jumpTargets);
+ auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock);
+ auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0);
+ linkBlocks(entry.get(), firstBlock.get());
- // Create the entry and exit basic blocks.
- BytecodeBasicBlock* entry = new BytecodeBasicBlock(BytecodeBasicBlock::EntryBlock);
- basicBlocks.append(adoptRef(entry));
- BytecodeBasicBlock* exit = new BytecodeBasicBlock(BytecodeBasicBlock::ExitBlock);
+ appendBlock(WTFMove(entry));
+ BytecodeBasicBlock* current = firstBlock.get();
+ appendBlock(WTFMove(firstBlock));
- // Find basic block boundaries.
- BytecodeBasicBlock* current = new BytecodeBasicBlock(0, 0);
- linkBlocks(entry, current);
- basicBlocks.append(adoptRef(current));
+ auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock);
bool nextInstructionIsLeader = false;
Interpreter* interpreter = codeBlock->vm()->interpreter;
- Instruction* instructionsBegin = codeBlock->instructions().begin();
- unsigned instructionCount = codeBlock->instructions().size();
for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
- OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
+ OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
unsigned opcodeLength = opcodeLengths[opcodeID];
bool createdBlock = false;
// If the current bytecode is a jump target, then it's the leader of its own basic block.
if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) {
- BytecodeBasicBlock* block = new BytecodeBasicBlock(bytecodeOffset, opcodeLength);
- basicBlocks.append(adoptRef(block));
- current = block;
+ auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength);
+ current = newBlock.get();
+ appendBlock(WTFMove(newBlock));
createdBlock = true;
nextInstructionIsLeader = false;
bytecodeOffset += opcodeLength;
@@ -154,7 +101,7 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
continue;
// Otherwise, just add to the length of the current block.
- current->addBytecodeLength(opcodeLength);
+ current->addLength(opcodeLength);
bytecodeOffset += opcodeLength;
}
@@ -166,14 +113,13 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
continue;
bool fallsThrough = true;
- for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) {
- const Instruction& currentInstruction = instructionsBegin[bytecodeOffset];
- OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction.u.opcode);
+ for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
+ OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
unsigned opcodeLength = opcodeLengths[opcodeID];
// If we found a terminal bytecode, link to the exit block.
if (isTerminal(opcodeID)) {
- ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
- linkBlocks(block, exit);
+ ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
+ linkBlocks(block, exit.get());
fallsThrough = false;
break;
}
@@ -182,16 +128,16 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
// If there isn't one, treat this throw as a terminal. This is true even if we have a finally
// block because the finally block will create its own catch, which will generate a HandlerInfo.
if (isThrow(opcodeID)) {
- ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
- HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
+ ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
+ auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
fallsThrough = false;
if (!handler) {
- linkBlocks(block, exit);
+ linkBlocks(block, exit.get());
break;
}
for (unsigned i = 0; i < basicBlocks.size(); i++) {
BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
- if (handler->target == otherBlock->leaderBytecodeOffset()) {
+ if (handler->target == otherBlock->leaderOffset()) {
linkBlocks(block, otherBlock);
break;
}
@@ -201,15 +147,26 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
// If we found a branch, link to the block(s) that we jump to.
if (isBranch(opcodeID)) {
- ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength());
+ ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
Vector<unsigned, 1> bytecodeOffsetsJumpedTo;
- findJumpTargetsForBytecodeOffset(codeBlock, bytecodeOffset, bytecodeOffsetsJumpedTo);
+ findJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, bytecodeOffsetsJumpedTo);
+ size_t numberOfJumpTargets = bytecodeOffsetsJumpedTo.size();
+ ASSERT(numberOfJumpTargets);
for (unsigned i = 0; i < basicBlocks.size(); i++) {
BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
- if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderBytecodeOffset()))
+ if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderOffset())) {
linkBlocks(block, otherBlock);
+ --numberOfJumpTargets;
+ if (!numberOfJumpTargets)
+ break;
+ }
}
+                // numberOfJumpTargets may not be 0 here if there are multiple jumps targeting the same
+                // basic block (e.g. in a switch-type opcode). Since we only decrement numberOfJumpTargets
+                // once per basic block, the duplicates are not accounted for. For our purposes here,
+                // that doesn't matter, because we only need to link to the target block once regardless
+                // of how many ways this block can jump there.
if (isUnconditionalBranch(opcodeID))
fallsThrough = false;
@@ -227,7 +184,20 @@ void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasi
}
}
- basicBlocks.append(adoptRef(exit));
+ appendBlock(WTFMove(exit));
+
+ for (auto& basicBlock : basicBlocks)
+ basicBlock->shrinkToFit();
+}
+
+void BytecodeBasicBlock::compute(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
+{
+ computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks);
+}
+
+void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
+{
+ BytecodeBasicBlock::computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks);
}
} // namespace JSC
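Note that the rewritten isJumpTarget() relies on std::binary_search, which is correct only because computePreciseJumpTargets() produces offsets in ascending order. The contract in isolation (values are hypothetical):

    #include <algorithm>
    #include <vector>

    bool isTarget(const std::vector<unsigned>& sortedTargets, unsigned offset)
    {
        // Precondition: sortedTargets is sorted ascending, as the jump-target
        // computation guarantees; otherwise the result is unspecified.
        return std::binary_search(sortedTargets.begin(), sortedTargets.end(), offset);
    }

    // isTarget({3, 10, 42}, 10) == true
    // isTarget({3, 10, 42}, 11) == false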
diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
index 736ba8540..fb81650ca 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,77 +23,80 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BytecodeBasicBlock_h
-#define BytecodeBasicBlock_h
+#pragma once
#include <limits.h>
#include <wtf/FastBitVector.h>
-#include <wtf/HashMap.h>
-#include <wtf/RefCounted.h>
#include <wtf/Vector.h>
namespace JSC {
class CodeBlock;
+class UnlinkedCodeBlock;
+struct Instruction;
+struct UnlinkedInstruction;
-class BytecodeBasicBlock : public RefCounted<BytecodeBasicBlock> {
+class BytecodeBasicBlock {
+ WTF_MAKE_FAST_ALLOCATED;
public:
enum SpecialBlockType { EntryBlock, ExitBlock };
BytecodeBasicBlock(unsigned start, unsigned length);
BytecodeBasicBlock(SpecialBlockType);
+ void shrinkToFit();
- bool isEntryBlock() { return !m_leaderBytecodeOffset && !m_totalBytecodeLength; }
- bool isExitBlock() { return m_leaderBytecodeOffset == UINT_MAX && m_totalBytecodeLength == UINT_MAX; }
+ bool isEntryBlock() { return !m_leaderOffset && !m_totalLength; }
+ bool isExitBlock() { return m_leaderOffset == UINT_MAX && m_totalLength == UINT_MAX; }
- unsigned leaderBytecodeOffset() { return m_leaderBytecodeOffset; }
- unsigned totalBytecodeLength() { return m_totalBytecodeLength; }
+ unsigned leaderOffset() { return m_leaderOffset; }
+ unsigned totalLength() { return m_totalLength; }
- Vector<unsigned>& bytecodeOffsets() { return m_bytecodeOffsets; }
- void addBytecodeLength(unsigned);
+ const Vector<unsigned>& offsets() const { return m_offsets; }
- void addPredecessor(BytecodeBasicBlock* block) { m_predecessors.append(block); }
- void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); }
-
- Vector<BytecodeBasicBlock*>& predecessors() { return m_predecessors; }
- Vector<BytecodeBasicBlock*>& successors() { return m_successors; }
+ const Vector<BytecodeBasicBlock*>& successors() const { return m_successors; }
FastBitVector& in() { return m_in; }
FastBitVector& out() { return m_out; }
+ unsigned index() const { return m_index; }
+
+ static void compute(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);
+ static void compute(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);
+
private:
- unsigned m_leaderBytecodeOffset;
- unsigned m_totalBytecodeLength;
+ template<typename Block, typename Instruction> static void computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks);
- Vector<unsigned> m_bytecodeOffsets;
+ void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); }
- Vector<BytecodeBasicBlock*> m_predecessors;
+ void addLength(unsigned);
+
+ unsigned m_leaderOffset;
+ unsigned m_totalLength;
+ unsigned m_index;
+
+ Vector<unsigned> m_offsets;
Vector<BytecodeBasicBlock*> m_successors;
FastBitVector m_in;
FastBitVector m_out;
};
-void computeBytecodeBasicBlocks(CodeBlock*, Vector<RefPtr<BytecodeBasicBlock> >&);
-
inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length)
- : m_leaderBytecodeOffset(start)
- , m_totalBytecodeLength(length)
+ : m_leaderOffset(start)
+ , m_totalLength(length)
{
- m_bytecodeOffsets.append(m_leaderBytecodeOffset);
+ m_offsets.append(m_leaderOffset);
}
inline BytecodeBasicBlock::BytecodeBasicBlock(BytecodeBasicBlock::SpecialBlockType blockType)
- : m_leaderBytecodeOffset(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
- , m_totalBytecodeLength(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
+ : m_leaderOffset(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
+ , m_totalLength(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
{
}
-inline void BytecodeBasicBlock::addBytecodeLength(unsigned bytecodeLength)
+inline void BytecodeBasicBlock::addLength(unsigned bytecodeLength)
{
- m_bytecodeOffsets.append(m_leaderBytecodeOffset + m_totalBytecodeLength);
- m_totalBytecodeLength += bytecodeLength;
+ m_offsets.append(m_leaderOffset + m_totalLength);
+ m_totalLength += bytecodeLength;
}
} // namespace JSC
-
-#endif // BytecodeBasicBlock_h
diff --git a/Source/JavaScriptCore/bytecode/BytecodeConventions.h b/Source/JavaScriptCore/bytecode/BytecodeConventions.h
index e375f263c..7781378ce 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeConventions.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeConventions.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,14 +23,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BytecodeConventions_h
-#define BytecodeConventions_h
+#pragma once
// Register numbers used in bytecode operations have different meaning according to their ranges:
-// 0x80000000-0xFFFFFFFF Negative indices from the CallFrame pointer are entries in the call frame, see JSStack.h.
+// 0x80000000-0xFFFFFFFF Negative indices from the CallFrame pointer are entries in the call frame.
// 0x00000000-0x3FFFFFFF  Forward indices from the CallFrame pointer are local vars and temporaries within the function's call frame.
// 0x40000000-0x7FFFFFFF Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
static const int FirstConstantRegisterIndex = 0x40000000;
-
-#endif // BytecodeConventions_h
-
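The ranges above translate directly into a classification on the signed operand value. An illustrative decoder (a sketch, not a JSC API; in-tree code expresses this through VirtualRegister):

    enum class OperandKind { CallFrameEntry, Local, Constant };

    inline OperandKind classifyOperand(int index)
    {
        if (index < 0)
            return OperandKind::CallFrameEntry; // 0x80000000-0xFFFFFFFF as signed
        if (index >= FirstConstantRegisterIndex)
            return OperandKind::Constant;       // 0x40000000-0x7FFFFFFF
        return OperandKind::Local;              // 0x00000000-0x3FFFFFFF
    }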
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
new file mode 100644
index 000000000..f7e1e9a3d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeGeneratorification.h"
+
+#include "BytecodeLivenessAnalysisInlines.h"
+#include "BytecodeRewriter.h"
+#include "BytecodeUseDef.h"
+#include "IdentifierInlines.h"
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
+#include "JSCJSValueInlines.h"
+#include "JSGeneratorFunction.h"
+#include "StrongInlines.h"
+#include "UnlinkedCodeBlock.h"
+#include <wtf/Optional.h>
+
+namespace JSC {
+
+struct YieldData {
+ size_t point { 0 };
+ int argument { 0 };
+ FastBitVector liveness;
+};
+
+class BytecodeGeneratorification {
+public:
+ typedef Vector<YieldData> Yields;
+
+ BytecodeGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)
+ : m_graph(codeBlock, instructions)
+ , m_generatorFrameSymbolTable(*codeBlock->vm(), generatorFrameSymbolTable)
+ , m_generatorFrameSymbolTableIndex(generatorFrameSymbolTableIndex)
+ {
+ for (BytecodeBasicBlock* block : m_graph) {
+ for (unsigned bytecodeOffset : block->offsets()) {
+ const UnlinkedInstruction* pc = &m_graph.instructions()[bytecodeOffset];
+ switch (pc->u.opcode) {
+ case op_enter: {
+ m_enterPoint = bytecodeOffset;
+ break;
+ }
+
+ case op_yield: {
+ unsigned liveCalleeLocalsIndex = pc[2].u.index;
+ if (liveCalleeLocalsIndex >= m_yields.size())
+ m_yields.resize(liveCalleeLocalsIndex + 1);
+ YieldData& data = m_yields[liveCalleeLocalsIndex];
+ data.point = bytecodeOffset;
+ data.argument = pc[3].u.operand;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ struct Storage {
+ Identifier identifier;
+ unsigned identifierIndex;
+ ScopeOffset scopeOffset;
+ };
+
+ void run();
+
+ BytecodeGraph<UnlinkedCodeBlock>& graph() { return m_graph; }
+
+ const Yields& yields() const
+ {
+ return m_yields;
+ }
+
+ Yields& yields()
+ {
+ return m_yields;
+ }
+
+ unsigned enterPoint() const
+ {
+ return m_enterPoint;
+ }
+
+private:
+ Storage storageForGeneratorLocal(unsigned index)
+ {
+        // We assign a symbol to each register: there is a one-to-one correspondence between registers and symbols.
+        // By doing so, we allocate dedicated storage for saving each register.
+        // This lets us avoid re-saving live registers that have not been overwritten since the previous resume:
+        // a register can still be retrieved even if the immediately preceding op_save did not store it.
+
+ if (m_storages.size() <= index)
+ m_storages.resize(index + 1);
+ if (std::optional<Storage> storage = m_storages[index])
+ return *storage;
+
+ UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+ Identifier identifier = Identifier::fromUid(PrivateName());
+ unsigned identifierIndex = codeBlock->numberOfIdentifiers();
+ codeBlock->addIdentifier(identifier);
+ ScopeOffset scopeOffset = m_generatorFrameSymbolTable->takeNextScopeOffset(NoLockingNecessary);
+ m_generatorFrameSymbolTable->set(NoLockingNecessary, identifier.impl(), SymbolTableEntry(VarOffset(scopeOffset)));
+
+ Storage storage = {
+ identifier,
+ identifierIndex,
+ scopeOffset
+ };
+ m_storages[index] = storage;
+ return storage;
+ }
+
+ unsigned m_enterPoint { 0 };
+ BytecodeGraph<UnlinkedCodeBlock> m_graph;
+ Vector<std::optional<Storage>> m_storages;
+ Yields m_yields;
+ Strong<SymbolTable> m_generatorFrameSymbolTable;
+ int m_generatorFrameSymbolTableIndex;
+};
+
+class GeneratorLivenessAnalysis : public BytecodeLivenessPropagation<GeneratorLivenessAnalysis> {
+public:
+ GeneratorLivenessAnalysis(BytecodeGeneratorification& generatorification)
+ : m_generatorification(generatorification)
+ {
+ }
+
+ template<typename Functor>
+ void computeDefsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, OpcodeID opcodeID, UnlinkedInstruction* instruction, FastBitVector&, const Functor& functor)
+ {
+ JSC::computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+ }
+
+ template<typename Functor>
+ void computeUsesForBytecodeOffset(UnlinkedCodeBlock* codeBlock, OpcodeID opcodeID, UnlinkedInstruction* instruction, FastBitVector&, const Functor& functor)
+ {
+ JSC::computeUsesForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+ }
+
+ void run()
+ {
+        // Perform modified liveness analysis to determine which locals are live at the merge points.
+        // This produces a conservative answer to the question, "which variables should be saved and resumed?".
+
+ runLivenessFixpoint(m_generatorification.graph());
+
+ for (YieldData& data : m_generatorification.yields())
+ data.liveness = getLivenessInfoAtBytecodeOffset(m_generatorification.graph(), data.point + opcodeLength(op_yield));
+ }
+
+private:
+ BytecodeGeneratorification& m_generatorification;
+};
+
+void BytecodeGeneratorification::run()
+{
+    // We calculate liveness at each merge point. This tells us, conservatively, which registers should be saved and resumed.
+
+ {
+ GeneratorLivenessAnalysis pass(*this);
+ pass.run();
+ }
+
+ UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+ BytecodeRewriter rewriter(m_graph);
+
+    // Set up the global switch for the generator.
+ {
+ unsigned nextToEnterPoint = enterPoint() + opcodeLength(op_enter);
+ unsigned switchTableIndex = m_graph.codeBlock()->numberOfSwitchJumpTables();
+ VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State));
+ auto& jumpTable = m_graph.codeBlock()->addSwitchJumpTable();
+ jumpTable.min = 0;
+ jumpTable.branchOffsets.resize(m_yields.size() + 1);
+ jumpTable.branchOffsets.fill(0);
+ jumpTable.add(0, nextToEnterPoint);
+ for (unsigned i = 0; i < m_yields.size(); ++i)
+ jumpTable.add(i + 1, m_yields[i].point);
+
+ rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) {
+ fragment.appendInstruction(op_switch_imm, switchTableIndex, nextToEnterPoint, state.offset());
+ });
+ }
+
+ for (const YieldData& data : m_yields) {
+ VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame));
+
+ // Emit save sequence.
+ rewriter.insertFragmentBefore(data.point, [&](BytecodeRewriter::Fragment& fragment) {
+ data.liveness.forEachSetBit([&](size_t index) {
+ VirtualRegister operand = virtualRegisterForLocal(index);
+ Storage storage = storageForGeneratorLocal(index);
+
+ fragment.appendInstruction(
+ op_put_to_scope,
+ scope.offset(), // scope
+ storage.identifierIndex, // identifier
+ operand.offset(), // value
+ GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
+ m_generatorFrameSymbolTableIndex, // symbol table constant index
+ storage.scopeOffset.offset() // scope offset
+ );
+ });
+
+            // Insert op_ret just after the save sequence.
+ fragment.appendInstruction(op_ret, data.argument);
+ });
+
+ // Emit resume sequence.
+ rewriter.insertFragmentAfter(data.point, [&](BytecodeRewriter::Fragment& fragment) {
+ data.liveness.forEachSetBit([&](size_t index) {
+ VirtualRegister operand = virtualRegisterForLocal(index);
+ Storage storage = storageForGeneratorLocal(index);
+
+ UnlinkedValueProfile profile = codeBlock->addValueProfile();
+ fragment.appendInstruction(
+ op_get_from_scope,
+ operand.offset(), // dst
+ scope.offset(), // scope
+ storage.identifierIndex, // identifier
+ GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
+ 0, // local scope depth
+ storage.scopeOffset.offset(), // scope offset
+ profile // profile
+ );
+ });
+ });
+
+        // Remove the original op_yield; the save and resume sequences now replace it.
+ rewriter.removeBytecode(data.point);
+ }
+
+ rewriter.execute();
+}
+
+void performGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)
+{
+ BytecodeGeneratorification pass(codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex);
+ pass.run();
+}
+
+} // namespace JSC
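
To see the shape of the transformation, here is a hand-written C++ analogue of what the rewriter produces. This is a sketch under assumed names (GeneratorFrame, resume, and savedX are illustrative, not JSC types). The inserted op_switch_imm plays the role of the switch on the generator's state argument; each yield becomes a save sequence (one op_put_to_scope per live local) followed by op_ret, and the matching resume sequence (one op_get_from_scope per live local) runs when the switch jumps just past the yield point.

    #include <cstdio>

    // Hypothetical stand-in for the generator frame scope: one dedicated
    // slot per live local, as allocated by storageForGeneratorLocal().
    struct GeneratorFrame {
        int state { 0 };  // 0 = fresh generator; i + 1 = resume after yield #i
        int savedX { 0 }; // slot for the live local 'x'
    };

    int resume(GeneratorFrame& frame)
    {
        int x; // a callee local
        switch (frame.state) { // the op_switch_imm inserted after op_enter
        case 0:
            x = 42;
            frame.savedX = x; // save sequence: op_put_to_scope per live local
            frame.state = 1;
            return x;         // op_ret ends the save sequence
        case 1:
            x = frame.savedX; // resume sequence: op_get_from_scope per live local
            return x + 1;     // execution continues just past the yield point
        default:
            return -1; // completed
        }
    }

    int main()
    {
        GeneratorFrame frame;
        std::printf("%d %d\n", resume(frame), resume(frame)); // prints "42 43"
    }
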
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h
new file mode 100644
index 000000000..c7b613746
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class SymbolTable;
+
+void performGeneratorification(UnlinkedCodeBlock*, UnlinkedCodeBlock::UnpackedInstructions&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex);
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeGraph.h b/Source/JavaScriptCore/bytecode/BytecodeGraph.h
new file mode 100644
index 000000000..38a13c601
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeGraph.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeBasicBlock.h"
+#include <wtf/IndexedContainerIterator.h>
+#include <wtf/IteratorRange.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class BytecodeBasicBlock;
+
+template<typename Block>
+class BytecodeGraph {
+ WTF_MAKE_FAST_ALLOCATED;
+ WTF_MAKE_NONCOPYABLE(BytecodeGraph);
+public:
+ typedef Block CodeBlock;
+ typedef typename Block::Instruction Instruction;
+ typedef Vector<std::unique_ptr<BytecodeBasicBlock>> BasicBlocksVector;
+
+ typedef WTF::IndexedContainerIterator<BytecodeGraph<Block>> iterator;
+
+ inline BytecodeGraph(Block*, typename Block::UnpackedInstructions&);
+
+ Block* codeBlock() const { return m_codeBlock; }
+
+ typename Block::UnpackedInstructions& instructions() { return m_instructions; }
+
+ WTF::IteratorRange<BasicBlocksVector::reverse_iterator> basicBlocksInReverseOrder()
+ {
+ return WTF::makeIteratorRange(m_basicBlocks.rbegin(), m_basicBlocks.rend());
+ }
+
+ static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset)
+ {
+ unsigned leaderOffset = block->leaderOffset();
+ return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalLength();
+ }
+
+ BytecodeBasicBlock* findBasicBlockForBytecodeOffset(unsigned bytecodeOffset)
+ {
+        // Binary-search on leader offsets instead of scanning every block linearly;
+        // the result is then adjusted by at most one block in either direction below.
+
+ std::unique_ptr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), bytecodeOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); });
+ // We found the block we were looking for.
+ if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset))
+ return (*basicBlock).get();
+
+ // Basic block is to the left of the returned block.
+ if (bytecodeOffset < (*basicBlock)->leaderOffset()) {
+ ASSERT(basicBlock - 1 >= m_basicBlocks.data());
+ ASSERT(blockContainsBytecodeOffset(basicBlock[-1].get(), bytecodeOffset));
+ return basicBlock[-1].get();
+ }
+
+ // Basic block is to the right of the returned block.
+ ASSERT(&basicBlock[1] <= &m_basicBlocks.last());
+ ASSERT(blockContainsBytecodeOffset(basicBlock[1].get(), bytecodeOffset));
+ return basicBlock[1].get();
+ }
+
+ BytecodeBasicBlock* findBasicBlockWithLeaderOffset(unsigned leaderOffset)
+ {
+ return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), leaderOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); })).get();
+ }
+
+ unsigned size() const { return m_basicBlocks.size(); }
+ BytecodeBasicBlock* at(unsigned index) const { return m_basicBlocks[index].get(); }
+ BytecodeBasicBlock* operator[](unsigned index) const { return at(index); }
+
+ iterator begin() const { return iterator(*this, 0); }
+ iterator end() const { return iterator(*this, size()); }
+ BytecodeBasicBlock* first() { return at(0); }
+ BytecodeBasicBlock* last() { return at(size() - 1); }
+
+private:
+ Block* m_codeBlock;
+ BasicBlocksVector m_basicBlocks;
+ typename Block::UnpackedInstructions& m_instructions;
+};
+
+
+template<typename Block>
+BytecodeGraph<Block>::BytecodeGraph(Block* codeBlock, typename Block::UnpackedInstructions& instructions)
+ : m_codeBlock(codeBlock)
+ , m_instructions(instructions)
+{
+ ASSERT(m_codeBlock);
+ BytecodeBasicBlock::compute(m_codeBlock, instructions.begin(), instructions.size(), m_basicBlocks);
+ ASSERT(m_basicBlocks.size());
+}
+
+} // namespace JSC
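
The lookup in findBasicBlockForBytecodeOffset relies on the blocks being sorted by leader offset: the containing block is the last one whose leader offset is at most the query offset. Restated with the standard library (illustrative types only, not the JSC containers), the same idea looks like this:

    #include <algorithm>
    #include <vector>

    struct Block {
        unsigned leaderOffset;
        unsigned totalLength;
    };

    // Returns the block containing `offset`, or nullptr if none does.
    const Block* blockFor(const std::vector<Block>& blocks, unsigned offset)
    {
        // First block whose leader offset is strictly greater than `offset`...
        auto it = std::upper_bound(blocks.begin(), blocks.end(), offset,
            [](unsigned off, const Block& b) { return off < b.leaderOffset; });
        if (it == blocks.begin())
            return nullptr; // offset precedes the first block
        --it; // ...so the candidate is the block just before it.
        bool inside = offset < it->leaderOffset + it->totalLength;
        return inside ? &*it : nullptr;
    }
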
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp
new file mode 100644
index 000000000..00c9c01bb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeIntrinsicRegistry.h"
+
+#include "ArrayIteratorPrototype.h"
+#include "BuiltinNames.h"
+#include "BytecodeGenerator.h"
+#include "JSCJSValueInlines.h"
+#include "JSGeneratorFunction.h"
+#include "JSModuleLoader.h"
+#include "JSPromise.h"
+#include "Nodes.h"
+#include "StrongInlines.h"
+
+namespace JSC {
+
+#define INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET(name) m_bytecodeIntrinsicMap.add(vm.propertyNames->builtinNames().name##PrivateName().impl(), &BytecodeIntrinsicNode::emit_intrinsic_##name);
+
+BytecodeIntrinsicRegistry::BytecodeIntrinsicRegistry(VM& vm)
+ : m_vm(vm)
+ , m_bytecodeIntrinsicMap()
+{
+ JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+
+ m_undefined.set(m_vm, jsUndefined());
+ m_Infinity.set(m_vm, jsDoubleNumber(std::numeric_limits<double>::infinity()));
+ m_iterationKindKey.set(m_vm, jsNumber(IterateKey));
+ m_iterationKindValue.set(m_vm, jsNumber(IterateValue));
+ m_iterationKindKeyValue.set(m_vm, jsNumber(IterateKeyValue));
+ m_MAX_ARRAY_INDEX.set(m_vm, jsNumber(MAX_ARRAY_INDEX));
+ m_MAX_STRING_LENGTH.set(m_vm, jsNumber(JSString::MaxLength));
+ m_MAX_SAFE_INTEGER.set(m_vm, jsDoubleNumber(maxSafeInteger()));
+ m_ModuleFetch.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Fetch)));
+ m_ModuleInstantiate.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Instantiate)));
+ m_ModuleSatisfy.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Satisfy)));
+ m_ModuleLink.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Link)));
+ m_ModuleReady.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Ready)));
+ m_promiseStatePending.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)));
+ m_promiseStateFulfilled.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Fulfilled)));
+ m_promiseStateRejected.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Rejected)));
+ m_GeneratorResumeModeNormal.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)));
+ m_GeneratorResumeModeThrow.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)));
+ m_GeneratorResumeModeReturn.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)));
+ m_GeneratorStateCompleted.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorState::Completed)));
+ m_GeneratorStateExecuting.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorState::Executing)));
+}
+
+BytecodeIntrinsicNode::EmitterType BytecodeIntrinsicRegistry::lookup(const Identifier& ident) const
+{
+ if (!m_vm.propertyNames->isPrivateName(ident))
+ return nullptr;
+ auto iterator = m_bytecodeIntrinsicMap.find(ident.impl());
+ if (iterator == m_bytecodeIntrinsicMap.end())
+ return nullptr;
+ return iterator->value;
+}
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) \
+ JSValue BytecodeIntrinsicRegistry::name##Value(BytecodeGenerator&) \
+ { \
+ return m_##name.get(); \
+ }
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+} // namespace JSC
+
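The registry is essentially a hash map from private names to pointer-to-member emitter functions. A minimal free-standing sketch of that pattern follows, with stand-in types for BytecodeIntrinsicNode, BytecodeGenerator, and RegisterID; the emitter bodies are placeholders, not the real code generation.

    #include <string>
    #include <unordered_map>

    struct Generator;
    struct Register;

    struct IntrinsicNode {
        Register* emit_isObject(Generator&, Register* dst) { return dst; }
        Register* emit_toNumber(Generator&, Register* dst) { return dst; }
    };

    // Pointer-to-member type, mirroring BytecodeIntrinsicRegistry::EmitterType.
    using Emitter = Register* (IntrinsicNode::*)(Generator&, Register*);

    class Registry {
    public:
        Registry()
        {
            m_map["isObject"] = &IntrinsicNode::emit_isObject;
            m_map["toNumber"] = &IntrinsicNode::emit_toNumber;
        }

        Emitter lookup(const std::string& name) const
        {
            auto it = m_map.find(name);
            return it == m_map.end() ? nullptr : it->second;
        }

    private:
        std::unordered_map<std::string, Emitter> m_map;
    };

A caller then dispatches through the returned member pointer, roughly: if (Emitter emitter = registry.lookup(name)) (node.*emitter)(generator, dst); a null result means the identifier is not a registered intrinsic.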
diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h
new file mode 100644
index 000000000..0259bc652
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>.
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Identifier.h"
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class CommonIdentifiers;
+class BytecodeGenerator;
+class BytecodeIntrinsicNode;
+class RegisterID;
+class Identifier;
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(macro) \
+ macro(argument) \
+ macro(argumentCount) \
+ macro(assert) \
+ macro(isObject) \
+ macro(isJSArray) \
+ macro(isProxyObject) \
+ macro(isDerivedArray) \
+ macro(isRegExpObject) \
+ macro(isMap) \
+ macro(isSet) \
+ macro(tailCallForwardArguments) \
+ macro(throwTypeError) \
+ macro(throwRangeError) \
+ macro(throwOutOfMemoryError) \
+ macro(tryGetById) \
+ macro(putByValDirect) \
+ macro(toNumber) \
+ macro(toString) \
+ macro(newArrayWithSize) \
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(macro) \
+ macro(undefined) \
+ macro(Infinity) \
+ macro(iterationKindKey) \
+ macro(iterationKindValue) \
+ macro(iterationKindKeyValue) \
+ macro(MAX_ARRAY_INDEX) \
+ macro(MAX_STRING_LENGTH) \
+ macro(MAX_SAFE_INTEGER) \
+ macro(ModuleFetch) \
+ macro(ModuleTranslate) \
+ macro(ModuleInstantiate) \
+ macro(ModuleSatisfy) \
+ macro(ModuleLink) \
+ macro(ModuleReady) \
+ macro(promiseStatePending) \
+ macro(promiseStateFulfilled) \
+ macro(promiseStateRejected) \
+ macro(GeneratorResumeModeNormal) \
+ macro(GeneratorResumeModeThrow) \
+ macro(GeneratorResumeModeReturn) \
+ macro(GeneratorStateCompleted) \
+ macro(GeneratorStateExecuting) \
+
+
+class BytecodeIntrinsicRegistry {
+ WTF_MAKE_NONCOPYABLE(BytecodeIntrinsicRegistry);
+public:
+ explicit BytecodeIntrinsicRegistry(VM&);
+
+ typedef RegisterID* (BytecodeIntrinsicNode::* EmitterType)(BytecodeGenerator&, RegisterID*);
+
+ EmitterType lookup(const Identifier&) const;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) JSValue name##Value(BytecodeGenerator&);
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+private:
+ VM& m_vm;
+ HashMap<RefPtr<UniquedStringImpl>, EmitterType, IdentifierRepHash> m_bytecodeIntrinsicMap;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) Strong<Unknown> m_##name;
+ JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/BytecodeKills.h b/Source/JavaScriptCore/bytecode/BytecodeKills.h
new file mode 100644
index 000000000..dbdd44d7a
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeKills.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include <wtf/FastBitVector.h>
+
+namespace JSC {
+
+class BytecodeLivenessAnalysis;
+
+class BytecodeKills {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ BytecodeKills()
+ : m_codeBlock(nullptr)
+ {
+ }
+
+ // By convention, we say that non-local operands are never killed.
+ bool operandIsKilled(unsigned bytecodeIndex, int operand) const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+ VirtualRegister reg(operand);
+ if (reg.isLocal())
+ return m_killSets[bytecodeIndex].contains(operand);
+ return false;
+ }
+
+ bool operandIsKilled(Instruction* instruction, int operand) const
+ {
+ return operandIsKilled(instruction - m_codeBlock->instructions().begin(), operand);
+ }
+
+ template<typename Functor>
+ void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const
+ {
+ ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+ m_killSets[bytecodeIndex].forEachLocal(
+ [&] (unsigned local) {
+ functor(virtualRegisterForLocal(local));
+ });
+ }
+
+ template<typename Functor>
+ void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const
+ {
+ forEachOperandKilledAt(pc - m_codeBlock->instructions().begin(), functor);
+ }
+
+private:
+ friend class BytecodeLivenessAnalysis;
+
+ class KillSet {
+ public:
+ KillSet()
+ : m_word(0)
+ {
+ }
+
+ ~KillSet()
+ {
+ if (hasVector())
+ delete vector();
+ }
+
+ void add(unsigned local)
+ {
+ if (isEmpty()) {
+ setOneItem(local);
+ return;
+ }
+ if (hasOneItem()) {
+ ASSERT(oneItem() != local);
+ Vector<unsigned>* vector = new Vector<unsigned>();
+ vector->append(oneItem());
+ vector->append(local);
+ setVector(vector);
+ return;
+ }
+ ASSERT(!vector()->contains(local));
+ vector()->append(local);
+ }
+
+ template<typename Functor>
+ void forEachLocal(const Functor& functor)
+ {
+ if (isEmpty())
+ return;
+ if (hasOneItem()) {
+ functor(oneItem());
+ return;
+ }
+ for (unsigned local : *vector())
+ functor(local);
+ }
+
+ bool contains(unsigned expectedLocal)
+ {
+ if (isEmpty())
+ return false;
+ if (hasOneItem())
+ return oneItem() == expectedLocal;
+ for (unsigned local : *vector()) {
+ if (local == expectedLocal)
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ bool isEmpty() const
+ {
+ return !m_word;
+ }
+
+ bool hasOneItem() const
+ {
+ return m_word & 1;
+ }
+
+ unsigned oneItem() const
+ {
+ return m_word >> 1;
+ }
+
+ void setOneItem(unsigned value)
+ {
+ m_word = (value << 1) | 1;
+ }
+
+ bool hasVector() const
+ {
+ return !isEmpty() && !hasOneItem();
+ }
+
+ Vector<unsigned>* vector()
+ {
+ return bitwise_cast<Vector<unsigned>*>(m_word);
+ }
+
+ void setVector(Vector<unsigned>* value)
+ {
+ m_word = bitwise_cast<uintptr_t>(value);
+ }
+
+ uintptr_t m_word;
+ };
+
+ CodeBlock* m_codeBlock;
+ std::unique_ptr<KillSet[]> m_killSets;
+};
+
+} // namespace JSC
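
KillSet packs its storage into a single word: zero means empty, a set low bit means one inline local stored as (value << 1) | 1, and otherwise the word is a pointer to a heap-allocated vector for the rare multi-element case. This works because heap pointers are at least 2-byte aligned, so a real pointer never has its low bit set. A self-contained sketch of the same trick (simplified, not the JSC class):

    #include <cstdint>
    #include <vector>

    class SmallSet {
    public:
        ~SmallSet()
        {
            if (hasVector())
                delete vector();
        }

        void add(unsigned value)
        {
            if (!m_word) {
                // Empty: store the value inline, tagged with the low bit.
                m_word = (static_cast<uintptr_t>(value) << 1) | 1;
                return;
            }
            if (m_word & 1) {
                // One inline item: spill both items to a heap vector.
                auto* v = new std::vector<unsigned> { static_cast<unsigned>(m_word >> 1), value };
                m_word = reinterpret_cast<uintptr_t>(v);
                return;
            }
            vector()->push_back(value);
        }

        bool contains(unsigned value) const
        {
            if (!m_word)
                return false;
            if (m_word & 1)
                return (m_word >> 1) == value;
            for (unsigned v : *vector()) {
                if (v == value)
                    return true;
            }
            return false;
        }

    private:
        bool hasVector() const { return m_word && !(m_word & 1); }
        std::vector<unsigned>* vector() const { return reinterpret_cast<std::vector<unsigned>*>(m_word); }
        uintptr_t m_word { 0 };
    };
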
diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.json b/Source/JavaScriptCore/bytecode/BytecodeList.json
new file mode 100644
index 000000000..ada4429f7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeList.json
@@ -0,0 +1,200 @@
+[
+ {
+ "section" : "Bytecodes", "emitInHFile" : true, "emitInASMFile" : true,
+ "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_",
+ "bytecodes" : [
+ { "name" : "op_enter", "length" : 1 },
+ { "name" : "op_get_scope", "length" : 2 },
+ { "name" : "op_create_direct_arguments", "length" : 2 },
+ { "name" : "op_create_scoped_arguments", "length" : 3 },
+ { "name" : "op_create_cloned_arguments", "length" : 2 },
+ { "name" : "op_create_this", "length" : 5 },
+ { "name" : "op_get_argument", "length" : 4 },
+ { "name" : "op_argument_count", "length" : 2 },
+ { "name" : "op_to_this", "length" : 4 },
+ { "name" : "op_check_tdz", "length" : 2 },
+ { "name" : "op_new_object", "length" : 4 },
+ { "name" : "op_new_array", "length" : 5 },
+ { "name" : "op_new_array_with_size", "length" : 4 },
+ { "name" : "op_new_array_with_spread", "length" : 5 },
+ { "name" : "op_spread", "length" : 3 },
+ { "name" : "op_new_array_buffer", "length" : 5 },
+ { "name" : "op_new_regexp", "length" : 3 },
+ { "name" : "op_mov", "length" : 3 },
+ { "name" : "op_not", "length" : 3 },
+ { "name" : "op_eq", "length" : 4 },
+ { "name" : "op_eq_null", "length" : 3 },
+ { "name" : "op_neq", "length" : 4 },
+ { "name" : "op_neq_null", "length" : 3 },
+ { "name" : "op_stricteq", "length" : 4 },
+ { "name" : "op_nstricteq", "length" : 4 },
+ { "name" : "op_less", "length" : 4 },
+ { "name" : "op_lesseq", "length" : 4 },
+ { "name" : "op_greater", "length" : 4 },
+ { "name" : "op_greatereq", "length" : 4 },
+ { "name" : "op_inc", "length" : 2 },
+ { "name" : "op_dec", "length" : 2 },
+ { "name" : "op_to_number", "length" : 4 },
+ { "name" : "op_to_string", "length" : 3 },
+ { "name" : "op_negate", "length" : 4 },
+ { "name" : "op_add", "length" : 5 },
+ { "name" : "op_mul", "length" : 5 },
+ { "name" : "op_div", "length" : 5 },
+ { "name" : "op_mod", "length" : 4 },
+ { "name" : "op_sub", "length" : 5 },
+ { "name" : "op_pow", "length" : 4 },
+ { "name" : "op_lshift", "length" : 4 },
+ { "name" : "op_rshift", "length" : 4 },
+ { "name" : "op_urshift", "length" : 4 },
+ { "name" : "op_unsigned", "length" : 3 },
+ { "name" : "op_bitand", "length" : 5 },
+ { "name" : "op_bitxor", "length" : 5 },
+ { "name" : "op_bitor", "length" : 5 },
+ { "name" : "op_overrides_has_instance", "length" : 4 },
+ { "name" : "op_instanceof", "length" : 4 },
+ { "name" : "op_instanceof_custom", "length" : 5 },
+ { "name" : "op_typeof", "length" : 3 },
+ { "name" : "op_is_empty", "length" : 3 },
+ { "name" : "op_is_undefined", "length" : 3 },
+ { "name" : "op_is_boolean", "length" : 3 },
+ { "name" : "op_is_number", "length" : 3 },
+ { "name" : "op_is_object", "length" : 3 },
+ { "name" : "op_is_object_or_null", "length" : 3 },
+ { "name" : "op_is_function", "length" : 3 },
+ { "name" : "op_is_cell_with_type", "length" : 4 },
+ { "name" : "op_in", "length" : 5 },
+ { "name" : "op_get_array_length", "length" : 9 },
+ { "name" : "op_get_by_id", "length" : 9 },
+ { "name" : "op_get_by_id_proto_load", "length" : 9 },
+ { "name" : "op_get_by_id_unset", "length" : 9 },
+ { "name" : "op_get_by_id_with_this", "length" : 6 },
+ { "name" : "op_get_by_val_with_this", "length" : 6 },
+ { "name" : "op_try_get_by_id", "length" : 5 },
+ { "name" : "op_put_by_id", "length" : 9 },
+ { "name" : "op_put_by_id_with_this", "length" : 5 },
+ { "name" : "op_del_by_id", "length" : 4 },
+ { "name" : "op_get_by_val", "length" : 6 },
+ { "name" : "op_put_by_val", "length" : 5 },
+ { "name" : "op_put_by_val_with_this", "length" : 5 },
+ { "name" : "op_put_by_val_direct", "length" : 5 },
+ { "name" : "op_del_by_val", "length" : 4 },
+ { "name" : "op_put_by_index", "length" : 4 },
+ { "name" : "op_put_getter_by_id", "length" : 5 },
+ { "name" : "op_put_setter_by_id", "length" : 5 },
+ { "name" : "op_put_getter_setter_by_id", "length" : 6 },
+ { "name" : "op_put_getter_by_val", "length" : 5 },
+ { "name" : "op_put_setter_by_val", "length" : 5 },
+ { "name" : "op_define_data_property", "length" : 5 },
+ { "name" : "op_define_accessor_property", "length" : 6 },
+ { "name" : "op_jmp", "length" : 2 },
+ { "name" : "op_jtrue", "length" : 3 },
+ { "name" : "op_jfalse", "length" : 3 },
+ { "name" : "op_jeq_null", "length" : 3 },
+ { "name" : "op_jneq_null", "length" : 3 },
+ { "name" : "op_jneq_ptr", "length" : 5 },
+ { "name" : "op_jless", "length" : 4 },
+ { "name" : "op_jlesseq", "length" : 4 },
+ { "name" : "op_jgreater", "length" : 4 },
+ { "name" : "op_jgreatereq", "length" : 4 },
+ { "name" : "op_jnless", "length" : 4 },
+ { "name" : "op_jnlesseq", "length" : 4 },
+ { "name" : "op_jngreater", "length" : 4 },
+ { "name" : "op_jngreatereq", "length" : 4 },
+ { "name" : "op_loop_hint", "length" : 1 },
+ { "name" : "op_switch_imm", "length" : 4 },
+ { "name" : "op_switch_char", "length" : 4 },
+ { "name" : "op_switch_string", "length" : 4 },
+ { "name" : "op_new_func", "length" : 4 },
+ { "name" : "op_new_func_exp", "length" : 4 },
+ { "name" : "op_new_generator_func", "length" : 4 },
+ { "name" : "op_new_generator_func_exp", "length" : 4 },
+ { "name" : "op_new_async_func", "length" : 4 },
+ { "name" : "op_new_async_func_exp", "length" : 4 },
+ { "name" : "op_set_function_name", "length" : 3 },
+ { "name" : "op_call", "length" : 9 },
+ { "name" : "op_tail_call", "length" : 9 },
+ { "name" : "op_call_eval", "length" : 9 },
+ { "name" : "op_call_varargs", "length" : 9 },
+ { "name" : "op_tail_call_varargs", "length" : 9 },
+ { "name" : "op_tail_call_forward_arguments", "length" : 9 },
+ { "name" : "op_ret", "length" : 2 },
+ { "name" : "op_construct", "length" : 9 },
+ { "name" : "op_construct_varargs", "length" : 9 },
+ { "name" : "op_strcat", "length" : 4 },
+ { "name" : "op_to_primitive", "length" : 3 },
+ { "name" : "op_resolve_scope", "length" : 7 },
+ { "name" : "op_get_from_scope", "length" : 8 },
+ { "name" : "op_put_to_scope", "length" : 7 },
+ { "name" : "op_get_from_arguments", "length" : 5 },
+ { "name" : "op_put_to_arguments", "length" : 4 },
+ { "name" : "op_push_with_scope", "length" : 4 },
+ { "name" : "op_create_lexical_environment", "length" : 5 },
+ { "name" : "op_get_parent_scope", "length" : 3 },
+ { "name" : "op_catch", "length" : 3 },
+ { "name" : "op_throw", "length" : 2 },
+ { "name" : "op_throw_static_error", "length" : 3 },
+ { "name" : "op_debug", "length" : 3 },
+ { "name" : "op_end", "length" : 2 },
+ { "name" : "op_profile_type", "length" : 6 },
+ { "name" : "op_profile_control_flow", "length" : 2 },
+ { "name" : "op_get_enumerable_length", "length" : 3 },
+ { "name" : "op_has_indexed_property", "length" : 5 },
+ { "name" : "op_has_structure_property", "length" : 5 },
+ { "name" : "op_has_generic_property", "length" : 4 },
+ { "name" : "op_get_direct_pname", "length" : 7 },
+ { "name" : "op_get_property_enumerator", "length" : 3 },
+ { "name" : "op_enumerator_structure_pname", "length" : 4 },
+ { "name" : "op_enumerator_generic_pname", "length" : 4 },
+ { "name" : "op_to_index_string", "length" : 3 },
+ { "name" : "op_assert", "length" : 3 },
+ { "name" : "op_create_rest", "length": 4 },
+ { "name" : "op_get_rest_length", "length": 3 },
+ { "name" : "op_yield", "length" : 4 },
+ { "name" : "op_watchdog", "length" : 1 },
+ { "name" : "op_log_shadow_chicken_prologue", "length" : 2},
+ { "name" : "op_log_shadow_chicken_tail", "length" : 3}
+ ]
+ },
+ {
+ "section" : "CLoopHelpers", "emitInHFile" : true, "emitInASMFile" : false, "defaultLength" : 1,
+ "macroNameComponent" : "CLOOP_BYTECODE_HELPER",
+ "bytecodes" : [
+ { "name" : "llint_entry" },
+ { "name" : "getHostCallReturnValue" },
+ { "name" : "llint_return_to_host" },
+ { "name" : "llint_vm_entry_to_javascript" },
+ { "name" : "llint_vm_entry_to_native" },
+ { "name" : "llint_cloop_did_return_from_js_1" },
+ { "name" : "llint_cloop_did_return_from_js_2" },
+ { "name" : "llint_cloop_did_return_from_js_3" },
+ { "name" : "llint_cloop_did_return_from_js_4" },
+ { "name" : "llint_cloop_did_return_from_js_5" },
+ { "name" : "llint_cloop_did_return_from_js_6" },
+ { "name" : "llint_cloop_did_return_from_js_7" },
+ { "name" : "llint_cloop_did_return_from_js_8" },
+ { "name" : "llint_cloop_did_return_from_js_9" },
+ { "name" : "llint_cloop_did_return_from_js_10" },
+ { "name" : "llint_cloop_did_return_from_js_11" },
+ { "name" : "llint_cloop_did_return_from_js_12" }
+ ]
+ },
+ {
+ "section" : "NativeHelpers", "emitInHFile" : true, "emitInASMFile" : true, "defaultLength" : 1,
+ "macroNameComponent" : "BYTECODE_HELPER",
+ "bytecodes" : [
+ { "name" : "llint_program_prologue" },
+ { "name" : "llint_eval_prologue" },
+ { "name" : "llint_module_program_prologue" },
+ { "name" : "llint_function_for_call_prologue" },
+ { "name" : "llint_function_for_construct_prologue" },
+ { "name" : "llint_function_for_call_arity_check" },
+ { "name" : "llint_function_for_construct_arity_check" },
+ { "name" : "llint_generic_return_point" },
+ { "name" : "llint_throw_from_slow_path_trampoline" },
+ { "name" : "llint_throw_during_call_trampoline" },
+ { "name" : "llint_native_call_trampoline" },
+ { "name" : "llint_native_construct_trampoline" },
+ { "name" : "handleUncaughtException" }
+ ]
+ }
+]
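
Each "length" is the number of instruction-stream slots a bytecode occupies: the opcode itself plus its operands (for example, op_add's length of 5 suggests dst, op1, op2, and a profiling slot; the operand breakdown is inferred, not specified by the JSON). Consumers walk the stream by these lengths, as this sketch with a hand-written subset of the generated table shows:

    #include <cstdio>
    #include <vector>

    // Illustrative subset; the real table is generated from BytecodeList.json.
    enum OpcodeID : unsigned { op_enter, op_mov, op_add, op_ret };
    static const unsigned opcodeLengths[] = {
        1, // op_enter
        3, // op_mov dst, src
        5, // op_add dst, op1, op2, profile (operand layout assumed)
        2, // op_ret value
    };

    void walk(const std::vector<unsigned>& instructions)
    {
        for (size_t offset = 0; offset < instructions.size();) {
            unsigned opcode = instructions[offset];
            std::printf("offset %zu: opcode %u\n", offset, opcode);
            offset += opcodeLengths[opcode]; // skip the opcode and its operands
        }
    }
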
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
index 926334c44..60eeb7174 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,299 +26,159 @@
#include "config.h"
#include "BytecodeLivenessAnalysis.h"
+#include "BytecodeKills.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "BytecodeUseDef.h"
#include "CodeBlock.h"
#include "FullBytecodeLiveness.h"
+#include "HeapInlines.h"
+#include "InterpreterInlines.h"
#include "PreciseJumpTargets.h"
namespace JSC {
BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock)
- : m_codeBlock(codeBlock)
+ : m_graph(codeBlock, codeBlock->instructions())
{
- ASSERT(m_codeBlock);
compute();
}
-static bool isValidRegisterForLiveness(CodeBlock* codeBlock, int operand)
+template<typename Functor>
+void BytecodeLivenessAnalysis::computeDefsForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
{
- if (codeBlock->isConstantRegisterIndex(operand))
- return false;
-
- VirtualRegister virtualReg(operand);
- if (!virtualReg.isLocal())
- return false;
-
- if (codeBlock->captureCount()
- && operand <= codeBlock->captureStart()
- && operand > codeBlock->captureEnd())
- return false;
-
- return true;
-}
-
-static void setForOperand(CodeBlock* codeBlock, FastBitVector& bits, int operand)
-{
- ASSERT(isValidRegisterForLiveness(codeBlock, operand));
- VirtualRegister virtualReg(operand);
- if (virtualReg.offset() > codeBlock->captureStart())
- bits.set(virtualReg.toLocal());
- else
- bits.set(virtualReg.toLocal() - codeBlock->captureCount());
-}
-
-namespace {
-
-class SetBit {
-public:
- SetBit(FastBitVector& bits)
- : m_bits(bits)
- {
- }
-
- void operator()(CodeBlock* codeBlock, Instruction*, OpcodeID, int operand)
- {
- if (isValidRegisterForLiveness(codeBlock, operand))
- setForOperand(codeBlock, m_bits, operand);
- }
-
-private:
- FastBitVector& m_bits;
-};
-
-} // anonymous namespace
-
-static unsigned getLeaderOffsetForBasicBlock(RefPtr<BytecodeBasicBlock>* basicBlock)
-{
- return (*basicBlock)->leaderBytecodeOffset();
-}
-
-static BytecodeBasicBlock* findBasicBlockWithLeaderOffset(Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned leaderOffset)
-{
- return (*tryBinarySearch<RefPtr<BytecodeBasicBlock>, unsigned>(basicBlocks, basicBlocks.size(), leaderOffset, getLeaderOffsetForBasicBlock)).get();
-}
-
-static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset)
-{
- unsigned leaderOffset = block->leaderBytecodeOffset();
- return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalBytecodeLength();
+ JSC::computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
}
-static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned bytecodeOffset)
+template<typename Functor>
+void BytecodeLivenessAnalysis::computeUsesForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
{
-/*
- for (unsigned i = 0; i < basicBlocks.size(); i++) {
- if (blockContainsBytecodeOffset(basicBlocks[i].get(), bytecodeOffset))
- return basicBlocks[i].get();
- }
- return 0;
-*/
- RefPtr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<RefPtr<BytecodeBasicBlock>, unsigned>(
- basicBlocks, basicBlocks.size(), bytecodeOffset, getLeaderOffsetForBasicBlock);
- // We found the block we were looking for.
- if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset))
- return (*basicBlock).get();
-
- // Basic block is to the left of the returned block.
- if (bytecodeOffset < (*basicBlock)->leaderBytecodeOffset()) {
- ASSERT(basicBlock - 1 >= basicBlocks.data());
- ASSERT(blockContainsBytecodeOffset(basicBlock[-1].get(), bytecodeOffset));
- return basicBlock[-1].get();
- }
-
- // Basic block is to the right of the returned block.
- ASSERT(&basicBlock[1] <= &basicBlocks.last());
- ASSERT(blockContainsBytecodeOffset(basicBlock[1].get(), bytecodeOffset));
- return basicBlock[1].get();
-}
-
-static void stepOverInstruction(CodeBlock* codeBlock, Vector<RefPtr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& uses, FastBitVector& defs, FastBitVector& out)
-{
- uses.clearAll();
- defs.clearAll();
-
- SetBit setUses(uses);
- SetBit setDefs(defs);
- computeUsesForBytecodeOffset(codeBlock, bytecodeOffset, setUses);
- computeDefsForBytecodeOffset(codeBlock, bytecodeOffset, setDefs);
-
- out.exclude(defs);
- out.merge(uses);
-
- // If we have an exception handler, we want the live-in variables of the
- // exception handler block to be included in the live-in of this particular bytecode.
- if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
- BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target);
- ASSERT(handlerBlock);
- out.merge(handlerBlock->in());
- }
+ JSC::computeUsesForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
}
-static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks, unsigned targetOffset, FastBitVector& result)
+void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
{
- ASSERT(!block->isExitBlock());
- ASSERT(!block->isEntryBlock());
-
- FastBitVector out = block->out();
-
- FastBitVector uses;
- FastBitVector defs;
- uses.resize(out.numBits());
- defs.resize(out.numBits());
-
- for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) {
- unsigned bytecodeOffset = block->bytecodeOffsets()[i];
- if (targetOffset > bytecodeOffset)
- break;
-
- stepOverInstruction(codeBlock, basicBlocks, bytecodeOffset, uses, defs, out);
- }
-
- result.set(out);
-}
-
-static void computeLocalLivenessForBlock(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<RefPtr<BytecodeBasicBlock> >& basicBlocks)
-{
- if (block->isExitBlock() || block->isEntryBlock())
- return;
- computeLocalLivenessForBytecodeOffset(codeBlock, block, basicBlocks, block->leaderBytecodeOffset(), block->in());
-}
-
-void BytecodeLivenessAnalysis::runLivenessFixpoint()
-{
- UnlinkedCodeBlock* unlinkedCodeBlock = m_codeBlock->unlinkedCodeBlock();
- unsigned numberOfVariables =
- unlinkedCodeBlock->m_numCalleeRegisters - m_codeBlock->captureCount();
-
- for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
- BytecodeBasicBlock* block = m_basicBlocks[i].get();
- block->in().resize(numberOfVariables);
- block->out().resize(numberOfVariables);
- }
-
- bool changed;
- m_basicBlocks.last()->in().clearAll();
- m_basicBlocks.last()->out().clearAll();
- FastBitVector newOut;
- newOut.resize(m_basicBlocks.last()->out().numBits());
- do {
- changed = false;
- for (int i = m_basicBlocks.size() - 2; i >= 0; i--) {
- BytecodeBasicBlock* block = m_basicBlocks[i].get();
- newOut.clearAll();
- for (unsigned j = 0; j < block->successors().size(); j++)
- newOut.merge(block->successors()[j]->in());
- bool outDidChange = block->out().setAndCheck(newOut);
- computeLocalLivenessForBlock(m_codeBlock, block, m_basicBlocks);
- changed |= outDidChange;
- }
- } while (changed);
-}
-
-void BytecodeLivenessAnalysis::getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
-{
- BytecodeBasicBlock* block = findBasicBlockForBytecodeOffset(m_basicBlocks, bytecodeOffset);
+ BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
ASSERT(block);
ASSERT(!block->isEntryBlock());
ASSERT(!block->isExitBlock());
result.resize(block->out().numBits());
- computeLocalLivenessForBytecodeOffset(m_codeBlock, block, m_basicBlocks, bytecodeOffset, result);
+ computeLocalLivenessForBytecodeOffset(m_graph, block, bytecodeOffset, result);
}
bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset)
{
- if (operandIsAlwaysLive(m_codeBlock, operand))
+ if (operandIsAlwaysLive(operand))
return true;
FastBitVector result;
- getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, result);
- return operandThatIsNotAlwaysLiveIsLive(m_codeBlock, result, operand);
+ getLivenessInfoAtBytecodeOffset(bytecodeOffset, result);
+ return operandThatIsNotAlwaysLiveIsLive(result, operand);
}
-FastBitVector getLivenessInfo(CodeBlock* codeBlock, const FastBitVector& out)
+FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
{
- FastBitVector result;
-
- unsigned numCapturedVars = codeBlock->captureCount();
- if (numCapturedVars) {
- int firstCapturedLocal = VirtualRegister(codeBlock->captureStart()).toLocal();
- result.resize(out.numBits() + numCapturedVars);
- for (unsigned i = 0; i < numCapturedVars; ++i)
- result.set(firstCapturedLocal + i);
- } else
- result.resize(out.numBits());
-
- int outLength = out.numBits();
- ASSERT(outLength >= 0);
- for (int i = 0; i < outLength; i++) {
- if (!out.get(i))
- continue;
-
- if (!numCapturedVars) {
- result.set(i);
- continue;
- }
-
- if (virtualRegisterForLocal(i).offset() > codeBlock->captureStart())
- result.set(i);
- else
- result.set(numCapturedVars + i);
- }
- return result;
+ FastBitVector out;
+ getLivenessInfoAtBytecodeOffset(bytecodeOffset, out);
+ return out;
}
-FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
+void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
{
FastBitVector out;
- getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, out);
- return getLivenessInfo(m_codeBlock, out);
+ CodeBlock* codeBlock = m_graph.codeBlock();
+
+ result.m_map.resize(codeBlock->instructions().size());
+
+ for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
+ if (block->isEntryBlock() || block->isExitBlock())
+ continue;
+
+ out = block->out();
+
+ for (unsigned i = block->offsets().size(); i--;) {
+ unsigned bytecodeOffset = block->offsets()[i];
+ stepOverInstruction(m_graph, bytecodeOffset, out);
+ result.m_map[bytecodeOffset] = out;
+ }
+ }
}
-void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
+void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
FastBitVector out;
- FastBitVector uses;
- FastBitVector defs;
- result.m_codeBlock = m_codeBlock;
- result.m_map.clear();
+ CodeBlock* codeBlock = m_graph.codeBlock();
+ result.m_codeBlock = codeBlock;
+ result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(codeBlock->instructions().size());
- for (unsigned i = m_basicBlocks.size(); i--;) {
- BytecodeBasicBlock* block = m_basicBlocks[i].get();
+ for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
if (block->isEntryBlock() || block->isExitBlock())
continue;
out = block->out();
- uses.resize(out.numBits());
- defs.resize(out.numBits());
- for (unsigned i = block->bytecodeOffsets().size(); i--;) {
- unsigned bytecodeOffset = block->bytecodeOffsets()[i];
- stepOverInstruction(m_codeBlock, m_basicBlocks, bytecodeOffset, uses, defs, out);
- result.m_map.add(bytecodeOffset, out);
+ for (unsigned i = block->offsets().size(); i--;) {
+ unsigned bytecodeOffset = block->offsets()[i];
+ stepOverInstruction(
+ m_graph, bytecodeOffset, out,
+ [&] (unsigned index) {
+ // This is for uses.
+ if (out[index])
+ return;
+ result.m_killSets[bytecodeOffset].add(index);
+ out[index] = true;
+ },
+ [&] (unsigned index) {
+ // This is for defs.
+ out[index] = false;
+ });
}
}
}
void BytecodeLivenessAnalysis::dumpResults()
{
- Interpreter* interpreter = m_codeBlock->vm()->interpreter;
- Instruction* instructionsBegin = m_codeBlock->instructions().begin();
- for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
- BytecodeBasicBlock* block = m_basicBlocks[i].get();
- dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i, block, block->leaderBytecodeOffset(), block->totalBytecodeLength());
- dataLogF("Predecessors: ");
- for (unsigned j = 0; j < block->predecessors().size(); j++) {
- BytecodeBasicBlock* predecessor = block->predecessors()[j];
- dataLogF("%p ", predecessor);
+ CodeBlock* codeBlock = m_graph.codeBlock();
+ dataLog("\nDumping bytecode liveness for ", *codeBlock, ":\n");
+ Interpreter* interpreter = codeBlock->vm()->interpreter;
+ Instruction* instructionsBegin = codeBlock->instructions().begin();
+ unsigned i = 0;
+
+ unsigned numberOfBlocks = m_graph.size();
+ Vector<FastBitVector> predecessors(numberOfBlocks);
+ for (BytecodeBasicBlock* block : m_graph)
+ predecessors[block->index()].resize(numberOfBlocks);
+ for (BytecodeBasicBlock* block : m_graph) {
+ for (unsigned j = 0; j < block->successors().size(); j++) {
+ unsigned blockIndex = block->index();
+ unsigned successorIndex = block->successors()[j]->index();
+ predecessors[successorIndex][blockIndex] = true;
+ }
+ }
+
+ auto dumpBitVector = [] (FastBitVector& bits) {
+ for (unsigned j = 0; j < bits.numBits(); j++) {
+ if (bits[j])
+ dataLogF(" %u", j);
}
+ };
+
+ for (BytecodeBasicBlock* block : m_graph) {
+ dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i++, block, block->leaderOffset(), block->totalLength());
+
+ dataLogF("Predecessors:");
+ dumpBitVector(predecessors[block->index()]);
dataLogF("\n");
- dataLogF("Successors: ");
+
+ dataLogF("Successors:");
+ FastBitVector successors;
+ successors.resize(numberOfBlocks);
for (unsigned j = 0; j < block->successors().size(); j++) {
BytecodeBasicBlock* successor = block->successors()[j];
- dataLogF("%p ", successor);
+ successors[successor->index()] = true;
}
+ dumpBitVector(successors); // Dump in sorted order.
dataLogF("\n");
+
if (block->isEntryBlock()) {
dataLogF("Entry block %p\n", block);
continue;
@@ -327,38 +187,30 @@ void BytecodeLivenessAnalysis::dumpResults()
dataLogF("Exit block: %p\n", block);
continue;
}
- for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) {
+ for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset];
- dataLogF("Live variables: ");
+ dataLogF("Live variables:");
FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset);
- for (unsigned j = 0; j < liveBefore.numBits(); j++) {
- if (liveBefore.get(j))
- dataLogF("%u ", j);
- }
+ dumpBitVector(liveBefore);
dataLogF("\n");
- m_codeBlock->dumpBytecode(WTF::dataFile(), m_codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction);
+ codeBlock->dumpBytecode(WTF::dataFile(), codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction);
OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
unsigned opcodeLength = opcodeLengths[opcodeID];
bytecodeOffset += opcodeLength;
}
- dataLogF("Live variables: ");
+ dataLogF("Live variables:");
FastBitVector liveAfter = block->out();
- for (unsigned j = 0; j < liveAfter.numBits(); j++) {
- if (liveAfter.get(j))
- dataLogF("%u ", j);
- }
+ dumpBitVector(liveAfter);
dataLogF("\n");
}
}
void BytecodeLivenessAnalysis::compute()
{
- computeBytecodeBasicBlocks(m_codeBlock, m_basicBlocks);
- ASSERT(m_basicBlocks.size());
- runLivenessFixpoint();
+ runLivenessFixpoint(m_graph);
if (Options::dumpBytecodeLivenessResults())
dumpResults();
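
The fixpoint that runLivenessFixpoint drives is the classic backward dataflow: a block's live-out is the union of its successors' live-in, and its live-in is its uses plus whatever is live-out and not defined. A minimal sketch with per-block use/def summaries (the real code instead recomputes in() by stepping instructions in reverse, and uses FastBitVector rather than vector<bool>):

    #include <vector>

    struct Block {
        std::vector<unsigned> successors; // indices into the block array
        std::vector<bool> use, def;       // per-local use/def summaries
        std::vector<bool> in, out;
    };

    void fixpoint(std::vector<Block>& blocks, unsigned numLocals)
    {
        for (auto& b : blocks) {
            b.in.assign(numLocals, false);
            b.out.assign(numLocals, false);
        }
        bool changed;
        do {
            changed = false;
            for (size_t i = blocks.size(); i--;) { // reverse order converges faster
                Block& b = blocks[i];
                std::vector<bool> out(numLocals, false);
                for (unsigned s : b.successors) {
                    for (unsigned v = 0; v < numLocals; ++v)
                        out[v] = out[v] || blocks[s].in[v];
                }
                for (unsigned v = 0; v < numLocals; ++v) {
                    bool in = b.use[v] || (out[v] && !b.def[v]);
                    changed |= in != b.in[v] || out[v] != b.out[v];
                    b.in[v] = in;
                    b.out[v] = out[v];
                }
            }
        } while (changed);
    }
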
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
index 349912175..e12cd8edc 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,45 +23,64 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BytecodeLivenessAnalysis_h
-#define BytecodeLivenessAnalysis_h
+#pragma once
#include "BytecodeBasicBlock.h"
+#include "BytecodeGraph.h"
+#include "CodeBlock.h"
#include <wtf/FastBitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
namespace JSC {
-class CodeBlock;
+class BytecodeKills;
class FullBytecodeLiveness;
-class BytecodeLivenessAnalysis {
+template<typename DerivedAnalysis>
+class BytecodeLivenessPropagation {
+protected:
+ template<typename Graph, typename UseFunctor, typename DefFunctor> void stepOverInstruction(Graph&, unsigned bytecodeOffset, FastBitVector& out, const UseFunctor&, const DefFunctor&);
+
+ template<typename Graph> void stepOverInstruction(Graph&, unsigned bytecodeOffset, FastBitVector& out);
+
+ template<typename Graph> bool computeLocalLivenessForBytecodeOffset(Graph&, BytecodeBasicBlock*, unsigned targetOffset, FastBitVector& result);
+
+ template<typename Graph> bool computeLocalLivenessForBlock(Graph&, BytecodeBasicBlock*);
+
+ template<typename Graph> FastBitVector getLivenessInfoAtBytecodeOffset(Graph&, unsigned bytecodeOffset);
+
+ template<typename Graph> void runLivenessFixpoint(Graph&);
+};
+
+class BytecodeLivenessAnalysis : private BytecodeLivenessPropagation<BytecodeLivenessAnalysis> {
+ WTF_MAKE_FAST_ALLOCATED;
+ WTF_MAKE_NONCOPYABLE(BytecodeLivenessAnalysis);
public:
+ friend class BytecodeLivenessPropagation<BytecodeLivenessAnalysis>;
BytecodeLivenessAnalysis(CodeBlock*);
bool operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset);
FastBitVector getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset);
void computeFullLiveness(FullBytecodeLiveness& result);
+ void computeKills(BytecodeKills& result);
private:
void compute();
- void runLivenessFixpoint();
void dumpResults();
- void getLivenessInfoForNonCapturedVarsAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&);
+ void getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&);
- CodeBlock* m_codeBlock;
- Vector<RefPtr<BytecodeBasicBlock> > m_basicBlocks;
-};
+ template<typename Functor> void computeDefsForBytecodeOffset(CodeBlock*, OpcodeID, Instruction*, FastBitVector&, const Functor&);
+ template<typename Functor> void computeUsesForBytecodeOffset(CodeBlock*, OpcodeID, Instruction*, FastBitVector&, const Functor&);
-inline bool operandIsAlwaysLive(CodeBlock*, int operand);
-inline bool operandThatIsNotAlwaysLiveIsLive(CodeBlock*, const FastBitVector& out, int operand);
-inline bool operandIsLive(CodeBlock*, const FastBitVector& out, int operand);
+ BytecodeGraph<CodeBlock> m_graph;
+};
-FastBitVector getLivenessInfo(CodeBlock*, const FastBitVector& out);
+inline bool operandIsAlwaysLive(int operand);
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand);
+inline bool operandIsLive(const FastBitVector& out, int operand);
+inline bool isValidRegisterForLiveness(int operand);
} // namespace JSC
-
-#endif // BytecodeLivenessAnalysis_h
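
BytecodeLivenessPropagation uses the curiously recurring template pattern: the base class template takes the derived analysis as a parameter and statically dispatches to its hooks via static_cast, so BytecodeLivenessAnalysis and GeneratorLivenessAnalysis can each customize use/def computation without virtual calls. The shape of the pattern, reduced to a sketch with hypothetical hook names:

    template<typename Derived>
    class PropagationBase {
    public:
        void stepOver(unsigned bytecodeOffset)
        {
            // No virtual dispatch: each call resolves at compile time to
            // whichever hook the concrete analysis defines. Defs come first
            // because the instruction is walked in reverse.
            static_cast<Derived*>(this)->computeDefs(bytecodeOffset);
            static_cast<Derived*>(this)->computeUses(bytecodeOffset);
        }
    };

    class MyAnalysis : public PropagationBase<MyAnalysis> {
    public:
        void computeDefs(unsigned offset) { lastDef = offset; }
        void computeUses(unsigned offset) { lastUse = offset; }
        unsigned lastDef { 0 };
        unsigned lastUse { 0 };
    };
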
diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
index 8824bd85c..3371237b8 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,38 +23,179 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BytecodeLivenessAnalysisInlines_h
-#define BytecodeLivenessAnalysisInlines_h
+#pragma once
+#include "BytecodeGraph.h"
#include "BytecodeLivenessAnalysis.h"
#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "Operations.h"
namespace JSC {
-inline bool operandIsAlwaysLive(CodeBlock* codeBlock, int operand)
+inline bool operandIsAlwaysLive(int operand)
{
- if (VirtualRegister(operand).isArgument())
- return true;
- return operand <= codeBlock->captureStart() && operand > codeBlock->captureEnd();
+ return !VirtualRegister(operand).isLocal();
}
-inline bool operandThatIsNotAlwaysLiveIsLive(CodeBlock* codeBlock, const FastBitVector& out, int operand)
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand)
+{
+ unsigned local = VirtualRegister(operand).toLocal();
+ if (local >= out.numBits())
+ return false;
+ return out[local];
+}
+
+inline bool operandIsLive(const FastBitVector& out, int operand)
+{
+ return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand);
+}
+
+inline bool isValidRegisterForLiveness(int operand)
{
VirtualRegister virtualReg(operand);
- if (virtualReg.offset() > codeBlock->captureStart())
- return out.get(virtualReg.toLocal());
- size_t index = virtualReg.toLocal() - codeBlock->captureCount();
- if (index >= out.numBits())
+ if (virtualReg.isConstant())
return false;
- return out.get(index);
+ return virtualReg.isLocal();
+}
+
+// Simplified interface to bytecode use/def, which determines defs first and then uses, and includes
+// exception handlers in the uses.
+template<typename DerivedAnalysis>
+template<typename Graph, typename UseFunctor, typename DefFunctor>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::stepOverInstruction(Graph& graph, unsigned bytecodeOffset, FastBitVector& out, const UseFunctor& use, const DefFunctor& def)
+{
+ // This abstractly executes the instruction in reverse. Instructions logically first use operands and
+ // then define operands. This logical ordering is necessary for operations that use and def the same
+ // operand, like:
+ //
+ // op_add loc1, loc1, loc2
+ //
+ // The use of loc1 happens before the def of loc1. That's a semantic requirement, since the add
+ // operation cannot travel forward in time to read the value that it will only produce after it
+ // executes. Since we are executing in reverse, this means that we must do defs before uses (the
+ // reverse of uses before defs).
+ //
+ // Since this is a liveness analysis, this ordering ends up being particularly important: if we did
+ // uses before defs, then loc1 would incorrectly appear dead above the add, since we'd first add it
+ // to the out set (the use), and then we'd remove it (the def).
+
+ auto* codeBlock = graph.codeBlock();
+ Interpreter* interpreter = codeBlock->vm()->interpreter;
+ auto* instructionsBegin = graph.instructions().begin();
+ auto* instruction = &instructionsBegin[bytecodeOffset];
+ OpcodeID opcodeID = interpreter->getOpcodeID(*instruction);
+
+ static_cast<DerivedAnalysis*>(this)->computeDefsForBytecodeOffset(
+ codeBlock, opcodeID, instruction, out,
+ [&] (typename Graph::CodeBlock*, typename Graph::Instruction*, OpcodeID, int operand) {
+ if (isValidRegisterForLiveness(operand))
+ def(VirtualRegister(operand).toLocal());
+ });
+
+ static_cast<DerivedAnalysis*>(this)->computeUsesForBytecodeOffset(
+ codeBlock, opcodeID, instruction, out,
+ [&] (typename Graph::CodeBlock*, typename Graph::Instruction*, OpcodeID, int operand) {
+ if (isValidRegisterForLiveness(operand))
+ use(VirtualRegister(operand).toLocal());
+ });
+
+ // If we have an exception handler, we want the live-in variables of the
+ // exception handler block to be included in the live-in of this particular bytecode.
+ if (auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
+ BytecodeBasicBlock* handlerBlock = graph.findBasicBlockWithLeaderOffset(handler->target);
+ ASSERT(handlerBlock);
+ handlerBlock->in().forEachSetBit(use);
+ }
}
-inline bool operandIsLive(CodeBlock* codeBlock, const FastBitVector& out, int operand)
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::stepOverInstruction(Graph& graph, unsigned bytecodeOffset, FastBitVector& out)
{
- return operandIsAlwaysLive(codeBlock, operand) || operandThatIsNotAlwaysLiveIsLive(codeBlock, out, operand);
+ stepOverInstruction(
+ graph, bytecodeOffset, out,
+ [&] (unsigned bitIndex) {
+ // This is the use functor, so we set the bit.
+ out[bitIndex] = true;
+ },
+ [&] (unsigned bitIndex) {
+ // This is the def functor, so we clear the bit.
+ out[bitIndex] = false;
+ });
}
-} // namespace JSC
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline bool BytecodeLivenessPropagation<DerivedAnalysis>::computeLocalLivenessForBytecodeOffset(Graph& graph, BytecodeBasicBlock* block, unsigned targetOffset, FastBitVector& result)
+{
+ ASSERT(!block->isExitBlock());
+ ASSERT(!block->isEntryBlock());
-#endif // BytecodeLivenessAnalysisInlines_h
+ FastBitVector out = block->out();
+ for (int i = block->offsets().size() - 1; i >= 0; i--) {
+ unsigned bytecodeOffset = block->offsets()[i];
+ if (targetOffset > bytecodeOffset)
+ break;
+ stepOverInstruction(graph, bytecodeOffset, out);
+ }
+
+ return result.setAndCheck(out);
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline bool BytecodeLivenessPropagation<DerivedAnalysis>::computeLocalLivenessForBlock(Graph& graph, BytecodeBasicBlock* block)
+{
+ if (block->isExitBlock() || block->isEntryBlock())
+ return false;
+ return computeLocalLivenessForBytecodeOffset(graph, block, block->leaderOffset(), block->in());
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline FastBitVector BytecodeLivenessPropagation<DerivedAnalysis>::getLivenessInfoAtBytecodeOffset(Graph& graph, unsigned bytecodeOffset)
+{
+ BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
+ ASSERT(block);
+ ASSERT(!block->isEntryBlock());
+ ASSERT(!block->isExitBlock());
+ FastBitVector out;
+ out.resize(block->out().numBits());
+ computeLocalLivenessForBytecodeOffset(graph, block, bytecodeOffset, out);
+ return out;
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::runLivenessFixpoint(Graph& graph)
+{
+ auto* codeBlock = graph.codeBlock();
+ unsigned numberOfVariables = codeBlock->numCalleeLocals();
+ for (BytecodeBasicBlock* block : graph) {
+ block->in().resize(numberOfVariables);
+ block->out().resize(numberOfVariables);
+ block->in().clearAll();
+ block->out().clearAll();
+ }
+
+ bool changed;
+ BytecodeBasicBlock* lastBlock = graph.last();
+ lastBlock->in().clearAll();
+ lastBlock->out().clearAll();
+ FastBitVector newOut;
+ newOut.resize(lastBlock->out().numBits());
+ do {
+ changed = false;
+ for (std::unique_ptr<BytecodeBasicBlock>& block : graph.basicBlocksInReverseOrder()) {
+ newOut.clearAll();
+ for (BytecodeBasicBlock* successor : block->successors())
+ newOut |= successor->in();
+ block->out() = newOut;
+ changed |= computeLocalLivenessForBlock(graph, block.get());
+ }
+ } while (changed);
+}
+
+} // namespace JSC
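
The defs-before-uses ordering described in stepOverInstruction's comment can be seen in a minimal standalone sketch. This is not JSC code: a plain std::vector<bool> stands in for FastBitVector, and the register indices are invented.

    #include <cstdio>
    #include <vector>

    int main()
    {
        // Live-out set below "add loc1, loc1, loc2"; pretend loc1 is read later.
        std::vector<bool> out(3, false);
        out[1] = true;

        // Step backward over the add: defs first, then uses.
        out[1] = false; // def loc1
        out[1] = true;  // use loc1
        out[2] = true;  // use loc2

        // loc1 is correctly live-in; with uses before defs it would end up dead.
        std::printf("loc1 live-in: %d, loc2 live-in: %d\n", int(out[1]), int(out[2]));
        return 0;
    }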
diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp
new file mode 100644
index 000000000..6dadb6e74
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeRewriter.h"
+
+#include "HeapInlines.h"
+#include "PreciseJumpTargetsInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC {
+
+void BytecodeRewriter::applyModification()
+{
+ for (size_t insertionIndex = m_insertions.size(); insertionIndex--;) {
+ Insertion& insertion = m_insertions[insertionIndex];
+ if (insertion.type == Insertion::Type::Remove)
+ m_graph.instructions().remove(insertion.index.bytecodeOffset, insertion.length());
+ else {
+ if (insertion.includeBranch == IncludeBranch::Yes) {
+ int finalOffset = insertion.index.bytecodeOffset + calculateDifference(m_insertions.begin(), m_insertions.begin() + insertionIndex);
+ adjustJumpTargetsInFragment(finalOffset, insertion);
+ }
+ m_graph.instructions().insertVector(insertion.index.bytecodeOffset, insertion.instructions);
+ }
+ }
+ m_insertions.clear();
+}
+
+void BytecodeRewriter::execute()
+{
+ WTF::bubbleSort(m_insertions.begin(), m_insertions.end(), [] (const Insertion& lhs, const Insertion& rhs) {
+ return lhs.index < rhs.index;
+ });
+
+ UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+ codeBlock->applyModification(*this);
+}
+
+void BytecodeRewriter::adjustJumpTargetsInFragment(unsigned finalOffset, Insertion& insertion)
+{
+ auto& fragment = insertion.instructions;
+ UnlinkedInstruction* instructionsBegin = fragment.data();
+ for (unsigned fragmentOffset = 0, fragmentCount = fragment.size(); fragmentOffset < fragmentCount;) {
+ UnlinkedInstruction& instruction = fragment[fragmentOffset];
+ OpcodeID opcodeID = instruction.u.opcode;
+ if (isBranch(opcodeID)) {
+ unsigned bytecodeOffset = finalOffset + fragmentOffset;
+ UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+ extractStoredJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, fragmentOffset, [&](int32_t& label) {
+ int absoluteOffset = adjustAbsoluteOffset(label);
+ label = absoluteOffset - static_cast<int>(bytecodeOffset);
+ });
+ }
+ fragmentOffset += opcodeLength(opcodeID);
+ }
+}
+
+void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, Vector<UnlinkedInstruction>&& fragment)
+{
+ ASSERT(insertionPoint.position == Position::Before || insertionPoint.position == Position::After);
+ m_insertions.append(Insertion {
+ insertionPoint,
+ Insertion::Type::Insert,
+ includeBranch,
+ 0,
+ WTFMove(fragment)
+ });
+}
+
+int BytecodeRewriter::adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint)
+{
+ if (startPoint < jumpTargetPoint) {
+ int jumpTarget = jumpTargetPoint.bytecodeOffset;
+ auto start = std::lower_bound(m_insertions.begin(), m_insertions.end(), startPoint, [&] (const Insertion& insertion, InsertionPoint startPoint) {
+ return insertion.index < startPoint;
+ });
+ if (start != m_insertions.end()) {
+ auto end = std::lower_bound(m_insertions.begin(), m_insertions.end(), jumpTargetPoint, [&] (const Insertion& insertion, InsertionPoint jumpTargetPoint) {
+ return insertion.index < jumpTargetPoint;
+ });
+ jumpTarget += calculateDifference(start, end);
+ }
+ return jumpTarget - startPoint.bytecodeOffset;
+ }
+
+ if (startPoint == jumpTargetPoint)
+ return 0;
+
+ return -adjustJumpTarget(jumpTargetPoint, startPoint);
+}
+
+} // namespace JSC
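
The arithmetic behind adjustJumpTarget and calculateDifference reduces to summing the signed lengths of the modifications that land between a branch and its target. Below is a simplified model with made-up offsets; the real code compares InsertionPoints via std::lower_bound rather than raw integer offsets.

    #include <cstdio>
    #include <vector>

    int main()
    {
        struct Modification { int offset; int signedLength; };
        std::vector<Modification> mods = {
            { 12, +3 }, // a 3-slot fragment inserted at offset 12
            { 20, -2 }, // a 2-slot instruction removed at offset 20
        };

        int branch = 10, target = 30;
        int relative = target - branch;
        for (const auto& mod : mods) {
            if (mod.offset > branch && mod.offset < target)
                relative += mod.signedLength;
        }
        std::printf("adjusted relative jump: %d\n", relative); // 20 + 3 - 2 = 21
        return 0;
    }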
diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.h b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h
new file mode 100644
index 000000000..035f900a7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki <utatane.tea@gmail.com>
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeGraph.h"
+#include "Bytecodes.h"
+#include "Opcode.h"
+#include "UnlinkedCodeBlock.h"
+#include <wtf/Insertion.h>
+
+namespace JSC {
+
+// BytecodeRewriter offers the ability to insert and remove bytecodes, including jump operations.
+//
+// We use the original bytecode offsets as labels. When you emit jumps, you can specify the jump target by
+// using the original bytecode offsets. These bytecode offsets are later converted to the appropriate values
+// by the rewriter. We also use the labels to represent the positions where new bytecodes are inserted.
+//
+// | [bytecode] | [bytecode] |
+// offsets A B C
+//
+// We can use the above "A", "B", and "C" offsets as labels, and the rewriter can insert bytecode fragments
+// before and after each label. For example, if you insert a fragment after "B", the layout becomes like this.
+//
+// | [bytecode] | [fragment] [bytecode] |
+// offsets A B C
+//
+// Even if you remove some original bytecodes, the offsets remain as labels. For example, when you remove A's bytecode,
+// the layout becomes like this.
+//
+// | | [bytecode] |
+// offsets A B C
+//
+// You can still insert fragments before and after "A".
+//
+// | [fragment] | [bytecode] |
+// offsets A B C
+//
+// We can insert bytecode fragments "Before" and "After" the labels. The insertion position, either "Before" or "After",
+// matters when the label is involved in jumps. For example, when you have a jump to the position "B",
+//
+// | [bytecode] | [bytecode] |
+// offsets A B C
+// ^
+// jump to here.
+//
+// and you insert bytecode before/after "B",
+//
+// | [bytecode] [before] | [after] [bytecode] |
+// offsets A B C
+// ^
+// jump to here.
+//
+// as you can see, execution jumping to "B" does not execute the [before] code.
+class BytecodeRewriter {
+ WTF_MAKE_NONCOPYABLE(BytecodeRewriter);
+public:
+ enum class Position : int8_t {
+ EntryPoint = -2,
+ Before = -1,
+ LabelPoint = 0,
+ After = 1,
+ OriginalBytecodePoint = 2,
+ };
+
+ enum class IncludeBranch : uint8_t {
+ No = 0,
+ Yes = 1,
+ };
+
+ struct InsertionPoint {
+ int bytecodeOffset;
+ Position position;
+
+ InsertionPoint(int offset, Position pos)
+ : bytecodeOffset(offset)
+ , position(pos)
+ {
+ }
+
+ bool operator<(const InsertionPoint& other) const
+ {
+ if (bytecodeOffset == other.bytecodeOffset)
+ return position < other.position;
+ return bytecodeOffset < other.bytecodeOffset;
+ }
+
+ bool operator==(const InsertionPoint& other) const
+ {
+ return bytecodeOffset == other.bytecodeOffset && position == other.position;
+ }
+ };
+
+private:
+ struct Insertion {
+ enum class Type : uint8_t { Insert = 0, Remove = 1, };
+
+ size_t length() const
+ {
+ if (type == Type::Remove)
+ return removeLength;
+ return instructions.size();
+ }
+
+ InsertionPoint index;
+ Type type;
+ IncludeBranch includeBranch;
+ size_t removeLength;
+ Vector<UnlinkedInstruction> instructions;
+ };
+
+public:
+ class Fragment {
+ WTF_MAKE_NONCOPYABLE(Fragment);
+ public:
+ Fragment(Vector<UnlinkedInstruction>& fragment, IncludeBranch& includeBranch)
+ : m_fragment(fragment)
+ , m_includeBranch(includeBranch)
+ {
+ }
+
+ template<class... Args>
+ void appendInstruction(OpcodeID opcodeID, Args... args)
+ {
+ if (isBranch(opcodeID))
+ m_includeBranch = IncludeBranch::Yes;
+
+ UnlinkedInstruction instructions[sizeof...(args) + 1] = {
+ UnlinkedInstruction(opcodeID),
+ UnlinkedInstruction(args)...
+ };
+ m_fragment.append(instructions, sizeof...(args) + 1);
+ }
+
+ private:
+ Vector<UnlinkedInstruction>& m_fragment;
+ IncludeBranch& m_includeBranch;
+ };
+
+ BytecodeRewriter(BytecodeGraph<UnlinkedCodeBlock>& graph)
+ : m_graph(graph)
+ {
+ }
+
+ template<class Function>
+ void insertFragmentBefore(unsigned bytecodeOffset, Function function)
+ {
+ IncludeBranch includeBranch = IncludeBranch::No;
+ Vector<UnlinkedInstruction> instructions;
+ Fragment fragment(instructions, includeBranch);
+ function(fragment);
+ insertImpl(InsertionPoint(bytecodeOffset, Position::Before), includeBranch, WTFMove(instructions));
+ }
+
+ template<class Function>
+ void insertFragmentAfter(unsigned bytecodeOffset, Function function)
+ {
+ IncludeBranch includeBranch = IncludeBranch::No;
+ Vector<UnlinkedInstruction> instructions;
+ Fragment fragment(instructions, includeBranch);
+ function(fragment);
+ insertImpl(InsertionPoint(bytecodeOffset, Position::After), includeBranch, WTFMove(instructions));
+ }
+
+ void removeBytecode(unsigned bytecodeOffset)
+ {
+ m_insertions.append(Insertion { InsertionPoint(bytecodeOffset, Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, opcodeLength(m_graph.instructions()[bytecodeOffset].u.opcode), { } });
+ }
+
+ void execute();
+
+ BytecodeGraph<UnlinkedCodeBlock>& graph() { return m_graph; }
+
+ int adjustAbsoluteOffset(int absoluteOffset)
+ {
+ return adjustJumpTarget(InsertionPoint(0, Position::EntryPoint), InsertionPoint(absoluteOffset, Position::LabelPoint));
+ }
+
+ int adjustJumpTarget(int originalBytecodeOffset, int originalJumpTarget)
+ {
+ return adjustJumpTarget(InsertionPoint(originalBytecodeOffset, Position::LabelPoint), InsertionPoint(originalJumpTarget, Position::LabelPoint));
+ }
+
+private:
+ void insertImpl(InsertionPoint, IncludeBranch, Vector<UnlinkedInstruction>&& fragment);
+
+ friend class UnlinkedCodeBlock;
+ void applyModification();
+ void adjustJumpTargetsInFragment(unsigned finalOffset, Insertion&);
+
+ int adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint);
+ template<typename Iterator> int calculateDifference(Iterator begin, Iterator end);
+
+ BytecodeGraph<UnlinkedCodeBlock>& m_graph;
+ Vector<Insertion, 8> m_insertions;
+};
+
+template<typename Iterator>
+inline int BytecodeRewriter::calculateDifference(Iterator begin, Iterator end)
+{
+ int result = 0;
+ for (; begin != end; ++begin) {
+ if (begin->type == Insertion::Type::Remove)
+ result -= begin->length();
+ else
+ result += begin->length();
+ }
+ return result;
+}
+
+} // namespace JSC
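
The label semantics spelled out in the header comment can be modeled with a toy stream, independent of JSC: each original offset is a label with separate "before" and "after" fragment slots, and a jump binds to the boundary between them, so a jump to "B" skips B's "before" fragment but runs its "after" fragment. The names and layout here are illustrative only.

    #include <cstdio>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> original = { "[bytecode A]", "[bytecode B]" };
        std::vector<std::string> before(original.size()), after(original.size());

        before[1] = "[before B]";
        after[1] = "[after B]";

        for (size_t label = 0; label < original.size(); ++label) {
            if (!before[label].empty())
                std::printf("%s ", before[label].c_str());
            std::printf("|%c| ", char('A' + label)); // the jump target for this label
            if (!after[label].empty())
                std::printf("%s ", after[label].c_str());
            std::printf("%s ", original[label].c_str());
        }
        // Prints: |A| [bytecode A] [before B] |B| [after B] [bytecode B]
        std::printf("\n");
        return 0;
    }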
diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
index 45cb91a1c..99b939403 100644
--- a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
+++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,50 +23,45 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BytecodeUseDef_h
-#define BytecodeUseDef_h
+#pragma once
#include "CodeBlock.h"
+#include "Interpreter.h"
namespace JSC {
-template<typename Functor>
-void computeUsesForBytecodeOffset(
- CodeBlock* codeBlock, unsigned bytecodeOffset, Functor& functor)
+template<typename Block, typename Functor, typename Instruction>
+void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor)
{
- Interpreter* interpreter = codeBlock->vm()->interpreter;
- Instruction* instructionsBegin = codeBlock->instructions().begin();
- Instruction* instruction = &instructionsBegin[bytecodeOffset];
- OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
+ if (opcodeID != op_enter && codeBlock->wasCompiledWithDebuggingOpcodes() && codeBlock->scopeRegister().isValid())
+ functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset());
+
switch (opcodeID) {
// No uses.
case op_new_regexp:
case op_new_array_buffer:
case op_throw_static_error:
case op_debug:
- case op_resolve_scope:
- case op_pop_scope:
case op_jneq_ptr:
- case op_new_func_exp:
case op_loop_hint:
case op_jmp:
case op_new_object:
- case op_init_lazy_reg:
- case op_get_callee:
case op_enter:
+ case op_argument_count:
case op_catch:
- case op_touch_entry:
+ case op_profile_control_flow:
+ case op_create_direct_arguments:
+ case op_create_cloned_arguments:
+ case op_get_rest_length:
+ case op_watchdog:
+ case op_get_argument:
return;
- case op_new_func:
- case op_new_captured_func:
- case op_create_activation:
- case op_create_arguments:
+ case op_assert:
+ case op_get_scope:
case op_to_this:
- case op_tear_off_activation:
- case op_profile_will_call:
- case op_profile_did_call:
+ case op_check_tdz:
+ case op_profile_type:
case op_throw:
- case op_push_with_scope:
case op_end:
case op_ret:
case op_jtrue:
@@ -74,11 +69,12 @@ void computeUsesForBytecodeOffset(
case op_jeq_null:
case op_jneq_null:
case op_dec:
- case op_inc: {
+ case op_inc:
+ case op_log_shadow_chicken_prologue: {
+ ASSERT(opcodeLengths[opcodeID] > 1);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
return;
}
- case op_ret_object_or_this:
case op_jlesseq:
case op_jgreater:
case op_jgreatereq:
@@ -86,91 +82,146 @@ void computeUsesForBytecodeOffset(
case op_jnlesseq:
case op_jngreater:
case op_jngreatereq:
- case op_jless: {
+ case op_jless:
+ case op_set_function_name:
+ case op_log_shadow_chicken_tail: {
+ ASSERT(opcodeLengths[opcodeID] > 2);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
return;
}
case op_put_by_val_direct:
case op_put_by_val: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
case op_put_by_index:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_normal_out_of_line:
- case op_put_by_id_generic:
- case op_put_by_id_out_of_line:
case op_put_by_id:
- case op_put_to_scope: {
+ case op_put_to_scope:
+ case op_put_to_arguments: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
- case op_put_getter_setter: {
+ case op_put_by_id_with_this: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ return;
+ }
+ case op_put_by_val_with_this: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
return;
}
- case op_init_global_const_nop:
- case op_init_global_const:
- case op_push_name_scope:
+ case op_put_getter_by_id:
+ case op_put_setter_by_id: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ return;
+ }
+ case op_put_getter_setter_by_id: {
+ ASSERT(opcodeLengths[opcodeID] > 5);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+ return;
+ }
+ case op_put_getter_by_val:
+ case op_put_setter_by_val: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ return;
+ }
+ case op_define_data_property: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ return;
+ }
+ case op_define_accessor_property: {
+ ASSERT(opcodeLengths[opcodeID] > 5);
+ functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+ return;
+ }
+ case op_spread:
+ case op_get_property_enumerator:
+ case op_get_enumerable_length:
+ case op_new_func_exp:
+ case op_new_generator_func_exp:
+ case op_new_async_func_exp:
+ case op_to_index_string:
+ case op_create_lexical_environment:
+ case op_resolve_scope:
case op_get_from_scope:
case op_to_primitive:
+ case op_try_get_by_id:
case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset:
case op_get_array_length:
- case op_get_string_length:
- case op_get_arguments_length:
case op_typeof:
+ case op_is_empty:
case op_is_undefined:
case op_is_boolean:
case op_is_number:
- case op_is_string:
case op_is_object:
+ case op_is_object_or_null:
+ case op_is_cell_with_type:
case op_is_function:
case op_to_number:
+ case op_to_string:
case op_negate:
case op_neq_null:
case op_eq_null:
case op_not:
case op_mov:
- case op_captured_mov:
case op_new_array_with_size:
case op_create_this:
- case op_get_pnames:
case op_del_by_id:
- case op_unsigned: {
+ case op_unsigned:
+ case op_new_func:
+ case op_new_generator_func:
+ case op_new_async_func:
+ case op_get_parent_scope:
+ case op_create_scoped_arguments:
+ case op_create_rest:
+ case op_get_from_arguments: {
+ ASSERT(opcodeLengths[opcodeID] > 2);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
return;
}
+ case op_has_generic_property:
+ case op_has_indexed_property:
+ case op_enumerator_structure_pname:
+ case op_enumerator_generic_pname:
case op_get_by_val:
- case op_get_argument_by_val:
case op_in:
+ case op_overrides_has_instance:
case op_instanceof:
- case op_check_has_instance:
case op_add:
case op_mul:
case op_div:
case op_mod:
case op_sub:
+ case op_pow:
case op_lshift:
case op_rshift:
case op_urshift:
@@ -185,38 +236,49 @@ void computeUsesForBytecodeOffset(
case op_stricteq:
case op_neq:
case op_eq:
- case op_del_by_val: {
+ case op_push_with_scope:
+ case op_get_by_id_with_this:
+ case op_del_by_val:
+ case op_tail_call_forward_arguments: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
- case op_call_varargs: {
+ case op_get_by_val_with_this: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
return;
}
- case op_next_pname: {
+ case op_instanceof_custom:
+ case op_has_structure_property:
+ case op_construct_varargs:
+ case op_call_varargs:
+ case op_tail_call_varargs: {
+ ASSERT(opcodeLengths[opcodeID] > 4);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
return;
}
- case op_get_by_pname: {
+ case op_get_direct_pname: {
+ ASSERT(opcodeLengths[opcodeID] > 5);
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[6].u.operand);
return;
}
case op_switch_string:
case op_switch_char:
case op_switch_imm: {
+ ASSERT(opcodeLengths[opcodeID] > 3);
functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
+ case op_new_array_with_spread:
case op_new_array:
case op_strcat: {
int base = instruction[2].u.operand;
@@ -227,19 +289,21 @@ void computeUsesForBytecodeOffset(
}
case op_construct:
case op_call_eval:
- case op_call: {
+ case op_call:
+ case op_tail_call: {
functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
int argCount = instruction[3].u.operand;
int registerOffset = -instruction[4].u.operand;
int lastArg = registerOffset + CallFrame::thisArgumentOffset();
- for (int i = opcodeID == op_construct ? 1 : 0; i < argCount; i++)
+ for (int i = 0; i < argCount; i++)
functor(codeBlock, instruction, opcodeID, lastArg + i);
+ if (opcodeID == op_call_eval)
+ functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset());
return;
}
- case op_tear_off_arguments: {
+ case op_yield: {
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
- functor(codeBlock, instruction, opcodeID, unmodifiedArgumentsRegister(VirtualRegister(instruction[1].u.operand)).offset());
- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
return;
}
default:
@@ -248,29 +312,18 @@ void computeUsesForBytecodeOffset(
}
}
-template<typename Functor>
-void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Functor& functor)
+template<typename Block, typename Instruction, typename Functor>
+void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor)
{
- Interpreter* interpreter = codeBlock->vm()->interpreter;
- Instruction* instructionsBegin = codeBlock->instructions().begin();
- Instruction* instruction = &instructionsBegin[bytecodeOffset];
- OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode);
switch (opcodeID) {
// These don't define anything.
- case op_init_global_const:
- case op_init_global_const_nop:
- case op_push_name_scope:
- case op_push_with_scope:
case op_put_to_scope:
- case op_pop_scope:
case op_end:
- case op_profile_will_call:
- case op_profile_did_call:
case op_throw:
case op_throw_static_error:
+ case op_assert:
case op_debug:
case op_ret:
- case op_ret_object_or_this:
case op_jmp:
case op_jtrue:
case op_jfalse:
@@ -290,79 +343,99 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
case op_switch_char:
case op_switch_string:
case op_put_by_id:
- case op_put_by_id_out_of_line:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_normal_out_of_line:
- case op_put_by_id_generic:
- case op_put_getter_setter:
+ case op_put_by_id_with_this:
+ case op_put_by_val_with_this:
+ case op_put_getter_by_id:
+ case op_put_setter_by_id:
+ case op_put_getter_setter_by_id:
+ case op_put_getter_by_val:
+ case op_put_setter_by_val:
case op_put_by_val:
case op_put_by_val_direct:
case op_put_by_index:
- case op_tear_off_arguments:
- case op_touch_entry:
+ case op_define_data_property:
+ case op_define_accessor_property:
+ case op_profile_type:
+ case op_profile_control_flow:
+ case op_put_to_arguments:
+ case op_set_function_name:
+ case op_watchdog:
+ case op_log_shadow_chicken_prologue:
+ case op_log_shadow_chicken_tail:
+ case op_yield:
#define LLINT_HELPER_OPCODES(opcode, length) case opcode:
FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES);
#undef LLINT_HELPER_OPCODES
return;
// These all have a single destination for the first argument.
- case op_next_pname:
+ case op_argument_count:
+ case op_to_index_string:
+ case op_get_enumerable_length:
+ case op_has_indexed_property:
+ case op_has_structure_property:
+ case op_has_generic_property:
+ case op_get_direct_pname:
+ case op_get_property_enumerator:
+ case op_enumerator_structure_pname:
+ case op_enumerator_generic_pname:
+ case op_get_parent_scope:
+ case op_push_with_scope:
+ case op_create_lexical_environment:
case op_resolve_scope:
case op_strcat:
- case op_tear_off_activation:
case op_to_primitive:
- case op_catch:
case op_create_this:
case op_new_array:
+ case op_new_array_with_spread:
+ case op_spread:
case op_new_array_buffer:
case op_new_array_with_size:
case op_new_regexp:
case op_new_func:
- case op_new_captured_func:
case op_new_func_exp:
+ case op_new_generator_func:
+ case op_new_generator_func_exp:
+ case op_new_async_func:
+ case op_new_async_func_exp:
case op_call_varargs:
+ case op_tail_call_varargs:
+ case op_tail_call_forward_arguments:
+ case op_construct_varargs:
case op_get_from_scope:
case op_call:
+ case op_tail_call:
case op_call_eval:
case op_construct:
+ case op_try_get_by_id:
case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset:
+ case op_get_by_id_with_this:
+ case op_get_by_val_with_this:
case op_get_array_length:
- case op_get_string_length:
- case op_check_has_instance:
+ case op_overrides_has_instance:
case op_instanceof:
+ case op_instanceof_custom:
case op_get_by_val:
- case op_get_argument_by_val:
- case op_get_by_pname:
- case op_get_arguments_length:
case op_typeof:
+ case op_is_empty:
case op_is_undefined:
case op_is_boolean:
case op_is_number:
- case op_is_string:
case op_is_object:
+ case op_is_object_or_null:
+ case op_is_cell_with_type:
case op_is_function:
case op_in:
case op_to_number:
+ case op_to_string:
case op_negate:
case op_add:
case op_mul:
case op_div:
case op_mod:
case op_sub:
+ case op_pow:
case op_lshift:
case op_rshift:
case op_urshift:
@@ -383,33 +456,36 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
case op_eq_null:
case op_not:
case op_mov:
- case op_captured_mov:
case op_new_object:
case op_to_this:
- case op_get_callee:
- case op_init_lazy_reg:
- case op_create_activation:
- case op_create_arguments:
+ case op_check_tdz:
+ case op_get_scope:
+ case op_create_direct_arguments:
+ case op_create_scoped_arguments:
+ case op_create_cloned_arguments:
case op_del_by_id:
case op_del_by_val:
- case op_unsigned: {
+ case op_unsigned:
+ case op_get_from_arguments:
+ case op_get_argument:
+ case op_create_rest:
+ case op_get_rest_length: {
+ ASSERT(opcodeLengths[opcodeID] > 1);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
return;
}
- case op_get_pnames: {
+ case op_catch: {
+ ASSERT(opcodeLengths[opcodeID] > 2);
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+ functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
return;
}
case op_enter: {
for (unsigned i = codeBlock->m_numVars; i--;)
functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset());
return;
- } }
+ }
+ }
}
} // namespace JSC
-
-#endif // BytecodeUseDef_h
-
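
The visitor shape used by computeUsesForBytecodeOffset and computeDefsForBytecodeOffset — a switch on the opcode that reports operand slots to a caller-supplied functor — can be shown with a toy instruction stream. The opcodes, slot layout, and register numbers below are invented, not JSC's; a liveness pass would pass functors that set (use) or clear (def) bits, exactly as stepOverInstruction does.

    #include <cstdio>

    enum OpcodeID { op_mov, op_add };

    struct Instruction { int operand; };

    template<typename Functor>
    void computeUses(OpcodeID opcodeID, const Instruction* instruction, const Functor& functor)
    {
        switch (opcodeID) {
        case op_mov: // mov dst, src: uses slot 2
            functor(instruction[2].operand);
            return;
        case op_add: // add dst, lhs, rhs: uses slots 2 and 3
            functor(instruction[2].operand);
            functor(instruction[3].operand);
            return;
        }
    }

    int main()
    {
        const Instruction add[] = { { op_add }, { 5 }, { 6 }, { 7 } }; // opcode slot + operands
        computeUses(op_add, add, [](int operand) { std::printf("use r%d\n", operand); });
        return 0;
    }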
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.cpp b/Source/JavaScriptCore/bytecode/CallEdge.cpp
new file mode 100644
index 000000000..dffff6dfd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallEdge.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallEdge.h"
+
+namespace JSC {
+
+void CallEdge::dump(PrintStream& out) const
+{
+ out.print("<", m_callee, ", count: ", m_count, ">");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.h b/Source/JavaScriptCore/bytecode/CallEdge.h
new file mode 100644
index 000000000..8c7abbcb8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallEdge.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CallVariant.h"
+
+namespace JSC {
+
+class CallEdge {
+public:
+ CallEdge();
+ CallEdge(CallVariant, uint32_t);
+
+ bool operator!() const { return !m_callee; }
+
+ CallVariant callee() const { return m_callee; }
+ uint32_t count() const { return m_count; }
+
+ CallEdge despecifiedClosure() const
+ {
+ return CallEdge(m_callee.despecifiedClosure(), m_count);
+ }
+
+ void dump(PrintStream&) const;
+
+private:
+ CallVariant m_callee;
+ uint32_t m_count;
+};
+
+inline CallEdge::CallEdge(CallVariant callee, uint32_t count)
+ : m_callee(callee)
+ , m_count(count)
+{
+}
+
+inline CallEdge::CallEdge()
+ : CallEdge(CallVariant(), 0)
+{
+}
+
+typedef Vector<CallEdge, 1> CallEdgeList;
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
index a4baa6100..7ffda05f4 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,35 +26,248 @@
#include "config.h"
#include "CallLinkInfo.h"
+#include "CallFrameShuffleData.h"
#include "DFGOperations.h"
#include "DFGThunks.h"
-#include "RepatchBuffer.h"
+#include "FunctionCodeBlock.h"
+#include "JSCInlines.h"
+#include "MacroAssembler.h"
+#include "Opcode.h"
+#include "Repatch.h"
+#include <wtf/ListDump.h>
#if ENABLE(JIT)
namespace JSC {
-void CallLinkInfo::unlink(VM& vm, RepatchBuffer& repatchBuffer)
+CallLinkInfo::CallType CallLinkInfo::callTypeFor(OpcodeID opcodeID)
{
- ASSERT(isLinked());
+ if (opcodeID == op_call || opcodeID == op_call_eval)
+ return Call;
+ if (opcodeID == op_call_varargs)
+ return CallVarargs;
+ if (opcodeID == op_construct)
+ return Construct;
+ if (opcodeID == op_construct_varargs)
+ return ConstructVarargs;
+ if (opcodeID == op_tail_call)
+ return TailCall;
+ ASSERT(opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments);
+ return TailCallVarargs;
+}
+
+CallLinkInfo::CallLinkInfo()
+ : m_hasSeenShouldRepatch(false)
+ , m_hasSeenClosure(false)
+ , m_clearedByGC(false)
+ , m_allowStubs(true)
+ , m_isLinked(false)
+ , m_callType(None)
+ , m_calleeGPR(255)
+ , m_maxNumArguments(0)
+ , m_slowPathCount(0)
+{
+}
+
+CallLinkInfo::~CallLinkInfo()
+{
+ clearStub();
- repatchBuffer.revertJumpReplacementToBranchPtrWithPatch(RepatchBuffer::startOfBranchPtrWithPatchOnRegister(hotPathBegin), static_cast<MacroAssembler::RegisterID>(calleeGPR), 0);
- if (isDFG) {
-#if ENABLE(DFG_JIT)
- repatchBuffer.relink(callReturnLocation, (callType == Construct ? vm.getCTIStub(linkConstructThunkGenerator) : vm.getCTIStub(linkCallThunkGenerator)).code());
-#else
- RELEASE_ASSERT_NOT_REACHED();
-#endif
- } else
- repatchBuffer.relink(callReturnLocation, callType == Construct ? vm.getCTIStub(linkConstructThunkGenerator).code() : vm.getCTIStub(linkCallThunkGenerator).code());
- hasSeenShouldRepatch = false;
- callee.clear();
- stub.clear();
-
- // It will be on a list if the callee has a code block.
if (isOnList())
remove();
}
+void CallLinkInfo::clearStub()
+{
+ if (!stub())
+ return;
+
+ m_stub->clearCallNodesFor(this);
+ m_stub = nullptr;
+}
+
+void CallLinkInfo::unlink(VM& vm)
+{
+ // We could be called even if we're not linked anymore because of how polymorphic calls
+ // work. Each callsite within the polymorphic call stub may separately ask us to unlink().
+ if (isLinked())
+ unlinkFor(vm, *this);
+
+ // Either we were unlinked, in which case we should not have been on any list, or we unlinked
+ // ourselves so that we're not on any list anymore.
+ RELEASE_ASSERT(!isOnList());
+}
+
+CodeLocationNearCall CallLinkInfo::callReturnLocation()
+{
+ RELEASE_ASSERT(!isDirect());
+ return CodeLocationNearCall(m_callReturnLocationOrPatchableJump, Regular);
+}
+
+CodeLocationJump CallLinkInfo::patchableJump()
+{
+ RELEASE_ASSERT(callType() == DirectTailCall);
+ return CodeLocationJump(m_callReturnLocationOrPatchableJump);
+}
+
+CodeLocationDataLabelPtr CallLinkInfo::hotPathBegin()
+{
+ RELEASE_ASSERT(!isDirect());
+ return CodeLocationDataLabelPtr(m_hotPathBeginOrSlowPathStart);
+}
+
+CodeLocationLabel CallLinkInfo::slowPathStart()
+{
+ RELEASE_ASSERT(isDirect());
+ return m_hotPathBeginOrSlowPathStart;
+}
+
+void CallLinkInfo::setCallee(VM& vm, JSCell* owner, JSFunction* callee)
+{
+ RELEASE_ASSERT(!isDirect());
+ MacroAssembler::repatchPointer(hotPathBegin(), callee);
+ m_calleeOrCodeBlock.set(vm, owner, callee);
+ m_isLinked = true;
+}
+
+void CallLinkInfo::clearCallee()
+{
+ RELEASE_ASSERT(!isDirect());
+ MacroAssembler::repatchPointer(hotPathBegin(), nullptr);
+ m_calleeOrCodeBlock.clear();
+ m_isLinked = false;
+}
+
+JSFunction* CallLinkInfo::callee()
+{
+ RELEASE_ASSERT(!isDirect());
+ return jsCast<JSFunction*>(m_calleeOrCodeBlock.get());
+}
+
+void CallLinkInfo::setCodeBlock(VM& vm, JSCell* owner, FunctionCodeBlock* codeBlock)
+{
+ RELEASE_ASSERT(isDirect());
+ m_calleeOrCodeBlock.setMayBeNull(vm, owner, codeBlock);
+ m_isLinked = true;
+}
+
+void CallLinkInfo::clearCodeBlock()
+{
+ RELEASE_ASSERT(isDirect());
+ m_calleeOrCodeBlock.clear();
+ m_isLinked = false;
+}
+
+FunctionCodeBlock* CallLinkInfo::codeBlock()
+{
+ RELEASE_ASSERT(isDirect());
+ return jsCast<FunctionCodeBlock*>(m_calleeOrCodeBlock.get());
+}
+
+void CallLinkInfo::setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee)
+{
+ RELEASE_ASSERT(!isDirect());
+ m_lastSeenCalleeOrExecutable.set(vm, owner, callee);
+}
+
+void CallLinkInfo::clearLastSeenCallee()
+{
+ RELEASE_ASSERT(!isDirect());
+ m_lastSeenCalleeOrExecutable.clear();
+}
+
+JSFunction* CallLinkInfo::lastSeenCallee()
+{
+ RELEASE_ASSERT(!isDirect());
+ return jsCast<JSFunction*>(m_lastSeenCalleeOrExecutable.get());
+}
+
+bool CallLinkInfo::haveLastSeenCallee()
+{
+ RELEASE_ASSERT(!isDirect());
+ return !!m_lastSeenCalleeOrExecutable;
+}
+
+void CallLinkInfo::setExecutableDuringCompilation(ExecutableBase* executable)
+{
+ RELEASE_ASSERT(isDirect());
+ m_lastSeenCalleeOrExecutable.setWithoutWriteBarrier(executable);
+}
+
+ExecutableBase* CallLinkInfo::executable()
+{
+ RELEASE_ASSERT(isDirect());
+ return jsCast<ExecutableBase*>(m_lastSeenCalleeOrExecutable.get());
+}
+
+void CallLinkInfo::setMaxNumArguments(unsigned value)
+{
+ RELEASE_ASSERT(isDirect());
+ RELEASE_ASSERT(value);
+ m_maxNumArguments = value;
+}
+
+void CallLinkInfo::visitWeak(VM& vm)
+{
+ auto handleSpecificCallee = [&] (JSFunction* callee) {
+ if (Heap::isMarked(callee->executable()))
+ m_hasSeenClosure = true;
+ else
+ m_clearedByGC = true;
+ };
+
+ if (isLinked()) {
+ if (stub()) {
+ if (!stub()->visitWeak(vm)) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Clearing closure call to ",
+ listDump(stub()->variants()), ", stub routine ", RawPointer(stub()),
+ ".\n");
+ }
+ unlink(vm);
+ m_clearedByGC = true;
+ }
+ } else if (!Heap::isMarked(m_calleeOrCodeBlock.get())) {
+ if (isDirect()) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Clearing call to ", RawPointer(codeBlock()), " (",
+ pointerDump(codeBlock()), ").\n");
+ }
+ } else {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Clearing call to ",
+ RawPointer(callee()), " (",
+ callee()->executable()->hashFor(specializationKind()),
+ ").\n");
+ }
+ handleSpecificCallee(callee());
+ }
+ unlink(vm);
+ } else if (isDirect() && !Heap::isMarked(m_lastSeenCalleeOrExecutable.get())) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Clearing call to ", RawPointer(executable()),
+ " because the executable is dead.\n");
+ }
+ unlink(vm);
+ // We should only get here once the owning CodeBlock is dying, since the executable must
+ // already be in the owner's weak references.
+ m_lastSeenCalleeOrExecutable.clear();
+ }
+ }
+ if (!isDirect() && haveLastSeenCallee() && !Heap::isMarked(lastSeenCallee())) {
+ handleSpecificCallee(lastSeenCallee());
+ clearLastSeenCallee();
+ }
+}
+
+void CallLinkInfo::setFrameShuffleData(const CallFrameShuffleData& shuffleData)
+{
+ m_frameShuffleData = std::make_unique<CallFrameShuffleData>(shuffleData);
+}
+
} // namespace JSC
#endif // ENABLE(JIT)
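
The weak-reference discipline in visitWeak — drop the cached callee as soon as the collector stops reporting it marked, so the call site reverts to the slow path — can be modeled without any of the JIT machinery. Everything below is an invented toy, not the JSC API.

    #include <cstdio>

    struct Callee { bool marked; };

    struct ToyCallLink {
        Callee* callee;

        void visitWeak()
        {
            if (callee && !callee->marked)
                callee = nullptr; // "unlink": the next call takes the slow path
        }
    };

    int main()
    {
        Callee deadFunction = { /* marked */ false };
        ToyCallLink link = { &deadFunction };
        link.visitWeak();
        std::printf("still linked: %s\n", link.callee ? "yes" : "no"); // prints "no"
        return 0;
    }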
diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.h b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
index 0244497df..91d3dd8f7 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkInfo.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,94 +23,341 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CallLinkInfo_h
-#define CallLinkInfo_h
+#pragma once
-#include "ClosureCallStubRoutine.h"
+#include "CallMode.h"
#include "CodeLocation.h"
#include "CodeSpecializationKind.h"
-#include "JITWriteBarrier.h"
-#include "JSFunction.h"
-#include "Opcode.h"
+#include "PolymorphicCallStubRoutine.h"
#include "WriteBarrier.h"
-#include <wtf/Platform.h>
#include <wtf/SentinelLinkedList.h>
namespace JSC {
#if ENABLE(JIT)
-class RepatchBuffer;
+class FunctionCodeBlock;
+class JSFunction;
+enum OpcodeID : unsigned;
+struct CallFrameShuffleData;
-struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
- enum CallType { None, Call, CallVarargs, Construct };
- static CallType callTypeFor(OpcodeID opcodeID)
+class CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
+public:
+ enum CallType {
+ None,
+ Call,
+ CallVarargs,
+ Construct,
+ ConstructVarargs,
+ TailCall,
+ TailCallVarargs,
+ DirectCall,
+ DirectConstruct,
+ DirectTailCall
+ };
+
+ static CallType callTypeFor(OpcodeID opcodeID);
+
+ static bool isVarargsCallType(CallType callType)
{
- if (opcodeID == op_call || opcodeID == op_call_eval)
- return Call;
- if (opcodeID == op_construct)
- return Construct;
- ASSERT(opcodeID == op_call_varargs);
- return CallVarargs;
+ switch (callType) {
+ case CallVarargs:
+ case ConstructVarargs:
+ case TailCallVarargs:
+ return true;
+
+ default:
+ return false;
+ }
}
+
+ CallLinkInfo();
- CallLinkInfo()
- : hasSeenShouldRepatch(false)
- , isDFG(false)
- , hasSeenClosure(false)
- , callType(None)
+ ~CallLinkInfo();
+
+ static CodeSpecializationKind specializationKindFor(CallType callType)
{
+ return specializationFromIsConstruct(callType == Construct || callType == ConstructVarargs || callType == DirectConstruct);
}
-
- ~CallLinkInfo()
+ CodeSpecializationKind specializationKind() const
{
- if (isOnList())
- remove();
+ return specializationKindFor(static_cast<CallType>(m_callType));
}
- CodeSpecializationKind specializationKind() const
+ static CallMode callModeFor(CallType callType)
+ {
+ switch (callType) {
+ case Call:
+ case CallVarargs:
+ case DirectCall:
+ return CallMode::Regular;
+ case TailCall:
+ case TailCallVarargs:
+ case DirectTailCall:
+ return CallMode::Tail;
+ case Construct:
+ case ConstructVarargs:
+ case DirectConstruct:
+ return CallMode::Construct;
+ case None:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static bool isDirect(CallType callType)
+ {
+ switch (callType) {
+ case DirectCall:
+ case DirectTailCall:
+ case DirectConstruct:
+ return true;
+ case Call:
+ case CallVarargs:
+ case TailCall:
+ case TailCallVarargs:
+ case Construct:
+ case ConstructVarargs:
+ return false;
+ case None:
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ CallMode callMode() const
+ {
+ return callModeFor(static_cast<CallType>(m_callType));
+ }
+
+ bool isDirect()
+ {
+ return isDirect(static_cast<CallType>(m_callType));
+ }
+
+ bool isTailCall() const
+ {
+ return callMode() == CallMode::Tail;
+ }
+
+ NearCallMode nearCallMode() const
+ {
+ return isTailCall() ? Tail : Regular;
+ }
+
+ bool isVarargs() const
+ {
+ return isVarargsCallType(static_cast<CallType>(m_callType));
+ }
+
+ bool isLinked() { return m_stub || m_calleeOrCodeBlock; }
+ void unlink(VM&);
+
+ void setUpCall(CallType callType, CodeOrigin codeOrigin, unsigned calleeGPR)
+ {
+ m_callType = callType;
+ m_codeOrigin = codeOrigin;
+ m_calleeGPR = calleeGPR;
+ }
+
+ void setCallLocations(
+ CodeLocationLabel callReturnLocationOrPatchableJump,
+ CodeLocationLabel hotPathBeginOrSlowPathStart,
+ CodeLocationNearCall hotPathOther)
{
- return specializationFromIsConstruct(callType == Construct);
+ m_callReturnLocationOrPatchableJump = callReturnLocationOrPatchableJump;
+ m_hotPathBeginOrSlowPathStart = hotPathBeginOrSlowPathStart;
+ m_hotPathOther = hotPathOther;
}
- CodeLocationNearCall callReturnLocation;
- CodeLocationDataLabelPtr hotPathBegin;
- CodeLocationNearCall hotPathOther;
- JITWriteBarrier<JSFunction> callee;
- WriteBarrier<JSFunction> lastSeenCallee;
- RefPtr<ClosureCallStubRoutine> stub;
- bool hasSeenShouldRepatch : 1;
- bool isDFG : 1;
- bool hasSeenClosure : 1;
- unsigned callType : 5; // CallType
- unsigned calleeGPR : 8;
- CodeOrigin codeOrigin;
+ bool allowStubs() const { return m_allowStubs; }
+
+ void disallowStubs()
+ {
+ m_allowStubs = false;
+ }
+
+ CodeLocationNearCall callReturnLocation();
+ CodeLocationJump patchableJump();
+ CodeLocationDataLabelPtr hotPathBegin();
+ CodeLocationLabel slowPathStart();
+
+ CodeLocationNearCall hotPathOther()
+ {
+ return m_hotPathOther;
+ }
+
+ void setCallee(VM&, JSCell*, JSFunction* callee);
+ void clearCallee();
+ JSFunction* callee();
+
+ void setCodeBlock(VM&, JSCell*, FunctionCodeBlock*);
+ void clearCodeBlock();
+ FunctionCodeBlock* codeBlock();
+
+ void setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee);
+ void clearLastSeenCallee();
+ JSFunction* lastSeenCallee();
+ bool haveLastSeenCallee();
+
+ void setExecutableDuringCompilation(ExecutableBase*);
+ ExecutableBase* executable();
+
+ void setStub(Ref<PolymorphicCallStubRoutine>&& newStub)
+ {
+ clearStub();
+ m_stub = WTFMove(newStub);
+ }
+
+ void clearStub();
+
+ PolymorphicCallStubRoutine* stub()
+ {
+ return m_stub.get();
+ }
+
+ void setSlowStub(Ref<JITStubRoutine>&& newSlowStub)
+ {
+ m_slowStub = WTFMove(newSlowStub);
+ }
+
+ void clearSlowStub()
+ {
+ m_slowStub = nullptr;
+ }
- bool isLinked() { return stub || callee; }
- void unlink(VM&, RepatchBuffer&);
+ JITStubRoutine* slowStub()
+ {
+ return m_slowStub.get();
+ }
bool seenOnce()
{
- return hasSeenShouldRepatch;
+ return m_hasSeenShouldRepatch;
+ }
+
+ void clearSeen()
+ {
+ m_hasSeenShouldRepatch = false;
}
void setSeen()
{
- hasSeenShouldRepatch = true;
+ m_hasSeenShouldRepatch = true;
+ }
+
+ bool hasSeenClosure()
+ {
+ return m_hasSeenClosure;
+ }
+
+ void setHasSeenClosure()
+ {
+ m_hasSeenClosure = true;
+ }
+
+ bool clearedByGC()
+ {
+ return m_clearedByGC;
+ }
+
+ void setCallType(CallType callType)
+ {
+ m_callType = callType;
+ }
+
+ CallType callType()
+ {
+ return static_cast<CallType>(m_callType);
+ }
+
+ uint32_t* addressOfMaxNumArguments()
+ {
+ return &m_maxNumArguments;
+ }
+
+ uint32_t maxNumArguments()
+ {
+ return m_maxNumArguments;
+ }
+
+ void setMaxNumArguments(unsigned);
+
+ static ptrdiff_t offsetOfSlowPathCount()
+ {
+ return OBJECT_OFFSETOF(CallLinkInfo, m_slowPathCount);
+ }
+
+ void setCalleeGPR(unsigned calleeGPR)
+ {
+ m_calleeGPR = calleeGPR;
+ }
+
+ unsigned calleeGPR()
+ {
+ return m_calleeGPR;
}
+
+ uint32_t slowPathCount()
+ {
+ return m_slowPathCount;
+ }
+
+ void setCodeOrigin(CodeOrigin codeOrigin)
+ {
+ m_codeOrigin = codeOrigin;
+ }
+
+ CodeOrigin codeOrigin()
+ {
+ return m_codeOrigin;
+ }
+
+ void visitWeak(VM&);
+
+ void setFrameShuffleData(const CallFrameShuffleData&);
+
+ const CallFrameShuffleData* frameShuffleData()
+ {
+ return m_frameShuffleData.get();
+ }
+
+private:
+ CodeLocationLabel m_callReturnLocationOrPatchableJump;
+ CodeLocationLabel m_hotPathBeginOrSlowPathStart;
+ CodeLocationNearCall m_hotPathOther;
+ WriteBarrier<JSCell> m_calleeOrCodeBlock;
+ WriteBarrier<JSCell> m_lastSeenCalleeOrExecutable;
+ RefPtr<PolymorphicCallStubRoutine> m_stub;
+ RefPtr<JITStubRoutine> m_slowStub;
+ std::unique_ptr<CallFrameShuffleData> m_frameShuffleData;
+ bool m_hasSeenShouldRepatch : 1;
+ bool m_hasSeenClosure : 1;
+ bool m_clearedByGC : 1;
+ bool m_allowStubs : 1;
+ bool m_isLinked : 1;
+ unsigned m_callType : 4; // CallType
+ unsigned m_calleeGPR : 8;
+ uint32_t m_maxNumArguments; // For varargs: the profiled maximum number of arguments. For direct: the number of stack slots allocated for arguments.
+ uint32_t m_slowPathCount;
+ CodeOrigin m_codeOrigin;
};
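+
+// A minimal usage sketch (hypothetical, not part of this patch), assuming a
+// CallLinkInfo `info` that has been profiling a baseline call site:
+//
+//     if (info.seenOnce() && !info.clearedByGC()) {
+//         if (JSFunction* target = info.lastSeenCallee()) {
+//             CallVariant variant(target);
+//             if (info.hasSeenClosure())
+//                 variant = variant.despecifiedClosure();
+//             // `variant` plus info.slowPathCount() is what CallLinkStatus
+//             // distills into an inlining decision.
+//         }
+//     }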
-inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo)
+inline CodeOrigin getCallLinkInfoCodeOrigin(CallLinkInfo& callLinkInfo)
{
- return callLinkInfo->callReturnLocation.executableAddress();
+ return callLinkInfo.codeOrigin();
}
-inline unsigned getCallLinkInfoBytecodeIndex(CallLinkInfo* callLinkInfo)
-{
- return callLinkInfo->codeOrigin.bytecodeIndex;
-}
+typedef HashMap<CodeOrigin, CallLinkInfo*, CodeOriginApproximateHash> CallLinkInfoMap;
+
+#else // ENABLE(JIT)
+
+typedef HashMap<int, void*> CallLinkInfoMap;
+
#endif // ENABLE(JIT)
} // namespace JSC
-
-#endif // CallLinkInfo_h
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
index b64c967e9..cbc555df1 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,103 +26,312 @@
#include "config.h"
#include "CallLinkStatus.h"
+#include "CallLinkInfo.h"
#include "CodeBlock.h"
+#include "DFGJITCode.h"
+#include "InlineCallFrame.h"
+#include "Interpreter.h"
#include "LLIntCallLinkInfo.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
namespace JSC {
+static const bool verbose = false;
+
CallLinkStatus::CallLinkStatus(JSValue value)
- : m_callTarget(value)
- , m_executable(0)
- , m_structure(0)
- , m_couldTakeSlowPath(false)
+ : m_couldTakeSlowPath(false)
, m_isProved(false)
{
- if (!value || !value.isCell())
- return;
-
- m_structure = value.asCell()->structure();
-
- if (!value.asCell()->inherits(JSFunction::info()))
+ if (!value || !value.isCell()) {
+ m_couldTakeSlowPath = true;
return;
+ }
- m_executable = jsCast<JSFunction*>(value.asCell())->executable();
+ m_variants.append(CallVariant(value.asCell()));
}
-JSFunction* CallLinkStatus::function() const
+CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
- if (!m_callTarget || !m_callTarget.isCell())
- return 0;
-
- if (!m_callTarget.asCell()->inherits(JSFunction::info()))
- return 0;
-
- return jsCast<JSFunction*>(m_callTarget.asCell());
-}
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+#if ENABLE(DFG_JIT)
+ if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
+ // We could force this to be a closure call, but instead we'll just assume that it
+ // takes slow path.
+ return takesSlowPath();
+ }
+#else
+ UNUSED_PARAM(locker);
+#endif
-InternalFunction* CallLinkStatus::internalFunction() const
-{
- if (!m_callTarget || !m_callTarget.isCell())
- return 0;
+ VM& vm = *profiledBlock->vm();
- if (!m_callTarget.asCell()->inherits(InternalFunction::info()))
- return 0;
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+ OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
+ if (op != op_call && op != op_construct && op != op_tail_call)
+ return CallLinkStatus();
- return jsCast<InternalFunction*>(m_callTarget.asCell());
-}
-
-Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
-{
- if (!m_executable)
- return NoIntrinsic;
+ LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
- return m_executable->intrinsicFor(kind);
+ return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
}
-CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus CallLinkStatus::computeFor(
+ CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
+
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(LLINT)
- Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
+ UNUSED_PARAM(map);
+#if ENABLE(DFG_JIT)
+ ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
- return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
+ CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
+ if (!callLinkInfo) {
+ if (exitSiteData.takesSlowPath)
+ return takesSlowPath();
+ return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
+ }
+
+ return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
#else
return CallLinkStatus();
#endif
}
-CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
+ const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
- ConcurrentJITLocker locker(profiledBlock->m_lock);
+ ExitSiteData exitSiteData;
+#if ENABLE(DFG_JIT)
+ exitSiteData.takesSlowPath =
+ profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable));
+ exitSiteData.badFunction =
+ profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell));
+#else
+ UNUSED_PARAM(locker);
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
+#endif
+
+ return exitSiteData;
+}
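+
+// Informally: BadType/BadExecutable exit sites force the slow path outright,
+// while a BadCell exit only marks the cached callee as suspect; the
+// computeFor() overloads below consume the two bits accordingly.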
+
#if ENABLE(JIT)
- if (!profiledBlock->hasBaselineJITProfiling())
- return computeFromLLInt(profiledBlock, bytecodeIndex);
+CallLinkStatus CallLinkStatus::computeFor(
+ const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
+{
+ // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
+ UNUSED_PARAM(profiledBlock);
- if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
- return CallLinkStatus::takesSlowPath();
+ CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
+ result.m_maxNumArguments = callLinkInfo.maxNumArguments();
+ return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
+ const ConcurrentJSLocker&, CallLinkInfo& callLinkInfo)
+{
+ if (callLinkInfo.clearedByGC())
+ return takesSlowPath();
+
+ // Note that despite requiring that the locker is held, this code is racy with respect
+ // to the CallLinkInfo: it may get cleared while this code runs! This is because
+ // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
+ // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
+ // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
+ // itself to figure out which lock to lock.
+ //
+ // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
+ // path count, the stub, and the target - can all be asked racily. Stubs and targets can
+ // only be deleted at next GC, so if we load a non-null one, then it must contain data
+ // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
+ // is probably OK for now.
- CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
- if (callLinkInfo.stub)
- return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());
+ // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
+ // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is
+ // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative
+ // fencing in place to make sure that we see the variants list after construction.
+ if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
+ WTF::loadLoadFence();
+
+ CallEdgeList edges = stub->edges();
+
+ // Now that we've loaded the edges list, there are no further concurrency concerns. We will
+ // just manipulate and prune this list to our liking - mostly removing entries that are too
+ // infrequent and ensuring that it's sorted in descending order of frequency.
+
+ RELEASE_ASSERT(edges.size());
+
+ std::sort(
+ edges.begin(), edges.end(),
+ [] (CallEdge a, CallEdge b) {
+ return a.count() > b.count();
+ });
+ RELEASE_ASSERT(edges.first().count() >= edges.last().count());
+
+ double totalCallsToKnown = 0;
+ double totalCallsToUnknown = callLinkInfo.slowPathCount();
+ CallVariantList variants;
+ for (size_t i = 0; i < edges.size(); ++i) {
+ CallEdge edge = edges[i];
+ // If the call is at the tail of the distribution, then we don't optimize it and we
+ // treat it as if it was a call to something unknown. We define the tail as being either
+ // a call that doesn't belong to the N most frequent callees (N =
+ // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
+ // small.
+ if (i >= Options::maxPolymorphicCallVariantsForInlining()
+ || edge.count() < Options::frequentCallThreshold())
+ totalCallsToUnknown += edge.count();
+ else {
+ totalCallsToKnown += edge.count();
+ variants.append(edge.callee());
+ }
+ }
+
+ // Bail if we didn't find any calls that qualified.
+ RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
+ if (variants.isEmpty())
+ return takesSlowPath();
+
+ // We require that the distribution of callees is skewed towards a handful of common ones.
+ if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
+ return takesSlowPath();
+
+ RELEASE_ASSERT(totalCallsToKnown);
+ RELEASE_ASSERT(variants.size());
+
+ CallLinkStatus result;
+ result.m_variants = variants;
+ result.m_couldTakeSlowPath = !!totalCallsToUnknown;
+ result.m_isBasedOnStub = true;
+ return result;
+ }
- JSFunction* target = callLinkInfo.lastSeenCallee.get();
- if (!target)
- return computeFromLLInt(profiledBlock, bytecodeIndex);
+ CallLinkStatus result;
+
+ if (JSFunction* target = callLinkInfo.lastSeenCallee()) {
+ CallVariant variant(target);
+ if (callLinkInfo.hasSeenClosure())
+ variant = variant.despecifiedClosure();
+ result.m_variants.append(variant);
+ }
- if (callLinkInfo.hasSeenClosure)
- return CallLinkStatus(target->executable(), target->structure());
+ result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();
- return CallLinkStatus(target);
-#else
- return CallLinkStatus();
+ return result;
+}
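+
+// Worked example (numbers hypothetical): suppose the stub recorded edges
+// {A: 900, B: 80, C: 5} and slowPathCount() is 10, with
+// frequentCallThreshold = 100 and maxPolymorphicCallVariantsForInlining = 5.
+// Only A qualifies, so totalCallsToKnown = 900 and
+// totalCallsToUnknown = 10 + 80 + 5 = 95; 900 / 95 easily clears a typical
+// minimumCallToKnownRate, so the result carries variant A with
+// m_couldTakeSlowPath set, since some calls still went elsewhere.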
+
+CallLinkStatus CallLinkStatus::computeFor(
+ const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
+ ExitSiteData exitSiteData)
+{
+ CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
+ if (exitSiteData.badFunction) {
+ if (result.isBasedOnStub()) {
+ // If we have a polymorphic stub, then having an exit site is not quite so useful. In
+ // most cases, the information in the stub has higher fidelity.
+ result.makeClosureCall();
+ } else {
+ // We might not have a polymorphic stub for any number of reasons. When this happens, we
+ // are in less certain territory, so exit sites mean a lot.
+ result.m_couldTakeSlowPath = true;
+ }
+ }
+ if (exitSiteData.takesSlowPath)
+ result.m_couldTakeSlowPath = true;
+
+ return result;
+}
#endif
+
+void CallLinkStatus::computeDFGStatuses(
+ CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
+{
+#if ENABLE(DFG_JIT)
+ RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
+ CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
+ for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
+ CallLinkInfo& info = **iter;
+ if (info.isDirect()) {
+ // If the DFG was able to get a direct call then probably so will we. However, there is
+ // a remote chance that it's bad news to lose information about what the DFG did. We'd
+ // ideally like to just know that the DFG had emitted a DirectCall.
+ continue;
+ }
+ CodeOrigin codeOrigin = info.codeOrigin();
+
+ // Check if we had already previously made a terrible mistake in the FTL for this
+ // code origin. Note that this is approximate because we could have a monovariant
+ // inline in the FTL that ended up failing. We should fix that at some point by
+ // having data structures to track the context of frequent exits. This is currently
+ // challenging because it would require creating a CodeOrigin-based database in
+ // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
+ // InlineCallFrames.
+ CodeBlock* currentBaseline =
+ baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+ ExitSiteData exitSiteData;
+ {
+ ConcurrentJSLocker locker(currentBaseline->m_lock);
+ exitSiteData = computeExitSiteData(
+ locker, currentBaseline, codeOrigin.bytecodeIndex);
+ }
+
+ {
+ ConcurrentJSLocker locker(dfgCodeBlock->m_lock);
+ map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData));
+ }
+ }
+#else
+ UNUSED_PARAM(dfgCodeBlock);
+#endif // ENABLE(DFG_JIT)
+
+ if (verbose) {
+ dataLog("Context map:\n");
+ ContextMap::iterator iter = map.begin();
+ ContextMap::iterator end = map.end();
+ for (; iter != end; ++iter) {
+ dataLog(" ", iter->key, ":\n");
+ dataLog(" ", iter->value, "\n");
+ }
+ }
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+ CodeBlock* profiledBlock, CodeOrigin codeOrigin,
+ const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap)
+{
+ auto iter = dfgMap.find(codeOrigin);
+ if (iter != dfgMap.end())
+ return iter->value;
+
+ return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
+}
+
+void CallLinkStatus::setProvenConstantCallee(CallVariant variant)
+{
+ m_variants = CallVariantList{ variant };
+ m_couldTakeSlowPath = false;
+ m_isProved = true;
+}
+
+bool CallLinkStatus::isClosureCall() const
+{
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].isClosureCall())
+ return true;
+ }
+ return false;
+}
+
+void CallLinkStatus::makeClosureCall()
+{
+ m_variants = despecifiedVariantList(m_variants);
}
void CallLinkStatus::dump(PrintStream& out) const
@@ -140,17 +349,14 @@ void CallLinkStatus::dump(PrintStream& out) const
if (m_couldTakeSlowPath)
out.print(comma, "Could Take Slow Path");
- if (m_callTarget)
- out.print(comma, "Known target: ", m_callTarget);
+ if (m_isBasedOnStub)
+ out.print(comma, "Based On Stub");
- if (m_executable) {
- out.print(comma, "Executable/CallHash: ", RawPointer(m_executable));
- if (!isCompilationThread())
- out.print("/", m_executable->hashFor(CodeForCall));
- }
+ if (!m_variants.isEmpty())
+ out.print(comma, listDump(m_variants));
- if (m_structure)
- out.print(comma, "Structure: ", RawPointer(m_structure));
+ if (m_maxNumArguments)
+ out.print(comma, "maxNumArguments = ", m_maxNumArguments);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
index 51965fe4a..353deaaf8 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,28 +23,29 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CallLinkStatus_h
-#define CallLinkStatus_h
+#pragma once
+#include "CallLinkInfo.h"
+#include "CallVariant.h"
+#include "CodeOrigin.h"
#include "CodeSpecializationKind.h"
+#include "ConcurrentJSLock.h"
+#include "ExitingJITType.h"
#include "Intrinsic.h"
#include "JSCJSValue.h"
namespace JSC {
class CodeBlock;
-class ExecutableBase;
class InternalFunction;
class JSFunction;
class Structure;
+class CallLinkInfo;
class CallLinkStatus {
+ WTF_MAKE_FAST_ALLOCATED;
public:
CallLinkStatus()
- : m_executable(0)
- , m_structure(0)
- , m_couldTakeSlowPath(false)
- , m_isProved(false)
{
}
@@ -57,78 +58,77 @@ public:
explicit CallLinkStatus(JSValue);
- CallLinkStatus(ExecutableBase* executable, Structure* structure)
- : m_executable(executable)
- , m_structure(structure)
- , m_couldTakeSlowPath(false)
- , m_isProved(false)
+ CallLinkStatus(CallVariant variant)
+ : m_variants(1, variant)
{
- ASSERT(!!executable == !!structure);
}
- CallLinkStatus& setIsProved(bool isProved)
- {
- m_isProved = isProved;
- return *this;
- }
+ static CallLinkStatus computeFor(
+ CodeBlock*, unsigned bytecodeIndex, const CallLinkInfoMap&);
+
+ struct ExitSiteData {
+ bool takesSlowPath { false };
+ bool badFunction { false };
+ };
+ static ExitSiteData computeExitSiteData(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
- static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex);
+#if ENABLE(JIT)
+ // Computes the status assuming that we never took slow path and never previously
+ // exited.
+ static CallLinkStatus computeFor(const ConcurrentJSLocker&, CodeBlock*, CallLinkInfo&);
+ static CallLinkStatus computeFor(
+ const ConcurrentJSLocker&, CodeBlock*, CallLinkInfo&, ExitSiteData);
+#endif
- CallLinkStatus& setHasBadFunctionExitSite(bool didHaveExitSite)
- {
- ASSERT(!m_isProved);
- if (didHaveExitSite) {
- // Turn this into a closure call.
- m_callTarget = JSValue();
- }
- return *this;
- }
+ typedef HashMap<CodeOrigin, CallLinkStatus, CodeOriginApproximateHash> ContextMap;
- CallLinkStatus& setHasBadCacheExitSite(bool didHaveExitSite)
- {
- ASSERT(!m_isProved);
- if (didHaveExitSite)
- *this = takesSlowPath();
- return *this;
- }
+ // Computes all of the statuses of the DFG code block. Doesn't include statuses that had
+ // no information. Currently we use this when compiling FTL code, to enable polyvariant
+ // inlining.
+ static void computeDFGStatuses(CodeBlock* dfgCodeBlock, ContextMap&);
- CallLinkStatus& setHasBadExecutableExitSite(bool didHaveExitSite)
- {
- ASSERT(!m_isProved);
- if (didHaveExitSite)
- *this = takesSlowPath();
- return *this;
- }
+ // Helper that first consults the ContextMap and then does computeFor().
+ static CallLinkStatus computeFor(
+ CodeBlock*, CodeOrigin, const CallLinkInfoMap&, const ContextMap&);
- bool isSet() const { return m_callTarget || m_executable || m_couldTakeSlowPath; }
+ void setProvenConstantCallee(CallVariant);
+
+ bool isSet() const { return !m_variants.isEmpty() || m_couldTakeSlowPath; }
bool operator!() const { return !isSet(); }
bool couldTakeSlowPath() const { return m_couldTakeSlowPath; }
- bool isClosureCall() const { return m_executable && !m_callTarget; }
-
- JSValue callTarget() const { return m_callTarget; }
- JSFunction* function() const;
- InternalFunction* internalFunction() const;
- Intrinsic intrinsicFor(CodeSpecializationKind) const;
- ExecutableBase* executable() const { return m_executable; }
- Structure* structure() const { return m_structure; }
+
+ void setCouldTakeSlowPath(bool value) { m_couldTakeSlowPath = value; }
+
+ CallVariantList variants() const { return m_variants; }
+ unsigned size() const { return m_variants.size(); }
+ CallVariant at(unsigned i) const { return m_variants[i]; }
+ CallVariant operator[](unsigned i) const { return at(i); }
bool isProved() const { return m_isProved; }
- bool canOptimize() const { return (m_callTarget || m_executable) && !m_couldTakeSlowPath; }
+ bool isBasedOnStub() const { return m_isBasedOnStub; }
+ bool canOptimize() const { return !m_variants.isEmpty(); }
+
+ bool isClosureCall() const; // Returns true if any callee is a closure call.
+
+ unsigned maxNumArguments() const { return m_maxNumArguments; }
void dump(PrintStream&) const;
private:
- static CallLinkStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex);
+ void makeClosureCall();
+
+ static CallLinkStatus computeFromLLInt(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#if ENABLE(JIT)
+ static CallLinkStatus computeFromCallLinkInfo(
+ const ConcurrentJSLocker&, CallLinkInfo&);
+#endif
- JSValue m_callTarget;
- ExecutableBase* m_executable;
- Structure* m_structure;
- bool m_couldTakeSlowPath;
- bool m_isProved;
+ CallVariantList m_variants;
+ bool m_couldTakeSlowPath { false };
+ bool m_isProved { false };
+ bool m_isBasedOnStub { false };
+ unsigned m_maxNumArguments { 0 };
};
} // namespace JSC
-
-#endif // CallLinkStatus_h
-
diff --git a/Source/JavaScriptCore/bytecode/CallMode.cpp b/Source/JavaScriptCore/bytecode/CallMode.cpp
new file mode 100644
index 000000000..5757b1850
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallMode.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallMode.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::CallMode callMode)
+{
+ switch (callMode) {
+ case JSC::CallMode::Tail:
+ out.print("TailCall");
+ return;
+ case JSC::CallMode::Regular:
+ out.print("Call");
+ return;
+ case JSC::CallMode::Construct:
+ out.print("Construct");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/CallMode.h b/Source/JavaScriptCore/bytecode/CallMode.h
new file mode 100644
index 000000000..02d90e1a0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallMode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeSpecializationKind.h"
+
+namespace JSC {
+
+enum class CallMode { Regular, Tail, Construct };
+
+enum FrameAction { KeepTheFrame = 0, ReuseTheFrame };
+
+inline CodeSpecializationKind specializationKindFor(CallMode callMode)
+{
+ if (callMode == CallMode::Construct)
+ return CodeForConstruct;
+
+ return CodeForCall;
+}
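+
+// Note: tail calls specialize like regular calls -
+// specializationKindFor(CallMode::Tail) == CodeForCall; only
+// CallMode::Construct yields CodeForConstruct.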
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::CallMode);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
index 3a7448efd..2d1b00cbe 100644
--- a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
+++ b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CallReturnOffsetToBytecodeOffset_h
-#define CallReturnOffsetToBytecodeOffset_h
-
-#include <wtf/Platform.h>
+#pragma once
namespace JSC {
@@ -55,6 +52,3 @@ inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeOffset* pc)
#endif
} // namespace JSC
-
-#endif // CallReturnOffsetToBytecodeOffset_h
-
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.cpp b/Source/JavaScriptCore/bytecode/CallVariant.cpp
new file mode 100644
index 000000000..9745dde2b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallVariant.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallVariant.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+void CallVariant::dump(PrintStream& out) const
+{
+ if (!*this) {
+ out.print("null");
+ return;
+ }
+
+ if (InternalFunction* internalFunction = this->internalFunction()) {
+ out.print("InternalFunction: ", JSValue(internalFunction));
+ return;
+ }
+
+ if (JSFunction* function = this->function()) {
+ out.print("(Function: ", JSValue(function), "; Executable: ", *executable(), ")");
+ return;
+ }
+
+ out.print("Executable: ", *executable());
+}
+
+CallVariantList variantListWithVariant(const CallVariantList& list, CallVariant variantToAdd)
+{
+ ASSERT(variantToAdd);
+ CallVariantList result;
+ for (CallVariant variant : list) {
+ ASSERT(variant);
+ if (!!variantToAdd) {
+ if (variant == variantToAdd)
+ variantToAdd = CallVariant();
+ else if (variant.despecifiedClosure() == variantToAdd.despecifiedClosure()) {
+ variant = variant.despecifiedClosure();
+ variantToAdd = CallVariant();
+ }
+ }
+ result.append(variant);
+ }
+ if (!!variantToAdd)
+ result.append(variantToAdd);
+
+ if (!ASSERT_DISABLED) {
+ for (unsigned i = 0; i < result.size(); ++i) {
+ for (unsigned j = i + 1; j < result.size(); ++j) {
+ if (result[i] != result[j])
+ continue;
+
+ dataLog("variantListWithVariant(", listDump(list), ", ", variantToAdd, ") failed: got duplicates in result: ", listDump(result), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+ }
+
+ return result;
+}
+
+CallVariantList despecifiedVariantList(const CallVariantList& list)
+{
+ CallVariantList result;
+ for (CallVariant variant : list)
+ result = variantListWithVariant(result, variant.despecifiedClosure());
+ return result;
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.h b/Source/JavaScriptCore/bytecode/CallVariant.h
new file mode 100644
index 000000000..94e72bb32
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CallVariant.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "FunctionExecutable.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "NativeExecutable.h"
+
+namespace JSC {
+
+// The CallVariant class is meant to encapsulate a callee in a way that is useful for call linking
+// and inlining. Because JavaScript has closures, and because JSC implements the notion of internal
+// non-function objects that nevertheless provide call traps, the call machinery wants to see a
+// callee in one of the following four forms:
+//
+// JSFunction callee: This means that we expect the callsite to always call a particular function
+// instance, that is associated with a particular lexical environment. This pinpoints not
+// just the code that will be called (i.e. the executable) but also the scope within which
+// the code runs.
+//
+// Executable callee: This corresponds to a call to a closure. In this case, we know that the
+// callsite will call a JSFunction, but we do not know which particular JSFunction. We do know
+// what code will be called - i.e. we know the executable.
+//
+// InternalFunction callee: JSC supports a special kind of native function that supports bizarre
+// semantics. These are always singletons. If we know that the callee is an InternalFunction
+// then we know both the code that will be called and the scope; in fact the "scope" is really
+// just the InternalFunction itself.
+//
+// Something else: It's possible to call all manner of rubbish in JavaScript. This implicitly supports
+// bizarre object callees, but it can't really tell you anything interesting about them other
+// than the fact that they don't fall into any of the above categories.
+//
+// This class serves as a kind of union over these four things. It does so by just holding a
+// JSCell*. We determine which of the modes it's in by doing type checks on the cell. Note that we
+// cannot use WriteBarrier<> here because this gets used inside the compiler.
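+//
+// For example (an illustration, not normative): two closures f and g created
+// from the same function declaration yield distinct "JSFunction callee"
+// variants, CallVariant(f) and CallVariant(g), but both despecify via
+// despecifiedClosure() to one "Executable callee" variant holding their shared
+// FunctionExecutable.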
+
+class CallVariant {
+public:
+ explicit CallVariant(JSCell* callee = nullptr)
+ : m_callee(callee)
+ {
+ }
+
+ CallVariant(WTF::HashTableDeletedValueType)
+ : m_callee(deletedToken())
+ {
+ }
+
+ bool operator!() const { return !m_callee; }
+
+ // If this variant refers to a function, change it to refer to its executable.
+ ALWAYS_INLINE CallVariant despecifiedClosure() const
+ {
+ if (m_callee->type() == JSFunctionType)
+ return CallVariant(jsCast<JSFunction*>(m_callee)->executable());
+ return *this;
+ }
+
+ JSCell* rawCalleeCell() const { return m_callee; }
+
+ InternalFunction* internalFunction() const
+ {
+ return jsDynamicCast<InternalFunction*>(*m_callee->vm(), m_callee);
+ }
+
+ JSFunction* function() const
+ {
+ return jsDynamicCast<JSFunction*>(*m_callee->vm(), m_callee);
+ }
+
+ bool isClosureCall() const { return !!jsDynamicCast<ExecutableBase*>(*m_callee->vm(), m_callee); }
+
+ ExecutableBase* executable() const
+ {
+ if (JSFunction* function = this->function())
+ return function->executable();
+ return jsDynamicCast<ExecutableBase*>(*m_callee->vm(), m_callee);
+ }
+
+ JSCell* nonExecutableCallee() const
+ {
+ RELEASE_ASSERT(!isClosureCall());
+ return m_callee;
+ }
+
+ Intrinsic intrinsicFor(CodeSpecializationKind kind) const
+ {
+ if (ExecutableBase* executable = this->executable())
+ return executable->intrinsicFor(kind);
+ return NoIntrinsic;
+ }
+
+ FunctionExecutable* functionExecutable() const
+ {
+ if (ExecutableBase* executable = this->executable())
+ return jsDynamicCast<FunctionExecutable*>(*m_callee->vm(), executable);
+ return nullptr;
+ }
+
+ NativeExecutable* nativeExecutable() const
+ {
+ if (ExecutableBase* executable = this->executable())
+ return jsDynamicCast<NativeExecutable*>(*m_callee->vm(), executable);
+ return nullptr;
+ }
+
+ const DOMJIT::Signature* signatureFor(CodeSpecializationKind kind) const
+ {
+ if (NativeExecutable* nativeExecutable = this->nativeExecutable())
+ return nativeExecutable->signatureFor(kind);
+ return nullptr;
+ }
+
+ void dump(PrintStream& out) const;
+
+ bool isHashTableDeletedValue() const
+ {
+ return m_callee == deletedToken();
+ }
+
+ bool operator==(const CallVariant& other) const
+ {
+ return m_callee == other.m_callee;
+ }
+
+ bool operator!=(const CallVariant& other) const
+ {
+ return !(*this == other);
+ }
+
+ bool operator<(const CallVariant& other) const
+ {
+ return m_callee < other.m_callee;
+ }
+
+ bool operator>(const CallVariant& other) const
+ {
+ return other < *this;
+ }
+
+ bool operator<=(const CallVariant& other) const
+ {
+ return !(*this > other);
+ }
+
+ bool operator>=(const CallVariant& other) const
+ {
+ return other <= *this;
+ }
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<JSCell*>::hash(m_callee);
+ }
+
+private:
+ static JSCell* deletedToken() { return bitwise_cast<JSCell*>(static_cast<uintptr_t>(1)); }
+
+ JSCell* m_callee;
+};
+
+struct CallVariantHash {
+ static unsigned hash(const CallVariant& key) { return key.hash(); }
+ static bool equal(const CallVariant& a, const CallVariant& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+typedef Vector<CallVariant, 1> CallVariantList;
+
+// Returns a new variant list by attempting to either append the given variant or merge it with one
+// of the variants we already have by despecifying closures.
+CallVariantList variantListWithVariant(const CallVariantList&, CallVariant);
+
+// Returns a new list where every element is despecified, and the list is deduplicated.
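+// For instance (hypothetical values): a list holding closures f and g of the
+// same executable E plus an InternalFunction I comes back as { E, I } - f and
+// g collapse into their shared executable, while I, which has no executable to
+// despecify to, survives unchanged.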
+CallVariantList despecifiedVariantList(const CallVariantList&);
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::CallVariant> {
+ typedef JSC::CallVariantHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::CallVariant> : SimpleClassHashTraits<JSC::CallVariant> { };
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index eec5b7076..44d7f83da 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2010, 2012-2017 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,35 +30,60 @@
#include "config.h"
#include "CodeBlock.h"
+#include "ArithProfile.h"
+#include "BasicBlockLocation.h"
#include "BytecodeGenerator.h"
+#include "BytecodeLivenessAnalysis.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
+#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
-#include "DFGNode.h"
+#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
+#include "EvalCodeBlock.h"
+#include "FunctionCodeBlock.h"
+#include "FunctionExecutableDump.h"
+#include "GetPutInfo.h"
+#include "InlineCallFrame.h"
#include "Interpreter.h"
#include "JIT.h"
-#include "JITStubs.h"
-#include "JSActivation.h"
+#include "JITMathIC.h"
+#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
-#include "JSNameScope.h"
+#include "JSLexicalEnvironment.h"
+#include "JSModuleEnvironment.h"
+#include "LLIntData.h"
#include "LLIntEntrypoint.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
-#include "PolymorphicPutByIdList.h"
+#include "ModuleProgramCodeBlock.h"
+#include "PCToCodeOriginMap.h"
+#include "PolymorphicAccess.h"
+#include "ProfilerDatabase.h"
+#include "ProgramCodeBlock.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
-#include "RepatchBuffer.h"
#include "SlotVisitorInlines.h"
+#include "StackVisitor.h"
+#include "StructureStubInfo.h"
+#include "TypeLocationCache.h"
+#include "TypeProfiler.h"
#include "UnlinkedInstructionStream.h"
+#include "VMInlines.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
+#include <wtf/SimpleStats.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
+#include <wtf/text/UniquedStringImpl.h>
+
+#if ENABLE(JIT)
+#include "RegisterAtOffsetList.h"
+#endif
#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
@@ -70,6 +95,11 @@
namespace JSC {
+const ClassInfo CodeBlock::s_info = {
+ "CodeBlock", 0, 0,
+ CREATE_METHOD_TABLE(CodeBlock)
+};
+
CString CodeBlock::inferredName() const
{
switch (codeType()) {
@@ -79,6 +109,8 @@ CString CodeBlock::inferredName() const
return "<eval>";
case FunctionCode:
return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
+ case ModuleCode:
+ return "<module>";
default:
CRASH();
return CString("", 0);
@@ -99,7 +131,7 @@ CodeBlockHash CodeBlock::hash() const
{
if (!m_hash) {
RELEASE_ASSERT(isSafeToComputeHash());
- m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
+ m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
}
return m_hash;
}
@@ -107,7 +139,7 @@ CodeBlockHash CodeBlock::hash() const
CString CodeBlock::sourceCodeForTools() const
{
if (codeType() != FunctionCode)
- return ownerExecutable()->source().toUTF8();
+ return ownerScriptExecutable()->source().toUTF8();
SourceProvider* provider = source();
FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
@@ -119,7 +151,7 @@ CString CodeBlock::sourceCodeForTools() const
unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
return toCString(
"function ",
- provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
+ provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
}
CString CodeBlock::sourceCodeOnOneLine() const
@@ -127,22 +159,42 @@ CString CodeBlock::sourceCodeOnOneLine() const
return reduceWhitespace(sourceCodeForTools());
}
-void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+CString CodeBlock::hashAsStringIfPossible() const
{
if (hasHash() || isSafeToComputeHash())
- out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
- else
- out.print(inferredName(), "#<no-hash>:[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
+ return toCString(hash());
+ return "<no-hash>";
+}
+
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+{
+ out.print(inferredName(), "#", hashAsStringIfPossible());
+ out.print(":[", RawPointer(this), "->");
+ if (!!m_alternative)
+ out.print(RawPointer(alternative()), "->");
+ out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
if (codeType() == FunctionCode)
out.print(specializationKind());
out.print(", ", instructionCount());
if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
- out.print(" (SABI)");
- if (ownerExecutable()->neverInline())
+ out.print(" (ShouldAlwaysBeInlined)");
+ if (ownerScriptExecutable()->neverInline())
out.print(" (NeverInline)");
- if (ownerExecutable()->isStrictMode())
+ if (ownerScriptExecutable()->neverOptimize())
+ out.print(" (NeverOptimize)");
+ else if (ownerScriptExecutable()->neverFTLOptimize())
+ out.print(" (NeverFTLOptimize)");
+ if (ownerScriptExecutable()->didTryToEnterInLoop())
+ out.print(" (DidTryToEnterInLoop)");
+ if (ownerScriptExecutable()->isStrictMode())
out.print(" (StrictMode)");
+ if (m_didFailJITCompilation)
+ out.print(" (JITFail)");
+ if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+ out.print(" (FTLFail)");
+ if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+ out.print(" (HadFTLReplacement)");
out.print("]");
}
@@ -151,11 +203,6 @@ void CodeBlock::dump(PrintStream& out) const
dumpAssumingJITType(out, jitType());
}
-static CString constantName(int k, JSValue value)
-{
- return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
-}
-
static CString idName(int id0, const Identifier& ident)
{
return toCString(ident.impl(), "(@id", id0, ")");
@@ -163,19 +210,16 @@ static CString idName(int id0, const Identifier& ident)
CString CodeBlock::registerName(int r) const
{
- if (r == missingThisObjectMarker())
- return "<null>";
-
if (isConstantRegisterIndex(r))
- return constantName(r, getConstant(r));
+ return constantName(r);
- if (operandIsArgument(r)) {
- if (!VirtualRegister(r).toArgument())
- return "this";
- return toCString("arg", VirtualRegister(r).toArgument());
- }
+ return toCString(VirtualRegister(r));
+}
- return toCString("loc", VirtualRegister(r).toLocal());
+CString CodeBlock::constantName(int index) const
+{
+ JSValue value = getConstant(index);
+ return toCString(value, "(", VirtualRegister(index), ")");
}
static CString regexpToSourceString(RegExp* regExp)
@@ -188,6 +232,10 @@ static CString regexpToSourceString(RegExp* regExp)
postfix[index++] = 'i';
if (regExp->multiline())
postfix[index++] = 'm';
+ if (regExp->sticky())
+ postfix[index++] = 'y';
+ if (regExp->unicode())
+ postfix[index++] = 'u';
return toCString("/", regExp->pattern().impl(), postfix);
}
@@ -197,15 +245,17 @@ static CString regexpName(int re, RegExp* regexp)
return toCString(regexpToSourceString(regexp), "(@re", re, ")");
}
-NEVER_INLINE static const char* debugHookName(int debugHookID)
+NEVER_INLINE static const char* debugHookName(int debugHookType)
{
- switch (static_cast<DebugHookID>(debugHookID)) {
+ switch (static_cast<DebugHookType>(debugHookType)) {
case DidEnterCallFrame:
return "didEnterCallFrame";
case WillLeaveCallFrame:
return "willLeaveCallFrame";
case WillExecuteStatement:
return "willExecuteStatement";
+ case WillExecuteExpression:
+ return "willExecuteExpression";
case WillExecuteProgram:
return "willExecuteProgram";
case DidExecuteProgram:
@@ -251,48 +301,20 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
case op_get_by_id:
op = "get_by_id";
break;
- case op_get_by_id_out_of_line:
- op = "get_by_id_out_of_line";
- break;
- case op_get_by_id_self:
- op = "get_by_id_self";
+ case op_get_by_id_proto_load:
+ op = "get_by_id_proto_load";
break;
- case op_get_by_id_proto:
- op = "get_by_id_proto";
- break;
- case op_get_by_id_chain:
- op = "get_by_id_chain";
- break;
- case op_get_by_id_getter_self:
- op = "get_by_id_getter_self";
- break;
- case op_get_by_id_getter_proto:
- op = "get_by_id_getter_proto";
- break;
- case op_get_by_id_getter_chain:
- op = "get_by_id_getter_chain";
- break;
- case op_get_by_id_custom_self:
- op = "get_by_id_custom_self";
- break;
- case op_get_by_id_custom_proto:
- op = "get_by_id_custom_proto";
- break;
- case op_get_by_id_custom_chain:
- op = "get_by_id_custom_chain";
- break;
- case op_get_by_id_generic:
- op = "get_by_id_generic";
+ case op_get_by_id_unset:
+ op = "get_by_id_unset";
break;
case op_get_array_length:
op = "array_length";
break;
- case op_get_string_length:
- op = "string_length";
- break;
default:
RELEASE_ASSERT_NOT_REACHED();
+#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
op = 0;
+#endif
}
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
@@ -302,22 +324,19 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
it += 4; // Increment up to the value profiler.
}
-#if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
-static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
+static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
{
if (!structure)
return;
out.printf("%s = %p", name, structure);
- PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
+ PropertyOffset offset = structure->getConcurrently(ident.impl());
if (offset != invalidOffset)
out.printf(" (offset = %d)", offset);
}
-#endif
-#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
-static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
+static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
{
out.printf("chain = %p: [", chain);
bool first = true;
@@ -328,11 +347,10 @@ static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain,
first = false;
else
out.printf(", ");
- dumpStructure(out, "struct", exec, currentStructure->get(), ident);
+ dumpStructure(out, "struct", currentStructure->get(), ident);
}
out.printf("]");
}
-#endif
void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
@@ -342,125 +360,131 @@ void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int l
UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
-#if ENABLE(LLINT)
if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
out.printf(" llint(array_length)");
- else if (Structure* structure = instruction[4].u.structure.get()) {
+ else if (StructureID structureID = instruction[4].u.structureID) {
+ Structure* structure = m_vm->heap.structureIDTable().get(structureID);
out.printf(" llint(");
- dumpStructure(out, "struct", exec, structure, ident);
+ dumpStructure(out, "struct", structure, ident);
out.printf(")");
+ if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_by_id_proto_load)
+ out.printf(" proto(%p)", instruction[6].u.pointer);
}
-#endif
#if ENABLE(JIT)
if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
StructureStubInfo& stubInfo = *stubPtr;
- if (stubInfo.seen) {
- out.printf(" jit(");
-
- Structure* baseStructure = 0;
- Structure* prototypeStructure = 0;
- StructureChain* chain = 0;
- PolymorphicAccessStructureList* structureList = 0;
- int listSize = 0;
-
- switch (stubInfo.accessType) {
- case access_get_by_id_self:
- out.printf("self");
- baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
- break;
- case access_get_by_id_proto:
- out.printf("proto");
- baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
- prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
- break;
- case access_get_by_id_chain:
- out.printf("chain");
- baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
- chain = stubInfo.u.getByIdChain.chain.get();
- break;
- case access_get_by_id_self_list:
- out.printf("self_list");
- structureList = stubInfo.u.getByIdSelfList.structureList;
- listSize = stubInfo.u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- out.printf("proto_list");
- structureList = stubInfo.u.getByIdProtoList.structureList;
- listSize = stubInfo.u.getByIdProtoList.listSize;
- break;
- case access_unset:
- out.printf("unset");
- break;
- case access_get_by_id_generic:
- out.printf("generic");
- break;
- case access_get_array_length:
- out.printf("array_length");
- break;
- case access_get_string_length:
- out.printf("string_length");
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- if (baseStructure) {
- out.printf(", ");
- dumpStructure(out, "struct", exec, baseStructure, ident);
- }
+ if (stubInfo.resetByGC)
+ out.print(" (Reset By GC)");
+
+ out.printf(" jit(");
- if (prototypeStructure) {
- out.printf(", ");
- dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
- }
+ Structure* baseStructure = nullptr;
+ PolymorphicAccess* stub = nullptr;
- if (chain) {
- out.printf(", ");
- dumpChain(out, exec, chain, ident);
- }
+ switch (stubInfo.cacheType) {
+ case CacheType::GetByIdSelf:
+ out.printf("self");
+ baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
+ break;
+ case CacheType::Stub:
+ out.printf("stub");
+ stub = stubInfo.u.stub;
+ break;
+ case CacheType::Unset:
+ out.printf("unset");
+ break;
+ case CacheType::ArrayLength:
+ out.printf("ArrayLength");
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
- if (structureList) {
- out.printf(", list = %p: [", structureList);
- for (int i = 0; i < listSize; ++i) {
- if (i)
- out.printf(", ");
- out.printf("(");
- dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
- if (structureList->list[i].isChain) {
- if (structureList->list[i].u.chain.get()) {
- out.printf(", ");
- dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
- }
- } else {
- if (structureList->list[i].u.proto.get()) {
- out.printf(", ");
- dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
- }
- }
- out.printf(")");
- }
- out.printf("]");
+ if (baseStructure) {
+ out.printf(", ");
+ dumpStructure(out, "struct", baseStructure, ident);
+ }
+
+ if (stub)
+ out.print(", ", *stub);
+
+ out.printf(")");
+ }
+#else
+ UNUSED_PARAM(map);
+#endif
+}
+
+void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
+{
+ Instruction* instruction = instructions().begin() + location;
+
+ const Identifier& ident = identifier(instruction[2].u.operand);
+
+ UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
+
+ out.print(", ", instruction[8].u.putByIdFlags);
+
+ if (StructureID structureID = instruction[4].u.structureID) {
+ Structure* structure = m_vm->heap.structureIDTable().get(structureID);
+ out.print(" llint(");
+ if (StructureID newStructureID = instruction[6].u.structureID) {
+ Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
+ dumpStructure(out, "prev", structure, ident);
+ out.print(", ");
+ dumpStructure(out, "next", newStructure, ident);
+ if (StructureChain* chain = instruction[7].u.structureChain.get()) {
+ out.print(", ");
+ dumpChain(out, chain, ident);
}
- out.printf(")");
+ } else
+ dumpStructure(out, "struct", structure, ident);
+ out.print(")");
+ }
+
+#if ENABLE(JIT)
+ if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
+ StructureStubInfo& stubInfo = *stubPtr;
+ if (stubInfo.resetByGC)
+ out.print(" (Reset By GC)");
+
+ out.printf(" jit(");
+
+ switch (stubInfo.cacheType) {
+ case CacheType::PutByIdReplace:
+ out.print("replace, ");
+ dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
+ break;
+ case CacheType::Stub: {
+ out.print("stub, ", *stubInfo.u.stub);
+ break;
+ }
+ case CacheType::Unset:
+ out.printf("unset");
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
+ out.printf(")");
}
#else
UNUSED_PARAM(map);
#endif
}
-void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling)
+void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
int dst = (++it)->u.operand;
int func = (++it)->u.operand;
int argCount = (++it)->u.operand;
int registerOffset = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, op);
- out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
+ out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset);
+ out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")");
if (cacheDumpMode == DumpCaches) {
-#if ENABLE(LLINT)
LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
if (callLinkInfo->lastSeenCallee) {
out.printf(
@@ -468,17 +492,21 @@ void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, con
callLinkInfo->lastSeenCallee.get(),
callLinkInfo->lastSeenCallee->executable());
}
-#endif
#if ENABLE(JIT)
- if (numberOfCallLinkInfos()) {
- JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
+ if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
+ JSFunction* target = info->lastSeenCallee();
if (target)
out.printf(" jit(%p, exec %p)", target, target->executable());
}
+
+ if (jitType() != JITCode::FTLJIT)
+ out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
+#else
+ UNUSED_PARAM(map);
#endif
- out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
}
++it;
+ ++it;
dumpArrayProfiling(out, it, hasPrintedProfiling);
dumpValueProfiling(out, it, hasPrintedProfiling);
}
@@ -493,6 +521,31 @@ void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location,
it += 5;
}
+void CodeBlock::dumpSource()
+{
+ dumpSource(WTF::dataFile());
+}
+
+void CodeBlock::dumpSource(PrintStream& out)
+{
+ ScriptExecutable* executable = ownerScriptExecutable();
+ if (executable->isFunctionExecutable()) {
+ FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
+ StringView source = functionExecutable->source().provider()->getRange(
+ functionExecutable->parametersStartOffset(),
+ functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
+
+ out.print("function ", inferredName(), source);
+ return;
+ }
+ out.print(executable->source().view());
+}
+
+void CodeBlock::dumpBytecode()
+{
+ dumpBytecode(WTF::dataFile());
+}
+
void CodeBlock::dumpBytecode(PrintStream& out)
{
// We only use the ExecState* for things that don't actually lead to JS execution,
@@ -509,34 +562,19 @@ void CodeBlock::dumpBytecode(PrintStream& out)
": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
static_cast<unsigned long>(instructions().size()),
static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
- m_numParameters, m_numCalleeRegisters, m_numVars);
- if (symbolTable() && symbolTable()->captureCount()) {
- out.printf(
- "; %d captured var(s) (from r%d to r%d, inclusive)",
- symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
- }
- if (usesArguments()) {
- out.printf(
- "; uses arguments, in r%d, r%d",
- argumentsRegister().offset(),
- unmodifiedArgumentsRegister(argumentsRegister()).offset());
- }
- if (needsFullScopeChain() && codeType() == FunctionCode)
- out.printf("; activation in r%d", activationRegister().offset());
+ m_numParameters, m_numCalleeLocals, m_numVars);
+ out.print("; scope at ", scopeRegister());
out.printf("\n");
StubInfoMap stubInfos;
-#if ENABLE(JIT)
- {
- ConcurrentJITLocker locker(m_lock);
- getStubInfoMap(locker, stubInfos);
- }
-#endif
+ CallLinkInfoMap callLinkInfos;
+ getStubInfoMap(stubInfos);
+ getCallLinkInfoMap(callLinkInfos);
const Instruction* begin = instructions().begin();
const Instruction* end = instructions().end();
for (const Instruction* it = begin; it != end; ++it)
- dumpBytecode(out, exec, begin, it, stubInfos);
+ dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
if (numberOfIdentifiers()) {
out.printf("\nIdentifiers:\n");
@@ -551,7 +589,19 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf("\nConstants:\n");
size_t i = 0;
do {
- out.printf(" k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
+ const char* sourceCodeRepresentationDescription = nullptr;
+ switch (m_constantsSourceCodeRepresentation[i]) {
+ case SourceCodeRepresentation::Double:
+ sourceCodeRepresentationDescription = ": in source as double";
+ break;
+ case SourceCodeRepresentation::Integer:
+ sourceCodeRepresentationDescription = ": in source as integer";
+ break;
+ case SourceCodeRepresentation::Other:
+ sourceCodeRepresentationDescription = "";
+ break;
+ }
+ out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
++i;
} while (i < m_constantRegisters.size());
}
@@ -565,14 +615,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
} while (i < count);
}
- if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
- out.printf("\nException Handlers:\n");
- unsigned i = 0;
- do {
- out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
- ++i;
- } while (i < m_rareData->m_exceptionHandlers.size());
- }
+ dumpExceptionHandlers(out);
if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
out.printf("Switch Jump Tables:\n");
@@ -598,7 +641,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf(" %1d = {\n", i);
StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
- out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
+ out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
out.printf(" }\n");
++i;
} while (i < m_rareData->m_stringSwitchJumpTables.size());
@@ -607,6 +650,20 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf("\n");
}
+void CodeBlock::dumpExceptionHandlers(PrintStream& out)
+{
+ if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
+ out.printf("\nException Handlers:\n");
+ unsigned i = 0;
+ do {
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+ out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
+ i + 1, handler.start, handler.end, handler.target, handler.typeName());
+ ++i;
+ } while (i < m_rareData->m_exceptionHandlers.size());
+ }
+}
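Relative to the removed inline version, the handler dump drops the scopeDepth field and appends HandlerInfo::typeName(). An illustrative line (the numbers and the type string are invented):

         1: { start: [  12] end: [  85] target: [  90] } catch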
+
void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
{
if (hasPrintedProfiling) {
@@ -620,7 +677,7 @@ void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
++it;
CString description = it->u.profile->briefDescription(locker);
@@ -632,7 +689,7 @@ void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, boo
void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
++it;
if (!it->u.arrayProfile)
@@ -653,52 +710,113 @@ void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCase
out.print(name, profile->m_counter);
}
-void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, const StubInfoMap& map)
+void CodeBlock::dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling)
+{
+ if (!profile)
+ return;
+
+ beginDumpProfiling(out, hasPrintedProfiling);
+ out.print("results: ", *profile);
+}
+
+void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
+{
+ out.printf("[%4d] %-17s ", location, op);
+}
+
+void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
+{
+ printLocationAndOp(out, exec, location, it, op);
+ out.printf("%s", registerName(operand).data());
+}
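These two helpers pin down the dump's column layout: the bytecode offset right-justified in a bracketed four-column field, then the opcode name left-justified to 17 columns so operands start in a fixed column. Illustrative lines (register naming invented):

    [   0] enter
    [   3] get_scope         loc4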
+
+void CodeBlock::dumpBytecode(
+ PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
+ const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
int location = it - begin;
bool hasPrintedProfiling = false;
- switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
+ OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
+ switch (opcode) {
case op_enter: {
printLocationAndOp(out, exec, location, it, "enter");
break;
}
- case op_touch_entry: {
- printLocationAndOp(out, exec, location, it, "touch_entry");
+ case op_get_scope: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
break;
}
- case op_create_activation: {
+ case op_create_direct_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "create_activation", r0);
+ printLocationAndOp(out, exec, location, it, "create_direct_arguments");
+ out.printf("%s", registerName(r0).data());
break;
}
- case op_create_arguments: {
+ case op_create_scoped_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "create_arguments", r0);
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_init_lazy_reg: {
+ case op_create_cloned_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
+ printLocationAndOp(out, exec, location, it, "create_cloned_arguments");
+ out.printf("%s", registerName(r0).data());
break;
}
- case op_get_callee: {
+ case op_argument_count: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
- ++it;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "argument_count", r0);
+ break;
+ }
+ case op_get_argument: {
+ int r0 = (++it)->u.operand;
+ int index = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "argument", r0);
+ out.printf(", %d", index);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_create_rest: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ unsigned argumentOffset = (++it)->u.unsignedValue;
+ printLocationAndOp(out, exec, location, it, "create_rest");
+ out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data());
+ out.printf("ArgumentsOffset: %u", argumentOffset);
+ break;
+ }
+ case op_get_rest_length: {
+ int r0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_rest_length");
+ out.printf("%s, ", registerName(r0).data());
+ unsigned argumentOffset = (++it)->u.unsignedValue;
+ out.printf("ArgumentsOffset: %u", argumentOffset);
break;
}
case op_create_this: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
unsigned inferredInlineCapacity = (++it)->u.operand;
+ unsigned cachedFunction = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "create_this");
- out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
+ out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
break;
}
case op_to_this: {
int r0 = (++it)->u.operand;
printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
- ++it; // Skip value profile.
+ Structure* structure = (++it)->u.structure.get();
+ if (structure)
+ out.print(", cache(struct = ", RawPointer(structure), ")");
+ out.print(", ", (++it)->u.toThisStatus);
+ break;
+ }
+ case op_check_tdz: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
break;
}
case op_new_object: {
@@ -718,6 +836,30 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
++it; // Skip array allocation profile.
break;
}
+ case op_new_array_with_spread: {
+ int dst = (++it)->u.operand;
+ int argv = (++it)->u.operand;
+ int argc = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_array_with_spread");
+ out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc);
+ unsigned bitVectorIndex = (++it)->u.unsignedValue;
+ const BitVector& bitVector = m_unlinkedCode->bitVector(bitVectorIndex);
+ out.print("BitVector:", bitVectorIndex, ":");
+ for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) {
+ if (bitVector.get(i))
+ out.print("1");
+ else
+ out.print("0");
+ }
+ break;
+ }
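// Note: bit i of the BitVector dumped above is 1 when argument i of the
// array literal is a spread expression, so a source-level [...xs, y, ...zs]
// prints as "BitVector:<index>:101" for argc == 3 (index invented).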
+ case op_spread: {
+ int dst = (++it)->u.operand;
+ int arg = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "spread");
+ out.printf("%s, %s", registerName(dst).data(), registerName(arg).data());
+ break;
+ }
case op_new_array_with_size: {
int dst = (++it)->u.operand;
int length = (++it)->u.operand;
@@ -753,12 +895,20 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_captured_mov: {
+ case op_profile_type: {
int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "captured_mov");
- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
++it;
+ ++it;
+ ++it;
+ ++it;
+ printLocationAndOp(out, exec, location, it, "op_profile_type");
+ out.printf("%s", registerName(r0).data());
+ break;
+ }
+ case op_profile_control_flow: {
+ BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
+ printLocationAndOp(out, exec, location, it, "profile_control_flow");
+ out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
break;
}
case op_not: {
@@ -817,10 +967,16 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_to_number: {
printUnaryOp(out, exec, location, it, "to_number");
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_to_string: {
+ printUnaryOp(out, exec, location, it, "to_string");
break;
}
case op_negate: {
printUnaryOp(out, exec, location, it, "negate");
+ ++it; // op_negate has an extra operand for the ArithProfile.
break;
}
case op_add: {
@@ -842,6 +998,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printBinaryOp(out, exec, location, it, "mod");
break;
}
+ case op_pow: {
+ printBinaryOp(out, exec, location, it, "pow");
+ break;
+ }
case op_sub: {
printBinaryOp(out, exec, location, it, "sub");
++it;
@@ -874,13 +1034,12 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
++it;
break;
}
- case op_check_has_instance: {
+ case op_overrides_has_instance: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- int offset = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "check_has_instance");
- out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
+ printLocationAndOp(out, exec, location, it, "overrides_has_instance");
+ out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
break;
}
case op_instanceof: {
@@ -891,6 +1050,15 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
break;
}
+ case op_instanceof_custom: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "instanceof_custom");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+ break;
+ }
case op_unsigned: {
printUnaryOp(out, exec, location, it, "unsigned");
break;
@@ -899,6 +1067,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printUnaryOp(out, exec, location, it, "typeof");
break;
}
+ case op_is_empty: {
+ printUnaryOp(out, exec, location, it, "is_empty");
+ break;
+ }
case op_is_undefined: {
printUnaryOp(out, exec, location, it, "is_undefined");
break;
@@ -911,106 +1083,155 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printUnaryOp(out, exec, location, it, "is_number");
break;
}
- case op_is_string: {
- printUnaryOp(out, exec, location, it, "is_string");
+ case op_is_cell_with_type: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int type = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "is_cell_with_type");
+ out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type);
break;
}
case op_is_object: {
printUnaryOp(out, exec, location, it, "is_object");
break;
}
+ case op_is_object_or_null: {
+ printUnaryOp(out, exec, location, it, "is_object_or_null");
+ break;
+ }
case op_is_function: {
printUnaryOp(out, exec, location, it, "is_function");
break;
}
case op_in: {
printBinaryOp(out, exec, location, it, "in");
+ dumpArrayProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_init_global_const_nop: {
- printLocationAndOp(out, exec, location, it, "init_global_const_nop");
- it++;
- it++;
- it++;
- it++;
- break;
- }
- case op_init_global_const: {
- WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
+ case op_try_get_by_id: {
int r0 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "init_global_const");
- out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
- it++;
- it++;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "try_get_by_id");
+ out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
- case op_get_array_length:
- case op_get_string_length: {
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset:
+ case op_get_array_length: {
printGetByIdOp(out, exec, location, it);
- printGetByIdCacheStatus(out, exec, location, map);
+ printGetByIdCacheStatus(out, exec, location, stubInfos);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_get_by_id_with_this: {
+ printLocationAndOp(out, exec, location, it, "get_by_id_with_this");
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data());
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_get_arguments_length: {
- printUnaryOp(out, exec, location, it, "get_arguments_length");
- it++;
+ case op_get_by_val_with_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_by_val_with_this");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_put_by_id: {
printPutByIdOp(out, exec, location, it, "put_by_id");
+ printPutByIdCacheStatus(out, location, stubInfos);
break;
}
- case op_put_by_id_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
+ case op_put_by_id_with_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_by_id_with_this");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data());
break;
}
- case op_put_by_id_replace: {
- printPutByIdOp(out, exec, location, it, "put_by_id_replace");
+ case op_put_by_val_with_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_by_val_with_this");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
break;
}
- case op_put_by_id_transition: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition");
+ case op_put_getter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_by_id");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
break;
}
- case op_put_by_id_transition_direct: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
+ case op_put_setter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_setter_by_id");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
break;
}
- case op_put_by_id_transition_direct_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
+ case op_put_getter_setter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id");
+ out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
break;
}
- case op_put_by_id_transition_normal: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
+ case op_put_getter_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_by_val");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
break;
}
- case op_put_by_id_transition_normal_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
+ case op_put_setter_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_setter_by_val");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
break;
}
- case op_put_by_id_generic: {
- printPutByIdOp(out, exec, location, it, "put_by_id_generic");
+ case op_define_data_property: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "define_data_property");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
break;
}
- case op_put_getter_setter: {
+ case op_define_accessor_property: {
int r0 = (++it)->u.operand;
- int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "put_getter_setter");
- out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
+ int r3 = (++it)->u.operand;
+ int r4 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "define_accessor_property");
+ out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data());
break;
}
case op_del_by_id: {
@@ -1031,27 +1252,6 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_get_argument_by_val: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int r2 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "get_argument_by_val");
- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
- ++it;
- dumpValueProfiling(out, it, hasPrintedProfiling);
- break;
- }
- case op_get_by_pname: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int r2 = (++it)->u.operand;
- int r3 = (++it)->u.operand;
- int r4 = (++it)->u.operand;
- int r5 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "get_by_pname");
- out.printf("%s, %s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
- break;
- }
case op_put_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
@@ -1114,6 +1314,7 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
int offset = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "jneq_ptr");
out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
+ ++it;
break;
}
case op_jless: {
@@ -1184,6 +1385,23 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printLocationAndOp(out, exec, location, it, "loop_hint");
break;
}
+ case op_watchdog: {
+ printLocationAndOp(out, exec, location, it, "watchdog");
+ break;
+ }
+ case op_log_shadow_chicken_prologue: {
+ int r0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "log_shadow_chicken_prologue");
+ out.printf("%s", registerName(r0).data());
+ break;
+ }
+ case op_log_shadow_chicken_tail: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "log_shadow_chicken_tail");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+ break;
+ }
case op_switch_imm: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
@@ -1210,73 +1428,108 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_new_func: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- int shouldCheck = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "new_func");
- out.printf("%s, f%d, %s", registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
break;
}
- case op_new_captured_func: {
+ case op_new_generator_func: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "new_captured_func");
- out.printf("%s, f%d", registerName(r0).data(), f0);
- ++it;
+ printLocationAndOp(out, exec, location, it, "new_generator_func");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_async_func: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_async_func");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
break;
}
case op_new_func_exp: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "new_func_exp");
- out.printf("%s, f%d", registerName(r0).data(), f0);
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_generator_func_exp: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_generator_func_exp");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_async_func_exp: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_async_func_exp");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_set_function_name: {
+ int funcReg = (++it)->u.operand;
+ int nameReg = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "set_function_name");
+ out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data());
break;
}
case op_call: {
- printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
+ break;
+ }
+ case op_tail_call: {
+ printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
case op_call_eval: {
- printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
- case op_call_varargs: {
+
+ case op_construct_varargs:
+ case op_call_varargs:
+ case op_tail_call_varargs:
+ case op_tail_call_forward_arguments: {
int result = (++it)->u.operand;
int callee = (++it)->u.operand;
int thisValue = (++it)->u.operand;
int arguments = (++it)->u.operand;
int firstFreeRegister = (++it)->u.operand;
+ int varArgOffset = (++it)->u.operand;
++it;
- printLocationAndOp(out, exec, location, it, "call_varargs");
- out.printf("%s, %s, %s, %s, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
+ const char* opName;
+ if (opcode == op_call_varargs)
+ opName = "call_varargs";
+ else if (opcode == op_construct_varargs)
+ opName = "construct_varargs";
+ else if (opcode == op_tail_call_varargs)
+ opName = "tail_call_varargs";
+ else if (opcode == op_tail_call_forward_arguments)
+ opName = "tail_call_forward_arguments";
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+
+ printLocationAndOp(out, exec, location, it, opName);
+ out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
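// The four varargs forms share one decode path; only the printed name
// differs. An equivalent table-driven spelling of the if-chain above
// (sketch only):
//
//     static const char* varargsOpName(OpcodeID opcode)
//     {
//         switch (opcode) {
//         case op_call_varargs: return "call_varargs";
//         case op_construct_varargs: return "construct_varargs";
//         case op_tail_call_varargs: return "tail_call_varargs";
//         case op_tail_call_forward_arguments: return "tail_call_forward_arguments";
//         default: RELEASE_ASSERT_NOT_REACHED(); return nullptr;
//         }
//     }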
- case op_tear_off_activation: {
- int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "tear_off_activation", r0);
- break;
- }
- case op_tear_off_arguments: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "tear_off_arguments");
- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
- break;
- }
+
case op_ret: {
int r0 = (++it)->u.operand;
printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
break;
}
- case op_ret_object_or_this: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "constructor_ret");
- out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
- break;
- }
case op_construct: {
- printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
case op_strcat: {
@@ -1294,49 +1547,120 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_get_pnames: {
- int r0 = it[1].u.operand;
- int r1 = it[2].u.operand;
- int r2 = it[3].u.operand;
- int r3 = it[4].u.operand;
- int offset = it[5].u.operand;
- printLocationAndOp(out, exec, location, it, "get_pnames");
- out.printf("%s, %s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
- it += OPCODE_LENGTH(op_get_pnames) - 1;
+ case op_get_enumerable_length: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
+ out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+ it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
break;
}
- case op_next_pname: {
- int dest = it[1].u.operand;
+ case op_has_indexed_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ ArrayProfile* arrayProfile = it[4].u.arrayProfile;
+ printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
+ out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
+ it += OPCODE_LENGTH(op_has_indexed_property) - 1;
+ break;
+ }
+ case op_has_structure_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ int enumerator = it[4].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_has_structure_property");
+ out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
+ it += OPCODE_LENGTH(op_has_structure_property) - 1;
+ break;
+ }
+ case op_has_generic_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_has_generic_property");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
+ it += OPCODE_LENGTH(op_has_generic_property) - 1;
+ break;
+ }
+ case op_get_direct_pname: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ int index = it[4].u.operand;
+ int enumerator = it[5].u.operand;
+ ValueProfile* profile = it[6].u.profile;
+ printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
+ out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
+ it += OPCODE_LENGTH(op_get_direct_pname) - 1;
+ break;
+ }
+ case op_get_property_enumerator: {
+ int dst = it[1].u.operand;
int base = it[2].u.operand;
- int i = it[3].u.operand;
- int size = it[4].u.operand;
- int iter = it[5].u.operand;
- int offset = it[6].u.operand;
- printLocationAndOp(out, exec, location, it, "next_pname");
- out.printf("%s, %s, %s, %s, %s, %d(->%d)", registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
- it += OPCODE_LENGTH(op_next_pname) - 1;
+ printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
+ out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+ it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
+ break;
+ }
+ case op_enumerator_structure_pname: {
+ int dst = it[1].u.operand;
+ int enumerator = it[2].u.operand;
+ int index = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
+ break;
+ }
+ case op_enumerator_generic_pname: {
+ int dst = it[1].u.operand;
+ int enumerator = it[2].u.operand;
+ int index = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
+ break;
+ }
+ case op_to_index_string: {
+ int dst = it[1].u.operand;
+ int index = it[2].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_to_index_string");
+ out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_to_index_string) - 1;
break;
}
case op_push_with_scope: {
- int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "push_with_scope", r0);
+ int dst = (++it)->u.operand;
+ int newScope = (++it)->u.operand;
+ int currentScope = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "push_with_scope");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
break;
}
- case op_pop_scope: {
- printLocationAndOp(out, exec, location, it, "pop_scope");
+ case op_get_parent_scope: {
+ int dst = (++it)->u.operand;
+ int parentScope = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_parent_scope");
+ out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
break;
}
- case op_push_name_scope: {
- int id0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- unsigned attributes = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "push_name_scope");
- out.printf("%s, %s, %u", idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
+ case op_create_lexical_environment: {
+ int dst = (++it)->u.operand;
+ int scope = (++it)->u.operand;
+ int symbolTable = (++it)->u.operand;
+ int initialValue = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "create_lexical_environment");
+ out.printf("%s, %s, %s, %s",
+ registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
break;
}
case op_catch: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "catch");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
case op_throw: {
@@ -1346,26 +1670,24 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_throw_static_error: {
int k0 = (++it)->u.operand;
- int k1 = (++it)->u.operand;
+ ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue);
printLocationAndOp(out, exec, location, it, "throw_static_error");
- out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
+ out.printf("%s, ", constantName(k0).data());
+ out.print(k1);
break;
}
case op_debug: {
- int debugHookID = (++it)->u.operand;
+ int debugHookType = (++it)->u.operand;
int hasBreakpointFlag = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "debug");
- out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
+ out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag);
break;
}
- case op_profile_will_call: {
- int function = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
- break;
- }
- case op_profile_did_call: {
- int function = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
+ case op_assert: {
+ int condition = (++it)->u.operand;
+ int line = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "assert");
+ out.printf("%s, %d", registerName(condition).data(), line);
break;
}
case op_end: {
@@ -1375,53 +1697,81 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_resolve_scope: {
int r0 = (++it)->u.operand;
+ int scope = (++it)->u.operand;
int id0 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
- ++it; // depth
+ ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
+ int depth = (++it)->u.operand;
+ void* pointer = (++it)->u.pointer;
printLocationAndOp(out, exec, location, it, "resolve_scope");
- out.printf("%s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
- ++it;
+ out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
break;
}
case op_get_from_scope: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
+ GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
++it; // Structure
- ++it; // Operand
- ++it; // Skip value profile.
+ int operand = (++it)->u.operand; // Operand
printLocationAndOp(out, exec, location, it, "get_from_scope");
- out.printf("%s, %s, %s, %d", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
+ out.print(registerName(r0), ", ", registerName(r1));
+ if (static_cast<unsigned>(id0) == UINT_MAX)
+ out.print(", anonymous");
+ else
+ out.print(", ", idName(id0, identifier(id0)));
+ out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
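// Note: GetPutInfo packs ResolveMode, ResolveType and InitializationMode
// into the single int operand that the old dump printed raw as
// "resolveModeAndType"; an id0 of UINT_MAX means the access carries no
// identifier, which the dump renders as "anonymous".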
case op_put_to_scope: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
+ GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
++it; // Structure
- ++it; // Operand
+ int operand = (++it)->u.operand; // Operand
printLocationAndOp(out, exec, location, it, "put_to_scope");
- out.printf("%s, %s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), resolveModeAndType);
+ out.print(registerName(r0));
+ if (static_cast<unsigned>(id0) == UINT_MAX)
+ out.print(", anonymous");
+ else
+ out.print(", ", idName(id0, identifier(id0)));
+ out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand);
+ break;
+ }
+ case op_get_from_arguments: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_from_arguments");
+ out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_put_to_arguments: {
+ int r0 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_to_arguments");
+ out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
break;
}
-#if ENABLE(LLINT_C_LOOP)
default:
RELEASE_ASSERT_NOT_REACHED();
-#endif
}
dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
- dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
+ dumpArithProfile(out, arithProfileForBytecodeOffset(location), hasPrintedProfiling);
#if ENABLE(DFG_JIT)
Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
if (!exitSites.isEmpty()) {
out.print(" !! frequent exits: ");
CommaPrinter comma;
- for (unsigned i = 0; i < exitSites.size(); ++i)
- out.print(comma, exitSites[i].kind());
+ for (auto& exitSite : exitSites)
+ out.print(comma, exitSite.kind(), " ", exitSite.jitType());
}
#else // ENABLE(DFG_JIT)
UNUSED_PARAM(location);
@@ -1429,11 +1779,13 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.print("\n");
}
-void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
+void CodeBlock::dumpBytecode(
+ PrintStream& out, unsigned bytecodeOffset,
+ const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
ExecState* exec = m_globalObject->globalExec();
const Instruction* it = instructions().begin() + bytecodeOffset;
- dumpBytecode(out, exec, instructions().begin(), it);
+ dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
}
#define FOR_EACH_MEMBER_VECTOR(macro) \
@@ -1444,63 +1796,84 @@ void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
macro(functionExpressions) \
macro(constantRegisters)
-#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
- macro(regexps) \
- macro(functions) \
- macro(exceptionHandlers) \
- macro(switchJumpTables) \
- macro(stringSwitchJumpTables) \
- macro(evalCodeCache) \
- macro(expressionInfo) \
- macro(lineInfo) \
- macro(callReturnIndexVector)
-
template<typename T>
static size_t sizeInBytes(const Vector<T>& vector)
{
return vector.capacity() * sizeof(T);
}
-CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
- : m_globalObject(other.m_globalObject)
- , m_heap(other.m_heap)
- , m_numCalleeRegisters(other.m_numCalleeRegisters)
+namespace {
+
+class PutToScopeFireDetail : public FireDetail {
+public:
+ PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
+ : m_codeBlock(codeBlock)
+ , m_ident(ident)
+ {
+ }
+
+ void dump(PrintStream& out) const override
+ {
+ out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
+ }
+
+private:
+ CodeBlock* m_codeBlock;
+ const Identifier& m_ident;
+};
+
+} // anonymous namespace
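FireDetail subclasses like this one exist so the watchpoint machinery can describe, lazily and only when logging is on, why a set was fired. A minimal usage sketch (variable names invented; dataLog reaches dump() through WTF's generic printInternal fallback):

    // Sketch only: rendering the detail directly.
    PutToScopeFireDetail detail(codeBlock, ident);
    dataLog(detail, "\n"); // "Linking put_to_scope in <function> for <ident>"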
+
+CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
+ : JSCell(*vm, structure)
+ , m_globalObject(other.m_globalObject)
+ , m_numCalleeLocals(other.m_numCalleeLocals)
, m_numVars(other.m_numVars)
- , m_isConstructor(other.m_isConstructor)
, m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+ , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+ , m_didFailJITCompilation(false)
, m_didFailFTLCompilation(false)
- , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
+ , m_hasBeenCompiledWithFTL(false)
+ , m_isConstructor(other.m_isConstructor)
+ , m_isStrictMode(other.m_isStrictMode)
+ , m_codeType(other.m_codeType)
+ , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
+ , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
+ , m_hasDebuggerStatement(false)
, m_steppingMode(SteppingModeDisabled)
, m_numBreakpoints(0)
- , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
+ , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
, m_vm(other.m_vm)
, m_instructions(other.m_instructions)
, m_thisRegister(other.m_thisRegister)
- , m_argumentsRegister(other.m_argumentsRegister)
- , m_activationRegister(other.m_activationRegister)
- , m_isStrictMode(other.m_isStrictMode)
- , m_needsActivation(other.m_needsActivation)
+ , m_scopeRegister(other.m_scopeRegister)
+ , m_hash(other.m_hash)
, m_source(other.m_source)
, m_sourceOffset(other.m_sourceOffset)
, m_firstLineColumnOffset(other.m_firstLineColumnOffset)
- , m_codeType(other.m_codeType)
, m_constantRegisters(other.m_constantRegisters)
+ , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
, m_functionDecls(other.m_functionDecls)
, m_functionExprs(other.m_functionExprs)
, m_osrExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
- , m_hash(other.m_hash)
-#if ENABLE(JIT)
- , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+ , m_creationTime(std::chrono::steady_clock::now())
{
- ASSERT(m_heap->isDeferred());
-
- if (SymbolTable* symbolTable = other.symbolTable())
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
-
+ m_visitWeaklyHasBeenCalled = false;
+
+ ASSERT(heap()->isDeferred());
+ ASSERT(m_scopeRegister.isLocal());
+
setNumParameters(other.numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
+{
+ Base::finishCreation(vm);
+
optimizeAfterWarmUp();
jitAfterWarmUp();
@@ -1513,87 +1886,94 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
}
- m_heap->m_codeBlocks.add(this);
- m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
+ heap()->m_codeBlocks->add(this);
}
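CodeBlock is now a JSCell, so creation follows the standard two-phase JSC pattern: a constructor that touches only plain fields, then finishCreation() once the cell may be observed by the GC. The canonical shape of that pattern, with a hypothetical MyCell standing in for CodeBlock:

    // Sketch only; MyCell and create() are illustrative, not this patch's API.
    static MyCell* create(VM& vm, Structure* structure)
    {
        MyCell* cell = new (NotNull, allocateCell<MyCell>(vm.heap)) MyCell(vm, structure);
        cell->finishCreation(vm); // GC-visible initialization happens here
        return cell;
    }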
-CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
- , m_heap(&m_globalObject->vm().heap)
- , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
+CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : JSCell(*vm, structure)
+ , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
+ , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
, m_numVars(unlinkedCodeBlock->m_numVars)
- , m_isConstructor(unlinkedCodeBlock->isConstructor())
, m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+ , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+ , m_didFailJITCompilation(false)
, m_didFailFTLCompilation(false)
- , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
+ , m_hasBeenCompiledWithFTL(false)
+ , m_isConstructor(unlinkedCodeBlock->isConstructor())
+ , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
+ , m_codeType(unlinkedCodeBlock->codeType())
+ , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
+ , m_hasDebuggerStatement(false)
, m_steppingMode(SteppingModeDisabled)
, m_numBreakpoints(0)
- , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
+ , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
, m_vm(unlinkedCodeBlock->vm())
, m_thisRegister(unlinkedCodeBlock->thisRegister())
- , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
- , m_activationRegister(unlinkedCodeBlock->activationRegister())
- , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
- , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain() && unlinkedCodeBlock->codeType() == FunctionCode)
- , m_source(sourceProvider)
+ , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
+ , m_source(WTFMove(sourceProvider))
, m_sourceOffset(sourceOffset)
, m_firstLineColumnOffset(firstLineColumnOffset)
- , m_codeType(unlinkedCodeBlock->codeType())
, m_osrExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
-#if ENABLE(JIT)
- , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+ , m_creationTime(std::chrono::steady_clock::now())
{
- ASSERT(m_heap->isDeferred());
+ m_visitWeaklyHasBeenCalled = false;
+
+ ASSERT(heap()->isDeferred());
+ ASSERT(m_scopeRegister.isLocal());
- bool didCloneSymbolTable = false;
-
- if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
- if (codeType() == FunctionCode && symbolTable->captureCount()) {
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->clone(*m_vm));
- didCloneSymbolTable = true;
- } else
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
- }
-
ASSERT(m_source);
setNumParameters(unlinkedCodeBlock->numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+ JSScope* scope)
+{
+ Base::finishCreation(vm);
- setConstantRegisters(unlinkedCodeBlock->constantRegisters());
+ if (vm.typeProfiler() || vm.controlFlowProfiler())
+ vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
+
+ setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
if (unlinkedCodeBlock->usesGlobalObject())
- m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().offset()].set(*m_vm, ownerExecutable, m_globalObject.get());
- m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
+ m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
+
+ for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
+ LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
+ if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
+ m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
+ }
+
+ // We already have the cloned symbol table for the module environment since we need to instantiate
+ // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
+ if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
+ SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
+ if (m_vm->typeProfiler()) {
+ ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
+ clonedSymbolTable->prepareForTypeProfiling(locker);
+ }
+ replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
+ }
+
+ bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
+ m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
- unsigned lineCount = unlinkedExecutable->lineCount();
- unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
- bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
- unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
- bool endColumnIsOnStartLine = !lineCount;
- unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
- unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
- unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
- FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
- m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
- }
-
- m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
+ if (shouldUpdateFunctionHasExecutedCache)
+ vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+ }
+
+ m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
- unsigned lineCount = unlinkedExecutable->lineCount();
- unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
- bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
- unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
- bool endColumnIsOnStartLine = !lineCount;
- unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
- unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
- unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
- FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
- m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
+ if (shouldUpdateFunctionHasExecutedCache)
+ vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
}
if (unlinkedCodeBlock->hasRareData()) {
@@ -1607,15 +1987,13 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
m_rareData->m_exceptionHandlers.resizeToFit(count);
- size_t nonLocalScopeDepth = scope->depth();
for (size_t i = 0; i < count; i++) {
- const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
- m_rareData->m_exceptionHandlers[i].start = handler.start;
- m_rareData->m_exceptionHandlers[i].end = handler.end;
- m_rareData->m_exceptionHandlers[i].target = handler.target;
- m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
-#if ENABLE(JIT) && ENABLE(LLINT)
- m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
+ const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+#if ENABLE(JIT)
+ handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
+#else
+ handler.initialize(unlinkedHandler);
#endif
}
}
@@ -1627,7 +2005,7 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
for (; ptr != end; ++ptr) {
OffsetLocation offset;
- offset.branchOffset = ptr->value;
+ offset.branchOffset = ptr->value.branchOffset;
m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
}
}
@@ -1645,58 +2023,83 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
// Allocate metadata buffers for the bytecode
-#if ENABLE(LLINT)
if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
- m_llintCallLinkInfos.resizeToFit(size);
-#endif
+ m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
m_arrayProfiles.grow(size);
if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
- m_arrayAllocationProfiles.resizeToFit(size);
+ m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
- m_valueProfiles.resizeToFit(size);
+ m_valueProfiles = RefCountedArray<ValueProfile>(size);
if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
- m_objectAllocationProfiles.resizeToFit(size);
+ m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
+
+#if ENABLE(JIT)
+ setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
+#endif
// Copy and translate the UnlinkedInstructions
unsigned instructionCount = unlinkedCodeBlock->instructions().count();
UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
- Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
+    // Keep track of the strongly referenced module environments.
+ HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
+
+ RefCountedArray<Instruction> instructions(instructionCount);
+
+ unsigned valueProfileCount = 0;
+ auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
+ unsigned valueProfileIndex = valueProfileCount++;
+ ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = bytecodeOffset;
+ instructions[bytecodeOffset + opLength - 1] = profile;
+ };
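// (Editor's sketch, not part of this diff.) linkValueProfile above writes the
// profile pointer into the instruction's final operand slot, keyed by bytecode
// offset. A consumer relying on that layout would read it back symmetrically;
// the helper name below is illustrative:
//
//     ValueProfile* profileFor(Instruction* instructions, unsigned bytecodeOffset, unsigned opLength)
//     {
//         return static_cast<ValueProfile*>(instructions[bytecodeOffset + opLength - 1].u.pointer);
//     }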
+
for (unsigned i = 0; !instructionReader.atEnd(); ) {
const UnlinkedInstruction* pc = instructionReader.next();
unsigned opLength = opcodeLength(pc[0].u.opcode);
- instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
+ instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
for (size_t j = 1; j < opLength; ++j) {
if (sizeof(int32_t) != sizeof(intptr_t))
instructions[i + j].u.pointer = 0;
instructions[i + j].u.operand = pc[j].u.operand;
}
switch (pc[0].u.opcode) {
+ case op_has_indexed_property: {
+ int arrayProfileIndex = pc[opLength - 1].u.operand;
+ m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+ instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+ break;
+ }
case op_call_varargs:
- case op_get_by_val:
- case op_get_argument_by_val: {
+ case op_tail_call_varargs:
+ case op_tail_call_forward_arguments:
+ case op_construct_varargs:
+ case op_get_by_val: {
int arrayProfileIndex = pc[opLength - 2].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
FALLTHROUGH;
}
- case op_get_by_id: {
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
- break;
- }
- case op_put_by_val: {
- int arrayProfileIndex = pc[opLength - 1].u.operand;
- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
- instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+ case op_get_direct_pname:
+ case op_get_by_id:
+ case op_get_by_id_with_this:
+ case op_try_get_by_id:
+ case op_get_by_val_with_this:
+ case op_get_from_arguments:
+ case op_to_number:
+ case op_get_argument: {
+ linkValueProfile(i, opLength);
break;
}
+
+ case op_in:
+ case op_put_by_val:
case op_put_by_val_direct: {
int arrayProfileIndex = pc[opLength - 1].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
@@ -1717,125 +2120,216 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
int inferredInlineCapacity = pc[opLength - 2].u.operand;
instructions[i + opLength - 1] = objectAllocationProfile;
- objectAllocationProfile->initialize(*vm(),
- m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
+ objectAllocationProfile->initialize(vm,
+ m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
break;
}
case op_call:
+ case op_tail_call:
case op_call_eval: {
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
+ linkValueProfile(i, opLength);
int arrayProfileIndex = pc[opLength - 2].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
-#if ENABLE(LLINT)
instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
break;
}
case op_construct: {
-#if ENABLE(LLINT)
instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
- break;
- }
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
- case op_get_array_length:
- case op_get_string_length:
- CRASH();
-
- case op_init_global_const_nop: {
- ASSERT(codeType() == GlobalCode);
- Identifier ident = identifier(pc[4].u.operand);
- SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
- if (entry.isNull())
- break;
-
- instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
- instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
+ linkValueProfile(i, opLength);
break;
}
+ case op_get_array_length:
+ CRASH();
case op_resolve_scope: {
- const Identifier& ident = identifier(pc[2].u.operand);
- ResolveType type = static_cast<ResolveType>(pc[3].u.operand);
-
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
- instructions[i + 3].u.operand = op.type;
- instructions[i + 4].u.operand = op.depth;
- if (op.activation)
- instructions[i + 5].u.activation.set(*vm(), ownerExecutable, op.activation);
+ const Identifier& ident = identifier(pc[3].u.operand);
+ ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
+ RELEASE_ASSERT(type != LocalClosureVar);
+ int localScopeDepth = pc[5].u.operand;
+
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+ instructions[i + 4].u.operand = op.type;
+ instructions[i + 5].u.operand = op.depth;
+ if (op.lexicalEnvironment) {
+ if (op.type == ModuleVar) {
+ // Keep the linked module environment strongly referenced.
+ if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
+ addConstant(op.lexicalEnvironment);
+ instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
+ } else
+ instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
+ } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
+ instructions[i + 6].u.jsCell.set(vm, this, constantScope);
+ else
+ instructions[i + 6].u.pointer = nullptr;
break;
}
case op_get_from_scope: {
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
+ linkValueProfile(i, opLength);
+
+ // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
+
+ int localScopeDepth = pc[5].u.operand;
+ instructions[i + 5].u.pointer = nullptr;
+
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ ASSERT(!isInitialization(getPutInfo.initializationMode()));
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+ break;
+ }
- // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
const Identifier& ident = identifier(pc[3].u.operand);
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
- instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+ if (op.type == ModuleVar)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
instructions[i + 5].u.watchpointSet = op.watchpointSet;
else if (op.structure)
- instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 5].u.structure.set(vm, this, op.structure);
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
break;
}
case op_put_to_scope: {
- // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+ // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ // Only do watching if the property we're putting to is not anonymous.
+ if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
+ int symbolTableIndex = pc[5].u.operand;
+ SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+ const Identifier& ident = identifier(pc[2].u.operand);
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ auto iter = symbolTable->find(locker, ident.impl());
+ ASSERT(iter != symbolTable->end(locker));
+ iter->value.prepareToWatch();
+ instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
+ } else
+ instructions[i + 5].u.watchpointSet = nullptr;
+ break;
+ }
+
const Identifier& ident = identifier(pc[2].u.operand);
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
+ int localScopeDepth = pc[5].u.operand;
+ instructions[i + 5].u.pointer = nullptr;
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
- instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
instructions[i + 5].u.watchpointSet = op.watchpointSet;
else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
if (op.watchpointSet)
- op.watchpointSet->invalidate();
+ op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
} else if (op.structure)
- instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 5].u.structure.set(vm, this, op.structure);
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+
break;
}
-
- case op_captured_mov:
- case op_new_captured_func: {
- if (pc[3].u.index == UINT_MAX) {
- instructions[i + 3].u.watchpointSet = 0;
+
+ case op_profile_type: {
+ RELEASE_ASSERT(vm.typeProfiler());
+ // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
+ size_t instructionOffset = i + opLength - 1;
+ unsigned divotStart, divotEnd;
+ GlobalVariableID globalVariableID = 0;
+ RefPtr<TypeSet> globalTypeSet;
+ bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
+ VirtualRegister profileRegister(pc[1].u.operand);
+ ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
+ SymbolTable* symbolTable = nullptr;
+
+ switch (flag) {
+ case ProfileTypeBytecodeClosureVar: {
+ const Identifier& ident = identifier(pc[4].u.operand);
+ int localScopeDepth = pc[2].u.operand;
+ ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
+ // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
+                // we're abstractly "reading" from a JSScope.
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+
+ if (op.type == ClosureVar || op.type == ModuleVar)
+ symbolTable = op.lexicalEnvironment->symbolTable();
+ else if (op.type == GlobalVar)
+ symbolTable = m_globalObject.get()->symbolTable();
+
+ UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
+ if (symbolTable) {
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ symbolTable->prepareForTypeProfiling(locker);
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
+ } else
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+
+ break;
+ }
+ case ProfileTypeBytecodeLocallyResolved: {
+ int symbolTableIndex = pc[2].u.operand;
+ SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+ const Identifier& ident = identifier(pc[4].u.operand);
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
+
+ break;
+ }
+ case ProfileTypeBytecodeDoesNotHaveGlobalID:
+ case ProfileTypeBytecodeFunctionArgument: {
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+ break;
+ }
+ case ProfileTypeBytecodeFunctionReturnStatement: {
+ RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
+ globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
+ globalVariableID = TypeProfilerReturnStatement;
+ if (!shouldAnalyze) {
+ // Because a return statement can be added implicitly to return undefined at the end of a function,
+ // and these nodes don't emit expression ranges because they aren't in the actual source text of
+ // the user's program, give the type profiler some range to identify these return statements.
+ // Currently, the text offset that is used as identification is "f" in the function keyword
+ // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
+ divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
+ shouldAnalyze = true;
+ }
break;
}
- StringImpl* uid = identifier(pc[3].u.index).impl();
- RELEASE_ASSERT(didCloneSymbolTable);
- ConcurrentJITLocker locker(m_symbolTable->m_lock);
- SymbolTable::Map::iterator iter = m_symbolTable->find(locker, uid);
- ASSERT(iter != m_symbolTable->end(locker));
- iter->value.prepareToWatch();
- instructions[i + 3].u.watchpointSet = iter->value.watchpointSet();
+ }
+
+ std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
+ ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
+ TypeLocation* location = locationPair.first;
+ bool isNewLocation = locationPair.second;
+
+ if (flag == ProfileTypeBytecodeFunctionReturnStatement)
+ location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
+
+ if (shouldAnalyze && isNewLocation)
+ vm.typeProfiler()->insertNewLocation(location);
+
+ instructions[i + 2].u.location = location;
+ break;
+ }
+
+ case op_debug: {
+ if (pc[1].u.index == DidReachBreakpoint)
+ m_hasDebuggerStatement = true;
+ break;
+ }
+
+ case op_create_rest: {
+ int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
+ ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
+        // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
+ m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
break;
}
@@ -1844,7 +2338,11 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
i += opLength;
}
- m_instructions = WTF::RefCountedArray<Instruction>(instructions);
+
+ if (vm.controlFlowProfiler())
+ insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+
+ m_instructions = WTFMove(instructions);
// Set optimization thresholds only after m_instructions is initialized, since these
// rely on the instruction count (and are in theory permitted to also inspect the
@@ -1854,71 +2352,89 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
// If the concurrent thread will want the code block's hash, then compute it here
// synchronously.
- if (Options::showDisassembly()
- || Options::showDFGDisassembly()
- || Options::dumpBytecodeAtDFGTime()
- || Options::dumpGraphAtEachPhase()
- || Options::verboseCompilation()
- || Options::logCompilationChanges()
- || Options::validateGraph()
- || Options::validateGraphAtEachPhase()
- || Options::verboseOSR()
- || Options::verboseCompilationQueue()
- || Options::reportCompileTimes()
- || Options::verboseCFA())
+ if (Options::alwaysComputeHash())
hash();
if (Options::dumpGeneratedBytecodes())
dumpBytecode();
-
- m_heap->m_codeBlocks.add(this);
- m_heap->reportExtraMemoryCost(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
+
+ heap()->m_codeBlocks->add(this);
+ heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
}
CodeBlock::~CodeBlock()
{
if (m_vm->m_perBytecodeProfiler)
m_vm->m_perBytecodeProfiler->notifyDestruction(this);
-
+
+ if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
+ unlinkedCodeBlock()->setDidOptimize(FalseTriState);
+
#if ENABLE(VERBOSE_VALUE_PROFILE)
dumpValueProfiles();
#endif
-#if ENABLE(LLINT)
- while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
- m_incomingLLIntCalls.begin()->remove();
-#endif // ENABLE(LLINT)
-#if ENABLE(JIT)
// We may be destroyed before any CodeBlocks that refer to us are destroyed.
// Consider that two CodeBlocks become unreachable at the same time. There
// is no guarantee about the order in which the CodeBlocks are destroyed.
// So, if we don't remove incoming calls, and get destroyed before the
// CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
// destructor will try to remove nodes from our (no longer valid) linked list.
- while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->remove();
+ unlinkIncomingCalls();
// Note that our outgoing calls will be removed from other CodeBlocks'
// m_incomingCalls linked lists through the execution of the ~CallLinkInfo
// destructors.
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
- (*iter)->deref();
+#if ENABLE(JIT)
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo* stub = *iter;
+ stub->aboutToDie();
+ stub->deref();
+ }
#endif // ENABLE(JIT)
}
-void CodeBlock::setNumParameters(int newValue)
+void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
{
- m_numParameters = newValue;
+ ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
+ size_t count = constants.size();
+ m_constantRegisters.resizeToFit(count);
+ bool hasTypeProfiler = !!m_vm->typeProfiler();
+ for (size_t i = 0; i < count; i++) {
+ JSValue constant = constants[i].get();
+
+ if (!constant.isEmpty()) {
+ if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constant)) {
+ if (hasTypeProfiler) {
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ symbolTable->prepareForTypeProfiling(locker);
+ }
- m_argumentValueProfiles.resizeToFit(newValue);
+ SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
+ if (wasCompiledWithDebuggingOpcodes())
+ clone->setRareDataCodeBlock(this);
+
+ constant = clone;
+ }
+ }
+
+ m_constantRegisters[i].set(*m_vm, this, constant);
+ }
+
+ m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
}
-void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
+void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
{
- EvalCacheMap::iterator end = m_cacheMap.end();
- for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
- visitor.append(&ptr->value);
+ m_alternative.set(vm, this, alternative);
+}
+
+void CodeBlock::setNumParameters(int newValue)
+{
+ m_numParameters = newValue;
+
+ m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
}
CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
@@ -1927,77 +2443,46 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
if (jitType() != JITCode::DFGJIT)
return 0;
DFG::JITCode* jitCode = m_jitCode->dfg();
- return jitCode->osrEntryBlock.get();
+ return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
return 0;
#endif // ENABLE(FTL_JIT)
}
-void CodeBlock::visitAggregate(SlotVisitor& visitor)
-{
-#if ENABLE(PARALLEL_GC)
- // I may be asked to scan myself more than once, and it may even happen concurrently.
- // To this end, use a CAS loop to check if I've been called already. Only one thread
- // may proceed past this point - whichever one wins the CAS race.
- unsigned oldValue;
- do {
- oldValue = m_visitAggregateHasBeenCalled;
- if (oldValue) {
- // Looks like someone else won! Return immediately to ensure that we don't
- // trace the same CodeBlock concurrently. Doing so is hazardous since we will
- // be mutating the state of ValueProfiles, which contain JSValues, which can
- // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
- // that are nearly impossible to track down.
-
- // Also note that it must be safe to return early as soon as we see the
- // value true (well, (unsigned)1), since once a GC thread is in this method
- // and has won the CAS race (i.e. was responsible for setting the value true)
- // it will definitely complete the rest of this method before declaring
- // termination.
- return;
- }
- } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
-#endif // ENABLE(PARALLEL_GC)
-
- if (!!m_alternative)
- m_alternative->visitAggregate(visitor);
+void CodeBlock::visitWeakly(SlotVisitor& visitor)
+{
+ ConcurrentJSLocker locker(m_lock);
+ if (m_visitWeaklyHasBeenCalled)
+ return;
- if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
- otherBlock->visitAggregate(visitor);
+ m_visitWeaklyHasBeenCalled = true;
- visitor.reportExtraMemoryUsage(ownerExecutable(), sizeof(CodeBlock));
- if (m_jitCode)
- visitor.reportExtraMemoryUsage(ownerExecutable(), m_jitCode->size());
- if (m_instructions.size()) {
- // Divide by refCount() because m_instructions points to something that is shared
- // by multiple CodeBlocks, and we only want to count it towards the heap size once.
- // Having each CodeBlock report only its proportional share of the size is one way
- // of accomplishing this.
- visitor.reportExtraMemoryUsage(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
+ if (Heap::isMarkedConcurrently(this))
+ return;
+
+ if (shouldVisitStrongly(locker)) {
+ visitor.appendUnbarriered(this);
+ return;
}
+
+ // There are two things that may use unconditional finalizers: inline cache clearing
+ // and jettisoning. The probability of us wanting to do at least one of those things
+ // is probably quite close to 1. So we add one no matter what and when it runs, it
+ // figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
- visitor.append(&m_unlinkedCode);
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return;
- // There are three things that may use unconditional finalizers: lazy bytecode freeing,
- // inline cache clearing, and jettisoning. The probability of us wanting to do at
- // least one of those things is probably quite close to 1. So we add one no matter what
- // and when it runs, it figures out whether it has any work to do.
- visitor.addUnconditionalFinalizer(this);
+ // If we jettison ourselves we'll install our alternative, so make sure that it
+ // survives GC even if we don't.
+ visitor.append(m_alternative);
// There are two things that we use weak reference harvesters for: DFG fixpoint for
// jettisoning, and trying to find structures that would be live based on some
// inline cache. So it makes sense to register them regardless.
- visitor.addWeakReferenceHarvester(this);
- m_allTransitionsHaveBeenMarked = false;
-
- if (shouldImmediatelyAssumeLivenessDuringScan()) {
- // This code block is live, so scan all references strongly and return.
- stronglyVisitStrongReferences(visitor);
- stronglyVisitWeakReferences(visitor);
- propagateTransitions(visitor);
- return;
- }
-
+ visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
+
#if ENABLE(DFG_JIT)
// We get here if we're live in the sense that our owner executable is live,
// but we're not yet live for sure in another sense: we may yet decide that this
@@ -2007,17 +2492,149 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
// either us marking additional objects, or by other objects being marked for
// other reasons, that this iteration should run again; it will notify us of this
// decision by calling harvestWeakReferences().
-
+
+ m_allTransitionsHaveBeenMarked = false;
+ propagateTransitions(locker, visitor);
+
m_jitCode->dfgCommon()->livenessHasBeenProved = false;
-
- propagateTransitions(visitor);
- determineLiveness(visitor);
-#else // ENABLE(DFG_JIT)
- RELEASE_ASSERT_NOT_REACHED();
+ determineLiveness(locker, visitor);
#endif // ENABLE(DFG_JIT)
}
-void CodeBlock::propagateTransitions(SlotVisitor& visitor)
+size_t CodeBlock::estimatedSize(JSCell* cell)
+{
+ CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+ size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
+ if (thisObject->m_jitCode)
+ extraMemoryAllocated += thisObject->m_jitCode->size();
+ return Base::estimatedSize(cell) + extraMemoryAllocated;
+}
+
+void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ JSCell::visitChildren(thisObject, visitor);
+ thisObject->visitChildren(visitor);
+}
+
+void CodeBlock::visitChildren(SlotVisitor& visitor)
+{
+ ConcurrentJSLocker locker(m_lock);
+ // There are two things that may use unconditional finalizers: inline cache clearing
+ // and jettisoning. The probability of us wanting to do at least one of those things
+ // is probably quite close to 1. So we add one no matter what and when it runs, it
+ // figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
+
+ if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+ visitor.appendUnbarriered(otherBlock);
+
+ if (m_jitCode)
+ visitor.reportExtraMemoryVisited(m_jitCode->size());
+ if (m_instructions.size()) {
+ unsigned refCount = m_instructions.refCount();
+ if (!refCount) {
+ dataLog("CodeBlock: ", RawPointer(this), "\n");
+ dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
+ dataLog("refCount: ", refCount, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
+ }
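// (Editor's note: arithmetic sketch of the proportional accounting above.) The
// instruction stream is shared by refCount() CodeBlocks, so each one reports
// only its share and the heap counts the buffer once in aggregate:
//
//     size_t bytes = m_instructions.size() * sizeof(Instruction); // e.g. 4096 bytes
//     unsigned sharers = refCount;                                // e.g. 4 CodeBlocks
//     size_t reportedHere = bytes / sharers;                      // 1024; 4 * 1024 == 4096 total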
+
+ stronglyVisitStrongReferences(locker, visitor);
+ stronglyVisitWeakReferences(locker, visitor);
+
+ m_allTransitionsHaveBeenMarked = false;
+ propagateTransitions(locker, visitor);
+}
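// (Editor's sketch; the interface shape is taken from the surrounding code,
// the rest is illustrative.) The "register a finalizer no matter what" idiom
// trades a cheap per-GC callback for never missing cleanup work:
//
//     class UnconditionalFinalizer {
//     public:
//         // Runs at the end of every GC, whether or not the owner was marked.
//         // The implementation decides if there is anything to do, so callers
//         // can register unconditionally.
//         virtual void finalizeUnconditionally() = 0;
//     };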
+
+bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
+{
+ if (Options::forceCodeBlockLiveness())
+ return true;
+
+ if (shouldJettisonDueToOldAge(locker))
+ return false;
+
+ // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+    // their weak references go stale. So if a baseline JIT CodeBlock gets
+    // scanned, we can assume that it's live.
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return true;
+
+ return false;
+}
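// (Editor's summary sketch of the policy above; the helper name and parameters
// are illustrative.)
//
//     bool visitStrongly(CodeBlock* cb, bool forced, bool pastTTL)
//     {
//         if (forced)
//             return true;  // Options::forceCodeBlockLiveness()
//         if (pastTTL)
//             return false; // shouldJettisonDueToOldAge()
//         return !JITCode::isOptimizingJIT(cb->jitType()); // LLInt/Baseline: scanned implies live
//     }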
+
+bool CodeBlock::shouldJettisonDueToWeakReference()
+{
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return false;
+ return !Heap::isMarked(this);
+}
+
+static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
+{
+ if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
+ switch (jitType) {
+ case JITCode::InterpreterThunk:
+ return std::chrono::milliseconds(10);
+ case JITCode::BaselineJIT:
+ return std::chrono::milliseconds(10 + 20);
+ case JITCode::DFGJIT:
+ return std::chrono::milliseconds(40);
+ case JITCode::FTLJIT:
+ return std::chrono::milliseconds(120);
+ default:
+ return std::chrono::milliseconds::max();
+ }
+ }
+
+ switch (jitType) {
+ case JITCode::InterpreterThunk:
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
+ case JITCode::BaselineJIT:
+ // Effectively 10 additional seconds, since BaselineJIT and
+ // InterpreterThunk share a CodeBlock.
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
+ case JITCode::DFGJIT:
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
+ case JITCode::FTLJIT:
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
+ default:
+ return std::chrono::milliseconds::max();
+ }
+}
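// (Editor's sketch.) shouldJettisonDueToOldAge below combines this table with
// the mark bit: an unmarked CodeBlock that has outlived its tier's time-to-live
// is eligible for jettisoning. In pseudo-form:
//
//     bool stale = !Heap::isMarkedConcurrently(codeBlock)
//         && codeBlock->timeSinceCreation() >= timeToLive(codeBlock->jitType());
//
// Higher tiers get longer lifetimes because recompiling them costs more, and
// the Baseline figure is a delta on top of the LLInt one since both tiers
// share a CodeBlock.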
+
+bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
+{
+ if (Heap::isMarkedConcurrently(this))
+ return false;
+
+ if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
+ return true;
+
+ if (timeSinceCreation() < timeToLive(jitType()))
+ return false;
+
+ return true;
+}
+
+#if ENABLE(DFG_JIT)
+static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+{
+ if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
+ return false;
+
+ if (!Heap::isMarkedConcurrently(transition.m_from.get()))
+ return false;
+
+ return true;
+}
+#endif // ENABLE(DFG_JIT)
+
+void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
@@ -2026,19 +2643,23 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
bool allAreMarkedSoFar = true;
-#if ENABLE(LLINT)
Interpreter* interpreter = m_vm->interpreter;
if (jitType() == JITCode::InterpreterThunk) {
const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line: {
- if (Heap::isMarked(instruction[4].u.structure.get()))
- visitor.append(&instruction[6].u.structure);
+ case op_put_by_id: {
+ StructureID oldStructureID = instruction[4].u.structureID;
+ StructureID newStructureID = instruction[6].u.structureID;
+ if (!oldStructureID || !newStructureID)
+ break;
+ Structure* oldStructure =
+ m_vm->heap.structureIDTable().get(oldStructureID);
+ Structure* newStructure =
+ m_vm->heap.structureIDTable().get(newStructureID);
+ if (Heap::isMarkedConcurrently(oldStructure))
+ visitor.appendUnbarriered(newStructure);
else
allAreMarkedSoFar = false;
break;
@@ -2048,69 +2669,42 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
}
}
}
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
if (JITCode::isJIT(jitType())) {
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo& stubInfo = **iter;
- switch (stubInfo.accessType) {
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct: {
- JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
- if ((!origin || Heap::isMarked(origin))
- && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
- visitor.append(&stubInfo.u.putByIdTransition.structure);
- else
- allAreMarkedSoFar = false;
- break;
- }
-
- case access_put_by_id_list: {
- PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
- JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
- if (origin && !Heap::isMarked(origin)) {
- allAreMarkedSoFar = false;
- break;
- }
- for (unsigned j = list->size(); j--;) {
- PutByIdAccess& access = list->m_list[j];
- if (!access.isTransition())
- continue;
- if (Heap::isMarked(access.oldStructure()))
- visitor.append(&access.m_newStructure);
- else
- allAreMarkedSoFar = false;
- }
- break;
- }
-
- default:
- break;
- }
- }
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
+ allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
}
#endif // ENABLE(JIT)
#if ENABLE(DFG_JIT)
if (JITCode::isOptimizingJIT(jitType())) {
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
- for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- if ((!dfgCommon->transitions[i].m_codeOrigin
- || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
- && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
+ for (auto& weakReference : dfgCommon->weakStructureReferences)
+ allAreMarkedSoFar &= weakReference->markIfCheap(visitor);
+
+ for (auto& transition : dfgCommon->transitions) {
+ if (shouldMarkTransition(transition)) {
// If the following three things are live, then the target of the
// transition is also live:
+ //
// - This code block. We know it's live already because otherwise
// we wouldn't be scanning ourselves.
+ //
// - The code origin of the transition. Transitions may arise from
// code that was inlined. They are not relevant if the user's
// object that is required for the inlinee to run is no longer
// live.
+ //
// - The source of the transition. The transition checks if some
// heap location holds the source, and if so, stores the target.
// Hence the source must be live for the transition to be live.
- visitor.append(&dfgCommon->transitions[i].m_to);
+ //
+ // We also short-circuit the liveness if the structure is harmless
+ // to mark (i.e. its global object and prototype are both already
+ // live).
+
+ visitor.append(transition.m_to);
} else
allAreMarkedSoFar = false;
}
@@ -2121,13 +2715,10 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
m_allTransitionsHaveBeenMarked = true;
}
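// (Editor's sketch of the fixpoint this pass participates in; names are
// illustrative.) A transition's target is marked only once its code origin and
// source structure are marked, and the pass reruns because one iteration's
// marking can satisfy another's preconditions:
//
//     bool changed;
//     do {
//         changed = false;
//         for (auto& t : transitions) {
//             if (!isMarked(t.to) && originIsLive(t) && isMarked(t.from)) {
//                 mark(t.to);
//                 changed = true;
//             }
//         }
//     } while (changed);
//
// In JSC the re-runs are driven by the GC's weak reference harvester rather
// than a local loop.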
-void CodeBlock::determineLiveness(SlotVisitor& visitor)
+void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
- if (shouldImmediatelyAssumeLivenessDuringScan())
- return;
-
#if ENABLE(DFG_JIT)
// Check if we have any remaining work to do.
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
@@ -2139,11 +2730,21 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
// GC we still have not proved liveness, then this code block is toast.
bool allAreLiveSoFar = true;
for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
- if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
+ JSCell* reference = dfgCommon->weakReferences[i].get();
+ ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
+ if (!Heap::isMarkedConcurrently(reference)) {
allAreLiveSoFar = false;
break;
}
}
+ if (allAreLiveSoFar) {
+ for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
+ if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
+ allAreLiveSoFar = false;
+ break;
+ }
+ }
+ }
// If some weak references are dead, then this fixpoint iteration was
// unsuccessful.
@@ -2153,261 +2754,346 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
// All weak references are live. Record this information so we don't
// come back here again, and scan the strong references.
dfgCommon->livenessHasBeenProved = true;
- stronglyVisitStrongReferences(visitor);
+ visitor.appendUnbarriered(this);
#endif // ENABLE(DFG_JIT)
}
-void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
+void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
{
- propagateTransitions(visitor);
- determineLiveness(visitor);
+ CodeBlock* codeBlock =
+ bitwise_cast<CodeBlock*>(
+ bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
+
+ codeBlock->propagateTransitions(NoLockingNecessary, visitor);
+ codeBlock->determineLiveness(NoLockingNecessary, visitor);
}
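// (Editor's sketch.) The pointer arithmetic above is the classic container_of
// idiom: subtract a member's byte offset from the member's address to recover
// the enclosing object. A minimal standalone form:
//
//     #include <cstddef>
//     struct Outer { int before; int member; };
//     Outer* outerOf(int* memberPtr)
//     {
//         return reinterpret_cast<Outer*>(
//             reinterpret_cast<char*>(memberPtr) - offsetof(Outer, member));
//     }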
-void CodeBlock::finalizeUnconditionally()
+void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
+{
+ instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
+ instruction[4].u.pointer = nullptr;
+ instruction[5].u.pointer = nullptr;
+ instruction[6].u.pointer = nullptr;
+}
+
+void CodeBlock::finalizeLLIntInlineCaches()
{
Interpreter* interpreter = m_vm->interpreter;
- if (JITCode::couldBeInterpreted(jitType())) {
- const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
- for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
- Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
- switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
- case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_put_by_id:
- case op_put_by_id_out_of_line:
- if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
- curInstruction[4].u.structure.clear();
- curInstruction[5].u.operand = 0;
+ const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+ for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
+ switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+ case op_get_by_id:
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset: {
+ StructureID oldStructureID = curInstruction[4].u.structureID;
+ if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
break;
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line:
- if (Heap::isMarked(curInstruction[4].u.structure.get())
- && Heap::isMarked(curInstruction[6].u.structure.get())
- && Heap::isMarked(curInstruction[7].u.structureChain.get()))
- break;
- if (Options::verboseOSR()) {
- dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
- curInstruction[4].u.structure.get(),
- curInstruction[6].u.structure.get(),
- curInstruction[7].u.structureChain.get());
- }
- curInstruction[4].u.structure.clear();
- curInstruction[6].u.structure.clear();
- curInstruction[7].u.structureChain.clear();
- curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt property access.\n");
+ clearLLIntGetByIdCache(curInstruction);
+ break;
+ }
+ case op_put_by_id: {
+ StructureID oldStructureID = curInstruction[4].u.structureID;
+ StructureID newStructureID = curInstruction[6].u.structureID;
+ StructureChain* chain = curInstruction[7].u.structureChain.get();
+            if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
+                && (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID)))
+                && (!chain || Heap::isMarked(chain)))
break;
- case op_get_array_length:
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt put transition.\n");
+ curInstruction[4].u.structureID = 0;
+ curInstruction[5].u.operand = 0;
+ curInstruction[6].u.structureID = 0;
+ curInstruction[7].u.structureChain.clear();
+ break;
+ }
+ case op_get_array_length:
+ break;
+ case op_to_this:
+ if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
break;
- case op_to_this:
- if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
- curInstruction[2].u.structure.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
+ curInstruction[2].u.structure.clear();
+ curInstruction[3].u.toThisStatus = merge(
+ curInstruction[3].u.toThisStatus, ToThisClearedByGC);
+ break;
+ case op_create_this: {
+ auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
+ if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
break;
- case op_get_callee:
- if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
- curInstruction[2].u.jsCell.clear();
+ JSCell* cachedFunction = cacheWriteBarrier.get();
+ if (Heap::isMarked(cachedFunction))
break;
- case op_resolve_scope: {
- WriteBarrierBase<JSActivation>& activation = curInstruction[5].u.activation;
- if (!activation || Heap::isMarked(activation.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing dead activation %p.\n", activation.get());
- activation.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
+ cacheWriteBarrier.clear();
+ break;
+ }
+ case op_resolve_scope: {
+ // Right now this isn't strictly necessary. Any symbol tables that this will refer to
+ // are for outer functions, and we refer to those functions strongly, and they refer
+ // to the symbol table strongly. But it's nice to be on the safe side.
+ WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
+ if (!symbolTable || Heap::isMarked(symbolTable.get()))
break;
- }
- case op_get_from_scope:
- case op_put_to_scope: {
- ResolveModeAndType modeAndType =
- ResolveModeAndType(curInstruction[4].u.operand);
- if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks)
- continue;
- WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
- if (!structure || Heap::isMarked(structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing scope access with structure %p.\n", structure.get());
- structure.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
+ symbolTable.clear();
+ break;
+ }
+ case op_get_from_scope:
+ case op_put_to_scope: {
+ GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
+ if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
+ || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
+ continue;
+ WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
+ if (!structure || Heap::isMarked(structure.get()))
break;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
+ if (Options::verboseOSR())
+ dataLogF("Clearing scope access with structure %p.\n", structure.get());
+ structure.clear();
+ break;
}
-
-#if ENABLE(LLINT)
- for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
- if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
- if (Options::verboseOSR())
- dataLog("Clearing LLInt call from ", *this, "\n");
- m_llintCallLinkInfos[i].unlink();
- }
- if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
- m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ default:
+ OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
+ ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
}
-#endif // ENABLE(LLINT)
}
-#if ENABLE(DFG_JIT)
- // Check if we're not live. If we are, then jettison.
- if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_jitCode->dfgCommon()->livenessHasBeenProved)) {
- if (Options::verboseOSR())
- dataLog(*this, " has dead weak references, jettisoning during GC.\n");
+ // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
+ // then cleared the cache without GCing in between.
+ m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
+ return !Heap::isMarked(pair.key);
+ });
- if (DFG::shouldShowDisassembly()) {
- dataLog(*this, " will be jettisoned because of the following dead references:\n");
- DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
- for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
- JSCell* origin = transition.m_codeOrigin.get();
- JSCell* from = transition.m_from.get();
- JSCell* to = transition.m_to.get();
- if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
- continue;
- dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
- }
- for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
- JSCell* weak = dfgCommon->weakReferences[i].get();
- if (Heap::isMarked(weak))
- continue;
- dataLog(" Weak reference ", RawPointer(weak), ".\n");
- }
+ for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+ if (Options::verboseOSR())
+ dataLog("Clearing LLInt call from ", *this, "\n");
+ m_llintCallLinkInfos[i].unlink();
}
-
- jettison();
+ if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+ m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ }
+}
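// (Editor's sketch of the invariant enforced above; the helper is
// illustrative.) A cache entry survives a GC only if every cell it references
// is still marked; one dead reference resets the whole entry to its unlinked
// state:
//
//     bool cacheStillValid(std::initializer_list<JSCell*> cells)
//     {
//         for (JSCell* cell : cells) {
//             if (cell && !Heap::isMarked(cell))
//                 return false;
//         }
//         return true;
//     }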
+
+void CodeBlock::finalizeBaselineJITInlineCaches()
+{
+#if ENABLE(JIT)
+ for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
+ (*iter)->visitWeak(*vm());
+
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo& stubInfo = **iter;
+ stubInfo.visitWeakReferences(this);
+ }
+#endif
+}
+
+void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
+{
+ CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
+ bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
+
+ codeBlock->updateAllPredictions();
+
+ if (!Heap::isMarked(codeBlock)) {
+ if (codeBlock->shouldJettisonDueToWeakReference())
+ codeBlock->jettison(Profiler::JettisonDueToWeakReference);
+ else
+ codeBlock->jettison(Profiler::JettisonDueToOldAge);
return;
}
-#endif // ENABLE(DFG_JIT)
+
+ if (JITCode::couldBeInterpreted(codeBlock->jitType()))
+ codeBlock->finalizeLLIntInlineCaches();
#if ENABLE(JIT)
- // Handle inline caches.
- if (!!jitCode()) {
- RepatchBuffer repatchBuffer(this);
- for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
- if (callLinkInfo(i).isLinked()) {
- if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
- if (!Heap::isMarked(stub->structure())
- || !Heap::isMarked(stub->executable())) {
- if (Options::verboseOSR()) {
- dataLog(
- "Clearing closure call from ", *this, " to ",
- stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
- ", stub routine ", RawPointer(stub), ".\n");
- }
- callLinkInfo(i).unlink(*m_vm, repatchBuffer);
- }
- } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
- if (Options::verboseOSR()) {
- dataLog(
- "Clearing call from ", *this, " to ",
- RawPointer(callLinkInfo(i).callee.get()), " (",
- callLinkInfo(i).callee.get()->executable()->hashFor(
- callLinkInfo(i).specializationKind()),
- ").\n");
- }
- callLinkInfo(i).unlink(*m_vm, repatchBuffer);
- }
- }
- if (!!callLinkInfo(i).lastSeenCallee
- && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
- callLinkInfo(i).lastSeenCallee.clear();
- }
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo& stubInfo = **iter;
-
- if (stubInfo.visitWeakReferences())
- continue;
-
- resetStubDuringGCInternal(repatchBuffer, stubInfo);
- }
+ if (!!codeBlock->jitCode())
+ codeBlock->finalizeBaselineJITInlineCaches();
+#endif
+}
+
+void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
+{
+#if ENABLE(JIT)
+ if (JITCode::isJIT(jitType()))
+ toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getStubInfoMap(StubInfoMap& result)
+{
+ ConcurrentJSLocker locker(m_lock);
+ getStubInfoMap(locker, result);
+}
+
+void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
+{
+#if ENABLE(JIT)
+ if (JITCode::isJIT(jitType()))
+ toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
+{
+ ConcurrentJSLocker locker(m_lock);
+ getCallLinkInfoMap(locker, result);
+}
+
+void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
+{
+#if ENABLE(JIT)
+ if (JITCode::isJIT(jitType())) {
+ for (auto* byValInfo : m_byValInfos)
+ result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
}
+#else
+ UNUSED_PARAM(result);
#endif
}
+void CodeBlock::getByValInfoMap(ByValInfoMap& result)
+{
+ ConcurrentJSLocker locker(m_lock);
+ getByValInfoMap(locker, result);
+}
+
#if ENABLE(JIT)
-StructureStubInfo* CodeBlock::addStubInfo()
+StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
{
- ConcurrentJITLocker locker(m_lock);
- return m_stubInfos.add();
+ ConcurrentJSLocker locker(m_lock);
+ return m_stubInfos.add(accessType);
}
-void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
{
- toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+ return m_addICs.add(arithProfile);
}
-void CodeBlock::resetStub(StructureStubInfo& stubInfo)
+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
{
- if (stubInfo.accessType == access_unset)
- return;
-
- ConcurrentJITLocker locker(m_lock);
-
- RepatchBuffer repatchBuffer(this);
- resetStubInternal(repatchBuffer, stubInfo);
+ return m_mulICs.add(arithProfile);
}
-void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
{
- AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
-
- if (Options::verboseOSR()) {
- // This can be called from GC destructor calls, so we don't try to do a full dump
- // of the CodeBlock.
- dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
+ return m_subICs.add(arithProfile);
+}
+
+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
+{
+ return m_negICs.add(arithProfile);
+}
+
+StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
+{
+ for (StructureStubInfo* stubInfo : m_stubInfos) {
+ if (stubInfo->codeOrigin == codeOrigin)
+ return stubInfo;
}
-
- RELEASE_ASSERT(JITCode::isJIT(jitType()));
-
- if (isGetByIdAccess(accessType))
- resetGetByID(repatchBuffer, stubInfo);
- else if (isPutByIdAccess(accessType))
- resetPutByID(repatchBuffer, stubInfo);
- else {
- RELEASE_ASSERT(isInAccess(accessType));
- resetIn(repatchBuffer, stubInfo);
+ return nullptr;
+}
+
+ByValInfo* CodeBlock::addByValInfo()
+{
+ ConcurrentJSLocker locker(m_lock);
+ return m_byValInfos.add();
+}
+
+CallLinkInfo* CodeBlock::addCallLinkInfo()
+{
+ ConcurrentJSLocker locker(m_lock);
+ return m_callLinkInfos.add();
+}
+
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
+{
+ for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+ if ((*iter)->codeOrigin() == CodeOrigin(index))
+ return *iter;
}
-
- stubInfo.reset();
+ return nullptr;
}
-void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void CodeBlock::resetJITData()
{
- resetStubInternal(repatchBuffer, stubInfo);
- stubInfo.resetByGC = true;
+ RELEASE_ASSERT(!JITCode::isJIT(jitType()));
+ ConcurrentJSLocker locker(m_lock);
+
+ // We can clear these because no other thread will have references to any stub infos, call
+ // link infos, or by val infos if we don't have JIT code. Attempts to query these data
+ // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
+ // don't have JIT code.
+ m_stubInfos.clear();
+ m_callLinkInfos.clear();
+ m_byValInfos.clear();
+
+ // We can clear this because the DFG's queries to these data structures are guarded by whether
+ // there is JIT code.
+ m_rareCaseProfiles.clear();
}
#endif
-void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
+void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
- visitor.append(&m_globalObject);
- visitor.append(&m_ownerExecutable);
- visitor.append(&m_symbolTable);
- visitor.append(&m_unlinkedCode);
+ // We strongly visit OSR exits targets because we don't want to deal with
+ // the complexity of generating an exit target CodeBlock on demand and
+ // guaranteeing that it matches the details of the CodeBlock we compiled
+ // the OSR exit against.
+
+ visitor.append(m_alternative);
+
+#if ENABLE(DFG_JIT)
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ if (dfgCommon->inlineCallFrames) {
+ for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
+ ASSERT(inlineCallFrame->baselineCodeBlock);
+ visitor.append(inlineCallFrame->baselineCodeBlock);
+ }
+ }
+#endif
+}
+
+void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
+{
+ UNUSED_PARAM(locker);
+
+ visitor.append(m_globalObject);
+ visitor.append(m_ownerExecutable);
+ visitor.append(m_unlinkedCode);
if (m_rareData)
- m_rareData->m_evalCodeCache.visitAggregate(visitor);
+ m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
- for (size_t i = 0; i < m_functionExprs.size(); ++i)
- visitor.append(&m_functionExprs[i]);
- for (size_t i = 0; i < m_functionDecls.size(); ++i)
- visitor.append(&m_functionDecls[i]);
- for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
- m_objectAllocationProfiles[i].visitAggregate(visitor);
+ for (auto& functionExpr : m_functionExprs)
+ visitor.append(functionExpr);
+ for (auto& functionDecl : m_functionDecls)
+ visitor.append(functionDecl);
+ for (auto& objectAllocationProfile : m_objectAllocationProfiles)
+ objectAllocationProfile.visitAggregate(visitor);
- updateAllPredictions();
+#if ENABLE(JIT)
+ for (ByValInfo* byValInfo : m_byValInfos)
+ visitor.append(byValInfo->cachedSymbol);
+#endif
+
+#if ENABLE(DFG_JIT)
+ if (JITCode::isOptimizingJIT(jitType()))
+ visitOSRExitTargets(locker, visitor);
+#endif
}
-void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
+void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
@@ -2417,15 +3103,20 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
- for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- if (!!dfgCommon->transitions[i].m_codeOrigin)
- visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
- visitor.append(&dfgCommon->transitions[i].m_from);
- visitor.append(&dfgCommon->transitions[i].m_to);
+ for (auto& transition : dfgCommon->transitions) {
+ if (!!transition.m_codeOrigin)
+ visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
+ visitor.append(transition.m_from);
+ visitor.append(transition.m_to);
}
-
- for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
- visitor.append(&dfgCommon->weakReferences[i]);
+
+ for (auto& weakReference : dfgCommon->weakReferences)
+ visitor.append(weakReference);
+
+ for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
+ visitor.append(weakStructureReference);
+
+ dfgCommon->livenessHasBeenProved = true;
#endif
}
@@ -2474,87 +3165,56 @@ bool CodeBlock::hasOptimizedReplacement()
}
#endif
-bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
{
- if (operand.isArgument())
- return operand.toArgument() && usesArguments();
-
- if (inlineCallFrame)
- return inlineCallFrame->capturedVars.get(operand.toLocal());
-
- // The activation object isn't in the captured region, but it's "captured"
- // in the sense that stores to its location can be observed indirectly.
- if (needsActivation() && operand == activationRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == argumentsRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
- return true;
-
- // We're in global code so there are no locals to capture
- if (!symbolTable())
- return false;
-
- return symbolTable()->isCaptured(operand.offset());
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
+ return handlerForIndex(bytecodeOffset, requiredHandler);
}
-int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
+HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
{
- // We'll be adding this to the stack pointer to get a registers pointer that looks
- // like it would have looked in the baseline engine. For example, if bytecode would
- // have put the first captured variable at offset -5 but we put it at offset -1, then
- // we'll have an offset of 4.
- int32_t offset = 0;
-
- // Compute where we put the captured variables. This offset will point the registers
- // pointer directly at the first captured var.
- offset += machineCaptureStart;
-
- // Now compute the offset needed to make the runtime see the captured variables at the
- // same offset that the bytecode would have used.
- offset -= symbolTable()->captureStart();
-
- return offset;
+ if (!m_rareData)
+        return nullptr;
+ return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
}
-int CodeBlock::framePointerOffsetToGetActivationRegisters()
+CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
{
- if (!JITCode::isOptimizingJIT(jitType()))
- return 0;
#if ENABLE(DFG_JIT)
- return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+ RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
+ ASSERT(!!handlerForIndex(originalCallSite.bits()));
+ CodeOrigin originalOrigin = codeOrigin(originalCallSite);
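+    // Mint a fresh call site index carrying the same code origin as the original
+    // site, so stack unwinding attributes the new site to the same bytecode.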
+ return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
#else
+ // We never create new on-the-fly exception handling
+ // call sites outside the DFG/FTL inline caches.
+ UNUSED_PARAM(originalCallSite);
RELEASE_ASSERT_NOT_REACHED();
- return 0;
+ return CallSiteIndex(0u);
#endif
}
-HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
+void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
{
- RELEASE_ASSERT(bytecodeOffset < instructions().size());
-
- if (!m_rareData)
- return 0;
-
+ RELEASE_ASSERT(m_rareData);
Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ unsigned index = callSiteIndex.bits();
for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
- // Handlers are ordered innermost first, so the first handler we encounter
- // that contains the source address is the correct handler to use.
- if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
- return &exceptionHandlers[i];
+ HandlerInfo& handler = exceptionHandlers[i];
+ if (handler.start <= index && handler.end > index) {
+ exceptionHandlers.remove(i);
+ return;
+ }
}
- return 0;
+ RELEASE_ASSERT_NOT_REACHED();
}
unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
RELEASE_ASSERT(bytecodeOffset < instructions().size());
- return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+ return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}
unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
@@ -2568,12 +3228,12 @@ unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
return column;
}
-void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
{
m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
divot += m_sourceOffset;
column += line ? 1 : firstLineColumnOffset();
- line += m_ownerExecutable->lineNo();
+ line += ownerScriptExecutable()->firstLine();
}
bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
@@ -2599,11 +3259,13 @@ bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
+ ConcurrentJSLocker locker(m_lock);
+
m_rareCaseProfiles.shrinkToFit();
- m_specialFastCaseProfiles.shrinkToFit();
if (shrinkMode == EarlyShrink) {
m_constantRegisters.shrinkToFit();
+ m_constantsSourceCodeRepresentation.shrinkToFit();
if (m_rareData) {
m_rareData->m_switchJumpTables.shrinkToFit();
@@ -2612,175 +3274,154 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
} // else don't shrink these, because we would have already pointed pointers into these tables.
}
-unsigned CodeBlock::addOrFindConstant(JSValue v)
-{
- unsigned result;
- if (findConstant(v, result))
- return result;
- return addConstant(v);
-}
-
-bool CodeBlock::findConstant(JSValue v, unsigned& index)
-{
- unsigned numberOfConstants = numberOfConstantRegisters();
- for (unsigned i = 0; i < numberOfConstants; ++i) {
- if (getConstant(FirstConstantRegisterIndex + i) == v) {
- index = i;
- return true;
- }
- }
- index = numberOfConstants;
- return false;
-}
-
#if ENABLE(JIT)
-void CodeBlock::unlinkCalls()
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
- if (!!m_alternative)
- m_alternative->unlinkCalls();
-#if ENABLE(LLINT)
- for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
- if (m_llintCallLinkInfos[i].isLinked())
- m_llintCallLinkInfos[i].unlink();
- }
-#endif
- if (!m_callLinkInfos.size())
- return;
- if (!m_vm->canUseJIT())
- return;
- RepatchBuffer repatchBuffer(this);
- for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
- if (!m_callLinkInfos[i].isLinked())
- continue;
- m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
- }
+ noticeIncomingCall(callerFrame);
+ m_incomingCalls.push(incoming);
}
-void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
+void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
{
noticeIncomingCall(callerFrame);
- m_incomingCalls.push(incoming);
+ m_incomingPolymorphicCalls.push(incoming);
}
#endif // ENABLE(JIT)
void CodeBlock::unlinkIncomingCalls()
{
-#if ENABLE(LLINT)
while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
m_incomingLLIntCalls.begin()->unlink();
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
- if (m_incomingCalls.isEmpty())
- return;
- RepatchBuffer repatchBuffer(this);
while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
+ m_incomingCalls.begin()->unlink(*vm());
+ while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
+ m_incomingPolymorphicCalls.begin()->unlink(*vm());
#endif // ENABLE(JIT)
}
-#if ENABLE(LLINT)
void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
noticeIncomingCall(callerFrame);
m_incomingLLIntCalls.push(incoming);
}
-#endif // ENABLE(LLINT)
-void CodeBlock::clearEvalCache()
+CodeBlock* CodeBlock::newReplacement()
{
- if (!!m_alternative)
- m_alternative->clearEvalCache();
- if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
- otherBlock->clearEvalCache();
- if (!m_rareData)
- return;
- m_rareData->m_evalCodeCache.clear();
+ return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
}
-void CodeBlock::install()
+#if ENABLE(JIT)
+CodeBlock* CodeBlock::replacement()
{
- ownerExecutable()->installCode(this);
-}
+ const ClassInfo* classInfo = this->classInfo(*vm());
-PassRefPtr<CodeBlock> CodeBlock::newReplacement()
-{
- return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
-}
+ if (classInfo == FunctionCodeBlock::info())
+ return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
-const SlowArgument* CodeBlock::machineSlowArguments()
-{
- if (!JITCode::isOptimizingJIT(jitType()))
- return symbolTable()->slowArguments();
-
-#if ENABLE(DFG_JIT)
- return jitCode()->dfgCommon()->slowArguments.get();
-#else // ENABLE(DFG_JIT)
- return 0;
-#endif // ENABLE(DFG_JIT)
-}
+ if (classInfo == EvalCodeBlock::info())
+ return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
-#if ENABLE(JIT)
-CodeBlock* ProgramCodeBlock::replacement()
-{
- return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
-}
+ if (classInfo == ProgramCodeBlock::info())
+ return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
-CodeBlock* EvalCodeBlock::replacement()
-{
- return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
-}
+ if (classInfo == ModuleProgramCodeBlock::info())
+ return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
-CodeBlock* FunctionCodeBlock::replacement()
-{
- return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
}
-DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
+DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
{
- return DFG::programCapabilityLevel(this);
-}
+ const ClassInfo* classInfo = this->classInfo(*vm());
-DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
-{
- return DFG::evalCapabilityLevel(this);
+ if (classInfo == FunctionCodeBlock::info()) {
+ if (m_isConstructor)
+ return DFG::functionForConstructCapabilityLevel(this);
+ return DFG::functionForCallCapabilityLevel(this);
+ }
+
+ if (classInfo == EvalCodeBlock::info())
+ return DFG::evalCapabilityLevel(this);
+
+ if (classInfo == ProgramCodeBlock::info())
+ return DFG::programCapabilityLevel(this);
+
+ if (classInfo == ModuleProgramCodeBlock::info())
+ return DFG::programCapabilityLevel(this);
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return DFG::CannotCompile;
}
-DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
+#endif // ENABLE(JIT)
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
{
- if (m_isConstructor)
- return DFG::functionForConstructCapabilityLevel(this);
- return DFG::functionForCallCapabilityLevel(this);
-}
+#if !ENABLE(DFG_JIT)
+ UNUSED_PARAM(mode);
+ UNUSED_PARAM(detail);
#endif
+
+ CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
-void CodeBlock::jettison(ReoptimizationMode mode)
-{
+ RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+
#if ENABLE(DFG_JIT)
- if (DFG::shouldShowDisassembly()) {
+ if (DFG::shouldDumpDisassembly()) {
dataLog("Jettisoning ", *this);
if (mode == CountReoptimization)
dataLog(" and counting reoptimization");
+ dataLog(" due to ", reason);
+ if (detail)
+ dataLog(", ", *detail);
dataLog(".\n");
}
- DeferGCForAWhile deferGC(*m_heap);
- RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+ if (reason == Profiler::JettisonDueToWeakReference) {
+ if (DFG::shouldDumpDisassembly()) {
+ dataLog(*this, " will be jettisoned because of the following dead references:\n");
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ for (auto& transition : dfgCommon->transitions) {
+ JSCell* origin = transition.m_codeOrigin.get();
+ JSCell* from = transition.m_from.get();
+ JSCell* to = transition.m_to.get();
+ if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+ continue;
+ dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
+ }
+ for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+ JSCell* weak = dfgCommon->weakReferences[i].get();
+ if (Heap::isMarked(weak))
+ continue;
+ dataLog(" Weak reference ", RawPointer(weak), ".\n");
+ }
+ }
+ }
+#endif // ENABLE(DFG_JIT)
+
+ DeferGCForAWhile deferGC(*heap());
// We want to accomplish two things here:
// 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
// we should OSR exit at the top of the next bytecode instruction after the return.
// 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
-
- // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
- // whether the invalidation has already happened.
- if (!jitCode()->dfgCommon()->invalidate()) {
- // Nothing to do since we've already been invalidated. That means that we cannot be
- // the optimized replacement.
- RELEASE_ASSERT(this != replacement());
- return;
+
+#if ENABLE(DFG_JIT)
+ if (reason != Profiler::JettisonDueToOldAge) {
+ if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
+ compilation->setJettisonReason(reason, detail);
+
+ // This accomplishes (1), and does its own book-keeping about whether it has already happened.
+ if (!jitCode()->dfgCommon()->invalidate()) {
+ // We've already been invalidated.
+ RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
+ return;
+ }
}
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did invalidate ", *this, "\n");
// Count the reoptimization if that's what the user wanted.
@@ -2788,24 +3429,35 @@ void CodeBlock::jettison(ReoptimizationMode mode)
// FIXME: Maybe this should call alternative().
// https://bugs.webkit.org/show_bug.cgi?id=123677
baselineAlternative()->countReoptimization();
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did count reoptimization for ", *this, "\n");
}
- // Now take care of the entrypoint.
if (this != replacement()) {
// This means that we were never the entrypoint. This can happen for OSR entry code
// blocks.
return;
}
- alternative()->optimizeAfterWarmUp();
- tallyFrequentExitSites();
- alternative()->install();
- if (DFG::shouldShowDisassembly())
+
+ if (alternative())
+ alternative()->optimizeAfterWarmUp();
+
+ if (reason != Profiler::JettisonDueToOldAge)
+ tallyFrequentExitSites();
+#endif // ENABLE(DFG_JIT)
+
+ // Jettison can happen during GC. We don't want to install code to a dead executable
+ // because that would add a dead object to the remembered set.
+ if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
+ return;
+
+ // This accomplishes (2).
+ ownerScriptExecutable()->installCode(
+ m_globalObject->vm(), alternative(), codeType(), specializationKind());
+
+#if ENABLE(DFG_JIT)
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did install baseline version of ", *this, "\n");
-#else // ENABLE(DFG_JIT)
- UNUSED_PARAM(mode);
- UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}
@@ -2813,28 +3465,82 @@ JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
{
if (!codeOrigin.inlineCallFrame)
return globalObject();
- return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
+ return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
}
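+
+// Stack-walking functor: starting from a given call frame, checks whether the
+// given CodeBlock appears again within the next depthToCheck frames, i.e.
+// whether the incoming call is recursive.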
+class RecursionCheckFunctor {
+public:
+ RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
+ : m_startCallFrame(startCallFrame)
+ , m_codeBlock(codeBlock)
+ , m_depthToCheck(depthToCheck)
+ , m_foundStartCallFrame(false)
+ , m_didRecurse(false)
+ { }
+
+ StackVisitor::Status operator()(StackVisitor& visitor) const
+ {
+ CallFrame* currentCallFrame = visitor->callFrame();
+
+ if (currentCallFrame == m_startCallFrame)
+ m_foundStartCallFrame = true;
+
+ if (m_foundStartCallFrame) {
+ if (visitor->callFrame()->codeBlock() == m_codeBlock) {
+ m_didRecurse = true;
+ return StackVisitor::Done;
+ }
+
+ if (!m_depthToCheck--)
+ return StackVisitor::Done;
+ }
+
+ return StackVisitor::Continue;
+ }
+
+ bool didRecurse() const { return m_didRecurse; }
+
+private:
+ CallFrame* m_startCallFrame;
+ CodeBlock* m_codeBlock;
+ mutable unsigned m_depthToCheck;
+ mutable bool m_foundStartCallFrame;
+ mutable bool m_didRecurse;
+};
+
void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
{
CodeBlock* callerCodeBlock = callerFrame->codeBlock();
if (Options::verboseCallLink())
- dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
+ dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
+#if ENABLE(DFG_JIT)
if (!m_shouldAlwaysBeInlined)
return;
+
+ if (!callerCodeBlock) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is native.\n");
+ return;
+ }
-#if ENABLE(DFG_JIT)
if (!hasBaselineJITProfiling())
return;
if (!DFG::mightInlineFunction(this))
return;
- if (!canInline(m_capabilityLevelState))
+ if (!canInline(capabilityLevelState()))
+ return;
+
+ if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is too large.\n");
return;
+ }
if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
// If the caller is still in the interpreter, then we can't expect inlining to
@@ -2843,7 +3549,14 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
// any of its callers.
m_shouldAlwaysBeInlined = false;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because caller is in LLInt.\n");
+ dataLog(" Clearing SABI because caller is in LLInt.\n");
+ return;
+ }
+
+ if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI bcause caller was already optimized.\n");
return;
}
@@ -2853,40 +3566,72 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
// delay eval optimization by a *lot*.
m_shouldAlwaysBeInlined = false;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because caller is not a function.\n");
+ dataLog(" Clearing SABI because caller is not a function.\n");
return;
}
-
- ExecState* frame = callerFrame;
- for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
- if (frame->isVMEntrySentinel())
- break;
- if (frame->codeBlock() == this) {
- // Recursive calls won't be inlined.
- if (Options::verboseCallLink())
- dataLog(" Marking SABI because recursion was detected.\n");
- m_shouldAlwaysBeInlined = false;
- return;
- }
+
+ // Recursive calls won't be inlined.
+ RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
+ vm()->topCallFrame->iterate(functor);
+
+ if (functor.didRecurse()) {
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because recursion was detected.\n");
+ m_shouldAlwaysBeInlined = false;
+ return;
}
- RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
+ if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
+ dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
+ CRASH();
+ }
- if (canCompile(callerCodeBlock->m_capabilityLevelState))
+ if (canCompile(callerCodeBlock->capabilityLevelState()))
return;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because the caller is not a DFG candidate.\n");
+ dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
m_shouldAlwaysBeInlined = false;
#endif
}
-#if ENABLE(JIT)
unsigned CodeBlock::reoptimizationRetryCounter() const
{
+#if ENABLE(JIT)
ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
return m_reoptimizationRetryCounter;
+#else
+ return 0;
+#endif // ENABLE(JIT)
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
+{
+ m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
+}
+
+void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
+{
+ m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
+}
+
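+// Convert a count of CPU-register-sized callee save slots into the number of
+// Register-sized virtual register slots needed to hold them, rounding up.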
+static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
+{
+ static const unsigned cpuRegisterSize = sizeof(void*);
+ return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
+}
+
+size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
+{
+ return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
+}
+
+size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
+{
+ return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
}
void CodeBlock::countReoptimization()
@@ -2899,6 +3644,11 @@ void CodeBlock::countReoptimization()
unsigned CodeBlock::numberOfDFGCompiles()
{
ASSERT(JITCode::isBaselineCode(jitType()));
+ if (Options::testTheFTL()) {
+ if (m_didFailFTLCompilation)
+ return 1000000;
+ return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
+ }
return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
}
@@ -2979,13 +3729,16 @@ double CodeBlock::optimizationThresholdScalingFactor()
ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+
+ result *= codeTypeThresholdMultiplier();
+
if (Options::verboseOSR()) {
dataLog(
*this, ": instruction count is ", instructionCount,
", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
"\n");
}
- return result * codeTypeThresholdMultiplier();
+ return result;
}
static int32_t clipThreshold(double threshold)
@@ -3010,7 +3763,7 @@ int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
bool CodeBlock::checkIfOptimizationThresholdReached()
{
#if ENABLE(DFG_JIT)
- if (DFG::Worklist* worklist = m_vm->worklist.get()) {
+ if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
== DFG::Worklist::Compiled) {
optimizeNextInvocation();
@@ -3076,8 +3829,22 @@ void CodeBlock::forceOptimizationSlowPathConcurrently()
#if ENABLE(DFG_JIT)
void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
{
- RELEASE_ASSERT(jitType() == JITCode::BaselineJIT);
- RELEASE_ASSERT((result == CompilationSuccessful) == (replacement() != this));
+ JITCode::JITType type = jitType();
+ if (type != JITCode::BaselineJIT) {
+ dataLog(*this, ": expected to have baseline code but have ", type, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ CodeBlock* theReplacement = replacement();
+ if ((result == CompilationSuccessful) != (theReplacement != this)) {
+ dataLog(*this, ": we have result = ", result, " but ");
+ if (theReplacement == this)
+ dataLog("we are our own replacement.\n");
+ else
+ dataLog("our replacement is ", pointerDump(theReplacement), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
switch (result) {
case CompilationSuccessful:
RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
@@ -3100,6 +3867,8 @@ void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResu
optimizeAfterWarmUp();
return;
}
+
+ dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
RELEASE_ASSERT_NOT_REACHED();
}
@@ -3141,26 +3910,74 @@ bool CodeBlock::shouldReoptimizeFromLoopNow()
}
#endif
-ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
{
- for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
- if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
- return &m_arrayProfiles[i];
+ for (auto& m_arrayProfile : m_arrayProfiles) {
+ if (m_arrayProfile.bytecodeOffset() == bytecodeOffset)
+ return &m_arrayProfile;
}
return 0;
}
-ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
{
- ArrayProfile* result = getArrayProfile(bytecodeOffset);
+ ConcurrentJSLocker locker(m_lock);
+ return getArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
+{
+ m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
+ return &m_arrayProfiles.last();
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
+{
+ ConcurrentJSLocker locker(m_lock);
+ return addArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
+{
+ ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
if (result)
return result;
- return addArrayProfile(bytecodeOffset);
+ return addArrayProfile(locker, bytecodeOffset);
}
+ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+{
+ ConcurrentJSLocker locker(m_lock);
+ return getOrAddArrayProfile(locker, bytecodeOffset);
+}
+
+#if ENABLE(DFG_JIT)
+Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
+{
+ return m_jitCode->dfgCommon()->codeOrigins;
+}
+
+size_t CodeBlock::numberOfDFGIdentifiers() const
+{
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return 0;
+
+ return m_jitCode->dfgCommon()->dfgIdentifiers.size();
+}
+
+const Identifier& CodeBlock::identifier(int index) const
+{
+ size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
+ if (static_cast<unsigned>(index) < unlinkedIdentifiers)
+ return m_unlinkedCode->identifier(index);
+ ASSERT(JITCode::isOptimizingJIT(jitType()));
+ return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
+}
+#endif // ENABLE(DFG_JIT)
+
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
numberOfLiveNonArgumentValueProfiles = 0;
numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
@@ -3192,7 +4009,7 @@ void CodeBlock::updateAllValueProfilePredictions()
void CodeBlock::updateAllArrayPredictions()
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
for (unsigned i = m_arrayProfiles.size(); i--;)
m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
@@ -3253,12 +4070,8 @@ void CodeBlock::tallyFrequentExitSites()
switch (jitType()) {
case JITCode::DFGJIT: {
DFG::JITCode* jitCode = m_jitCode->dfg();
- for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
- DFG::OSRExit& exit = jitCode->osrExit[i];
-
- if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
- continue;
- }
+ for (auto& exit : jitCode->osrExit)
+ exit.considerAddingAsFrequentExitSite(profiledBlock);
break;
}
@@ -3270,9 +4083,7 @@ void CodeBlock::tallyFrequentExitSites()
FTL::JITCode* jitCode = m_jitCode->ftl();
for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
FTL::OSRExit& exit = jitCode->osrExit[i];
-
- if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
- continue;
+ exit.considerAddingAsFrequentExitSite(profiledBlock);
}
break;
}
@@ -3308,21 +4119,14 @@ void CodeBlock::dumpValueProfiles()
RareCaseProfile* profile = rareCaseProfile(i);
dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
}
- dataLog("SpecialFastCaseProfile for ", *this, ":\n");
- for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
- RareCaseProfile* profile = specialFastCaseProfile(i);
- dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
- }
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)
unsigned CodeBlock::frameRegisterCount()
{
switch (jitType()) {
-#if ENABLE(LLINT)
case JITCode::InterpreterThunk:
return LLInt::frameRegisterCountFor(this);
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
case JITCode::BaselineJIT:
@@ -3341,6 +4145,11 @@ unsigned CodeBlock::frameRegisterCount()
}
}
+int CodeBlock::stackPointerOffset()
+{
+ return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+}
+
size_t CodeBlock::predictedMachineCodeSize()
{
// This will be called from CodeBlock::CodeBlock before either m_vm or the
@@ -3349,12 +4158,12 @@ size_t CodeBlock::predictedMachineCodeSize()
if (!m_vm)
return 0;
- if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
+ if (!*m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
return 0; // It's as good of a prediction as we'll get.
// Be conservative: return a size that will be an overestimation 84% of the time.
- double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
- m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
+ double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
    // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
// here is OK, since this whole method is just a heuristic.
@@ -3400,72 +4209,35 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID)
String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
{
- ConcurrentJITLocker locker(symbolTable()->m_lock);
- SymbolTable::Map::iterator end = symbolTable()->end(locker);
- for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
- if (ptr->value.getIndex() == virtualRegister.offset()) {
- // FIXME: This won't work from the compilation thread.
- // https://bugs.webkit.org/show_bug.cgi?id=115300
- return String(ptr->key);
+ for (auto& constantRegister : m_constantRegisters) {
+ if (constantRegister.get().isEmpty())
+ continue;
+ if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ auto end = symbolTable->end(locker);
+ for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
+ if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
+ // FIXME: This won't work from the compilation thread.
+ // https://bugs.webkit.org/show_bug.cgi?id=115300
+ return ptr->key.get();
+ }
+ }
}
}
- if (needsActivation() && virtualRegister == activationRegister())
- return ASCIILiteral("activation");
if (virtualRegister == thisRegister())
return ASCIILiteral("this");
- if (usesArguments()) {
- if (virtualRegister == argumentsRegister())
- return ASCIILiteral("arguments");
- if (unmodifiedArgumentsRegister(argumentsRegister()) == virtualRegister)
- return ASCIILiteral("real arguments");
- }
if (virtualRegister.isArgument())
- return String::format("arguments[%3d]", virtualRegister.toArgument()).impl();
+ return String::format("arguments[%3d]", virtualRegister.toArgument());
return "";
}
-namespace {
-
-struct VerifyCapturedDef {
- void operator()(CodeBlock* codeBlock, Instruction* instruction, OpcodeID opcodeID, int operand)
- {
- unsigned bytecodeOffset = instruction - codeBlock->instructions().begin();
-
- if (codeBlock->isConstantRegisterIndex(operand)) {
- codeBlock->beginValidationDidFail();
- dataLog(" At bc#", bytecodeOffset, " encountered a definition of a constant.\n");
- codeBlock->endValidationDidFail();
- return;
- }
-
- switch (opcodeID) {
- case op_enter:
- case op_captured_mov:
- case op_init_lazy_reg:
- case op_create_arguments:
- case op_new_captured_func:
- return;
- default:
- break;
- }
-
- VirtualRegister virtualReg(operand);
- if (!virtualReg.isLocal())
- return;
-
- if (codeBlock->captureCount() && codeBlock->symbolTable()->isCaptured(operand)) {
- codeBlock->beginValidationDidFail();
- dataLog(" At bc#", bytecodeOffset, " encountered invalid assignment to captured variable loc", virtualReg.toLocal(), ".\n");
- codeBlock->endValidationDidFail();
- return;
- }
-
- return;
- }
-};
-
-} // anonymous namespace
+ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
+{
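+    // Profiled opcodes store their ValueProfile pointer in the last operand slot.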
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instructions()[bytecodeOffset].u.opcode);
+ unsigned length = opcodeLength(opcodeID);
+ return instructions()[bytecodeOffset + length - 1].u.profile;
+}
void CodeBlock::validate()
{
@@ -3473,7 +4245,7 @@ void CodeBlock::validate()
FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
- if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
+ if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
beginValidationDidFail();
dataLog(" Wrong number of bits in result!\n");
dataLog(" Result: ", liveAtHead, "\n");
@@ -3481,39 +4253,16 @@ void CodeBlock::validate()
endValidationDidFail();
}
- for (unsigned i = m_numCalleeRegisters; i--;) {
- bool isCaptured = false;
+ for (unsigned i = m_numCalleeLocals; i--;) {
VirtualRegister reg = virtualRegisterForLocal(i);
- if (captureCount())
- isCaptured = reg.offset() <= captureStart() && reg.offset() > captureEnd();
-
- if (isCaptured) {
- if (!liveAtHead.get(i)) {
- beginValidationDidFail();
- dataLog(" Variable loc", i, " is expected to be live because it is captured, but it isn't live.\n");
- dataLog(" Result: ", liveAtHead, "\n");
- endValidationDidFail();
- }
- } else {
- if (liveAtHead.get(i)) {
- beginValidationDidFail();
- dataLog(" Variable loc", i, " is expected to be dead.\n");
- dataLog(" Result: ", liveAtHead, "\n");
- endValidationDidFail();
- }
+ if (liveAtHead[i]) {
+ beginValidationDidFail();
+ dataLog(" Variable ", reg, " is expected to be dead.\n");
+ dataLog(" Result: ", liveAtHead, "\n");
+ endValidationDidFail();
}
}
-
- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions().size();) {
- Instruction* currentInstruction = instructions().begin() + bytecodeOffset;
- OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
-
- VerifyCapturedDef verifyCapturedDef;
- computeDefsForBytecodeOffset(this, bytecodeOffset, verifyCapturedDef);
-
- bytecodeOffset += opcodeLength(opcodeID);
- }
}
void CodeBlock::beginValidationDidFail()
@@ -3535,15 +4284,293 @@ void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
m_numBreakpoints += numBreakpoints;
ASSERT(m_numBreakpoints);
- if (jitType() == JITCode::DFGJIT)
- jettison();
+ if (JITCode::isOptimizingJIT(jitType()))
+ jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}
void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
m_steppingMode = mode;
- if (mode == SteppingModeEnabled && jitType() == JITCode::DFGJIT)
- jettison();
+ if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
+ jettison(Profiler::JettisonDueToDebuggerStepping);
+}
+
+RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+{
+ m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_rareCaseProfiles.last();
+}
+
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+{
+ return tryBinarySearch<RareCaseProfile, int>(
+ m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
+}
+
+unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
+{
+ RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
+ if (profile)
+ return profile->m_counter;
+ return 0;
}
+ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
+{
+ return arithProfileForPC(instructions().begin() + bytecodeOffset);
+}
+
+ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
+{
+ auto opcodeID = vm()->interpreter->getOpcodeID(pc[0].u.opcode);
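+    // The ArithProfile lives in the instruction stream itself: op_negate keeps it
+    // in operand 3, while the binary arithmetic ops below keep it in operand 4.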
+ switch (opcodeID) {
+ case op_negate:
+ return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
+ case op_bitor:
+ case op_bitand:
+ case op_bitxor:
+ case op_add:
+ case op_mul:
+ case op_sub:
+ case op_div:
+ return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
+ default:
+ break;
+ }
+
+ return nullptr;
+}
+
+bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
+{
+ if (!hasBaselineJITProfiling())
+ return false;
+ ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
+ if (!profile)
+ return false;
+ return profile->tookSpecialFastPath();
+}
+
+#if ENABLE(JIT)
+DFG::CapabilityLevel CodeBlock::capabilityLevel()
+{
+ DFG::CapabilityLevel result = computeCapabilityLevel();
+ m_capabilityLevelState = result;
+ return result;
+}
+#endif
+
+void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
+{
+ if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
+ return;
+ const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
+ for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
+ // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
+ // the next op_profile_control_flow will give us the text range of a single basic block.
+ size_t startIdx = bytecodeOffsets[i];
+ RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow);
+ int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
+ int basicBlockEndOffset;
+ if (i + 1 < offsetsLength) {
+ size_t endIdx = bytecodeOffsets[i + 1];
+ RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow);
+ basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
+ } else {
+ basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
+            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
+ }
+
+ // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
+ // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
+ // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
+ // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
+ // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
+ // program. The condition:
+ // (basicBlockEndOffset < basicBlockStartOffset)
+ // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
+ // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
+ // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
+ // internal data structure, so if any of them execute, it will record the same textual basic block in the
+ // JavaScript program as executing.
+ // At the bytecode level, this situation looks like:
+ // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
+ // ...
+ // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
+ // ...
+ // m: op_profile_control_flow
+ if (basicBlockEndOffset < basicBlockStartOffset) {
+ RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
+ instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
+ continue;
+ }
+
+ BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
+
+ // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
+ // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
+ // This is necessary because in the original source text of a JavaScript program,
+ // function literals form new basic blocks boundaries, but they aren't represented
+ // inside the CodeBlock's instruction stream.
+ auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
+ const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
+ int functionStart = executable->typeProfilingStartOffset();
+ int functionEnd = executable->typeProfilingEndOffset();
+ if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
+ basicBlockLocation->insertGap(functionStart, functionEnd);
+ };
+
+ for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
+ insertFunctionGaps(executable);
+ for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
+ insertFunctionGaps(executable);
+
+ instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
+ }
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
+{
+ m_pcToCodeOriginMap = WTFMove(map);
+}
+
+std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
+{
+ if (m_pcToCodeOriginMap) {
+ if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
+ return codeOrigin;
+ }
+
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo* stub = *iter;
+ if (stub->containsPC(pc))
+ return std::optional<CodeOrigin>(stub->codeOrigin);
+ }
+
+ if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
+ return codeOrigin;
+
+ return std::nullopt;
+}
+#endif // ENABLE(JIT)
+
+std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
+{
+ std::optional<unsigned> bytecodeOffset;
+ JITCode::JITType jitType = this->jitType();
+ if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
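+        // LLInt and Baseline encode the bytecode offset in the call site index
+        // directly on 64-bit; on 32-bit the index holds an Instruction*, so convert.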
+#if USE(JSVALUE64)
+ bytecodeOffset = callSiteIndex.bits();
+#else
+ Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
+ bytecodeOffset = instruction - instructions().begin();
+#endif
+ } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
+#if ENABLE(DFG_JIT)
+ RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
+ CodeOrigin origin = codeOrigin(callSiteIndex);
+ bytecodeOffset = origin.bytecodeIndex;
+#else
+ RELEASE_ASSERT_NOT_REACHED();
+#endif
+ }
+
+ return bytecodeOffset;
+}
+
+int32_t CodeBlock::thresholdForJIT(int32_t threshold)
+{
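+    // Scale the tier-up threshold by past behavior: code that never reached the
+    // optimizing tiers waits four times longer, code that did tiers up twice as soon.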
+ switch (unlinkedCodeBlock()->didOptimize()) {
+ case MixedTriState:
+ return threshold;
+ case FalseTriState:
+ return threshold * 4;
+ case TrueTriState:
+ return threshold / 2;
+ }
+ ASSERT_NOT_REACHED();
+ return threshold;
+}
+
+void CodeBlock::jitAfterWarmUp()
+{
+ m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
+}
+
+void CodeBlock::jitSoon()
+{
+ m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
+}
+
+void CodeBlock::dumpMathICStats()
+{
+#if ENABLE(MATH_IC_STATS)
+ double numAdds = 0.0;
+ double totalAddSize = 0.0;
+ double numMuls = 0.0;
+ double totalMulSize = 0.0;
+ double numNegs = 0.0;
+ double totalNegSize = 0.0;
+ double numSubs = 0.0;
+ double totalSubSize = 0.0;
+
+ auto countICs = [&] (CodeBlock* codeBlock) {
+ for (JITAddIC* addIC : codeBlock->m_addICs) {
+ numAdds++;
+ totalAddSize += addIC->codeSize();
+ }
+
+ for (JITMulIC* mulIC : codeBlock->m_mulICs) {
+ numMuls++;
+ totalMulSize += mulIC->codeSize();
+ }
+
+ for (JITNegIC* negIC : codeBlock->m_negICs) {
+ numNegs++;
+ totalNegSize += negIC->codeSize();
+ }
+
+ for (JITSubIC* subIC : codeBlock->m_subICs) {
+ numSubs++;
+ totalSubSize += subIC->codeSize();
+ }
+
+ return false;
+ };
+ heap()->forEachCodeBlock(countICs);
+
+ dataLog("Num Adds: ", numAdds, "\n");
+ dataLog("Total Add size in bytes: ", totalAddSize, "\n");
+ dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
+ dataLog("\n");
+ dataLog("Num Muls: ", numMuls, "\n");
+ dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
+ dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
+ dataLog("\n");
+ dataLog("Num Negs: ", numNegs, "\n");
+ dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
+ dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
+ dataLog("\n");
+ dataLog("Num Subs: ", numSubs, "\n");
+ dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
+ dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
+
+ dataLog("-----------------------\n");
+#endif
+}
+
+BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
+{
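+    // Build the analysis without holding the lock, then install it under the lock;
+    // if another thread raced us and installed one first, ours is simply discarded.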
+ std::unique_ptr<BytecodeLivenessAnalysis> analysis = std::make_unique<BytecodeLivenessAnalysis>(this);
+ {
+ ConcurrentJSLocker locker(m_lock);
+ if (!m_livenessAnalysis)
+ m_livenessAnalysis = WTFMove(analysis);
+ return *m_livenessAnalysis;
+ }
+}
+
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index 0d9868079..2a2966460 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -27,54 +27,49 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeBlock_h
-#define CodeBlock_h
+#pragma once
#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
-#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
-#include "CodeBlockSet.h"
-#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
+#include "ConcurrentJSLock.h"
#include "DFGCommon.h"
-#include "DFGCommonData.h"
#include "DFGExitProfile.h"
-#include "DFGMinifiedGraph.h"
-#include "DFGOSREntry.h"
-#include "DFGOSRExit.h"
-#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
-#include "EvalCodeCache.h"
+#include "DirectEvalCodeCache.h"
+#include "EvalExecutable.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
+#include "FunctionExecutable.h"
#include "HandlerInfo.h"
-#include "ObjectAllocationProfile.h"
-#include "Options.h"
-#include "Operations.h"
-#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
-#include "JITWriteBarrier.h"
+#include "JITMathICForwards.h"
+#include "JSCell.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LazyOperandValueProfile.h"
-#include "ProfilerCompilation.h"
-#include "RegExpObject.h"
-#include "StructureStubInfo.h"
+#include "ModuleProgramExecutable.h"
+#include "ObjectAllocationProfile.h"
+#include "Options.h"
+#include "ProfilerJettisonReason.h"
+#include "ProgramExecutable.h"
+#include "PutPropertySlot.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
+#include <wtf/FastBitVector.h>
#include <wtf/FastMalloc.h>
-#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
@@ -83,33 +78,55 @@
namespace JSC {
+class BytecodeLivenessAnalysis;
+class CodeBlockSet;
class ExecState;
+class JSModuleEnvironment;
class LLIntOffsetsExtractor;
-class RepatchBuffer;
+class PCToCodeOriginMap;
+class RegisterAtOffsetList;
+class StructureStubInfo;
+
+enum class AccessType : int8_t;
-inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
+struct ArithProfile;
-static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
-class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
- WTF_MAKE_FAST_ALLOCATED;
+class CodeBlock : public JSCell {
+ typedef JSCell Base;
friend class BytecodeLivenessAnalysis;
friend class JIT;
friend class LLIntOffsetsExtractor;
+
+ class UnconditionalFinalizer : public JSC::UnconditionalFinalizer {
+ void finalizeUnconditionally() override;
+ };
+
+ class WeakReferenceHarvester : public JSC::WeakReferenceHarvester {
+ void visitWeakReferences(SlotVisitor&) override;
+ };
+
public:
enum CopyParsedBlockTag { CopyParsedBlock };
+
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ DECLARE_INFO;
+
protected:
- CodeBlock(CopyParsedBlockTag, CodeBlock& other);
-
- CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
+ CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other);
+ CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, RefPtr<SourceProvider>&&, unsigned sourceOffset, unsigned firstLineColumnOffset);
+
+ void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
+ void finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);
WriteBarrier<JSGlobalObject> m_globalObject;
- Heap* m_heap;
public:
- JS_EXPORT_PRIVATE virtual ~CodeBlock();
+ JS_EXPORT_PRIVATE ~CodeBlock();
UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
@@ -117,39 +134,74 @@ public:
CodeBlockHash hash() const;
bool hasHash() const;
bool isSafeToComputeHash() const;
+ CString hashAsStringIfPossible() const;
CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
- void dump(PrintStream&) const;
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const;
int numParameters() const { return m_numParameters; }
void setNumParameters(int newValue);
+ int numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; }
+
+ int numCalleeLocals() const { return m_numCalleeLocals; }
+
int* addressOfNumParameters() { return &m_numParameters; }
static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
- CodeBlock* alternative() { return m_alternative.get(); }
- PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
- void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
+ CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
+ void setAlternative(VM&, CodeBlock*);
+
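+    // Visits this CodeBlock along with every alternative and OSR-entry block
+    // reachable from it.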
+ template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
+ {
+ Functor f(std::forward<Functor>(functor));
+ Vector<CodeBlock*, 4> codeBlocks;
+ codeBlocks.append(this);
+
+ while (!codeBlocks.isEmpty()) {
+ CodeBlock* currentCodeBlock = codeBlocks.takeLast();
+ f(currentCodeBlock);
+
+ if (CodeBlock* alternative = currentCodeBlock->alternative())
+ codeBlocks.append(alternative);
+ if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
+ codeBlocks.append(osrEntryBlock);
+ }
+ }
CodeSpecializationKind specializationKind() const
{
return specializationFromIsConstruct(m_isConstructor);
}
-
- CodeBlock* baselineAlternative();
+
+ CodeBlock* alternativeForJettison();
+ JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();
// FIXME: Get rid of this.
// https://bugs.webkit.org/show_bug.cgi?id=123677
CodeBlock* baselineVersion();
- void visitAggregate(SlotVisitor&);
-
- void dumpBytecode(PrintStream& = WTF::dataFile());
- void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
+ static size_t estimatedSize(JSCell*);
+ static void visitChildren(JSCell*, SlotVisitor&);
+ void visitChildren(SlotVisitor&);
+ void visitWeakly(SlotVisitor&);
+ void clearVisitWeaklyHasBeenCalled();
+
+ void dumpSource();
+ void dumpSource(PrintStream&);
+
+ void dumpBytecode();
+ void dumpBytecode(PrintStream&);
+ void dumpBytecode(
+ PrintStream&, unsigned bytecodeOffset,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+ void dumpExceptionHandlers(PrintStream&);
void printStructures(PrintStream&, const Instruction*);
void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
+ void dumpMathICStats();
+
bool isStrictMode() const { return m_isStrictMode; }
ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
@@ -169,71 +221,85 @@ public:
return index >= m_numVars;
}
- HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
+ HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+ HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
+ void removeExceptionHandlerForCallSite(CallSiteIndex);
unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
- int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+ int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
-#if ENABLE(JIT)
- StructureStubInfo* addStubInfo();
- Bag<StructureStubInfo>::iterator begin() { return m_stubInfos.begin(); }
- Bag<StructureStubInfo>::iterator end() { return m_stubInfos.end(); }
+ std::optional<unsigned> bytecodeOffsetFromCallSiteIndex(CallSiteIndex);
- void resetStub(StructureStubInfo&);
+ void getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result);
+ void getStubInfoMap(StubInfoMap& result);
- void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
+ void getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result);
+ void getCallLinkInfoMap(CallLinkInfoMap& result);
- ByValInfo& getByValInfo(unsigned bytecodeIndex)
- {
- return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
- }
+ void getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result);
+ void getByValInfoMap(ByValInfoMap& result);
+
+#if ENABLE(JIT)
+ StructureStubInfo* addStubInfo(AccessType);
+ JITAddIC* addJITAddIC(ArithProfile*);
+ JITMulIC* addJITMulIC(ArithProfile*);
+ JITNegIC* addJITNegIC(ArithProfile*);
+ JITSubIC* addJITSubIC(ArithProfile*);
+ Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
+ Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
+
+ // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
+ // stub info.
+ StructureStubInfo* findStubInfo(CodeOrigin);
- CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
- {
- return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
- }
+ ByValInfo* addByValInfo();
- CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
- {
- ASSERT(!JITCode::isOptimizingJIT(jitType()));
- return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
- }
+ CallLinkInfo* addCallLinkInfo();
+ Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
+ Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
+
+ // This is a slow function call used primarily for compiling OSR exits in the case
+ // that there had been inlining. Chances are if you want to use this, you're really
+ // looking for a CallLinkInfoMap to amortize the cost of calling this.
+ CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
+
+ // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
+ // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
+ // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
+ // would be able to get rid of this silly function.
+ // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
+ void resetJITData();
#endif // ENABLE(JIT)
void unlinkIncomingCalls();
#if ENABLE(JIT)
- void unlinkCalls();
-
void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
-
- bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
- {
- return m_incomingCalls.isOnList(incoming);
- }
+ void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
#endif // ENABLE(JIT)
-#if ENABLE(LLINT)
void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
-#endif // ENABLE(LLINT)
- void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+ void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
{
- m_jitCodeMap = jitCodeMap;
+ m_jitCodeMap = WTFMove(jitCodeMap);
}
CompactJITCodeMap* jitCodeMap()
{
return m_jitCodeMap.get();
}
+ static void clearLLIntGetByIdCache(Instruction*);
+
unsigned bytecodeOffset(Instruction* returnAddress)
{
RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
return static_cast<Instruction*>(returnAddress) - instructions().begin();
}
- bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
+ typedef JSC::Instruction Instruction;
+ typedef RefCountedArray<Instruction>& UnpackedInstructions;
unsigned numberOfInstructions() const { return m_instructions.size(); }
RefCountedArray<Instruction>& instructions() { return m_instructions; }
@@ -245,28 +311,19 @@ public:
unsigned instructionCount() const { return m_instructions.size(); }
- int argumentIndexAfterCapture(size_t argument);
-
- bool hasSlowArguments();
- const SlowArgument* machineSlowArguments();
-
- // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
- void install();
-
// Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
- PassRefPtr<CodeBlock> newReplacement();
+ CodeBlock* newReplacement();
- void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
+ void setJITCode(Ref<JITCode>&& code)
{
- ASSERT(m_heap->isDeferred());
- m_heap->reportExtraMemoryCost(code->size());
- ConcurrentJITLocker locker(m_lock);
+ ASSERT(heap()->isDeferred());
+ heap()->reportExtraMemoryAllocated(code->size());
+ ConcurrentJSLocker locker(m_lock);
WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
- m_jitCode = code;
- m_jitCodeWithArityCheck = codeWithArityCheck;
+ m_jitCode = WTFMove(code);
}
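
setJITCode() publishes freshly compiled code under the lock, with a store-store fence that its own comment calls paranoia. A hedged standalone sketch of the publish idiom in portable C++ (JITArtifact and the globals are hypothetical); here the release/acquire pair carries the ordering that, for lock-holders, the lock already guarantees:

    #include <atomic>
    #include <mutex>

    struct JITArtifact { int payload = 0; };

    std::mutex g_lock;
    std::atomic<JITArtifact*> g_code { nullptr };

    void publishCode(JITArtifact* code)
    {
        std::lock_guard<std::mutex> locker(g_lock);
        // Release ordering makes the artifact's initializing stores visible to
        // any thread that acquire-loads g_code, even one not holding the lock.
        g_code.store(code, std::memory_order_release);
    }

    JITArtifact* currentCode()
    {
        return g_code.load(std::memory_order_acquire);
    }
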
- PassRefPtr<JITCode> jitCode() { return m_jitCode; }
- MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
+ RefPtr<JITCode> jitCode() { return m_jitCode; }
+ static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
JITCode::JITType jitType() const
{
JITCode* jitCode = m_jitCode.get();
@@ -282,103 +339,44 @@ public:
}
#if ENABLE(JIT)
- virtual CodeBlock* replacement() = 0;
+ CodeBlock* replacement();
- virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
- DFG::CapabilityLevel capabilityLevel()
- {
- DFG::CapabilityLevel result = capabilityLevelInternal();
- m_capabilityLevelState = result;
- return result;
- }
- DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
+ DFG::CapabilityLevel computeCapabilityLevel();
+ DFG::CapabilityLevel capabilityLevel();
+ DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }
bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
- void jettison(ReoptimizationMode = DontCountReoptimization);
+ void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
- ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
+ ExecutableBase* ownerExecutable() const { return m_ownerExecutable.get(); }
+ ScriptExecutable* ownerScriptExecutable() const { return jsCast<ScriptExecutable*>(m_ownerExecutable.get()); }
- void setVM(VM* vm) { m_vm = vm; }
- VM* vm() { return m_vm; }
+ VM* vm() const { return m_vm; }
void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
VirtualRegister thisRegister() const { return m_thisRegister; }
- bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
bool usesEval() const { return m_unlinkedCode->usesEval(); }
- void setArgumentsRegister(VirtualRegister argumentsRegister)
- {
- ASSERT(argumentsRegister.isValid());
- m_argumentsRegister = argumentsRegister;
- ASSERT(usesArguments());
- }
- VirtualRegister argumentsRegister() const
- {
- ASSERT(usesArguments());
- return m_argumentsRegister;
- }
- VirtualRegister uncheckedArgumentsRegister()
- {
- if (!usesArguments())
- return VirtualRegister();
- return argumentsRegister();
- }
- void setActivationRegister(VirtualRegister activationRegister)
- {
- m_activationRegister = activationRegister;
- }
-
- VirtualRegister activationRegister() const
- {
- ASSERT(needsFullScopeChain());
- return m_activationRegister;
- }
-
- VirtualRegister uncheckedActivationRegister()
+ void setScopeRegister(VirtualRegister scopeRegister)
{
- if (!needsFullScopeChain())
- return VirtualRegister();
- return activationRegister();
+ ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
+ m_scopeRegister = scopeRegister;
}
- bool usesArguments() const { return m_argumentsRegister.isValid(); }
-
- bool needsActivation() const
+ VirtualRegister scopeRegister() const
{
- return m_needsActivation;
+ return m_scopeRegister;
}
- unsigned captureCount() const
+ CodeType codeType() const
{
- if (!symbolTable())
- return 0;
- return symbolTable()->captureCount();
- }
-
- int captureStart() const
- {
- if (!symbolTable())
- return 0;
- return symbolTable()->captureStart();
- }
-
- int captureEnd() const
- {
- if (!symbolTable())
- return 0;
- return symbolTable()->captureEnd();
+ return static_cast<CodeType>(m_codeType);
}
- bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
-
- int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
- int framePointerOffsetToGetActivationRegisters();
-
- CodeType codeType() const { return m_unlinkedCode->codeType(); }
PutPropertySlot::Context putByIdContext() const
{
if (codeType() == EvalCode)
@@ -393,20 +391,8 @@ public:
size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
- void clearEvalCache();
-
String nameForRegister(VirtualRegister);
-#if ENABLE(JIT)
- void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
- size_t numberOfByValInfos() const { return m_byValInfos.size(); }
- ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
-
- void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.resizeToFit(size); }
- size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
- CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
-#endif
-
unsigned numberOfArgumentValueProfiles()
{
ASSERT(m_numParameters >= 0);
@@ -422,20 +408,12 @@ public:
unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
- ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
- {
- ValueProfile* result = binarySearch<ValueProfile, int>(
- m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
- getValueProfileBytecodeOffset<ValueProfile>);
- ASSERT(result->m_bytecodeOffset != -1);
- ASSERT(instructions()[bytecodeOffset + opcodeLength(
- m_vm->interpreter->getOpcodeID(
- instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
- return result;
- }
- SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
+ ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
+ SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
{
- return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
+ if (ValueProfile* valueProfile = valueProfileForBytecodeOffset(bytecodeOffset))
+ return valueProfile->computeUpdatedPrediction(locker);
+ return SpecNone;
}
unsigned totalNumberOfValueProfiles()
@@ -449,25 +427,16 @@ public:
return valueProfile(index - numberOfArgumentValueProfiles());
}
- RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
- {
- m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_rareCaseProfiles.last();
- }
+ RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
- RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
- RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return tryBinarySearch<RareCaseProfile, int>(
- m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
- }
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
+ unsigned rareCaseProfileCountForBytecodeOffset(int bytecodeOffset);
bool likelyToTakeSlowCase(int bytecodeOffset)
{
if (!hasBaselineJITProfiling())
return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
return value >= Options::likelyToTakeSlowCaseMinimumCount();
}
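
likelyToTakeSlowCase() and couldTakeSlowCase() are the same test with different cutoffs: compare a per-bytecode slow-path counter against a tuning option. A tiny sketch of the heuristic, with a hypothetical constant standing in for Options::likelyToTakeSlowCaseMinimumCount():

    constexpr unsigned kSlowCaseThreshold = 100; // hypothetical tuning value

    struct RareCaseProfile { unsigned counter = 0; };

    // A null profile means the slow path never executed, so the count is zero --
    // the same behavior rareCaseProfileCountForBytecodeOffset() provides.
    bool likelyToTakeSlowCase(const RareCaseProfile* profile)
    {
        unsigned count = profile ? profile->counter : 0;
        return count >= kSlowCaseThreshold;
    }
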
@@ -475,68 +444,22 @@ public:
{
if (!hasBaselineJITProfiling())
return false;
- unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
return value >= Options::couldTakeSlowCaseMinimumCount();
}
- RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
- {
- m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_specialFastCaseProfiles.last();
- }
- unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
- RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
- RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
- {
- return tryBinarySearch<RareCaseProfile, int>(
- m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
- }
+ ArithProfile* arithProfileForBytecodeOffset(int bytecodeOffset);
+ ArithProfile* arithProfileForPC(Instruction*);
- bool likelyToTakeSpecialFastCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
- }
-
- bool couldTakeSpecialFastCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
- }
-
- bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount - specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount();
- }
-
- bool likelyToTakeAnySlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
- unsigned value = slowCaseCount + specialFastCaseCount;
- return value >= Options::likelyToTakeSlowCaseMinimumCount();
- }
+ bool couldTakeSpecialFastCase(int bytecodeOffset);
unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
- ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
- {
- m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
- return &m_arrayProfiles.last();
- }
+ ArrayProfile* addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
+ ArrayProfile* addArrayProfile(unsigned bytecodeOffset);
+ ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+ ArrayProfile* getOrAddArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
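
Many accessors above come in pairs: one taking a const ConcurrentJSLocker& and one that acquires the lock itself. The locker parameter is a compile-time witness that the caller already holds m_lock. A standalone sketch of the idiom with std::mutex (ProfileTable is hypothetical):

    #include <deque>
    #include <mutex>

    class ProfileTable {
    public:
        // The lock_guard reference proves the caller locked m_lock; there is no
        // way to construct one without doing so. Mirrors the ConcurrentJSLocker&
        // overloads above.
        int* add(const std::lock_guard<std::mutex>&, int value)
        {
            m_entries.push_back(value);
            return &m_entries.back(); // deque: stable addresses across push_back
        }

        int* add(int value)
        {
            std::lock_guard<std::mutex> locker(m_lock);
            return add(locker, value);
        }

        std::mutex m_lock;

    private:
        std::deque<int> m_entries;
    };
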
// Exception handling support
@@ -547,10 +470,7 @@ public:
bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
#if ENABLE(DFG_JIT)
- Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
- {
- return m_jitCode->dfgCommon()->codeOrigins;
- }
+ Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins();
// Having code origins implies that there has been some inlining.
bool hasCodeOrigins()
@@ -558,30 +478,34 @@ public:
return JITCode::isOptimizingJIT(jitType());
}
- bool canGetCodeOrigin(unsigned index)
+ bool canGetCodeOrigin(CallSiteIndex index)
{
if (!hasCodeOrigins())
return false;
- return index < codeOrigins().size();
+ return index.bits() < codeOrigins().size();
}
- CodeOrigin codeOrigin(unsigned index)
+ CodeOrigin codeOrigin(CallSiteIndex index)
{
- return codeOrigins()[index];
+ return codeOrigins()[index.bits()];
}
bool addFrequentExitSite(const DFG::FrequentExitSite& site)
{
ASSERT(JITCode::isBaselineCode(jitType()));
- ConcurrentJITLocker locker(m_lock);
- return m_exitProfile.add(locker, site);
+ ConcurrentJSLocker locker(m_lock);
+ return m_exitProfile.add(locker, this, site);
}
-
- bool hasExitSite(const DFG::FrequentExitSite& site) const
+
+ bool hasExitSite(const ConcurrentJSLocker& locker, const DFG::FrequentExitSite& site) const
{
- ConcurrentJITLocker locker(m_lock);
return m_exitProfile.hasExitSite(locker, site);
}
+ bool hasExitSite(const DFG::FrequentExitSite& site) const
+ {
+ ConcurrentJSLocker locker(m_lock);
+ return hasExitSite(locker, site);
+ }
DFG::ExitProfile& exitProfile() { return m_exitProfile; }
@@ -589,44 +513,26 @@ public:
{
return m_lazyOperandValueProfiles;
}
-#else // ENABLE(DFG_JIT)
- bool addFrequentExitSite(const DFG::FrequentExitSite&)
- {
- return false;
- }
#endif // ENABLE(DFG_JIT)
// Constant Pool
#if ENABLE(DFG_JIT)
size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
- size_t numberOfDFGIdentifiers() const
- {
- if (!JITCode::isOptimizingJIT(jitType()))
- return 0;
-
- return m_jitCode->dfgCommon()->dfgIdentifiers.size();
- }
-
- const Identifier& identifier(int index) const
- {
- size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
- if (static_cast<unsigned>(index) < unlinkedIdentifiers)
- return m_unlinkedCode->identifier(index);
- ASSERT(JITCode::isOptimizingJIT(jitType()));
- return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
- }
+ size_t numberOfDFGIdentifiers() const;
+ const Identifier& identifier(int index) const;
#else
size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif
Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
- size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
+ Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
unsigned addConstant(JSValue v)
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
+ m_constantRegisters.last().set(m_globalObject->vm(), this, v);
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
@@ -634,19 +540,19 @@ public:
{
unsigned result = m_constantRegisters.size();
m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
- bool findConstant(JSValue, unsigned& result);
- unsigned addOrFindConstant(JSValue);
WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
- ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+ static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+ ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
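
Constant registers share one operand index space with locals: anything at or above FirstConstantRegisterIndex is a constant, and subtracting that base yields the pool slot. A sketch of the encoding with a hypothetical base value:

    #include <cassert>
    #include <vector>

    constexpr int kFirstConstantIndex = 0x40000000; // hypothetical base

    struct ConstantPool {
        std::vector<double> constants;

        static bool isConstantIndex(int index) { return index >= kFirstConstantIndex; }

        double get(int index) const
        {
            assert(isConstantIndex(index));
            return constants[index - kFirstConstantIndex]; // rebase into the pool
        }
    };
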
FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
int numberOfFunctionDecls() { return m_functionDecls.size(); }
FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
-
+
RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
unsigned numberOfConstantBuffers() const
@@ -673,15 +579,19 @@ public:
return constantBufferAsVector(index).data();
}
+ Heap* heap() const { return &m_vm->heap; }
JSGlobalObject* globalObject() { return m_globalObject.get(); }
JSGlobalObject* globalObjectFor(CodeOrigin);
BytecodeLivenessAnalysis& livenessAnalysis()
{
- if (!m_livenessAnalysis)
- m_livenessAnalysis = std::make_unique<BytecodeLivenessAnalysis>(this);
- return *m_livenessAnalysis;
+ {
+ ConcurrentJSLocker locker(m_lock);
+ if (!!m_livenessAnalysis)
+ return *m_livenessAnalysis;
+ }
+ return livenessAnalysisSlow();
}
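
livenessAnalysis() is double-checked lazy initialization: the inline fast path takes the lock only long enough to test the pointer, and the out-of-line slow path re-checks before building, since another thread may have won the race. The same shape in standalone C++ (Analysis and Holder are hypothetical):

    #include <memory>
    #include <mutex>

    struct Analysis { /* expensive-to-compute results */ };

    class Holder {
    public:
        Analysis& analysis()
        {
            {
                std::lock_guard<std::mutex> locker(m_lock);
                if (m_analysis)
                    return *m_analysis; // common case: already built
            }
            return analysisSlow();
        }

    private:
        Analysis& analysisSlow()
        {
            std::lock_guard<std::mutex> locker(m_lock);
            if (!m_analysis) // re-check: we may have lost the race
                m_analysis = std::make_unique<Analysis>();
            return *m_analysis;
        }

        std::mutex m_lock;
        std::unique_ptr<Analysis> m_analysis;
    };
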
void validate();
@@ -702,10 +612,7 @@ public:
StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
-
- SymbolTable* symbolTable() const { return m_symbolTable.get(); }
-
- EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
+ DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }
enum ShrinkMode {
// Shrink prior to generating machine code that may point directly into vectors.
@@ -731,21 +638,18 @@ public:
m_llintExecuteCounter.deferIndefinitely();
}
- void jitAfterWarmUp()
- {
- m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
- }
-
- void jitSoon()
- {
- m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
- }
+ int32_t thresholdForJIT(int32_t threshold);
+ void jitAfterWarmUp();
+ void jitSoon();
- const ExecutionCounter& llintExecuteCounter() const
+ const BaselineExecutionCounter& llintExecuteCounter() const
{
return m_llintExecuteCounter;
}
+ typedef HashMap<Structure*, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
+ StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; }
+
// Functions for controlling when tiered compilation kicks in. This
// controls both when the optimizing compiler is invoked and when OSR
// entry happens. Two triggers exist: the loop trigger and the return
@@ -767,9 +671,13 @@ public:
// When we observe a lot of speculation failures, we trigger a
// reoptimization. But each time, we increase the optimization trigger
// to avoid thrashing.
- unsigned reoptimizationRetryCounter() const;
+ JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
void countReoptimization();
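
The retry counter implements exponential backoff: each reoptimization bumps it, and the next optimization threshold scales up accordingly, so a code block that keeps deoptimizing waits longer and longer before being recompiled. A hypothetical sketch of the policy (TierUpPolicy is not the real counter layout):

    #include <cstdint>

    struct TierUpPolicy {
        uint16_t retryCounter = 0;

        void countReoptimization()
        {
            if (retryCounter < 16) // cap the exponent
                retryCounter++;
        }

        uint32_t nextThreshold(uint32_t baseThreshold) const
        {
            return baseThreshold << retryCounter; // double the wait per retry
        }
    };
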
#if ENABLE(JIT)
+ static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
+ static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
+ size_t calleeSaveSpaceAsVirtualRegisters();
+
unsigned numberOfDFGCompiles();
int32_t codeTypeThresholdMultiplier() const;
@@ -781,11 +689,11 @@ public:
return &m_jitExecuteCounter.m_counter;
}
- static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
- static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
- static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
+ static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
+ static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
+ static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
- const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+ const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
@@ -855,7 +763,14 @@ public:
uint32_t exitCountThresholdForReoptimizationFromLoop();
bool shouldReoptimizeNow();
bool shouldReoptimizeFromLoopNow();
+
+ void setCalleeSaveRegisters(RegisterSet);
+ void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);
+
+ RegisterAtOffsetList* calleeSaveRegisters() const { return m_calleeSaveRegisters.get(); }
#else // No JIT
+ static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
+ static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 0; }
void optimizeAfterWarmUp() { }
unsigned numberOfDFGCompiles() { return 0; }
#endif
@@ -866,10 +781,11 @@ public:
void updateAllPredictions();
unsigned frameRegisterCount();
+ int stackPointerOffset();
bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
- int hasDebuggerRequests() const { return !!m_debuggerRequests; }
+ bool hasDebuggerRequests() const { return m_debuggerRequests; }
void* debuggerRequestsAddress() { return &m_debuggerRequests; }
void addBreakpoint(unsigned numBreakpoints);
@@ -885,13 +801,18 @@ public:
};
void setSteppingMode(SteppingMode);
- void clearDebuggerRequests() { m_debuggerRequests = 0; }
+ void clearDebuggerRequests()
+ {
+ m_steppingMode = SteppingModeDisabled;
+ m_numBreakpoints = 0;
+ }
+ bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }
+
// FIXME: Make these remaining members private.
- int m_numCalleeRegisters;
+ int m_numCalleeLocals;
int m_numVars;
- bool m_isConstructor;
// This is intentionally public; it's the responsibility of anyone doing any
// of the following to hold the lock:
@@ -909,21 +830,67 @@ public:
// Another exception to the rules is that the GC can do whatever it wants
// without holding any locks, because the GC is guaranteed to wait until any
// concurrent compilation threads finish what they're doing.
- mutable ConcurrentJITLock m_lock;
-
- bool m_shouldAlwaysBeInlined;
- bool m_allTransitionsHaveBeenMarked; // Initialized and used on every GC.
-
- bool m_didFailFTLCompilation;
+ mutable ConcurrentJSLock m_lock;
+
+ bool m_visitWeaklyHasBeenCalled;
+
+ bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
+
+#if ENABLE(JIT)
+ unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
+#endif
+
+ bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+
+ bool m_didFailJITCompilation : 1;
+ bool m_didFailFTLCompilation : 1;
+ bool m_hasBeenCompiledWithFTL : 1;
+ bool m_isConstructor : 1;
+ bool m_isStrictMode : 1;
+ unsigned m_codeType : 2; // CodeType
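
The flags here show two packing strategies side by side. m_shouldAlwaysBeInlined stays a whole bool because JIT-emitted machine code stores to it directly, and a store needs an addressable byte; the rest collapse into one-bit fields (plus a 2-bit CodeType) to keep CodeBlock small. A standalone illustration:

    #include <cstddef>

    struct Flags {
        bool storedByMachineCode;   // whole byte: has an address the JIT can store to
        bool markedThisGC : 1;      // bitfields pack into shared storage...
        bool didFailCompile : 1;
        unsigned codeType : 2;      // ...including small enums widened to unsigned
    };

    // offsetof is only meaningful for the addressable member; taking the address
    // of a bitfield does not compile at all.
    static_assert(offsetof(Flags, storedByMachineCode) == 0, "JIT-visible flag");
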
// Internal methods for use by validation code. It would be private if it wasn't
// for the fact that we use it from anonymous namespaces.
void beginValidationDidFail();
NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
+ struct RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ Vector<HandlerInfo> m_exceptionHandlers;
+
+ // Buffers used for large array literals
+ Vector<Vector<JSValue>> m_constantBuffers;
+
+ // Jump Tables
+ Vector<SimpleJumpTable> m_switchJumpTables;
+ Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+ DirectEvalCodeCache m_directEvalCodeCache;
+ };
+
+ void clearExceptionHandlers()
+ {
+ if (m_rareData)
+ m_rareData->m_exceptionHandlers.clear();
+ }
+
+ void appendExceptionHandler(const HandlerInfo& handler)
+ {
+ createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
+ m_rareData->m_exceptionHandlers.append(handler);
+ }
+
+ CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);
+
+#if ENABLE(JIT)
+ void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
+ std::optional<CodeOrigin> findPC(void* pc);
+#endif
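
findPC() answers "which CodeOrigin does this machine PC belong to?". The usual structure behind such a query is a table of code ranges sorted by start address, searched with upper_bound. A hedged sketch (OriginEntry is hypothetical, not the PCToCodeOriginMap layout):

    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <vector>

    struct OriginEntry { uintptr_t startPC; unsigned bytecodeIndex; };

    // Entries sorted by startPC; return the last entry starting at or below pc,
    // i.e. the code range the instruction pointer falls into.
    std::optional<unsigned> findOrigin(const std::vector<OriginEntry>& map, uintptr_t pc)
    {
        auto it = std::upper_bound(map.begin(), map.end(), pc,
            [](uintptr_t value, const OriginEntry& entry) { return value < entry.startPC; });
        if (it == map.begin())
            return std::nullopt; // pc precedes all generated code
        return std::prev(it)->bytecodeIndex;
    }
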
+
protected:
- virtual void visitWeakReferences(SlotVisitor&) override;
- virtual void finalizeUnconditionally() override;
+ void finalizeLLIntInlineCaches();
+ void finalizeBaselineJITInlineCaches();
#if ENABLE(DFG_JIT)
void tallyFrequentExitSites();
@@ -933,6 +900,8 @@ protected:
private:
friend class CodeBlockSet;
+
+ BytecodeLivenessAnalysis& livenessAnalysisSlow();
CodeBlock* specialOSREntryBlockOrNull();
@@ -940,299 +909,147 @@ private:
double optimizationThresholdScalingFactor();
-#if ENABLE(JIT)
- ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
-#endif
-
void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
- void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
+ void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation);
+
+ void replaceConstant(int index, JSValue value)
{
- size_t count = constants.size();
- m_constantRegisters.resize(count);
- for (size_t i = 0; i < count; i++)
- m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
+ ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
+ m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), this, value);
}
- void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, const StubInfoMap& = StubInfoMap());
+ void dumpBytecode(
+ PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
+ const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
CString registerName(int r) const;
+ CString constantName(int index) const;
void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
enum CacheDumpMode { DumpCaches, DontDumpCaches };
- void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling);
+ void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
- void printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
- {
- out.printf("[%4d] %-17s ", location, op);
- }
-
- void printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
- {
- printLocationAndOp(out, exec, location, it, op);
- out.printf("%s", registerName(operand).data());
- }
+ void printPutByIdCacheStatus(PrintStream&, int location, const StubInfoMap&);
+ void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+ void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
-
-#if ENABLE(DFG_JIT)
- bool shouldImmediatelyAssumeLivenessDuringScan()
- {
- // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
- // their weak references go stale. So if a baseline JIT CodeBlock gets
- // scanned, we can assume that it's live.
- if (!JITCode::isOptimizingJIT(jitType()))
- return true;
-
- // For simplicity, we don't attempt to jettison code blocks during GC if
- // they are executing. Instead we strongly mark their weak references to
- // allow them to continue to execute soundly.
- if (m_mayBeExecuting)
- return true;
-
- if (Options::forceDFGCodeBlockLiveness())
- return true;
+ void dumpArithProfile(PrintStream&, ArithProfile*, bool& hasPrintedProfiling);
- return false;
- }
-#else
- bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
-#endif
+ bool shouldVisitStrongly(const ConcurrentJSLocker&);
+ bool shouldJettisonDueToWeakReference();
+ bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&);
- void propagateTransitions(SlotVisitor&);
- void determineLiveness(SlotVisitor&);
+ void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&);
+ void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&);
- void stronglyVisitStrongReferences(SlotVisitor&);
- void stronglyVisitWeakReferences(SlotVisitor&);
+ void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&);
+ void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&);
+ void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&);
+
+ std::chrono::milliseconds timeSinceCreation()
+ {
+ return std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::steady_clock::now() - m_creationTime);
+ }
void createRareDataIfNecessary()
{
if (!m_rareData)
- m_rareData = adoptPtr(new RareData);
+ m_rareData = std::make_unique<RareData>();
}
-
-#if ENABLE(JIT)
- void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
- void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
-#endif
+
+ void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&);
+
WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
int m_numParameters;
+ int m_numberOfArgumentsToSkip { 0 };
union {
unsigned m_debuggerRequests;
struct {
+ unsigned m_hasDebuggerStatement : 1;
unsigned m_steppingMode : 1;
- unsigned m_numBreakpoints : 31;
+ unsigned m_numBreakpoints : 30;
};
};
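
This union is what makes hasDebuggerRequests() a single word test: the bitfields alias m_debuggerRequests, so "any stepping mode, breakpoint, or debugger statement?" is one load-and-compare. A standalone sketch (reading the inactive union member is a compiler extension that this pattern relies on):

    union DebuggerRequests {
        unsigned word;
        struct Bits {
            unsigned hasDebuggerStatement : 1;
            unsigned steppingMode : 1;
            unsigned numBreakpoints : 30;
        } bits;
    };

    bool hasAnyRequests(const DebuggerRequests& requests)
    {
        return requests.word != 0; // one test covers all three fields
    }
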
- WriteBarrier<ScriptExecutable> m_ownerExecutable;
+ WriteBarrier<ExecutableBase> m_ownerExecutable;
VM* m_vm;
RefCountedArray<Instruction> m_instructions;
- WriteBarrier<SymbolTable> m_symbolTable;
VirtualRegister m_thisRegister;
- VirtualRegister m_argumentsRegister;
- VirtualRegister m_activationRegister;
-
- bool m_isStrictMode;
- bool m_needsActivation;
- bool m_mayBeExecuting;
- uint8_t m_visitAggregateHasBeenCalled;
+ VirtualRegister m_scopeRegister;
+ mutable CodeBlockHash m_hash;
RefPtr<SourceProvider> m_source;
unsigned m_sourceOffset;
unsigned m_firstLineColumnOffset;
- unsigned m_codeType;
-#if ENABLE(LLINT)
- Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
+ RefCountedArray<LLIntCallLinkInfo> m_llintCallLinkInfos;
SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
-#endif
+ StructureWatchpointMap m_llintGetByIdWatchpointMap;
RefPtr<JITCode> m_jitCode;
- MacroAssemblerCodePtr m_jitCodeWithArityCheck;
#if ENABLE(JIT)
+ std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
Bag<StructureStubInfo> m_stubInfos;
- Vector<ByValInfo> m_byValInfos;
- Vector<CallLinkInfo> m_callLinkInfos;
+ Bag<JITAddIC> m_addICs;
+ Bag<JITMulIC> m_mulICs;
+ Bag<JITNegIC> m_negICs;
+ Bag<JITSubIC> m_subICs;
+ Bag<ByValInfo> m_byValInfos;
+ Bag<CallLinkInfo> m_callLinkInfos;
SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
+ SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
+ std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
#endif
- OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+ std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;
#if ENABLE(DFG_JIT)
// This is relevant to non-DFG code blocks that serve as the profiled code block
// for DFG code blocks.
DFG::ExitProfile m_exitProfile;
CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
- Vector<ValueProfile> m_argumentValueProfiles;
- Vector<ValueProfile> m_valueProfiles;
+ RefCountedArray<ValueProfile> m_argumentValueProfiles;
+ RefCountedArray<ValueProfile> m_valueProfiles;
SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
- SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
- Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
+ RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles;
ArrayProfileVector m_arrayProfiles;
- Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
+ RefCountedArray<ObjectAllocationProfile> m_objectAllocationProfiles;
// Constant Pool
COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
// TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
// it, so we're stuck with it for now.
Vector<WriteBarrier<Unknown>> m_constantRegisters;
- Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
- Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
+ Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
+ RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
+ RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;
- RefPtr<CodeBlock> m_alternative;
+ WriteBarrier<CodeBlock> m_alternative;
- ExecutionCounter m_llintExecuteCounter;
+ BaselineExecutionCounter m_llintExecuteCounter;
- ExecutionCounter m_jitExecuteCounter;
- int32_t m_totalJITExecutions;
+ BaselineExecutionCounter m_jitExecuteCounter;
uint32_t m_osrExitCounter;
uint16_t m_optimizationDelayCounter;
uint16_t m_reoptimizationRetryCounter;
-
- mutable CodeBlockHash m_hash;
-
- std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
-
- struct RareData {
- WTF_MAKE_FAST_ALLOCATED;
- public:
- Vector<HandlerInfo> m_exceptionHandlers;
-
- // Buffers used for large array literals
- Vector<Vector<JSValue>> m_constantBuffers;
-
- // Jump Tables
- Vector<SimpleJumpTable> m_switchJumpTables;
- Vector<StringJumpTable> m_stringSwitchJumpTables;
-
- EvalCodeCache m_evalCodeCache;
- };
-#if COMPILER(MSVC)
- friend void WTF::deleteOwnedPtr<RareData>(RareData*);
-#endif
- OwnPtr<RareData> m_rareData;
-#if ENABLE(JIT)
- DFG::CapabilityLevel m_capabilityLevelState;
-#endif
-};
-
-// Program code is not marked by any function, so we make the global object
-// responsible for marking it.
-
-class GlobalCodeBlock : public CodeBlock {
-protected:
- GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
- {
- }
-
- GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
- {
- }
-};
-
-class ProgramCodeBlock : public GlobalCodeBlock {
-public:
- ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
- {
- }
- ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
- {
- }
+ std::chrono::steady_clock::time_point m_creationTime;
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
-};
-
-class EvalCodeBlock : public GlobalCodeBlock {
-public:
- EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
- : GlobalCodeBlock(CopyParsedBlock, other)
- {
- }
-
- EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
- : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
- {
- }
-
- const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
- unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
-
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
-
-private:
- UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
-};
+ std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
-class FunctionCodeBlock : public CodeBlock {
-public:
- FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
- : CodeBlock(CopyParsedBlock, other)
- {
- }
+ std::unique_ptr<RareData> m_rareData;
- FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
- {
- }
-
-#if ENABLE(JIT)
-protected:
- virtual CodeBlock* replacement() override;
- virtual DFG::CapabilityLevel capabilityLevelInternal() override;
-#endif
+ UnconditionalFinalizer m_unconditionalFinalizer;
+ WeakReferenceHarvester m_weakReferenceHarvester;
};
-inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
-{
- RELEASE_ASSERT(inlineCallFrame);
- ExecutableBase* executable = inlineCallFrame->executable.get();
- RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
- return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
-}
-
-inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
-{
- if (codeOrigin.inlineCallFrame)
- return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
- return baselineCodeBlock;
-}
-
-inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
-{
- if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
- return CallFrame::argumentOffset(argument);
-
- const SlowArgument* slowArguments = symbolTable()->slowArguments();
- if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
- return CallFrame::argumentOffset(argument);
-
- ASSERT(slowArguments[argument].status == SlowArgument::Captured);
- return slowArguments[argument].index;
-}
-
-inline bool CodeBlock::hasSlowArguments()
-{
- return !!symbolTable()->slowArguments();
-}
-
inline Register& ExecState::r(int index)
{
CodeBlock* codeBlock = this->codeBlock();
@@ -1241,44 +1058,47 @@ inline Register& ExecState::r(int index)
return this[index];
}
+inline Register& ExecState::r(VirtualRegister reg)
+{
+ return r(reg.offset());
+}
+
inline Register& ExecState::uncheckedR(int index)
{
RELEASE_ASSERT(index < FirstConstantRegisterIndex);
return this[index];
}
-inline JSValue ExecState::argumentAfterCapture(size_t argument)
+inline Register& ExecState::uncheckedR(VirtualRegister reg)
{
- if (argument >= argumentCount())
- return jsUndefined();
-
- if (!codeBlock())
- return this[argumentOffset(argument)].jsValue();
-
- return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
+ return uncheckedR(reg.offset());
}
-inline void CodeBlockSet::mark(void* candidateCodeBlock)
+inline void CodeBlock::clearVisitWeaklyHasBeenCalled()
{
- // We have to check for 0 and -1 because those are used by the HashMap as markers.
- uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
-
- // This checks for both of those nasty cases in one go.
- // 0 + 1 = 1
- // -1 + 1 = 0
- if (value + 1 <= 1)
- return;
-
- HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
- if (iter == m_set.end())
- return;
-
- (*iter)->m_mayBeExecuting = true;
-#if ENABLE(GGC)
- m_currentlyExecuting.append(static_cast<CodeBlock*>(candidateCodeBlock));
-#endif
+ m_visitWeaklyHasBeenCalled = false;
}
-} // namespace JSC
+template <typename ExecutableType>
+JSObject* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock)
+{
+ if (hasJITCodeFor(kind)) {
+ if (std::is_same<ExecutableType, EvalExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<EvalExecutable*>(this)->codeBlock());
+ else if (std::is_same<ExecutableType, ProgramExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->codeBlock());
+ else if (std::is_same<ExecutableType, ModuleProgramExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->codeBlock());
+ else if (std::is_same<ExecutableType, FunctionExecutable>::value)
+ resultCodeBlock = jsCast<CodeBlock*>(jsCast<FunctionExecutable*>(this)->codeBlockFor(kind));
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+ }
+ return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock);
+}
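
prepareForExecution() dispatches on the executable type at compile time: every std::is_same branch must still type-check for every instantiation, which is why each one goes through jsCast. With C++17's if constexpr (which this code predates) only the matching branch is instantiated. A standalone analogue with hypothetical types:

    #include <type_traits>

    struct CodeBlock { int id; };

    struct EvalExec {
        CodeBlock block { 1 };
        CodeBlock* codeBlock() { return &block; }
    };

    struct FunctionExec {
        CodeBlock callBlock { 2 };
        CodeBlock* codeBlockFor(bool isConstruct) { return isConstruct ? nullptr : &callBlock; }
    };

    template<typename ExecutableType>
    CodeBlock* codeBlockFor(ExecutableType& exec, bool isConstruct)
    {
        if constexpr (std::is_same_v<ExecutableType, EvalExec>)
            return exec.codeBlock();               // evals have a single code block
        else if constexpr (std::is_same_v<ExecutableType, FunctionExec>)
            return exec.codeBlockFor(isConstruct); // functions specialize per kind
        else
            return nullptr;                        // unsupported executable kind
    }
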
-#endif // CodeBlock_h
+#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
+ (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; }))
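
CODEBLOCK_LOG_EVENT wraps the details in a lambda so the toCString formatting only runs if logging is actually enabled; hot paths pay for a branch, not for string building. The same trick in standalone form (logEvent and g_verbose are hypothetical):

    #include <cstdio>
    #include <string>

    bool g_verbose = false;

    template<typename MessageBuilder>
    void logEvent(const char* summary, const MessageBuilder& buildMessage)
    {
        if (!g_verbose)
            return;                 // the builder never runs on the fast path
        std::string details = buildMessage();
        std::printf("%s: %s\n", summary, details.c_str());
    }

    // Usage: logEvent("jettison", [] { return std::string("reason: ..."); });
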
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockHash.h b/Source/JavaScriptCore/bytecode/CodeBlockHash.h
index 4e3398867..b828fe808 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockHash.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlockHash.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeBlockHash_h
-#define CodeBlockHash_h
+#pragma once
#include "CodeSpecializationKind.h"
#include <wtf/PrintStream.h>
@@ -77,5 +76,3 @@ private:
};
} // namespace JSC
-
-#endif // CodeBlockHash_h
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
index be50c9778..50cf7378d 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,18 +28,16 @@
#include "CodeBlock.h"
#include "DFGCommon.h"
+#include "JSCInlines.h"
namespace JSC {
-void CodeBlockJettisoningWatchpoint::fireInternal()
+void CodeBlockJettisoningWatchpoint::fireInternal(const FireDetail& detail)
{
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_codeBlock, "\n");
- m_codeBlock->jettison(CountReoptimization);
-
- if (isOnList())
- remove();
+ m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
index 89d87f4d0..635cd78ca 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeBlockJettisoningWatchpoint_h
-#define CodeBlockJettisoningWatchpoint_h
+#pragma once
#include "Watchpoint.h"
@@ -34,24 +33,16 @@ class CodeBlock;
class CodeBlockJettisoningWatchpoint : public Watchpoint {
public:
- CodeBlockJettisoningWatchpoint()
- : m_codeBlock(0)
- {
- }
-
CodeBlockJettisoningWatchpoint(CodeBlock* codeBlock)
: m_codeBlock(codeBlock)
{
}
protected:
- virtual void fireInternal() override;
+ void fireInternal(const FireDetail&) override;
private:
CodeBlock* m_codeBlock;
};
} // namespace JSC
-
-#endif // CodeBlockJettisoningWatchpoint_h
-
diff --git a/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h b/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h
index d87085841..37f83c4b1 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeBlockWithJITType_h
-#define CodeBlockWithJITType_h
+#pragma once
#include "CodeBlock.h"
@@ -51,6 +50,3 @@ private:
};
} // namespace JSC
-
-#endif // CodeBlockWithJITType_h
-
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
index 39b83fead..a52df924f 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,15 +28,15 @@
#include "CallFrame.h"
#include "CodeBlock.h"
-#include "Executable.h"
-#include "Operations.h"
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
namespace JSC {
unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame)
{
unsigned result = 1;
- for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
result++;
return result;
}
@@ -45,18 +45,90 @@ unsigned CodeOrigin::inlineDepth() const
{
return inlineDepthForCallFrame(inlineCallFrame);
}
+
+bool CodeOrigin::isApproximatelyEqualTo(const CodeOrigin& other) const
+{
+ CodeOrigin a = *this;
+ CodeOrigin b = other;
+
+ if (!a.isSet())
+ return !b.isSet();
+ if (!b.isSet())
+ return false;
+
+ if (a.isHashTableDeletedValue())
+ return b.isHashTableDeletedValue();
+ if (b.isHashTableDeletedValue())
+ return false;
+ for (;;) {
+ ASSERT(a.isSet());
+ ASSERT(b.isSet());
+
+ if (a.bytecodeIndex != b.bytecodeIndex)
+ return false;
+
+ if ((!!a.inlineCallFrame) != (!!b.inlineCallFrame))
+ return false;
+
+ if (!a.inlineCallFrame)
+ return true;
+
+ if (a.inlineCallFrame->baselineCodeBlock.get() != b.inlineCallFrame->baselineCodeBlock.get())
+ return false;
+
+ a = a.inlineCallFrame->directCaller;
+ b = b.inlineCallFrame->directCaller;
+ }
+}
+
+unsigned CodeOrigin::approximateHash() const
+{
+ if (!isSet())
+ return 0;
+ if (isHashTableDeletedValue())
+ return 1;
+
+ unsigned result = 2;
+ CodeOrigin codeOrigin = *this;
+ for (;;) {
+ result += codeOrigin.bytecodeIndex;
+
+ if (!codeOrigin.inlineCallFrame)
+ return result;
+
+ result += WTF::PtrHash<JSCell*>::hash(codeOrigin.inlineCallFrame->baselineCodeBlock.get());
+
+ codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+ }
+}
+
Vector<CodeOrigin> CodeOrigin::inlineStack() const
{
Vector<CodeOrigin> result(inlineDepth());
result.last() = *this;
unsigned index = result.size() - 2;
- for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
- result[index--] = current->caller;
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
+ result[index--] = current->directCaller;
RELEASE_ASSERT(!result[0].inlineCallFrame);
return result;
}
+CodeBlock* CodeOrigin::codeOriginOwner() const
+{
+ if (!inlineCallFrame)
+ return 0;
+ return inlineCallFrame->baselineCodeBlock.get();
+}
+
+int CodeOrigin::stackOffset() const
+{
+ if (!inlineCallFrame)
+ return 0;
+
+ return inlineCallFrame->stackOffset;
+}
+
void CodeOrigin::dump(PrintStream& out) const
{
if (!isSet()) {
@@ -70,7 +142,7 @@ void CodeOrigin::dump(PrintStream& out) const
out.print(" --> ");
if (InlineCallFrame* frame = stack[i].inlineCallFrame) {
- out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->executable.get()), "> ");
+ out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->baselineCodeBlock.get()), "> ");
if (frame->isClosureCall)
out.print("(closure) ");
}
@@ -84,51 +156,4 @@ void CodeOrigin::dumpInContext(PrintStream& out, DumpContext*) const
dump(out);
}
-JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
-{
- return jsCast<JSFunction*>(calleeRecovery.recover(exec));
-}
-
-CodeBlockHash InlineCallFrame::hash() const
-{
- return jsCast<FunctionExecutable*>(executable.get())->codeBlockFor(
- specializationKind())->hash();
-}
-
-CString InlineCallFrame::inferredName() const
-{
- return jsCast<FunctionExecutable*>(executable.get())->inferredName().utf8();
-}
-
-CodeBlock* InlineCallFrame::baselineCodeBlock() const
-{
- return jsCast<FunctionExecutable*>(executable.get())->baselineCodeBlockFor(specializationKind());
-}
-
-void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
-{
- out.print(inferredName(), "#", hash());
-}
-
-void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
-{
- out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()));
- if (executable->isStrictMode())
- out.print(" (StrictMode)");
- out.print(", bc#", caller.bytecodeIndex, ", ", specializationKind());
- if (isClosureCall)
- out.print(", closure call");
- else
- out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
- out.print(", numArgs+this = ", arguments.size());
- out.print(", stack < loc", VirtualRegister(stackOffset).toLocal());
- out.print(">");
-}
-
-void InlineCallFrame::dump(PrintStream& out) const
-{
- dumpInContext(out, 0);
-}
-
} // namespace JSC
-
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h
index ed660c247..38712f964 100644
--- a/Source/JavaScriptCore/bytecode/CodeOrigin.h
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,15 +23,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeOrigin_h
-#define CodeOrigin_h
+#pragma once
+#include "CallMode.h"
#include "CodeBlockHash.h"
#include "CodeSpecializationKind.h"
-#include "JSFunction.h"
-#include "ValueRecovery.h"
#include "WriteBarrier.h"
-#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/PrintStream.h>
#include <wtf/StdLibExtras.h>
@@ -39,10 +36,9 @@
namespace JSC {
+class CodeBlock;
+struct DumpContext;
struct InlineCallFrame;
-class ExecState;
-class ScriptExecutable;
-class JSFunction;
struct CodeOrigin {
static const unsigned invalidBytecodeIndex = UINT_MAX;
@@ -63,7 +59,7 @@ struct CodeOrigin {
CodeOrigin(WTF::HashTableDeletedValueType)
: bytecodeIndex(invalidBytecodeIndex)
- , inlineCallFrame(bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1)))
+ , inlineCallFrame(deletedMarker())
{
}
@@ -75,6 +71,7 @@ struct CodeOrigin {
}
bool isSet() const { return bytecodeIndex != invalidBytecodeIndex; }
+ explicit operator bool() const { return isSet(); }
bool isHashTableDeletedValue() const
{
@@ -87,7 +84,7 @@ struct CodeOrigin {
// If the code origin corresponds to inlined code, gives you the heap object that
// would have owned the code if it had not been inlined. Otherwise returns 0.
- ScriptExecutable* codeOriginOwner() const;
+ CodeBlock* codeOriginOwner() const;
int stackOffset() const;
@@ -97,69 +94,28 @@ struct CodeOrigin {
bool operator==(const CodeOrigin& other) const;
bool operator!=(const CodeOrigin& other) const { return !(*this == other); }
+ // This checks whether the two code origins correspond to the same stack trace snippets,
+ // but ignores whether the InlineCallFrames themselves are identical.
+ bool isApproximatelyEqualTo(const CodeOrigin& other) const;
+
+ unsigned approximateHash() const;
+
+ template <typename Function>
+ void walkUpInlineStack(const Function&);
+
// Get the inline stack. This is slow, and is intended for debugging only.
Vector<CodeOrigin> inlineStack() const;
- void dump(PrintStream&) const;
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const;
void dumpInContext(PrintStream&, DumpContext*) const;
-};
-
-struct InlineCallFrame {
- Vector<ValueRecovery> arguments;
- WriteBarrier<ScriptExecutable> executable;
- ValueRecovery calleeRecovery;
- CodeOrigin caller;
- BitVector capturedVars; // Indexed by the machine call frame's variable numbering.
- signed stackOffset : 30;
- bool isCall : 1;
- bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
- VirtualRegister argumentsRegister; // This is only set if the code uses arguments. The unmodified arguments register follows the unmodifiedArgumentsRegister() convention (see CodeBlock.h).
-
- // There is really no good notion of a "default" set of values for
- // InlineCallFrame's fields. This constructor is here just to reduce confusion if
- // we forgot to initialize explicitly.
- InlineCallFrame()
- : stackOffset(0)
- , isCall(false)
- , isClosureCall(false)
- {
- }
-
- CodeSpecializationKind specializationKind() const { return specializationFromIsCall(isCall); }
- JSFunction* calleeConstant() const
+private:
+ static InlineCallFrame* deletedMarker()
{
- if (calleeRecovery.isConstant())
- return jsCast<JSFunction*>(calleeRecovery.constant());
- return 0;
+ return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1));
}
-
- // Get the callee given a machine call frame to which this InlineCallFrame belongs.
- JSFunction* calleeForCallFrame(ExecState*) const;
-
- CString inferredName() const;
- CodeBlockHash hash() const;
-
- CodeBlock* baselineCodeBlock() const;
-
- ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
- ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
-
- void dumpBriefFunctionInformation(PrintStream&) const;
- void dump(PrintStream&) const;
- void dumpInContext(PrintStream&, DumpContext*) const;
-
- MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
};
-inline int CodeOrigin::stackOffset() const
-{
- if (!inlineCallFrame)
- return 0;
-
- return inlineCallFrame->stackOffset;
-}
-
inline unsigned CodeOrigin::hash() const
{
return WTF::IntHash<unsigned>::hash(bytecodeIndex) +
@@ -171,13 +127,6 @@ inline bool CodeOrigin::operator==(const CodeOrigin& other) const
return bytecodeIndex == other.bytecodeIndex
&& inlineCallFrame == other.inlineCallFrame;
}
-
-inline ScriptExecutable* CodeOrigin::codeOriginOwner() const
-{
- if (!inlineCallFrame)
- return 0;
- return inlineCallFrame->executable.get();
-}
struct CodeOriginHash {
static unsigned hash(const CodeOrigin& key) { return key.hash(); }
@@ -185,6 +134,12 @@ struct CodeOriginHash {
static const bool safeToCompareToEmptyOrDeleted = true;
};
+struct CodeOriginApproximateHash {
+ static unsigned hash(const CodeOrigin& key) { return key.approximateHash(); }
+ static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a.isApproximatelyEqualTo(b); }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
} // namespace JSC
namespace WTF {
@@ -200,6 +155,3 @@ template<> struct HashTraits<JSC::CodeOrigin> : SimpleClassHashTraits<JSC::CodeO
};
} // namespace WTF
-
-#endif // CodeOrigin_h
-
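
The new CodeOriginApproximateHash functor pairs approximateHash() with isApproximatelyEqualTo(), so a hash table keyed this way coalesces origins that describe the same stack-trace snippet even when their InlineCallFrame pointers differ. A minimal sketch of such a table (the typedef and helper below are illustrative, not part of this patch):

    #include <wtf/HashMap.h>

    // Buckets CodeOrigins by approximate equality: distinct InlineCallFrame*
    // values with identical inline stacks share one entry.
    typedef HashMap<JSC::CodeOrigin, unsigned, JSC::CodeOriginApproximateHash> ApproximateOriginCounts;

    static void recordOrigin(ApproximateOriginCounts& counts, const JSC::CodeOrigin& origin)
    {
        counts.add(origin, 0).iterator->value++;
    }
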
diff --git a/Source/JavaScriptCore/bytecode/CodeType.cpp b/Source/JavaScriptCore/bytecode/CodeType.cpp
index 8b2cad56a..0c7043dfa 100644
--- a/Source/JavaScriptCore/bytecode/CodeType.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeType.cpp
@@ -42,6 +42,9 @@ void printInternal(PrintStream& out, JSC::CodeType codeType)
case JSC::FunctionCode:
out.print("Function");
return;
+ case JSC::ModuleCode:
+ out.print("Module");
+ return;
default:
CRASH();
return;
diff --git a/Source/JavaScriptCore/bytecode/CodeType.h b/Source/JavaScriptCore/bytecode/CodeType.h
index 04afc1109..3c38ca21a 100644
--- a/Source/JavaScriptCore/bytecode/CodeType.h
+++ b/Source/JavaScriptCore/bytecode/CodeType.h
@@ -23,14 +23,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CodeType_h
-#define CodeType_h
-
-#include <wtf/Platform.h>
+#pragma once
namespace JSC {
-enum CodeType { GlobalCode, EvalCode, FunctionCode };
+enum CodeType { GlobalCode, EvalCode, FunctionCode, ModuleCode };
} // namespace JSC
@@ -40,6 +37,3 @@ class PrintStream;
void printInternal(PrintStream&, JSC::CodeType);
} // namespace WTF
-
-#endif // CodeType_h
-
diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp
new file mode 100644
index 000000000..0622553c0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ComplexGetStatus.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+ComplexGetStatus ComplexGetStatus::computeFor(
+ Structure* headStructure, const ObjectPropertyConditionSet& conditionSet, UniquedStringImpl* uid)
+{
+ // FIXME: We should assert that we never see a structure for which
+ // getOwnPropertySlotIsImpure() is true but for which
+ // newImpurePropertyFiresWatchpoints() is false. We're not at a point where we
+ // can do that yet.
+ // https://bugs.webkit.org/show_bug.cgi?id=131810
+
+ ASSERT(conditionSet.isValid());
+
+ if (headStructure->takesSlowPathInDFGForImpureProperty())
+ return takesSlowPath();
+
+ ComplexGetStatus result;
+ result.m_kind = Inlineable;
+
+ if (!conditionSet.isEmpty()) {
+ result.m_conditionSet = conditionSet;
+
+ if (!result.m_conditionSet.structuresEnsureValidity())
+ return skip();
+
+ unsigned numberOfSlotBases =
+ result.m_conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence);
+ RELEASE_ASSERT(numberOfSlotBases <= 1);
+ if (!numberOfSlotBases) {
+ ASSERT(result.m_offset == invalidOffset);
+ return result;
+ }
+ ObjectPropertyCondition base = result.m_conditionSet.slotBaseCondition();
+ ASSERT(base.kind() == PropertyCondition::Presence);
+
+ result.m_offset = base.offset();
+ } else
+ result.m_offset = headStructure->getConcurrently(uid);
+
+ if (!isValidOffset(result.m_offset))
+ return takesSlowPath();
+
+ return result;
+}
+
+} // namespace JSC
+
+
diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.h b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h
new file mode 100644
index 000000000..d94b312ab
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class CodeBlock;
+class StructureChain;
+
+// This class is useful for figuring out how to inline a cached get-like access. We
+// say "get-like" because this is appropriate for loading the GetterSetter object in
+// a put_by_id that hits a setter. Notably, this doesn't figure out how to call
+// accessors, or even whether they should be called. What it gives us is a way of
+// determining how to load the value of the requested property (identified by a
+// UniquedStringImpl* uid) from an object of the given structure in the given CodeBlock,
+// assuming that such an access had already been cached by Repatch (and so Repatch had
+// already done a bunch of safety checks). This doesn't reexecute any checks that
+// Repatch would have executed, and for prototype chain accesses, it doesn't ask the
+// objects in the prototype chain whether their getOwnPropertySlot would attempt to
+// intercept the access - so this really is only appropriate if you already know that
+// one of the JITOperations had OK'd this for caching and that Repatch concurred.
+//
+// The typical use pattern is something like:
+//
+// ComplexGetStatus status = ComplexGetStatus::computeFor(...);
+// switch (status.kind()) {
+// case ComplexGetStatus::ShouldSkip:
+// // Handle the case where this kind of access is possibly safe but wouldn't
+// // pass the required safety checks. For example, if an IC gives us a list of
+// // accesses and one of them is ShouldSkip, then we should pretend as if it
+// // wasn't even there.
+// break;
+// case ComplexGetStatus::TakesSlowPath:
+// // This kind of access is not safe to inline. Bail out of any attempt to
+// // inline.
+// break;
+// case ComplexGetStatus::Inlineable:
+// // The good stuff goes here. If it's Inlineable then the other properties of
+// // the 'status' object will tell you everything you need to know about how
+// // to execute the get-like operation.
+// break;
+// }
+
+class ComplexGetStatus {
+public:
+ enum Kind {
+ ShouldSkip,
+ TakesSlowPath,
+ Inlineable
+ };
+
+ ComplexGetStatus()
+ : m_kind(ShouldSkip)
+ , m_offset(invalidOffset)
+ {
+ }
+
+ static ComplexGetStatus skip()
+ {
+ return ComplexGetStatus();
+ }
+
+ static ComplexGetStatus takesSlowPath()
+ {
+ ComplexGetStatus result;
+ result.m_kind = TakesSlowPath;
+ return result;
+ }
+
+ static ComplexGetStatus computeFor(
+ Structure* headStructure, const ObjectPropertyConditionSet&, UniquedStringImpl* uid);
+
+ Kind kind() const { return m_kind; }
+ PropertyOffset offset() const { return m_offset; }
+ const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+
+private:
+ Kind m_kind;
+ PropertyOffset m_offset;
+ ObjectPropertyConditionSet m_conditionSet;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
index 5d05bbb2f..64fe9a387 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,19 +28,32 @@
#if ENABLE(DFG_JIT)
-#include <wtf/PassOwnPtr.h>
+#include "CodeBlock.h"
+#include "VMInlines.h"
namespace JSC { namespace DFG {
+void FrequentExitSite::dump(PrintStream& out) const
+{
+ out.print("bc#", m_bytecodeOffset, ": ", m_kind, "/", m_jitType);
+}
+
ExitProfile::ExitProfile() { }
ExitProfile::~ExitProfile() { }
-bool ExitProfile::add(const ConcurrentJITLocker&, const FrequentExitSite& site)
+bool ExitProfile::add(const ConcurrentJSLocker&, CodeBlock* owner, const FrequentExitSite& site)
{
+ ASSERT(site.jitType() != ExitFromAnything);
+
+ CODEBLOCK_LOG_EVENT(owner, "frequentExit", (site));
+
+ if (Options::verboseExitProfile())
+ dataLog(pointerDump(owner), ": Adding exit site: ", site, "\n");
+
// If we've never seen any frequent exits then create the list and put this site
// into it.
if (!m_frequentExitSites) {
- m_frequentExitSites = adoptPtr(new Vector<FrequentExitSite>());
+ m_frequentExitSites = std::make_unique<Vector<FrequentExitSite>>();
m_frequentExitSites->append(site);
return true;
}
@@ -72,13 +85,13 @@ Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex)
return result;
}
-bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite& site) const
+bool ExitProfile::hasExitSite(const ConcurrentJSLocker&, const FrequentExitSite& site) const
{
if (!m_frequentExitSites)
return false;
for (unsigned i = m_frequentExitSites->size(); i--;) {
- if (m_frequentExitSites->at(i) == site)
+ if (site.subsumes(m_frequentExitSites->at(i)))
return true;
}
return false;
@@ -87,7 +100,7 @@ bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite
QueryableExitProfile::QueryableExitProfile() { }
QueryableExitProfile::~QueryableExitProfile() { }
-void QueryableExitProfile::initialize(const ConcurrentJITLocker&, const ExitProfile& profile)
+void QueryableExitProfile::initialize(const ConcurrentJSLocker&, const ExitProfile& profile)
{
if (!profile.m_frequentExitSites)
return;
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
index ab1a60d58..337e3ec01 100644
--- a/Source/JavaScriptCore/bytecode/DFGExitProfile.h
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,13 +23,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGExitProfile_h
-#define DFGExitProfile_h
+#pragma once
-#include "ConcurrentJITLock.h"
+#if ENABLE(DFG_JIT)
+
+#include "ConcurrentJSLock.h"
#include "ExitKind.h"
+#include "ExitingJITType.h"
#include <wtf/HashSet.h>
-#include <wtf/OwnPtr.h>
#include <wtf/Vector.h>
namespace JSC { namespace DFG {
@@ -39,18 +40,21 @@ public:
FrequentExitSite()
: m_bytecodeOffset(0) // 0 = empty value
, m_kind(ExitKindUnset)
+ , m_jitType(ExitFromAnything)
{
}
FrequentExitSite(WTF::HashTableDeletedValueType)
: m_bytecodeOffset(1) // 1 = deleted value
, m_kind(ExitKindUnset)
+ , m_jitType(ExitFromAnything)
{
}
- explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind)
+ explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind, ExitingJITType jitType = ExitFromAnything)
: m_bytecodeOffset(bytecodeOffset)
, m_kind(kind)
+ , m_jitType(jitType)
{
if (m_kind == ArgumentsEscaped) {
// Count this one globally. It doesn't matter where in the code block the arguments escaped;
@@ -61,9 +65,10 @@ public:
// Use this constructor if you wish for the exit site to be counted globally within its
// code block.
- explicit FrequentExitSite(ExitKind kind)
+ explicit FrequentExitSite(ExitKind kind, ExitingJITType jitType = ExitFromAnything)
: m_bytecodeOffset(0)
, m_kind(kind)
+ , m_jitType(jitType)
{
}
@@ -75,25 +80,48 @@ public:
bool operator==(const FrequentExitSite& other) const
{
return m_bytecodeOffset == other.m_bytecodeOffset
- && m_kind == other.m_kind;
+ && m_kind == other.m_kind
+ && m_jitType == other.m_jitType;
+ }
+
+ bool subsumes(const FrequentExitSite& other) const
+ {
+ if (m_bytecodeOffset != other.m_bytecodeOffset)
+ return false;
+ if (m_kind != other.m_kind)
+ return false;
+ if (m_jitType == ExitFromAnything)
+ return true;
+ return m_jitType == other.m_jitType;
}
unsigned hash() const
{
- return WTF::intHash(m_bytecodeOffset) + m_kind;
+ return WTF::intHash(m_bytecodeOffset) + m_kind + m_jitType * 7;
}
unsigned bytecodeOffset() const { return m_bytecodeOffset; }
ExitKind kind() const { return m_kind; }
+ ExitingJITType jitType() const { return m_jitType; }
+
+ FrequentExitSite withJITType(ExitingJITType jitType) const
+ {
+ FrequentExitSite result = *this;
+ result.m_jitType = jitType;
+ return result;
+ }
bool isHashTableDeletedValue() const
{
return m_kind == ExitKindUnset && m_bytecodeOffset;
}
+
+ void dump(PrintStream& out) const;
private:
unsigned m_bytecodeOffset;
ExitKind m_kind;
+ ExitingJITType m_jitType;
};
struct FrequentExitSiteHash {
@@ -104,6 +132,7 @@ struct FrequentExitSiteHash {
} } // namespace JSC::DFG
+
namespace WTF {
template<typename T> struct DefaultHash;
@@ -131,7 +160,7 @@ public:
// be called a fixed number of times per recompilation. Recompilation is
// rare to begin with, and implies doing O(n) operations on the CodeBlock
// anyway.
- bool add(const ConcurrentJITLocker&, const FrequentExitSite&);
+ bool add(const ConcurrentJSLocker&, CodeBlock* owner, const FrequentExitSite&);
// Get the frequent exit sites for a bytecode index. This is O(n), and is
// meant to only be used from debugging/profiling code.
@@ -141,12 +170,12 @@ public:
// in the compiler. It should be strictly cheaper than building a
// QueryableExitProfile, if you really expect this to be called infrequently
// and you believe that there are few exit sites.
- bool hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite&) const;
- bool hasExitSite(const ConcurrentJITLocker& locker, ExitKind kind) const
+ bool hasExitSite(const ConcurrentJSLocker&, const FrequentExitSite&) const;
+ bool hasExitSite(const ConcurrentJSLocker& locker, ExitKind kind) const
{
return hasExitSite(locker, FrequentExitSite(kind));
}
- bool hasExitSite(const ConcurrentJITLocker& locker, unsigned bytecodeIndex, ExitKind kind) const
+ bool hasExitSite(const ConcurrentJSLocker& locker, unsigned bytecodeIndex, ExitKind kind) const
{
return hasExitSite(locker, FrequentExitSite(bytecodeIndex, kind));
}
@@ -154,7 +183,7 @@ public:
private:
friend class QueryableExitProfile;
- OwnPtr<Vector<FrequentExitSite>> m_frequentExitSites;
+ std::unique_ptr<Vector<FrequentExitSite>> m_frequentExitSites;
};
class QueryableExitProfile {
@@ -162,10 +191,14 @@ public:
QueryableExitProfile();
~QueryableExitProfile();
- void initialize(const ConcurrentJITLocker&, const ExitProfile&);
+ void initialize(const ConcurrentJSLocker&, const ExitProfile&);
bool hasExitSite(const FrequentExitSite& site) const
{
+ if (site.jitType() == ExitFromAnything) {
+ return hasExitSite(site.withJITType(ExitFromDFG))
+ || hasExitSite(site.withJITType(ExitFromFTL));
+ }
return m_frequentExitSites.find(site) != m_frequentExitSites.end();
}
@@ -184,4 +217,4 @@ private:
} } // namespace JSC::DFG
-#endif // DFGExitProfile_h
+#endif // ENABLE(DFG_JIT)
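
The subsumes() relation is what hasExitSite() now uses in place of strict equality, so a query that does not care which JIT the exit came from matches any recorded site. A hedged illustration (BadCache stands in for an arbitrary ExitKind; this is not code from the patch):

    using namespace JSC::DFG;

    FrequentExitSite recorded(42, BadCache, ExitFromFTL); // as stored by the profiler
    FrequentExitSite query(42, BadCache);                 // jitType defaults to ExitFromAnything

    ASSERT(query.subsumes(recorded));  // the wildcard jitType matches any concrete type
    ASSERT(!recorded.subsumes(query)); // a concrete jitType never matches the wildcard
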
diff --git a/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp
new file mode 100644
index 000000000..790d9c03d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DOMJITAccessCasePatchpointParams.h"
+
+#include "LinkBuffer.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+template<typename JumpType, typename FunctionType, typename ResultType, typename... Arguments>
+class SlowPathCallGeneratorWithArguments : public DOMJITAccessCasePatchpointParams::SlowPathCallGenerator {
+public:
+ SlowPathCallGeneratorWithArguments(JumpType from, CCallHelpers::Label to, FunctionType function, ResultType result, std::tuple<Arguments...> arguments)
+ : m_from(from)
+ , m_to(to)
+ , m_function(function)
+ , m_result(result)
+ , m_arguments(arguments)
+ {
+ }
+
+ template<size_t... ArgumentsIndex>
+ CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
+ {
+ CCallHelpers::JumpList exceptions;
+ // We spill (1) the registers used by the IC and (2) the registers used by the DOMJIT::Patchpoint.
+ AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersByPatchpoint);
+
+ jit.store32(
+ CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+ jit.makeSpaceOnStackForCCall();
+
+ // FIXME: Currently, we do not check any ARM EABI constraints here.
+ // That is OK for now because passing JSValueRegs as an argument produces a compile error.
+ // https://bugs.webkit.org/show_bug.cgi?id=163099
+ jit.setupArgumentsWithExecState(std::get<ArgumentsIndex>(m_arguments)...);
+
+ CCallHelpers::Call operationCall = jit.call();
+ auto function = m_function;
+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(operationCall, FunctionPtr(function));
+ });
+
+ jit.setupResults(m_result);
+ jit.reclaimSpaceOnStackForCCall();
+
+ CCallHelpers::Jump noException = jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+ exceptions.append(jit.jump());
+
+ noException.link(&jit);
+ RegisterSet dontRestore;
+ dontRestore.set(m_result);
+ state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+
+ return exceptions;
+ }
+
+ CCallHelpers::JumpList generate(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit) override
+ {
+ m_from.link(&jit);
+ CCallHelpers::JumpList exceptions = generateImpl(state, usedRegistersByPatchpoint, jit, std::make_index_sequence<std::tuple_size<std::tuple<Arguments...>>::value>());
+ jit.jump().linkTo(m_to, &jit);
+ return exceptions;
+ }
+
+protected:
+ JumpType m_from;
+ CCallHelpers::Label m_to;
+ FunctionType m_function;
+ ResultType m_result;
+ std::tuple<Arguments...> m_arguments;
+};
+
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) \
+ void DOMJITAccessCasePatchpointParams::addSlowPathCallImpl(CCallHelpers::JumpList from, CCallHelpers& jit, OperationType operation, ResultType result, std::tuple<__VA_ARGS__> args) \
+ { \
+ CCallHelpers::Label to = jit.label(); \
+ m_generators.append(std::make_unique<SlowPathCallGeneratorWithArguments<CCallHelpers::JumpList, OperationType, ResultType, __VA_ARGS__>>(from, to, operation, result, args)); \
+ } \
+
+DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+
+CCallHelpers::JumpList DOMJITAccessCasePatchpointParams::emitSlowPathCalls(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit)
+{
+ CCallHelpers::JumpList exceptions;
+ for (auto& generator : m_generators)
+ exceptions.append(generator->generate(state, usedRegistersByPatchpoint, jit));
+ return exceptions;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
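
generateImpl() expands its stored argument tuple with std::index_sequence. The same C++14 technique in isolation, free of JSC types (a sketch, not code from this patch):

    #include <tuple>
    #include <utility>

    template<typename F, typename Tuple, std::size_t... I>
    auto applyImpl(F f, const Tuple& t, std::index_sequence<I...>)
    {
        return f(std::get<I>(t)...); // mirrors std::get<ArgumentsIndex>(m_arguments)... above
    }

    template<typename F, typename... Args>
    auto apply(F f, const std::tuple<Args...>& t)
    {
        return applyImpl(f, t, std::make_index_sequence<sizeof...(Args)>());
    }
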
diff --git a/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h
new file mode 100644
index 000000000..8cf975197
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "DOMJITPatchpointParams.h"
+
+namespace JSC {
+
+struct AccessGenerationState;
+
+class DOMJITAccessCasePatchpointParams : public DOMJIT::PatchpointParams {
+public:
+ DOMJITAccessCasePatchpointParams(Vector<DOMJIT::Value>&& regs, Vector<GPRReg>&& gpScratch, Vector<FPRReg>&& fpScratch)
+ : DOMJIT::PatchpointParams(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch))
+ {
+ }
+
+ class SlowPathCallGenerator {
+ public:
+ virtual ~SlowPathCallGenerator() { }
+ virtual CCallHelpers::JumpList generate(AccessGenerationState&, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers&) = 0;
+ };
+
+ CCallHelpers::JumpList emitSlowPathCalls(AccessGenerationState&, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers&);
+
+private:
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) void addSlowPathCallImpl(CCallHelpers::JumpList, CCallHelpers&, OperationType, ResultType, std::tuple<__VA_ARGS__> args) override;
+ DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+ Vector<std::unique_ptr<SlowPathCallGenerator>> m_generators;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.cpp b/Source/JavaScriptCore/bytecode/DataFormat.cpp
new file mode 100644
index 000000000..8bd42e100
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DataFormat.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DataFormat.h"
+
+#include <wtf/Assertions.h>
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::DataFormat dataFormat)
+{
+ out.print(dataFormatToString(dataFormat));
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h
index bb9da4c57..22c649226 100644
--- a/Source/JavaScriptCore/bytecode/DataFormat.h
+++ b/Source/JavaScriptCore/bytecode/DataFormat.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DataFormat_h
-#define DataFormat_h
+#pragma once
#include <wtf/Assertions.h>
@@ -56,7 +55,6 @@ enum DataFormat {
// Special data formats used only for OSR.
DataFormatDead = 33, // Implies jsUndefined().
- DataFormatArguments = 34 // Implies that the arguments object must be reified.
};
inline const char* dataFormatToString(DataFormat dataFormat)
@@ -90,8 +88,6 @@ inline const char* dataFormatToString(DataFormat dataFormat)
return "JSBoolean";
case DataFormatDead:
return "Dead";
- case DataFormatArguments:
- return "Arguments";
default:
RELEASE_ASSERT_NOT_REACHED();
return "Unknown";
@@ -124,6 +120,11 @@ inline bool isJSBoolean(DataFormat format)
return isJSFormat(format, DataFormatJSBoolean);
}
-}
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::DataFormat);
-#endif // DataFormat_h
+} // namespace WTF
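
Declaring a WTF::printInternal() overload is what makes a type printable through WTF's dataLog machinery. After this change a DataFormat can be passed straight to dataLog (illustrative helper, not part of the patch):

    #include "DataFormat.h"
    #include <wtf/DataLog.h>

    static void dumpFormat(JSC::DataFormat format)
    {
        // Resolves to the printInternal(PrintStream&, JSC::DataFormat) overload above.
        dataLog("format = ", format, "\n"); // e.g. "format = Int32"
    }
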
diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
index 35af7c7b9..762387caf 100644
--- a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,10 +26,46 @@
#include "config.h"
#include "DeferredCompilationCallback.h"
+#include "CodeBlock.h"
+
namespace JSC {
DeferredCompilationCallback::DeferredCompilationCallback() { }
DeferredCompilationCallback::~DeferredCompilationCallback() { }
+void DeferredCompilationCallback::compilationDidComplete(CodeBlock*, CodeBlock*, CompilationResult result)
+{
+ dumpCompiledSourcesIfNeeded();
+
+ switch (result) {
+ case CompilationFailed:
+ case CompilationInvalidated:
+ case CompilationSuccessful:
+ break;
+ case CompilationDeferred:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+Vector<DeferredSourceDump>& DeferredCompilationCallback::ensureDeferredSourceDump()
+{
+ if (!m_deferredSourceDump)
+ m_deferredSourceDump = std::make_unique<Vector<DeferredSourceDump>>();
+ return *m_deferredSourceDump;
+}
+
+void DeferredCompilationCallback::dumpCompiledSourcesIfNeeded()
+{
+ if (!m_deferredSourceDump)
+ return;
+
+ ASSERT(Options::dumpSourceAtDFGTime());
+ unsigned index = 0;
+ for (auto& info : *m_deferredSourceDump) {
+ dataLog("[", ++index, "] ");
+ info.dump();
+ }
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
index 6421e3e25..925711047 100644
--- a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
+++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h
@@ -23,11 +23,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DeferredCompilationCallback_h
-#define DeferredCompilationCallback_h
+#pragma once
#include "CompilationResult.h"
+#include "DeferredSourceDump.h"
#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
namespace JSC {
@@ -40,11 +41,15 @@ protected:
public:
virtual ~DeferredCompilationCallback();
- virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*) = 0;
- virtual void compilationDidComplete(CodeBlock*, CompilationResult) = 0;
-};
+ virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) = 0;
+ virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult);
-} // namespace JSC
+ Vector<DeferredSourceDump>& ensureDeferredSourceDump();
-#endif // DeferredCompilationCallback_h
+private:
+ void dumpCompiledSourcesIfNeeded();
+ std::unique_ptr<Vector<DeferredSourceDump>> m_deferredSourceDump;
+};
+
+} // namespace JSC
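
compilationDidComplete() is now a non-pure virtual that flushes any deferred source dumps, so overriders are expected to chain to the base class. A hypothetical subclass (MyCallback is not from this patch):

    void MyCallback::compilationDidComplete(CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result)
    {
        // ... subclass-specific bookkeeping ...

        // Chain to the base so dumpCompiledSourcesIfNeeded() still runs.
        DeferredCompilationCallback::compilationDidComplete(codeBlock, profiledDFGCodeBlock, result);
    }
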
diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
new file mode 100644
index 000000000..48079db66
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DeferredSourceDump.h"
+
+#include "CodeBlock.h"
+#include "CodeBlockWithJITType.h"
+
+namespace JSC {
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock)
+ : m_codeBlock(codeBlock)
+ , m_rootCodeBlock(nullptr)
+ , m_rootJITType(JITCode::None)
+{
+}
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin)
+ : m_codeBlock(codeBlock)
+ , m_rootCodeBlock(rootCodeBlock)
+ , m_rootJITType(rootJITType)
+ , m_callerCodeOrigin(callerCodeOrigin)
+{
+}
+
+void DeferredSourceDump::dump()
+{
+ bool isInlinedFrame = !!m_rootCodeBlock;
+ if (isInlinedFrame)
+ dataLog("Inlined ");
+ else
+ dataLog("Compiled ");
+ dataLog(*m_codeBlock);
+
+ if (isInlinedFrame)
+ dataLog(" at ", CodeBlockWithJITType(m_rootCodeBlock, m_rootJITType), " ", m_callerCodeOrigin);
+
+ dataLog("\n'''");
+ m_codeBlock->dumpSource();
+ dataLog("'''\n");
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h
index 108e23a37..6c9943d08 100644
--- a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,46 +20,30 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ProfiledCodeBlockJettisoningWatchpoint_h
-#define ProfiledCodeBlockJettisoningWatchpoint_h
+#pragma once
#include "CodeOrigin.h"
-#include "ExitKind.h"
-#include "Watchpoint.h"
+#include "JITCode.h"
namespace JSC {
class CodeBlock;
-class ProfiledCodeBlockJettisoningWatchpoint : public Watchpoint {
+class DeferredSourceDump {
public:
- ProfiledCodeBlockJettisoningWatchpoint()
- : m_exitKind(ExitKindUnset)
- , m_codeBlock(0)
- {
- }
-
- ProfiledCodeBlockJettisoningWatchpoint(
- CodeOrigin codeOrigin, ExitKind exitKind, CodeBlock* codeBlock)
- : m_codeOrigin(codeOrigin)
- , m_exitKind(exitKind)
- , m_codeBlock(codeBlock)
- {
- }
-
-protected:
- virtual void fireInternal() override;
+ DeferredSourceDump(CodeBlock*);
+ DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin);
+
+ void dump();
private:
- CodeOrigin m_codeOrigin;
- ExitKind m_exitKind;
CodeBlock* m_codeBlock;
+ CodeBlock* m_rootCodeBlock;
+ JITCode::JITType m_rootJITType;
+ CodeOrigin m_callerCodeOrigin;
};
} // namespace JSC
-
-#endif // ProfiledCodeBlockJettisoningWatchpoint_h
-
diff --git a/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp
new file mode 100644
index 000000000..5bfef1201
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DirectEvalCodeCache.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void DirectEvalCodeCache::setSlow(ExecState* exec, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
+{
+ LockHolder locker(m_lock);
+ m_cacheMap.set(CacheKey(evalSource, callSiteIndex), WriteBarrier<DirectEvalExecutable>(exec->vm(), owner, evalExecutable));
+}
+
+void DirectEvalCodeCache::clear()
+{
+ LockHolder locker(m_lock);
+ m_cacheMap.clear();
+}
+
+void DirectEvalCodeCache::visitAggregate(SlotVisitor& visitor)
+{
+ LockHolder locker(m_lock);
+ EvalCacheMap::iterator end = m_cacheMap.end();
+ for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
+ visitor.append(ptr->value);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h
new file mode 100644
index 000000000..e075357a8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "DirectEvalExecutable.h"
+#include <wtf/HashMap.h>
+#include <wtf/RefPtr.h>
+#include <wtf/text/StringHash.h>
+
+namespace JSC {
+
+ class SlotVisitor;
+
+ class DirectEvalCodeCache {
+ public:
+ class CacheKey {
+ public:
+ CacheKey(const String& source, CallSiteIndex callSiteIndex)
+ : m_source(source.impl())
+ , m_callSiteIndex(callSiteIndex)
+ {
+ }
+
+ CacheKey(WTF::HashTableDeletedValueType)
+ : m_source(WTF::HashTableDeletedValue)
+ {
+ }
+
+ CacheKey() = default;
+
+ unsigned hash() const { return m_source->hash() ^ m_callSiteIndex.bits(); }
+
+ bool isEmptyValue() const { return !m_source; }
+
+ bool operator==(const CacheKey& other) const
+ {
+ return m_callSiteIndex == other.m_callSiteIndex && WTF::equal(m_source.get(), other.m_source.get());
+ }
+
+ bool isHashTableDeletedValue() const { return m_source.isHashTableDeletedValue(); }
+
+ struct Hash {
+ static unsigned hash(const CacheKey& key)
+ {
+ return key.hash();
+ }
+ static bool equal(const CacheKey& lhs, const CacheKey& rhs)
+ {
+ return lhs == rhs;
+ }
+ static const bool safeToCompareToEmptyOrDeleted = false;
+ };
+
+ typedef SimpleClassHashTraits<CacheKey> HashTraits;
+
+ private:
+ RefPtr<StringImpl> m_source;
+ CallSiteIndex m_callSiteIndex;
+ };
+
+ DirectEvalExecutable* tryGet(const String& evalSource, CallSiteIndex callSiteIndex)
+ {
+ return m_cacheMap.fastGet(CacheKey(evalSource, callSiteIndex)).get();
+ }
+
+ void set(ExecState* exec, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
+ {
+ if (m_cacheMap.size() < maxCacheEntries)
+ setSlow(exec, owner, evalSource, callSiteIndex, evalExecutable);
+ }
+
+ bool isEmpty() const { return m_cacheMap.isEmpty(); }
+
+ void visitAggregate(SlotVisitor&);
+
+ void clear();
+
+ private:
+ static const int maxCacheEntries = 64;
+
+ void setSlow(ExecState*, JSCell* owner, const String& evalSource, CallSiteIndex, DirectEvalExecutable*);
+
+ typedef HashMap<CacheKey, WriteBarrier<DirectEvalExecutable>, CacheKey::Hash, CacheKey::HashTraits> EvalCacheMap;
+ EvalCacheMap m_cacheMap;
+ Lock m_lock;
+ };
+
+} // namespace JSC
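
The cache is keyed on the pair (eval source, CallSiteIndex), so identical eval text at different call sites caches independently. The expected fast path at a direct-eval site, under assumed surrounding locals ('cache', 'exec', 'ownerCell'; compileDirectEval is a hypothetical helper):

    if (DirectEvalExecutable* cached = cache.tryGet(evalSource, callSiteIndex))
        return cached;
    DirectEvalExecutable* fresh = compileDirectEval(evalSource); // hypothetical; creation elided
    cache.set(exec, ownerCell, evalSource, callSiteIndex, fresh); // silently skipped once 64 entries exist
    return fresh;
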
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp b/Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp
new file mode 100644
index 000000000..5232a0e05
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/EvalCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "EvalCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo EvalCodeBlock::s_info = {
+ "EvalCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(EvalCodeBlock)
+};
+
+void EvalCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<EvalCodeBlock*>(cell)->~EvalCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeBlock.h b/Source/JavaScriptCore/bytecode/EvalCodeBlock.h
new file mode 100644
index 000000000..fde7b1165
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/EvalCodeBlock.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+
+namespace JSC {
+
+class EvalCodeBlock : public GlobalCodeBlock {
+public:
+ typedef GlobalCodeBlock Base;
+ DECLARE_INFO;
+
+ static EvalCodeBlock* create(VM* vm, CopyParsedBlockTag, EvalCodeBlock& other)
+ {
+ EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+ EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
+ }
+
+ static EvalCodeBlock* create(VM* vm, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider)
+ {
+ EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+ EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider));
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
+ }
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+ const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
+ unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
+
+private:
+ EvalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, EvalCodeBlock& other)
+ : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ EvalCodeBlock(VM* vm, Structure* structure, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider)
+ : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), 0, 1)
+ {
+ }
+
+ static void destroy(JSCell*);
+
+private:
+ UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeCache.h b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
deleted file mode 100644
index ff5911240..000000000
--- a/Source/JavaScriptCore/bytecode/EvalCodeCache.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef EvalCodeCache_h
-#define EvalCodeCache_h
-
-#include "Executable.h"
-#include "JSGlobalObject.h"
-#include "SourceCode.h"
-#include <wtf/HashMap.h>
-#include <wtf/RefPtr.h>
-#include <wtf/text/StringHash.h>
-
-namespace JSC {
-
- class SlotVisitor;
-
- class EvalCodeCache {
- public:
- EvalExecutable* tryGet(bool inStrictContext, const String& evalSource, JSScope* scope)
- {
- if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject())
- return m_cacheMap.get(evalSource.impl()).get();
- return 0;
- }
-
- EvalExecutable* getSlow(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, const String& evalSource, JSScope* scope)
- {
- EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext);
- if (!evalExecutable)
- return 0;
-
- if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && scope->begin()->isVariableObject() && m_cacheMap.size() < maxCacheEntries)
- m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable));
-
- return evalExecutable;
- }
-
- bool isEmpty() const { return m_cacheMap.isEmpty(); }
-
- void visitAggregate(SlotVisitor&);
-
- void clear()
- {
- m_cacheMap.clear();
- }
-
- private:
- static const unsigned maxCacheableSourceLength = 256;
- static const int maxCacheEntries = 64;
-
- typedef HashMap<RefPtr<StringImpl>, WriteBarrier<EvalExecutable>> EvalCacheMap;
- EvalCacheMap m_cacheMap;
- };
-
-} // namespace JSC
-
-#endif // EvalCodeCache_h
diff --git a/Source/JavaScriptCore/bytecode/ExecutableInfo.h b/Source/JavaScriptCore/bytecode/ExecutableInfo.h
new file mode 100644
index 000000000..750900ecd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExecutableInfo.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ParserModes.h"
+
+namespace JSC {
+
+enum class DerivedContextType : uint8_t { None, DerivedConstructorContext, DerivedMethodContext };
+enum class EvalContextType : uint8_t { None, FunctionEvalContext };
+
+// FIXME: These flags, ParserModes and propagation to XXXCodeBlocks should be reorganized.
+// https://bugs.webkit.org/show_bug.cgi?id=151547
+struct ExecutableInfo {
+ ExecutableInfo(bool usesEval, bool isStrictMode, bool isConstructor, bool isBuiltinFunction, ConstructorKind constructorKind, JSParserScriptMode scriptMode, SuperBinding superBinding, SourceParseMode parseMode, DerivedContextType derivedContextType, bool isArrowFunctionContext, bool isClassContext, EvalContextType evalContextType)
+ : m_usesEval(usesEval)
+ , m_isStrictMode(isStrictMode)
+ , m_isConstructor(isConstructor)
+ , m_isBuiltinFunction(isBuiltinFunction)
+ , m_constructorKind(static_cast<unsigned>(constructorKind))
+ , m_superBinding(static_cast<unsigned>(superBinding))
+ , m_scriptMode(static_cast<unsigned>(scriptMode))
+ , m_parseMode(parseMode)
+ , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+ , m_isArrowFunctionContext(isArrowFunctionContext)
+ , m_isClassContext(isClassContext)
+ , m_evalContextType(static_cast<unsigned>(evalContextType))
+ {
+ ASSERT(m_constructorKind == static_cast<unsigned>(constructorKind));
+ ASSERT(m_superBinding == static_cast<unsigned>(superBinding));
+ ASSERT(m_scriptMode == static_cast<unsigned>(scriptMode));
+ }
+
+ bool usesEval() const { return m_usesEval; }
+ bool isStrictMode() const { return m_isStrictMode; }
+ bool isConstructor() const { return m_isConstructor; }
+ bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+ ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+ SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+ JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+ SourceParseMode parseMode() const { return m_parseMode; }
+ DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+ EvalContextType evalContextType() const { return static_cast<EvalContextType>(m_evalContextType); }
+ bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+ bool isClassContext() const { return m_isClassContext; }
+
+private:
+ unsigned m_usesEval : 1;
+ unsigned m_isStrictMode : 1;
+ unsigned m_isConstructor : 1;
+ unsigned m_isBuiltinFunction : 1;
+ unsigned m_constructorKind : 2;
+ unsigned m_superBinding : 1;
+ unsigned m_scriptMode : 1;
+ SourceParseMode m_parseMode;
+ unsigned m_derivedContextType : 2;
+ unsigned m_isArrowFunctionContext : 1;
+ unsigned m_isClassContext : 1;
+ unsigned m_evalContextType : 2;
+};
+
+} // namespace JSC
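Note: the ExecutableInfo constructor above packs several enums into narrow bitfields and then asserts that each stored value round-trips, so a widened enum cannot be silently truncated. A minimal standalone sketch of that pattern (not JSC code):

#include <cassert>
#include <cstdio>

enum class Kind : unsigned { None, Base, Derived }; // needs 2 bits

struct PackedInfo {
    explicit PackedInfo(Kind kind)
        : m_kind(static_cast<unsigned>(kind))
    {
        // Fires in debug builds if Kind ever outgrows the 2-bit field.
        assert(m_kind == static_cast<unsigned>(kind));
    }

    Kind kind() const { return static_cast<Kind>(m_kind); }

private:
    unsigned m_kind : 2;
};

int main()
{
    PackedInfo info(Kind::Derived);
    std::printf("%u\n", static_cast<unsigned>(info.kind())); // prints 2
}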
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
index 3a646a86a..237c0e752 100644
--- a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,21 +28,26 @@
#include "CodeBlock.h"
#include "ExecutableAllocator.h"
+#include "JSCInlines.h"
+#include "VMInlines.h"
#include <wtf/StringExtras.h>
namespace JSC {
-ExecutionCounter::ExecutionCounter()
+template<CountingVariant countingVariant>
+ExecutionCounter<countingVariant>::ExecutionCounter()
{
reset();
}
-void ExecutionCounter::forceSlowPathConcurrently()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::forceSlowPathConcurrently()
{
m_counter = 0;
}
-bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
{
if (hasCrossedThreshold(codeBlock))
return true;
@@ -53,26 +58,28 @@ bool ExecutionCounter::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
return false;
}
-void ExecutionCounter::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
{
reset();
m_activeThreshold = threshold;
setThreshold(codeBlock);
}
-void ExecutionCounter::deferIndefinitely()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::deferIndefinitely()
{
m_totalCount = 0;
m_activeThreshold = std::numeric_limits<int32_t>::max();
m_counter = std::numeric_limits<int32_t>::min();
}
-double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
{
#if ENABLE(JIT)
double multiplier =
ExecutableAllocator::memoryPressureMultiplier(
- codeBlock->predictedMachineCodeSize());
+ codeBlock->baselineAlternative()->predictedMachineCodeSize());
#else
// This code path will probably not be taken, but if it is, we fake it.
double multiplier = 1.0;
@@ -82,8 +89,7 @@ double ExecutionCounter::applyMemoryUsageHeuristics(int32_t value, CodeBlock* co
return multiplier * value;
}
-int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
- int32_t value, CodeBlock* codeBlock)
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock* codeBlock)
{
double doubleResult = applyMemoryUsageHeuristics(value, codeBlock);
@@ -95,7 +101,8 @@ int32_t ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
return static_cast<int32_t>(doubleResult);
}
-bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::hasCrossedThreshold(CodeBlock* codeBlock) const
{
// This checks if the current count rounded up to the threshold we were targeting.
// For example, if we are using half of available executable memory and have
@@ -117,20 +124,25 @@ bool ExecutionCounter::hasCrossedThreshold(CodeBlock* codeBlock) const
double modifiedThreshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock);
- return static_cast<double>(m_totalCount) + m_counter >=
- modifiedThreshold - static_cast<double>(
- std::min(m_activeThreshold, Options::maximumExecutionCountsBetweenCheckpoints())) / 2;
+ double actualCount = static_cast<double>(m_totalCount) + m_counter;
+ double desiredCount = modifiedThreshold - static_cast<double>(
+ std::min(m_activeThreshold, maximumExecutionCountsBetweenCheckpoints())) / 2;
+
+ bool result = actualCount >= desiredCount;
+
+ CODEBLOCK_LOG_EVENT(codeBlock, "thresholdCheck", ("activeThreshold = ", m_activeThreshold, ", modifiedThreshold = ", modifiedThreshold, ", actualCount = ", actualCount, ", desiredCount = ", desiredCount));
+
+ return result;
}
-bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::setThreshold(CodeBlock* codeBlock)
{
if (m_activeThreshold == std::numeric_limits<int32_t>::max()) {
deferIndefinitely();
return false;
}
- ASSERT(!m_activeThreshold || !hasCrossedThreshold(codeBlock));
-
// Compute the true total count.
double trueTotalCount = count();
@@ -159,17 +171,22 @@ bool ExecutionCounter::setThreshold(CodeBlock* codeBlock)
return false;
}
-void ExecutionCounter::reset()
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::reset()
{
m_counter = 0;
m_totalCount = 0;
m_activeThreshold = 0;
}
-void ExecutionCounter::dump(PrintStream& out) const
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::dump(PrintStream& out) const
{
out.printf("%lf/%lf, %d", count(), static_cast<double>(m_activeThreshold), m_counter);
}
+template class ExecutionCounter<CountingForBaseline>;
+template class ExecutionCounter<CountingForUpperTiers>;
+
} // namespace JSC
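Note: hasCrossedThreshold above treats the counter as having crossed once it comes within half of one checkpoint interval of the memory-adjusted threshold. A standalone model of that arithmetic (not JSC code; the plain multiplier stands in for ExecutableAllocator::memoryPressureMultiplier):

#include <algorithm>
#include <cstdio>

bool hasCrossedThreshold(double totalCount, int counter, int activeThreshold,
    double memoryMultiplier, int maxCountsBetweenCheckpoints)
{
    double modifiedThreshold = memoryMultiplier * activeThreshold;
    double actualCount = totalCount + counter;
    double desiredCount = modifiedThreshold
        - static_cast<double>(std::min(activeThreshold, maxCountsBetweenCheckpoints)) / 2;
    return actualCount >= desiredCount;
}

int main()
{
    // Threshold 1000, no memory pressure, checkpoints every 100 executions:
    // the crossing happens at 950, half a checkpoint early.
    std::printf("%d\n", hasCrossedThreshold(940, 0, 1000, 1.0, 100)); // prints 0
    std::printf("%d\n", hasCrossedThreshold(950, 0, 1000, 1.0, 100)); // prints 1
}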
diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
index a7346691d..f78a9123c 100644
--- a/Source/JavaScriptCore/bytecode/ExecutionCounter.h
+++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,18 +23,35 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ExecutionCounter_h
-#define ExecutionCounter_h
+#pragma once
#include "JSGlobalObject.h"
#include "Options.h"
#include <wtf/PrintStream.h>
-#include <wtf/SimpleStats.h>
namespace JSC {
class CodeBlock;
+enum CountingVariant {
+ CountingForBaseline,
+ CountingForUpperTiers
+};
+
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+
+inline int32_t formattedTotalExecutionCount(float value)
+{
+ union {
+ int32_t i;
+ float f;
+ } u;
+ u.f = value;
+ return u.i;
+}
+
+template<CountingVariant countingVariant>
class ExecutionCounter {
public:
ExecutionCounter();
@@ -44,31 +61,33 @@ public:
void deferIndefinitely();
double count() const { return static_cast<double>(m_totalCount) + m_counter; }
void dump(PrintStream&) const;
- static double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
- static int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+
+ static int32_t maximumExecutionCountsBetweenCheckpoints()
+ {
+ switch (countingVariant) {
+ case CountingForBaseline:
+ return Options::maximumExecutionCountsBetweenCheckpointsForBaseline();
+ case CountingForUpperTiers:
+ return Options::maximumExecutionCountsBetweenCheckpointsForUpperTiers();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+ }
+
template<typename T>
static T clippedThreshold(JSGlobalObject* globalObject, T threshold)
{
int32_t maxThreshold;
if (Options::randomizeExecutionCountsBetweenCheckpoints())
- maxThreshold = globalObject->weakRandomInteger() % Options::maximumExecutionCountsBetweenCheckpoints();
+ maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints();
else
- maxThreshold = Options::maximumExecutionCountsBetweenCheckpoints();
+ maxThreshold = maximumExecutionCountsBetweenCheckpoints();
if (threshold > maxThreshold)
threshold = maxThreshold;
return threshold;
}
- static int32_t formattedTotalCount(float value)
- {
- union {
- int32_t i;
- float f;
- } u;
- u.f = value;
- return u.i;
- }
-
private:
bool hasCrossedThreshold(CodeBlock*) const;
bool setThreshold(CodeBlock*);
@@ -89,12 +108,12 @@ public:
// m_counter.
float m_totalCount;
- // This is the threshold we were originally targetting, without any correction for
+ // This is the threshold we were originally targeting, without any correction for
// the memory usage heuristics.
int32_t m_activeThreshold;
};
-} // namespace JSC
-
-#endif // ExecutionCounter_h
+typedef ExecutionCounter<CountingForBaseline> BaselineExecutionCounter;
+typedef ExecutionCounter<CountingForUpperTiers> UpperTierExecutionCounter;
+} // namespace JSC
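Note: the new formattedTotalExecutionCount helper reinterprets a float's bit pattern as an int32_t so the float-typed total count can be handled as a raw 32-bit value. The union read relies on a widely supported compiler extension; a sketch of the strictly conforming memcpy spelling of the same type pun:

#include <cstdint>
#include <cstdio>
#include <cstring>

int32_t floatBits(float value)
{
    static_assert(sizeof(float) == sizeof(int32_t), "widths must match");
    int32_t bits;
    std::memcpy(&bits, &value, sizeof(bits)); // well-defined type pun
    return bits;
}

int main()
{
    std::printf("0x%08x\n", static_cast<uint32_t>(floatBits(1.0f))); // 0x3f800000
}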
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.cpp b/Source/JavaScriptCore/bytecode/ExitKind.cpp
index 350aa5857..f1ea76d38 100644
--- a/Source/JavaScriptCore/bytecode/ExitKind.cpp
+++ b/Source/JavaScriptCore/bytecode/ExitKind.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -38,20 +38,20 @@ const char* exitKindToString(ExitKind kind)
return "Unset";
case BadType:
return "BadType";
- case BadFunction:
- return "BadFunction";
+ case BadCell:
+ return "BadCell";
+ case BadIdent:
+ return "BadIdent";
case BadExecutable:
return "BadExecutable";
case BadCache:
return "BadCache";
- case BadCacheWatchpoint:
- return "BadCacheWatchpoint";
- case BadWeakConstantCache:
- return "BadWeakConstantCache";
- case BadWeakConstantCacheWatchpoint:
- return "BadWeakConstantCacheWatchpoint";
+ case BadConstantCache:
+ return "BadConstantCache";
case BadIndexingType:
return "BadIndexingType";
+ case BadTypeInfoFlags:
+ return "BadTypeInfoFlags";
case Overflow:
return "Overflow";
case NegativeZero:
@@ -68,38 +68,45 @@ const char* exitKindToString(ExitKind kind)
return "InadequateCoverage";
case ArgumentsEscaped:
return "ArgumentsEscaped";
+ case ExoticObjectMode:
+ return "ExoticObjectMode";
case NotStringObject:
return "NotStringObject";
+ case VarargsOverflow:
+ return "VarargsOverflow";
+ case TDZFailure:
+ return "TDZFailure";
+ case HoistingFailed:
+ return "HoistingFailed";
case Uncountable:
return "Uncountable";
- case UncountableWatchpoint:
- return "UncountableWatchpoint";
case UncountableInvalidation:
return "UncountableInvalidation";
case WatchdogTimerFired:
return "WatchdogTimerFired";
case DebuggerEvent:
return "DebuggerEvent";
+ case ExceptionCheck:
+ return "ExceptionCheck";
+ case GenericUnwind:
+ return "GenericUnwind";
}
RELEASE_ASSERT_NOT_REACHED();
return "Unknown";
}
-bool exitKindIsCountable(ExitKind kind)
+bool exitKindMayJettison(ExitKind kind)
{
switch (kind) {
- case ExitKindUnset:
- RELEASE_ASSERT_NOT_REACHED();
- case BadType:
- case Uncountable:
- case UncountableWatchpoint:
- case LoadFromHole: // Already counted directly by the baseline JIT.
- case StoreToHole: // Already counted directly by the baseline JIT.
- case OutOfBounds: // Already counted directly by the baseline JIT.
+ case ExceptionCheck:
+ case GenericUnwind:
return false;
default:
return true;
}
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ExitKind.h b/Source/JavaScriptCore/bytecode/ExitKind.h
index a9f6df6d4..a6c2e0ea2 100644
--- a/Source/JavaScriptCore/bytecode/ExitKind.h
+++ b/Source/JavaScriptCore/bytecode/ExitKind.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,21 +23,20 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ExitKind_h
-#define ExitKind_h
+#pragma once
namespace JSC {
-enum ExitKind {
+enum ExitKind : uint8_t {
ExitKindUnset,
BadType, // We exited because a type prediction was wrong.
- BadFunction, // We exited because we made an incorrect assumption about what function we would see.
+ BadCell, // We exited because we made an incorrect assumption about what cell we would see. Usually used for function checks.
+ BadIdent, // We exited because we made an incorrect assumption about what identifier we would see. Usually used for cached Id check in get_by_val.
BadExecutable, // We exited because we made an incorrect assumption about what executable we would see.
BadCache, // We exited because an inline cache was wrong.
- BadWeakConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
- BadCacheWatchpoint, // Same as BadCache but from a watchpoint.
- BadWeakConstantCacheWatchpoint, // Same as BadWeakConstantCache but from a watchpoint.
+ BadConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
BadIndexingType, // We exited because an indexing type was wrong.
+ BadTypeInfoFlags, // We exited because we made an incorrect assumption about what TypeInfo flags we would see.
Overflow, // We exited because of overflow.
NegativeZero, // We exited because we encountered negative zero.
Int52Overflow, // We exited because of an Int52 overflow.
@@ -46,28 +45,21 @@ enum ExitKind {
OutOfBounds, // We had an out-of-bounds access to an array.
InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage.
ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to.
+ ExoticObjectMode, // We exited because some exotic object that we were accessing was in an exotic mode (like Arguments with slow arguments).
NotStringObject, // We exited because we shouldn't have attempted to optimize string object access.
+ VarargsOverflow, // We exited because a varargs call passed more arguments than we expected.
+ TDZFailure, // We exited because we were in the TDZ and accessed the variable.
+ HoistingFailed, // Something that was hoisted exited. So, assume that hoisting is a bad idea.
Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
UncountableInvalidation, // We exited because the code block was invalidated; this means that we've already counted the reasons why the code block was invalidated.
- UncountableWatchpoint, // We exited because of a watchpoint, which isn't counted because watchpoints do tracking themselves.
WatchdogTimerFired, // We exited because we need to service the watchdog timer.
- DebuggerEvent // We exited because we need to service the debugger.
+ DebuggerEvent, // We exited because we need to service the debugger.
+ ExceptionCheck, // We exited because a direct exception check showed that we threw an exception from a C call.
+ GenericUnwind, // We exited because we arrived at this OSR exit from genericUnwind.
};
const char* exitKindToString(ExitKind);
-bool exitKindIsCountable(ExitKind);
-
-inline bool isWatchpoint(ExitKind kind)
-{
- switch (kind) {
- case BadCacheWatchpoint:
- case BadWeakConstantCacheWatchpoint:
- case UncountableWatchpoint:
- return true;
- default:
- return false;
- }
-}
+bool exitKindMayJettison(ExitKind);
} // namespace JSC
@@ -77,6 +69,3 @@ class PrintStream;
void printInternal(PrintStream&, JSC::ExitKind);
} // namespace WTF
-
-#endif // ExitKind_h
-
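Note: exitKindMayJettison, which replaces exitKindIsCountable above, excludes only ExceptionCheck and GenericUnwind: those exits are exception plumbing rather than evidence of a bad speculation. A hedged usage sketch (hypothetical names, not JSC code) of how an exit handler might consult it:

#include <cstdint>
#include <cstdio>

enum ExitKind : uint8_t { BadType, ExceptionCheck, GenericUnwind };

bool exitKindMayJettison(ExitKind kind)
{
    switch (kind) {
    case ExceptionCheck:
    case GenericUnwind:
        return false;
    default:
        return true;
    }
}

struct OptimizedBlock {
    unsigned osrExitCount = 0;

    void recordExit(ExitKind kind)
    {
        // Only speculation failures count toward throwing the block away.
        if (exitKindMayJettison(kind))
            ++osrExitCount;
    }
};

int main()
{
    OptimizedBlock block;
    block.recordExit(BadType);        // counted
    block.recordExit(ExceptionCheck); // not counted
    std::printf("%u\n", block.osrExitCount); // prints 1
}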
diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.cpp b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp
new file mode 100644
index 000000000..aa8f120b6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ExitingJITType.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ExitingJITType type)
+{
+ switch (type) {
+ case ExitFromAnything:
+ out.print("FromAnything");
+ return;
+ case ExitFromDFG:
+ out.print("FromDFG");
+ return;
+ case ExitFromFTL:
+ out.print("FromFTL");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.h b/Source/JavaScriptCore/bytecode/ExitingJITType.h
new file mode 100644
index 000000000..dfbfee4aa
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ExitingJITType.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JITCode.h"
+
+namespace JSC {
+
+enum ExitingJITType : uint8_t {
+ ExitFromAnything,
+ ExitFromDFG,
+ ExitFromFTL
+};
+
+inline ExitingJITType exitingJITTypeFor(JITCode::JITType type)
+{
+ switch (type) {
+ case JITCode::DFGJIT:
+ return ExitFromDFG;
+ case JITCode::FTLJIT:
+ return ExitFromFTL;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return ExitFromAnything;
+ }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::ExitingJITType);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
index 855738aec..8f83527ff 100644
--- a/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
+++ b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ExpressionRangeInfo_h
-#define ExpressionRangeInfo_h
+#pragma once
#include <wtf/StdLibExtras.h>
@@ -86,13 +85,13 @@ struct ExpressionRangeInfo {
position = ((line & FatColumnModeLineMask) << FatColumnModeLineShift | (column & FatColumnModeColumnMask));
}
- void decodeFatLineMode(unsigned& line, unsigned& column)
+ void decodeFatLineMode(unsigned& line, unsigned& column) const
{
line = (position >> FatLineModeLineShift) & FatLineModeLineMask;
column = position & FatLineModeColumnMask;
}
- void decodeFatColumnMode(unsigned& line, unsigned& column)
+ void decodeFatColumnMode(unsigned& line, unsigned& column) const
{
line = (position >> FatColumnModeLineShift) & FatColumnModeLineMask;
column = position & FatColumnModeColumnMask;
@@ -107,6 +106,3 @@ struct ExpressionRangeInfo {
};
} // namespace JSC
-
-#endif // ExpressionRangeInfo_h
-
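Note: the decode functions made const above unpack a line/column pair from the single position field; in fat-line mode the line gets the wide part of the word and the column the narrow part (the shift and mask constants are defined earlier in the header, outside this hunk). A standalone round-trip sketch assuming the 22-bit-line / 8-bit-column split:

#include <cassert>
#include <cstdio>

enum : unsigned {
    FatLineModeLineShift = 8,
    FatLineModeLineMask = (1u << 22) - 1,
    FatLineModeColumnMask = (1u << 8) - 1,
};

unsigned encodeFatLineMode(unsigned line, unsigned column)
{
    return ((line & FatLineModeLineMask) << FatLineModeLineShift)
        | (column & FatLineModeColumnMask);
}

void decodeFatLineMode(unsigned position, unsigned& line, unsigned& column)
{
    line = (position >> FatLineModeLineShift) & FatLineModeLineMask;
    column = position & FatLineModeColumnMask;
}

int main()
{
    unsigned line, column;
    decodeFatLineMode(encodeFatLineMode(123456, 7), line, column);
    assert(line == 123456 && column == 7);
    std::printf("%u:%u\n", line, column); // prints 123456:7
}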
diff --git a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
index d34392121..073ce2757 100644
--- a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
+++ b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FullBytecodeLiveness_h
-#define FullBytecodeLiveness_h
+#pragma once
#include <wtf/FastBitVector.h>
@@ -35,36 +34,22 @@ class BytecodeLivenessAnalysis;
typedef HashMap<unsigned, FastBitVector, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeToBitmapMap;
class FullBytecodeLiveness {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- FullBytecodeLiveness() : m_codeBlock(0) { }
-
- // We say "out" to refer to the bitvector that contains raw results for a bytecode
- // instruction.
- const FastBitVector& getOut(unsigned bytecodeIndex) const
+ const FastBitVector& getLiveness(unsigned bytecodeIndex) const
{
- BytecodeToBitmapMap::const_iterator iter = m_map.find(bytecodeIndex);
- ASSERT(iter != m_map.end());
- return iter->value;
+ return m_map[bytecodeIndex];
}
bool operandIsLive(int operand, unsigned bytecodeIndex) const
{
- return operandIsAlwaysLive(m_codeBlock, operand) || operandThatIsNotAlwaysLiveIsLive(m_codeBlock, getOut(bytecodeIndex), operand);
- }
-
- FastBitVector getLiveness(unsigned bytecodeIndex) const
- {
- return getLivenessInfo(m_codeBlock, getOut(bytecodeIndex));
+ return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex), operand);
}
private:
friend class BytecodeLivenessAnalysis;
- CodeBlock* m_codeBlock;
- BytecodeToBitmapMap m_map;
+ Vector<FastBitVector, 0, UnsafeVectorOverflow> m_map;
};
} // namespace JSC
-
-#endif // FullBytecodeLiveness_h
-
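Note: the reworked operandIsLive above is a two-part query: operands that are always live (in the real code, arguments and the call-frame header) never appear in the per-bytecode bit set, and everything else is looked up in the FastBitVector for that bytecode index. A hedged sketch with simplified types (not JSC code):

#include <cstdio>
#include <vector>

struct Liveness {
    // One bit set per bytecode index; bit i covers "local i".
    std::vector<std::vector<bool>> bitsAtBytecode;

    static bool operandIsAlwaysLive(int operand)
    {
        // Stand-in rule for illustration: treat negative operands as the
        // always-live ones (arguments and header slots in the real code).
        return operand < 0;
    }

    bool operandIsLive(int operand, unsigned bytecodeIndex) const
    {
        if (operandIsAlwaysLive(operand))
            return true;
        return bitsAtBytecode[bytecodeIndex][static_cast<size_t>(operand)];
    }
};

int main()
{
    Liveness liveness;
    liveness.bitsAtBytecode = { { true, false } };
    std::printf("%d %d %d\n",
        liveness.operandIsLive(-1, 0),  // always live
        liveness.operandIsLive(0, 0),   // live at bytecode 0
        liveness.operandIsLive(1, 0));  // dead at bytecode 0
}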
diff --git a/Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp
new file mode 100644
index 000000000..56eadc62d
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FunctionCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo FunctionCodeBlock::s_info = {
+ "FunctionCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(FunctionCodeBlock)
+};
+
+void FunctionCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<FunctionCodeBlock*>(cell)->~FunctionCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/FunctionCodeBlock.h b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.h
new file mode 100644
index 000000000..4f58d0911
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/FunctionCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+namespace JSC {
+
+class FunctionCodeBlock : public CodeBlock {
+public:
+ typedef CodeBlock Base;
+ DECLARE_INFO;
+
+ static FunctionCodeBlock* create(VM* vm, CopyParsedBlockTag, FunctionCodeBlock& other)
+ {
+ FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+ FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
+ }
+
+ static FunctionCodeBlock* create(VM* vm, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+ RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ {
+ FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+ FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), sourceOffset, firstLineColumnOffset);
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
+ }
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+private:
+ FunctionCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, FunctionCodeBlock& other)
+ : CodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ FunctionCodeBlock(VM* vm, Structure* structure, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+ RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), sourceOffset, firstLineColumnOffset)
+ {
+ }
+
+ static void destroy(JSCell*);
+};
+
+} // namespace JSC
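Note: the create() functions above follow JSC's two-phase cell construction: allocate raw cell storage, placement-new an object whose constructor does only trivial initialization, then call finishCreation() for the rest, so the GC never observes a half-built cell doing real work. A hedged sketch of the shape of that pattern, with a plain allocator standing in for allocateCell<>:

#include <cstdio>
#include <cstdlib>
#include <new>

struct Cell {
    explicit Cell(int id) : id(id) { }         // phase 1: trivial init only
    void finishCreation() { finished = true; } // phase 2: may do real work
    int id;
    bool finished = false;
};

Cell* createCell(int id)
{
    void* storage = std::malloc(sizeof(Cell)); // stands in for allocateCell<>
    Cell* cell = new (storage) Cell(id);
    cell->finishCreation();
    return cell;
}

int main()
{
    Cell* cell = createCell(42);
    std::printf("%d %d\n", cell->id, cell->finished); // prints 42 1
    cell->~Cell();
    std::free(cell);
}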
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index fbb3da1a5..1537cd9b1 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,279 +27,432 @@
#include "GetByIdStatus.h"
#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "GetterSetterAccessCase.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JSCInlines.h"
#include "JSScope.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "ModuleNamespaceAccessCase.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+#include <wtf/ListDump.h>
namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
+{
+ // Attempt to merge this variant with an already existing variant.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].attemptToMerge(variant))
+ return true;
+ }
+
+ // Make sure there is no overlap. We should have pruned out opportunities for
+ // overlap, but it's possible that an inline cache got into a weird state. We are
+ // defensive and bail if we detect anything strange.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].structureSet().overlaps(variant.structureSet()))
+ return false;
+ }
+
+ m_variants.append(variant);
+ return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool GetByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+ return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
- Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+ VM& vm = *profiledBlock->vm();
- if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+ Opcode opcode = instruction[0].u.opcode;
+
+ ASSERT(opcode == LLInt::getOpcode(op_get_array_length) || opcode == LLInt::getOpcode(op_try_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_proto_load) || opcode == LLInt::getOpcode(op_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_unset));
+
+ // FIXME: We should not just bail if we see a try_get_by_id or a get_by_id_proto_load.
+ // https://bugs.webkit.org/show_bug.cgi?id=158039
+ if (opcode != LLInt::getOpcode(op_get_by_id))
return GetByIdStatus(NoInformation, false);
- Structure* structure = instruction[4].u.structure.get();
- if (!structure)
+ StructureID structureID = instruction[4].u.structureID;
+ if (!structureID)
return GetByIdStatus(NoInformation, false);
+ Structure* structure = vm.heap.structureIDTable().get(structureID);
+
if (structure->takesSlowPathInDFGForImpureProperty())
return GetByIdStatus(NoInformation, false);
- unsigned attributesIgnored;
- JSCell* specificValue;
- PropertyOffset offset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
if (!isValidOffset(offset))
return GetByIdStatus(NoInformation, false);
+ if (attributes & CustomAccessor)
+ return GetByIdStatus(NoInformation, false);
- return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
-#else
- return GetByIdStatus(NoInformation, false);
-#endif
+ return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
}
-void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
-#if ENABLE(JIT)
- // Validate the chain. If the chain is invalid, then currently the best thing
- // we can do is to assume that TakesSlow is true. In the future, it might be
- // worth exploring reifying the structure chain from the structure we've got
- // instead of using the one from the cache, since that will do the right things
- // if the structure chain has changed. But that may be harder, because we may
- // then end up having a different type of access altogether. And it currently
- // does not appear to be worth it to do so -- effectively, the heuristic we
- // have now is that if the structure chain has changed between when it was
- // cached on in the baseline JIT and when the DFG tried to inline the access,
- // then we fall back on a polymorphic access.
- if (!result.m_chain->isStillValid())
- return;
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
- if (result.m_chain->head()->takesSlowPathInDFGForImpureProperty())
- return;
- size_t chainSize = result.m_chain->size();
- for (size_t i = 0; i < chainSize; i++) {
- if (result.m_chain->at(i)->takesSlowPathInDFGForImpureProperty())
- return;
- }
+ GetByIdStatus result;
- JSObject* currentObject = result.m_chain->terminalPrototype();
- Structure* currentStructure = result.m_chain->last();
+#if ENABLE(DFG_JIT)
+ result = computeForStubInfoWithoutExitSiteFeedback(
+ locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
- ASSERT_UNUSED(currentObject, currentObject);
-
- unsigned attributesIgnored;
- JSCell* specificValue;
-
- result.m_offset = currentStructure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (currentStructure->isDictionary())
- specificValue = 0;
- if (!isValidOffset(result.m_offset))
- return;
-
- result.m_structureSet.add(result.m_chain->head());
- result.m_specificValue = JSValue(specificValue);
+ if (!result.takesSlowPath()
+ && hasExitSite(locker, profiledBlock, bytecodeIndex))
+ return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
#else
- UNUSED_PARAM(result);
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(uid);
- UNREACHABLE_FOR_PLATFORM();
+ UNUSED_PARAM(map);
#endif
+
+ if (!result)
+ return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+
+ return result;
}
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+#if ENABLE(DFG_JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
- ConcurrentJITLocker locker(profiledBlock->m_lock);
-
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(bytecodeIndex);
- UNUSED_PARAM(uid);
+ GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+ locker, profiledBlock, stubInfo, uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
+
+ if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+ return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
+ return result;
+}
+#endif // ENABLE(DFG_JIT)
+
#if ENABLE(JIT)
- StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
- if (!stubInfo || !stubInfo->seen)
- return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
-
- if (stubInfo->resetByGC)
- return GetByIdStatus(TakesSlowPath, true);
-
- PolymorphicAccessStructureList* list;
- int listSize;
- switch (stubInfo->accessType) {
- case access_get_by_id_self_list:
- list = stubInfo->u.getByIdSelfList.structureList;
- listSize = stubInfo->u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- list = stubInfo->u.getByIdProtoList.structureList;
- listSize = stubInfo->u.getByIdProtoList.listSize;
- break;
- default:
- list = 0;
- listSize = 0;
- break;
- }
- for (int i = 0; i < listSize; ++i) {
- if (!list->list[i].isDirect)
- return GetByIdStatus(MakesCalls, true);
+GetByIdStatus::GetByIdStatus(const ModuleNamespaceAccessCase& accessCase)
+ : m_state(ModuleNamespace)
+ , m_wasSeenInJIT(true)
+ , m_moduleNamespaceObject(accessCase.moduleNamespaceObject())
+ , m_moduleEnvironment(accessCase.moduleEnvironment())
+ , m_scopeOffset(accessCase.scopeOffset())
+{
+}
+
+GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+ const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
+ CallLinkStatus::ExitSiteData callExitSiteData)
+{
+ if (!stubInfo || !stubInfo->everConsidered)
+ return GetByIdStatus(NoInformation);
+
+ PolymorphicAccess* list = 0;
+ State slowPathState = TakesSlowPath;
+ if (stubInfo->cacheType == CacheType::Stub) {
+ list = stubInfo->u.stub;
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const AccessCase& access = list->at(i);
+ if (access.doesCalls())
+ slowPathState = MakesCalls;
+ }
}
- // Next check if it takes slow case, in which case we want to be kind of careful.
- if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
- return GetByIdStatus(TakesSlowPath, true);
+ if (stubInfo->tookSlowPath)
+ return GetByIdStatus(slowPathState);
// Finally figure out if we can derive an access strategy.
GetByIdStatus result;
+ result.m_state = Simple;
result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
- switch (stubInfo->accessType) {
- case access_unset:
- return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+ switch (stubInfo->cacheType) {
+ case CacheType::Unset:
+ return GetByIdStatus(NoInformation);
- case access_get_by_id_self: {
- Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
+ case CacheType::GetByIdSelf: {
+ Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
if (structure->takesSlowPathInDFGForImpureProperty())
- return GetByIdStatus(TakesSlowPath, true);
- unsigned attributesIgnored;
- JSCell* specificValue;
- result.m_offset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ return GetByIdStatus(slowPathState, true);
+ unsigned attributes;
+ GetByIdVariant variant;
+ variant.m_offset = structure->getConcurrently(uid, attributes);
+ if (!isValidOffset(variant.m_offset))
+ return GetByIdStatus(slowPathState, true);
+ if (attributes & CustomAccessor)
+ return GetByIdStatus(slowPathState, true);
- if (isValidOffset(result.m_offset)) {
- result.m_structureSet.add(structure);
- result.m_specificValue = JSValue(specificValue);
- }
-
- if (isValidOffset(result.m_offset))
- ASSERT(result.m_structureSet.size());
- break;
+ variant.m_structureSet.add(structure);
+ bool didAppend = result.appendVariant(variant);
+ ASSERT_UNUSED(didAppend, didAppend);
+ return result;
}
- case access_get_by_id_self_list: {
- for (int i = 0; i < listSize; ++i) {
- ASSERT(list->list[i].isDirect);
-
- Structure* structure = list->list[i].base.get();
- if (structure->takesSlowPathInDFGForImpureProperty())
- return GetByIdStatus(TakesSlowPath, true);
+ case CacheType::Stub: {
+ if (list->size() == 1) {
+ const AccessCase& access = list->at(0);
+ switch (access.type()) {
+ case AccessCase::ModuleNamespaceLoad:
+ return GetByIdStatus(access.as<ModuleNamespaceAccessCase>());
+ default:
+ break;
+ }
+ }
- if (result.m_structureSet.contains(structure))
- continue;
-
- unsigned attributesIgnored;
- JSCell* specificValue;
- PropertyOffset myOffset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
+ const AccessCase& access = list->at(listIndex);
+ if (access.viaProxy())
+ return GetByIdStatus(slowPathState, true);
- if (!isValidOffset(myOffset)) {
- result.m_offset = invalidOffset;
- break;
+ Structure* structure = access.structure();
+ if (!structure) {
+ // The null structure cases arise due to array.length and string.length. We have no way
+ // of creating a GetByIdVariant for those, and we don't really have to since the DFG
+ // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
+ // shouldn't have to use value profiling to discover something that the AccessCase
+ // could have told us. But, it works well enough. So, our only concern here is to not
+ // crash on null structure.
+ return GetByIdStatus(slowPathState, true);
}
-
- if (!i) {
- result.m_offset = myOffset;
- result.m_specificValue = JSValue(specificValue);
- } else if (result.m_offset != myOffset) {
- result.m_offset = invalidOffset;
- break;
- } else if (result.m_specificValue != JSValue(specificValue))
- result.m_specificValue = JSValue();
- result.m_structureSet.add(structure);
+ ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+ structure, access.conditionSet(), uid);
+
+ switch (complexGetStatus.kind()) {
+ case ComplexGetStatus::ShouldSkip:
+ continue;
+
+ case ComplexGetStatus::TakesSlowPath:
+ return GetByIdStatus(slowPathState, true);
+
+ case ComplexGetStatus::Inlineable: {
+ std::unique_ptr<CallLinkStatus> callLinkStatus;
+ JSFunction* intrinsicFunction = nullptr;
+ DOMJIT::GetterSetter* domJIT = nullptr;
+
+ switch (access.type()) {
+ case AccessCase::Load:
+ case AccessCase::GetGetter:
+ case AccessCase::Miss: {
+ break;
+ }
+ case AccessCase::IntrinsicGetter: {
+ intrinsicFunction = access.as<IntrinsicGetterAccessCase>().intrinsicFunction();
+ break;
+ }
+ case AccessCase::Getter: {
+ callLinkStatus = std::make_unique<CallLinkStatus>();
+ if (CallLinkInfo* callLinkInfo = access.as<GetterSetterAccessCase>().callLinkInfo()) {
+ *callLinkStatus = CallLinkStatus::computeFor(
+ locker, profiledBlock, *callLinkInfo, callExitSiteData);
+ }
+ break;
+ }
+ case AccessCase::CustomAccessorGetter: {
+ domJIT = access.as<GetterSetterAccessCase>().domJIT();
+ if (!domJIT)
+ return GetByIdStatus(slowPathState, true);
+ result.m_state = Custom;
+ break;
+ }
+ default: {
+ // FIXME: It would be totally sweet to support more of these at some point in the
+ // future. https://bugs.webkit.org/show_bug.cgi?id=133052
+ return GetByIdStatus(slowPathState, true);
+ } }
+
+ ASSERT((AccessCase::Miss == access.type()) == (access.offset() == invalidOffset));
+ GetByIdVariant variant(
+ StructureSet(structure), complexGetStatus.offset(),
+ complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
+ intrinsicFunction,
+ domJIT);
+
+ if (!result.appendVariant(variant))
+ return GetByIdStatus(slowPathState, true);
+
+ if (domJIT) {
+ // Give up when custom accesses are not merged into one.
+ if (result.numVariants() != 1)
+ return GetByIdStatus(slowPathState, true);
+ } else {
+ // Give up when custom access and simple access are mixed.
+ if (result.m_state == Custom)
+ return GetByIdStatus(slowPathState, true);
+ }
+ break;
+ } }
}
-
- if (isValidOffset(result.m_offset))
- ASSERT(result.m_structureSet.size());
- break;
- }
- case access_get_by_id_proto: {
- if (!stubInfo->u.getByIdProto.isDirect)
- return GetByIdStatus(MakesCalls, true);
- result.m_chain = adoptRef(new IntendedStructureChain(
- profiledBlock,
- stubInfo->u.getByIdProto.baseObjectStructure.get(),
- stubInfo->u.getByIdProto.prototypeStructure.get()));
- computeForChain(result, profiledBlock, uid);
- break;
- }
-
- case access_get_by_id_chain: {
- if (!stubInfo->u.getByIdChain.isDirect)
- return GetByIdStatus(MakesCalls, true);
- result.m_chain = adoptRef(new IntendedStructureChain(
- profiledBlock,
- stubInfo->u.getByIdChain.baseObjectStructure.get(),
- stubInfo->u.getByIdChain.chain.get(),
- stubInfo->u.getByIdChain.count));
- computeForChain(result, profiledBlock, uid);
- break;
+ return result;
}
default:
- ASSERT(!isValidOffset(result.m_offset));
- break;
+ return GetByIdStatus(slowPathState, true);
}
- if (!isValidOffset(result.m_offset)) {
- result.m_state = TakesSlowPath;
- result.m_structureSet.clear();
- result.m_chain.clear();
- result.m_specificValue = JSValue();
- } else
- result.m_state = Simple;
-
- return result;
-#else // ENABLE(JIT)
- UNUSED_PARAM(map);
- return GetByIdStatus(NoInformation, false);
+ RELEASE_ASSERT_NOT_REACHED();
+ return GetByIdStatus();
+}
#endif // ENABLE(JIT)
+
+GetByIdStatus GetByIdStatus::computeFor(
+ CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
+ StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+ if (dfgBlock) {
+ CallLinkStatus::ExitSiteData exitSiteData;
+ {
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
+ exitSiteData = CallLinkStatus::computeExitSiteData(
+ locker, profiledBlock, codeOrigin.bytecodeIndex);
+ }
+
+ GetByIdStatus result;
+ {
+ ConcurrentJSLocker locker(dfgBlock->m_lock);
+ result = computeForStubInfoWithoutExitSiteFeedback(
+ locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+ }
+
+ if (result.takesSlowPath())
+ return result;
+
+ {
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
+ if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+ return GetByIdStatus(TakesSlowPath, true);
+ }
+
+ if (result.isSet())
+ return result;
+ }
+#else
+ UNUSED_PARAM(dfgBlock);
+ UNUSED_PARAM(dfgMap);
+#endif
+
+ return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
-GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
{
// For now we only handle the super simple self access case. We could handle the
// prototype case in the future.
- if (!structure)
- return GetByIdStatus(TakesSlowPath);
+ if (set.isEmpty())
+ return GetByIdStatus();
- if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
- return GetByIdStatus(TakesSlowPath);
-
- if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ if (parseIndex(*uid))
return GetByIdStatus(TakesSlowPath);
- if (!structure->propertyAccessesAreCacheable())
- return GetByIdStatus(TakesSlowPath);
-
GetByIdStatus result;
- result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, StringImpl*) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
- unsigned attributes;
- JSCell* specificValue;
- result.m_offset = structure->getConcurrently(vm, uid, attributes, specificValue);
- if (!isValidOffset(result.m_offset))
- return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
- if (attributes & Accessor)
- return GetByIdStatus(MakesCalls);
- if (structure->isDictionary())
- specificValue = 0;
- result.m_structureSet.add(structure);
- result.m_specificValue = JSValue(specificValue);
result.m_state = Simple;
+ result.m_wasSeenInJIT = false;
+ for (unsigned i = 0; i < set.size(); ++i) {
+ Structure* structure = set[i];
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ return GetByIdStatus(TakesSlowPath);
+
+ if (!structure->propertyAccessesAreCacheable())
+ return GetByIdStatus(TakesSlowPath);
+
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (!isValidOffset(offset))
+ return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+ if (attributes & Accessor)
+ return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+ if (attributes & CustomAccessor)
+ return GetByIdStatus(TakesSlowPath);
+
+ if (!result.appendVariant(GetByIdVariant(structure, offset)))
+ return GetByIdStatus(TakesSlowPath);
+ }
+
return result;
}
+bool GetByIdStatus::makesCalls() const
+{
+ switch (m_state) {
+ case NoInformation:
+ case TakesSlowPath:
+ case Custom:
+ case ModuleNamespace:
+ return false;
+ case Simple:
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].callLinkStatus())
+ return true;
+ }
+ return false;
+ case MakesCalls:
+ return true;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+
+ return false;
+}
+
+void GetByIdStatus::filter(const StructureSet& set)
+{
+ if (m_state != Simple)
+ return;
+
+ // FIXME: We could also filter the variants themselves.
+
+ m_variants.removeAllMatching(
+ [&] (GetByIdVariant& variant) -> bool {
+ return !variant.structureSet().overlaps(set);
+ });
+
+ if (m_variants.isEmpty())
+ m_state = NoInformation;
+}
+
+void GetByIdStatus::dump(PrintStream& out) const
+{
+ out.print("(");
+ switch (m_state) {
+ case NoInformation:
+ out.print("NoInformation");
+ break;
+ case Simple:
+ out.print("Simple");
+ break;
+ case Custom:
+ out.print("Custom");
+ break;
+ case ModuleNamespace:
+ out.print("ModuleNamespace");
+ break;
+ case TakesSlowPath:
+ out.print("TakesSlowPath");
+ break;
+ case MakesCalls:
+ out.print("MakesCalls");
+ break;
+ }
+ out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
+}
+
} // namespace JSC
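Note: appendVariant, introduced at the top of this file, first tries to merge the incoming variant into an existing one and otherwise refuses the append when structure sets overlap, since overlapping variants would make structure-based dispatch ambiguous. A hedged sketch with structure sets reduced to sets of ints (the real merge also considers condition sets and call link status):

#include <cstdio>
#include <set>
#include <vector>

struct Variant {
    std::set<int> structures;
    int offset;

    bool attemptToMerge(const Variant& other)
    {
        if (offset != other.offset)
            return false;
        structures.insert(other.structures.begin(), other.structures.end());
        return true;
    }

    bool overlaps(const Variant& other) const
    {
        for (int s : other.structures) {
            if (structures.count(s))
                return true;
        }
        return false;
    }
};

bool appendVariant(std::vector<Variant>& variants, const Variant& variant)
{
    for (Variant& existing : variants) {
        if (existing.attemptToMerge(variant))
            return true;
    }
    for (const Variant& existing : variants) {
        if (existing.overlaps(variant))
            return false; // ambiguous dispatch; caller falls back to the slow path
    }
    variants.push_back(variant);
    return true;
}

int main()
{
    std::vector<Variant> variants;
    std::printf("%d", appendVariant(variants, { { 1, 2 }, 8 }));  // 1: appended
    std::printf("%d", appendVariant(variants, { { 3 }, 8 }));     // 1: merged
    std::printf("%d\n", appendVariant(variants, { { 1 }, 16 }));  // 0: overlap
}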
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
index a1e801cca..de47bf5cc 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,85 +23,121 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GetByIdStatus_h
-#define GetByIdStatus_h
+#pragma once
-#include "IntendedStructureChain.h"
-#include "PropertyOffset.h"
-#include "StructureSet.h"
-#include "StructureStubInfo.h"
+#include "CallLinkStatus.h"
+#include "CodeOrigin.h"
+#include "ConcurrentJSLock.h"
+#include "ExitingJITType.h"
+#include "GetByIdVariant.h"
+#include "ScopeOffset.h"
namespace JSC {
+class AccessCase;
class CodeBlock;
+class JSModuleEnvironment;
+class JSModuleNamespaceObject;
+class ModuleNamespaceAccessCase;
+class StructureStubInfo;
+
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
class GetByIdStatus {
public:
enum State {
- NoInformation, // It's uncached so we have no information.
- Simple, // It's cached for a simple access to a known object property with
- // a possible structure chain and a possible specific value.
- TakesSlowPath, // It's known to often take slow path.
- MakesCalls // It's known to take paths that make calls.
+ // It's uncached so we have no information.
+ NoInformation,
+ // It's cached for a simple access to a known object property with
+ // a possible structure chain and a possible specific value.
+ Simple,
+ // It's cached for a custom accessor with a possible structure chain.
+ Custom,
+ // It's cached for an access to a module namespace object's binding.
+ ModuleNamespace,
+ // It's known to often take slow path.
+ TakesSlowPath,
+ // It's known to take paths that make calls.
+ MakesCalls,
};
GetByIdStatus()
: m_state(NoInformation)
- , m_offset(invalidOffset)
{
}
explicit GetByIdStatus(State state)
: m_state(state)
- , m_offset(invalidOffset)
{
ASSERT(state == NoInformation || state == TakesSlowPath || state == MakesCalls);
}
+
GetByIdStatus(
- State state, bool wasSeenInJIT, const StructureSet& structureSet = StructureSet(),
- PropertyOffset offset = invalidOffset, JSValue specificValue = JSValue(), PassRefPtr<IntendedStructureChain> chain = nullptr)
+ State state, bool wasSeenInJIT, const GetByIdVariant& variant = GetByIdVariant())
: m_state(state)
- , m_structureSet(structureSet)
- , m_chain(chain)
- , m_specificValue(specificValue)
- , m_offset(offset)
, m_wasSeenInJIT(wasSeenInJIT)
{
- ASSERT((state == Simple) == (offset != invalidOffset));
+ ASSERT((state == Simple || state == Custom) == variant.isSet());
+ m_variants.append(variant);
}
- static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, StringImpl* uid);
- static GetByIdStatus computeFor(VM&, Structure*, StringImpl* uid);
+ static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+ static GetByIdStatus computeFor(const StructureSet&, UniquedStringImpl* uid);
+ static GetByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(DFG_JIT)
+ static GetByIdStatus computeForStubInfo(const ConcurrentJSLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
+
State state() const { return m_state; }
bool isSet() const { return m_state != NoInformation; }
bool operator!() const { return !isSet(); }
bool isSimple() const { return m_state == Simple; }
- bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; }
- bool makesCalls() const { return m_state == MakesCalls; }
-
- const StructureSet& structureSet() const { return m_structureSet; }
- IntendedStructureChain* chain() const { return const_cast<IntendedStructureChain*>(m_chain.get()); } // Returns null if this is a direct access.
- JSValue specificValue() const { return m_specificValue; } // Returns JSValue() if there is no specific value.
- PropertyOffset offset() const { return m_offset; }
+ bool isCustom() const { return m_state == Custom; }
+ bool isModuleNamespace() const { return m_state == ModuleNamespace; }
+
+ size_t numVariants() const { return m_variants.size(); }
+ const Vector<GetByIdVariant, 1>& variants() const { return m_variants; }
+ const GetByIdVariant& at(size_t index) const { return m_variants[index]; }
+ const GetByIdVariant& operator[](size_t index) const { return at(index); }
+
+ bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls || m_state == Custom || m_state == ModuleNamespace; }
+ bool makesCalls() const;
bool wasSeenInJIT() const { return m_wasSeenInJIT; }
+ // Attempts to reduce the set of variants to fit the given structure set. This may be approximate.
+ void filter(const StructureSet&);
+
+ JSModuleNamespaceObject* moduleNamespaceObject() const { return m_moduleNamespaceObject; }
+ JSModuleEnvironment* moduleEnvironment() const { return m_moduleEnvironment; }
+ ScopeOffset scopeOffset() const { return m_scopeOffset; }
+
+ void dump(PrintStream&) const;
+
private:
- static void computeForChain(GetByIdStatus& result, CodeBlock*, StringImpl* uid);
- static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, StringImpl* uid);
+#if ENABLE(DFG_JIT)
+ static bool hasExitSite(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+ GetByIdStatus(const ModuleNamespaceAccessCase&);
+ static GetByIdStatus computeForStubInfoWithoutExitSiteFeedback(
+ const ConcurrentJSLocker&, CodeBlock* profiledBlock, StructureStubInfo*,
+ UniquedStringImpl* uid, CallLinkStatus::ExitSiteData);
+#endif
+ static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+
+ bool appendVariant(const GetByIdVariant&);
State m_state;
- StructureSet m_structureSet;
- RefPtr<IntendedStructureChain> m_chain;
- JSValue m_specificValue;
- PropertyOffset m_offset;
+ Vector<GetByIdVariant, 1> m_variants;
bool m_wasSeenInJIT;
+ JSModuleNamespaceObject* m_moduleNamespaceObject { nullptr };
+ JSModuleEnvironment* m_moduleEnvironment { nullptr };
+ ScopeOffset m_scopeOffset { };
};
} // namespace JSC
-
-#endif // PropertyAccessStatus_h
-
diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp
new file mode 100644
index 000000000..d940b62ca
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GetByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+GetByIdVariant::GetByIdVariant(
+ const StructureSet& structureSet, PropertyOffset offset,
+ const ObjectPropertyConditionSet& conditionSet,
+ std::unique_ptr<CallLinkStatus> callLinkStatus,
+ JSFunction* intrinsicFunction,
+ DOMJIT::GetterSetter* domJIT)
+ : m_structureSet(structureSet)
+ , m_conditionSet(conditionSet)
+ , m_offset(offset)
+ , m_callLinkStatus(WTFMove(callLinkStatus))
+ , m_intrinsicFunction(intrinsicFunction)
+ , m_domJIT(domJIT)
+{
+ if (!structureSet.size()) {
+ ASSERT(offset == invalidOffset);
+ ASSERT(conditionSet.isEmpty());
+ }
+ if (intrinsicFunction)
+ ASSERT(intrinsic() != NoIntrinsic);
+}
+
+GetByIdVariant::~GetByIdVariant() { }
+
+GetByIdVariant::GetByIdVariant(const GetByIdVariant& other)
+ : GetByIdVariant()
+{
+ *this = other;
+}
+
+GetByIdVariant& GetByIdVariant::operator=(const GetByIdVariant& other)
+{
+ m_structureSet = other.m_structureSet;
+ m_conditionSet = other.m_conditionSet;
+ m_offset = other.m_offset;
+ m_intrinsicFunction = other.m_intrinsicFunction;
+ m_domJIT = other.m_domJIT;
+ if (other.m_callLinkStatus)
+ m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+ else
+ m_callLinkStatus = nullptr;
+ return *this;
+}
+
+inline bool GetByIdVariant::canMergeIntrinsicStructures(const GetByIdVariant& other) const
+{
+ if (m_intrinsicFunction != other.m_intrinsicFunction)
+ return false;
+ switch (intrinsic()) {
+ case TypedArrayByteLengthIntrinsic: {
+ // We can merge these sets as long as the element size of the two typed array types is the same.
+ TypedArrayType thisType = (*m_structureSet.begin())->classInfo()->typedArrayStorageType;
+ TypedArrayType otherType = (*other.m_structureSet.begin())->classInfo()->typedArrayStorageType;
+
+ ASSERT(isTypedView(thisType) && isTypedView(otherType));
+
+ return logElementSize(thisType) == logElementSize(otherType);
+ }
+
+ default:
+ return true;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+bool GetByIdVariant::attemptToMerge(const GetByIdVariant& other)
+{
+ if (m_offset != other.m_offset)
+ return false;
+ if (m_callLinkStatus || other.m_callLinkStatus)
+ return false;
+
+ if (!canMergeIntrinsicStructures(other))
+ return false;
+
+ if (m_domJIT != other.m_domJIT)
+ return false;
+
+ if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty())
+ return false;
+
+ ObjectPropertyConditionSet mergedConditionSet;
+ if (!m_conditionSet.isEmpty()) {
+ mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet);
+ if (!mergedConditionSet.isValid() || !mergedConditionSet.hasOneSlotBaseCondition())
+ return false;
+ }
+ m_conditionSet = mergedConditionSet;
+
+ m_structureSet.merge(other.m_structureSet);
+
+ return true;
+}
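+
+// Merging sketch (hypothetical structures s1 and s2): two simple self-access
+// variants at the same offset collapse into one polymorphic variant, while
+// differing offsets keep them separate:
+//
+//     GetByIdVariant a(StructureSet(s1), 24);
+//     GetByIdVariant b(StructureSet(s2), 24);
+//     a.attemptToMerge(b); // true; a now covers { s1, s2 }
+//
+//     GetByIdVariant c(StructureSet(s1), 24);
+//     GetByIdVariant d(StructureSet(s2), 32);
+//     c.attemptToMerge(d); // false; offsets differ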
+
+void GetByIdVariant::dump(PrintStream& out) const
+{
+ dumpInContext(out, 0);
+}
+
+void GetByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!isSet()) {
+ out.print("<empty>");
+ return;
+ }
+
+ out.print(
+ "<", inContext(structureSet(), context), ", ", inContext(m_conditionSet, context));
+ out.print(", offset = ", offset());
+ if (m_callLinkStatus)
+ out.print(", call = ", *m_callLinkStatus);
+ if (m_intrinsicFunction)
+ out.print(", intrinsic = ", *m_intrinsicFunction);
+ if (m_domJIT)
+ out.print(", domjit = ", RawPointer(m_domJIT));
+ out.print(">");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.h b/Source/JavaScriptCore/bytecode/GetByIdVariant.h
new file mode 100644
index 000000000..8ded24867
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CallLinkStatus.h"
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+class CallLinkStatus;
+class GetByIdStatus;
+struct DumpContext;
+
+class GetByIdVariant {
+public:
+ GetByIdVariant(
+ const StructureSet& structureSet = StructureSet(), PropertyOffset offset = invalidOffset,
+ const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+ std::unique_ptr<CallLinkStatus> = nullptr,
+ JSFunction* = nullptr,
+ DOMJIT::GetterSetter* = nullptr);
+
+ ~GetByIdVariant();
+
+ GetByIdVariant(const GetByIdVariant&);
+ GetByIdVariant& operator=(const GetByIdVariant&);
+
+ bool isSet() const { return !!m_structureSet.size(); }
+ bool operator!() const { return !isSet(); }
+ const StructureSet& structureSet() const { return m_structureSet; }
+ StructureSet& structureSet() { return m_structureSet; }
+
+ // A non-empty condition set means that this is a prototype load.
+ const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+
+ PropertyOffset offset() const { return m_offset; }
+ CallLinkStatus* callLinkStatus() const { return m_callLinkStatus.get(); }
+ JSFunction* intrinsicFunction() const { return m_intrinsicFunction; }
+ Intrinsic intrinsic() const { return m_intrinsicFunction ? m_intrinsicFunction->intrinsic() : NoIntrinsic; }
+ DOMJIT::GetterSetter* domJIT() const { return m_domJIT; }
+
+ bool isPropertyUnset() const { return offset() == invalidOffset; }
+
+ bool attemptToMerge(const GetByIdVariant& other);
+
+ void dump(PrintStream&) const;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+
+private:
+ friend class GetByIdStatus;
+
+ bool canMergeIntrinsicStructures(const GetByIdVariant&) const;
+
+ StructureSet m_structureSet;
+ ObjectPropertyConditionSet m_conditionSet;
+ PropertyOffset m_offset;
+ std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+ JSFunction* m_intrinsicFunction;
+ DOMJIT::GetterSetter* m_domJIT;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp
new file mode 100644
index 000000000..9b6bccc29
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GetterSetterAccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "DOMJITAccessCasePatchpointParams.h"
+#include "DOMJITCallDOMGetterPatchpoint.h"
+#include "DOMJITGetterSetter.h"
+#include "HeapInlines.h"
+#include "JSCJSValueInlines.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+static const bool verbose = false;
+
+GetterSetterAccessCase::GetterSetterAccessCase(VM& vm, JSCell* owner, AccessType accessType, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet, JSObject* customSlotBase)
+ : Base(vm, owner, accessType, offset, structure, conditionSet, viaProxy, additionalSet)
+{
+ m_customSlotBase.setMayBeNull(vm, owner, customSlotBase);
+}
+
+std::unique_ptr<AccessCase> GetterSetterAccessCase::create(
+ VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
+ const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
+ PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase, DOMJIT::GetterSetter* domJIT)
+{
+ switch (type) {
+ case Getter:
+ case CustomAccessorGetter:
+ case CustomValueGetter:
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ std::unique_ptr<GetterSetterAccessCase> result(new GetterSetterAccessCase(vm, owner, type, offset, structure, conditionSet, viaProxy, additionalSet, customSlotBase));
+ result->m_domJIT = domJIT;
+ result->m_customAccessor.getter = customGetter;
+ return WTFMove(result);
+}
+
+std::unique_ptr<AccessCase> GetterSetterAccessCase::create(VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
+ const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
+ JSObject* customSlotBase)
+{
+ ASSERT(type == Setter || type == CustomValueSetter || type == CustomAccessorSetter);
+ std::unique_ptr<GetterSetterAccessCase> result(new GetterSetterAccessCase(vm, owner, type, offset, structure, conditionSet, false, nullptr, customSlotBase));
+ result->m_customAccessor.setter = customSetter;
+ return WTFMove(result);
+}
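+
+// An illustrative call for a custom value setter (the slot accessor names here
+// are assumptions; the real call sites live in the repatching code):
+//
+//     GetterSetterAccessCase::create(vm, codeBlock, AccessCase::CustomValueSetter,
+//         structure, invalidOffset, conditionSet, slot.customSetter(), slot.base());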
+
+GetterSetterAccessCase::~GetterSetterAccessCase()
+{
+}
+
+GetterSetterAccessCase::GetterSetterAccessCase(const GetterSetterAccessCase& other)
+ : Base(other)
+ , m_customSlotBase(other.m_customSlotBase)
+{
+ m_customAccessor.opaque = other.m_customAccessor.opaque;
+ m_domJIT = other.m_domJIT;
+}
+
+std::unique_ptr<AccessCase> GetterSetterAccessCase::clone() const
+{
+ std::unique_ptr<GetterSetterAccessCase> result(new GetterSetterAccessCase(*this));
+ result->resetState();
+ return WTFMove(result);
+}
+
+JSObject* GetterSetterAccessCase::alternateBase() const
+{
+ if (customSlotBase())
+ return customSlotBase();
+ return conditionSet().slotBaseCondition().object();
+}
+
+void GetterSetterAccessCase::dumpImpl(PrintStream& out, CommaPrinter& comma) const
+{
+ Base::dumpImpl(out, comma);
+ out.print(comma, "customSlotBase = ", RawPointer(customSlotBase()));
+ if (callLinkInfo())
+ out.print(comma, "callLinkInfo = ", RawPointer(callLinkInfo()));
+ out.print(comma, "customAccessor = ", RawPointer(m_customAccessor.opaque));
+}
+
+void GetterSetterAccessCase::emitDOMJITGetter(AccessGenerationState& state, GPRReg baseForGetGPR)
+{
+ CCallHelpers& jit = *state.jit;
+ StructureStubInfo& stubInfo = *state.stubInfo;
+ JSValueRegs valueRegs = state.valueRegs;
+ GPRReg baseGPR = state.baseGPR;
+ GPRReg scratchGPR = state.scratchGPR;
+
+ // We construct the environment that can execute the DOMJIT::Patchpoint here.
+ Ref<DOMJIT::CallDOMGetterPatchpoint> patchpoint = domJIT()->callDOMGetter();
+
+ Vector<GPRReg> gpScratch;
+ Vector<FPRReg> fpScratch;
+ Vector<DOMJIT::Value> regs;
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+ allocator.lock(valueRegs);
+ allocator.lock(scratchGPR);
+
+ GPRReg paramBaseGPR = InvalidGPRReg;
+ GPRReg paramGlobalObjectGPR = InvalidGPRReg;
+ JSValueRegs paramValueRegs = valueRegs;
+ GPRReg remainingScratchGPR = InvalidGPRReg;
+
+ // valueRegs and baseForGetGPR may be the same register. For example, the Baseline JIT passes the same regT0 for baseGPR and valueRegs.
+ // In the FTL, there is no constraint that baseForGetGPR interferes with the result. To keep the implementation simple,
+ // DOMJIT::Patchpoint assumes that result registers always early-interfere with input registers, in this case
+ // baseForGetGPR. So we move baseForGetGPR to another register when it aliases valueRegs.
+ if (baseForGetGPR != valueRegs.payloadGPR()) {
+ paramBaseGPR = baseForGetGPR;
+ if (!patchpoint->requireGlobalObject)
+ remainingScratchGPR = scratchGPR;
+ else
+ paramGlobalObjectGPR = scratchGPR;
+ } else {
+ jit.move(valueRegs.payloadGPR(), scratchGPR);
+ paramBaseGPR = scratchGPR;
+ if (patchpoint->requireGlobalObject)
+ paramGlobalObjectGPR = allocator.allocateScratchGPR();
+ }
+
+ JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();
+
+ regs.append(paramValueRegs);
+ regs.append(paramBaseGPR);
+ if (patchpoint->requireGlobalObject) {
+ ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
+ regs.append(DOMJIT::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
+ }
+
+ if (patchpoint->numGPScratchRegisters) {
+ unsigned i = 0;
+ if (remainingScratchGPR != InvalidGPRReg) {
+ gpScratch.append(remainingScratchGPR);
+ ++i;
+ }
+ for (; i < patchpoint->numGPScratchRegisters; ++i)
+ gpScratch.append(allocator.allocateScratchGPR());
+ }
+
+ for (unsigned i = 0; i < patchpoint->numFPScratchRegisters; ++i)
+ fpScratch.append(allocator.allocateScratchFPR());
+
+ // Let's store the reused registers to the stack. After that, we can use allocated scratch registers.
+ ScratchRegisterAllocator::PreservedState preservedState =
+ allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+
+ if (verbose) {
+ dataLog("baseGPR = ", baseGPR, "\n");
+ dataLog("valueRegs = ", valueRegs, "\n");
+ dataLog("scratchGPR = ", scratchGPR, "\n");
+ dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
+ if (paramGlobalObjectGPR != InvalidGPRReg)
+ dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
+ dataLog("paramValueRegs = ", paramValueRegs, "\n");
+ for (unsigned i = 0; i < patchpoint->numGPScratchRegisters; ++i)
+ dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
+ }
+
+ if (patchpoint->requireGlobalObject)
+ jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);
+
+ // We only spill the registers used by the DOMJIT::Patchpoint here. Registers not spilled here explicitly
+ // must be in the used-register set passed by the callers (Baseline, DFG, and FTL) if they need to be kept.
+ // Some registers can be locked but absent from the used-register set. For example, the caller could make baseGPR
+ // the same as valueRegs and omit it from the used registers, since it will be overwritten anyway.
+ RegisterSet registersToSpillForCCall;
+ for (auto& value : regs) {
+ DOMJIT::Reg reg = value.reg();
+ if (reg.isJSValueRegs())
+ registersToSpillForCCall.set(reg.jsValueRegs());
+ else if (reg.isGPR())
+ registersToSpillForCCall.set(reg.gpr());
+ else
+ registersToSpillForCCall.set(reg.fpr());
+ }
+ for (GPRReg reg : gpScratch)
+ registersToSpillForCCall.set(reg);
+ for (FPRReg reg : fpScratch)
+ registersToSpillForCCall.set(reg);
+ registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());
+
+ DOMJITAccessCasePatchpointParams params(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
+ patchpoint->generator()->run(jit, params);
+ allocator.restoreReusedRegistersByPopping(jit, preservedState);
+ state.succeed();
+
+ CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
+ if (!exceptions.empty()) {
+ exceptions.link(&jit);
+ allocator.restoreReusedRegistersByPopping(jit, preservedState);
+ state.emitExplicitExceptionHandler();
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h
new file mode 100644
index 000000000..06192dac2
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "ProxyableAccessCase.h"
+
+namespace JSC {
+
+class GetterSetterAccessCase : public ProxyableAccessCase {
+public:
+ typedef ProxyableAccessCase Base;
+ friend class AccessCase;
+
+ // This can return null if it hasn't been generated yet. That's
+ // actually somewhat likely because of how we do buffering of new cases.
+ CallLinkInfo* callLinkInfo() const { return m_callLinkInfo.get(); }
+ JSObject* customSlotBase() const { return m_customSlotBase.get(); }
+ DOMJIT::GetterSetter* domJIT() const { return m_domJIT; }
+
+ JSObject* alternateBase() const override;
+
+ void emitDOMJITGetter(AccessGenerationState&, GPRReg baseForGetGPR);
+
+ static std::unique_ptr<AccessCase> create(
+ VM&, JSCell* owner, AccessType, PropertyOffset, Structure*,
+ const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+ bool viaProxy = false,
+ WatchpointSet* additionalSet = nullptr,
+ PropertySlot::GetValueFunc = nullptr,
+ JSObject* customSlotBase = nullptr,
+ DOMJIT::GetterSetter* = nullptr);
+
+ static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, AccessType, Structure*, PropertyOffset,
+ const ObjectPropertyConditionSet&, PutPropertySlot::PutValueFunc = nullptr,
+ JSObject* customSlotBase = nullptr);
+
+ void dumpImpl(PrintStream&, CommaPrinter&) const override;
+ std::unique_ptr<AccessCase> clone() const override;
+
+ ~GetterSetterAccessCase();
+
+private:
+ GetterSetterAccessCase(VM&, JSCell*, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, bool viaProxy, WatchpointSet* additionalSet, JSObject* customSlotBase);
+
+ GetterSetterAccessCase(const GetterSetterAccessCase&);
+
+ WriteBarrier<JSObject> m_customSlotBase;
+ std::unique_ptr<CallLinkInfo> m_callLinkInfo;
+ union {
+ PutPropertySlot::PutValueFunc setter;
+ PropertySlot::GetValueFunc getter;
+ void* opaque;
+ } m_customAccessor;
+ DOMJIT::GetterSetter* m_domJIT;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/GlobalCodeBlock.h b/Source/JavaScriptCore/bytecode/GlobalCodeBlock.h
new file mode 100644
index 000000000..aa29cca33
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/GlobalCodeBlock.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+// Program code is not marked by any function, so we make the global object
+// responsible for marking it.
+
+class GlobalCodeBlock : public CodeBlock {
+ typedef CodeBlock Base;
+
+protected:
+ GlobalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, GlobalCodeBlock& other)
+ : CodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ GlobalCodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), sourceOffset, firstLineColumnOffset)
+ {
+ }
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/HandlerInfo.h b/Source/JavaScriptCore/bytecode/HandlerInfo.h
index 8396c9607..752defe8a 100644
--- a/Source/JavaScriptCore/bytecode/HandlerInfo.h
+++ b/Source/JavaScriptCore/bytecode/HandlerInfo.h
@@ -23,25 +23,100 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HandlerInfo_h
-#define HandlerInfo_h
+#pragma once
#include "CodeLocation.h"
-#include <wtf/Platform.h>
+#include <wtf/Vector.h>
namespace JSC {
-struct HandlerInfo {
+enum class HandlerType {
+ Catch = 0,
+ Finally = 1,
+ SynthesizedCatch = 2,
+ SynthesizedFinally = 3
+};
+
+enum class RequiredHandler {
+ CatchHandler,
+ AnyHandler
+};
+
+struct HandlerInfoBase {
+ HandlerType type() const { return static_cast<HandlerType>(typeBits); }
+ void setType(HandlerType type) { typeBits = static_cast<uint32_t>(type); }
+
+ const char* typeName()
+ {
+ switch (type()) {
+ case HandlerType::Catch:
+ return "catch";
+ case HandlerType::Finally:
+ return "finally";
+ case HandlerType::SynthesizedCatch:
+ return "synthesized catch";
+ case HandlerType::SynthesizedFinally:
+ return "synthesized finally";
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ return nullptr;
+ }
+
+ bool isCatchHandler() const { return type() == HandlerType::Catch; }
+
+ template<typename Handler>
+ static Handler* handlerForIndex(Vector<Handler>& exceptionHandlers, unsigned index, RequiredHandler requiredHandler)
+ {
+ for (Handler& handler : exceptionHandlers) {
+ if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
+ continue;
+
+ // Handlers are ordered innermost first, so the first handler we encounter
+ // that contains the source address is the correct handler to use.
+ // The index used here is either a bytecode offset or a CallSiteIndex.
+ if (handler.start <= index && handler.end > index)
+ return &handler;
+ }
+
+ return nullptr;
+ }
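+
+ // For example, given "try { try { ... } finally { ... } } catch (e) { ... }",
+ // the inner finally's handler appears before the outer catch's handler in the
+ // vector. An index covered by both ranges resolves to the finally, unless
+ // RequiredHandler::CatchHandler was requested, in which case the finally is
+ // skipped and the catch handler is returned.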
+
uint32_t start;
uint32_t end;
uint32_t target;
- uint32_t scopeDepth;
+ uint32_t typeBits : 2; // HandlerType
+};
+
+struct UnlinkedHandlerInfo : public HandlerInfoBase {
+ UnlinkedHandlerInfo(uint32_t start, uint32_t end, uint32_t target, HandlerType handlerType)
+ {
+ this->start = start;
+ this->end = end;
+ this->target = target;
+ setType(handlerType);
+ ASSERT(type() == handlerType);
+ }
+};
+
+struct HandlerInfo : public HandlerInfoBase {
+ void initialize(const UnlinkedHandlerInfo& unlinkedInfo)
+ {
+ start = unlinkedInfo.start;
+ end = unlinkedInfo.end;
+ target = unlinkedInfo.target;
+ typeBits = unlinkedInfo.typeBits;
+ }
+
#if ENABLE(JIT)
+ void initialize(const UnlinkedHandlerInfo& unlinkedInfo, CodeLocationLabel label)
+ {
+ initialize(unlinkedInfo);
+ nativeCode = label;
+ }
+
CodeLocationLabel nativeCode;
#endif
};
} // namespace JSC
-
-#endif // HandlerInfo_h
-
diff --git a/Source/JavaScriptCore/bytecode/InlineAccess.cpp b/Source/JavaScriptCore/bytecode/InlineAccess.cpp
new file mode 100644
index 000000000..667492ac3
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineAccess.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "InlineAccess.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "JSArray.h"
+#include "JSCellInlines.h"
+#include "LinkBuffer.h"
+#include "ScratchRegisterAllocator.h"
+#include "Structure.h"
+#include "StructureStubInfo.h"
+#include "VM.h"
+
+namespace JSC {
+
+void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
+{
+ GPRReg base = GPRInfo::regT0;
+ GPRReg value = GPRInfo::regT1;
+#if USE(JSVALUE32_64)
+ JSValueRegs regs(base, value);
+#else
+ JSValueRegs regs(base);
+#endif
+
+ {
+ CCallHelpers jit(&vm);
+
+ GPRReg scratchGPR = value;
+ jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), value);
+ jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), value);
+ jit.patchableBranch32(
+ CCallHelpers::NotEqual, value, CCallHelpers::TrustedImm32(IsArray | ContiguousShape));
+ jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value);
+ jit.load32(CCallHelpers::Address(value, ArrayStorage::lengthOffset()), value);
+ jit.boxInt32(scratchGPR, regs);
+
+ dataLog("array length size: ", jit.m_assembler.buffer().codeSize(), "\n");
+ }
+
+ {
+ CCallHelpers jit(&vm);
+
+ jit.patchableBranch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(base, JSCell::structureIDOffset()),
+ MacroAssembler::TrustedImm32(0x000ab21ca));
+ jit.loadPtr(
+ CCallHelpers::Address(base, JSObject::butterflyOffset()),
+ value);
+ GPRReg storageGPR = value;
+ jit.loadValue(
+ CCallHelpers::Address(storageGPR, 0x000ab21ca), regs);
+
+ dataLog("out of line offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+ }
+
+ {
+ CCallHelpers jit(&vm);
+
+ jit.patchableBranch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(base, JSCell::structureIDOffset()),
+ MacroAssembler::TrustedImm32(0x000ab21ca));
+ jit.loadValue(
+ MacroAssembler::Address(base, 0x000ab21ca), regs);
+
+ dataLog("inline offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+ }
+
+ {
+ CCallHelpers jit(&vm);
+
+ jit.patchableBranch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(base, JSCell::structureIDOffset()),
+ MacroAssembler::TrustedImm32(0x000ab21ca));
+
+ jit.storeValue(
+ regs, MacroAssembler::Address(base, 0x000ab21ca));
+
+ dataLog("replace cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+ }
+
+ {
+ CCallHelpers jit(&vm);
+
+ jit.patchableBranch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(base, JSCell::structureIDOffset()),
+ MacroAssembler::TrustedImm32(0x000ab21ca));
+
+ jit.loadPtr(MacroAssembler::Address(base, JSObject::butterflyOffset()), value);
+ jit.storeValue(
+ regs,
+ MacroAssembler::Address(base, 120342));
+
+ dataLog("replace out of line cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+ }
+
+ CRASH();
+}
+
+
+template <typename Function>
+ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo, const Function& function)
+{
+ if (jit.m_assembler.buffer().codeSize() <= stubInfo.patch.inlineSize) {
+ bool needsBranchCompaction = false;
+ LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
+ ASSERT(linkBuffer.isValid());
+ function(linkBuffer);
+ FINALIZE_CODE(linkBuffer, ("InlineAccessType: '%s'", name));
+ return true;
+ }
+
+ // This is helpful when determining the size for inline ICs on various
+ // platforms. You want to choose a size that usually succeeds, but the length
+ // of the code we generate sometimes varies just because of randomness. It's
+ // helpful to flip this on when running tests or browsing the web to see how
+ // often inlining fails; you don't want an IC size that always fails.
+ const bool failIfCantInline = false;
+ if (failIfCantInline) {
+ dataLog("Failure for: ", name, "\n");
+ dataLog("real size: ", jit.m_assembler.buffer().codeSize(), " inline size:", stubInfo.patch.inlineSize, "\n");
+ CRASH();
+ }
+
+ return false;
+}
+
+bool InlineAccess::generateSelfPropertyAccess(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+{
+ CCallHelpers jit(&vm);
+
+ GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ JSValueRegs value = stubInfo.valueRegs();
+
+ auto branchToSlowPath = jit.patchableBranch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(base, JSCell::structureIDOffset()),
+ MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
+ GPRReg storage;
+ if (isInlineOffset(offset))
+ storage = base;
+ else {
+ jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
+ storage = value.payloadGPR();
+ }
+
+ jit.loadValue(
+ MacroAssembler::Address(storage, offsetRelativeToBase(offset)), value);
+
+ bool linkedCodeInline = linkCodeInline("property access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+ });
+ return linkedCodeInline;
+}
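+
+// The fast path emitted above is deliberately tiny, roughly:
+//
+//     branch32 NotEqual, [base + structureIDOffset], expectedStructureID -> slow path
+//     loadPtr [base + butterflyOffset] -> storage   (out-of-line offsets only)
+//     loadValue [storage + offsetRelativeToBase(offset)] -> value
+//
+// which is what lets it fit within InlineAccess::sizeForPropertyAccess() on each platform.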
+
+ALWAYS_INLINE static GPRReg getScratchRegister(StructureStubInfo& stubInfo)
+{
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseGPR));
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueGPR));
+#if USE(JSVALUE32_64)
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueTagGPR));
+#endif
+ GPRReg scratch = allocator.allocateScratchGPR();
+ if (allocator.didReuseRegisters())
+ return InvalidGPRReg;
+ return scratch;
+}
+
+ALWAYS_INLINE static bool hasFreeRegister(StructureStubInfo& stubInfo)
+{
+ return getScratchRegister(stubInfo) != InvalidGPRReg;
+}
+
+bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, PropertyOffset offset)
+{
+ if (isInlineOffset(offset))
+ return true;
+
+ return hasFreeRegister(stubInfo);
+}
+
+bool InlineAccess::generateSelfPropertyReplace(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+{
+ ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));
+
+ CCallHelpers jit(&vm);
+
+ GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ JSValueRegs value = stubInfo.valueRegs();
+
+ auto branchToSlowPath = jit.patchableBranch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(base, JSCell::structureIDOffset()),
+ MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
+
+ GPRReg storage;
+ if (isInlineOffset(offset))
+ storage = base;
+ else {
+ storage = getScratchRegister(stubInfo);
+ ASSERT(storage != InvalidGPRReg);
+ jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), storage);
+ }
+
+ jit.storeValue(
+ value, MacroAssembler::Address(storage, offsetRelativeToBase(offset)));
+
+ bool linkedCodeInline = linkCodeInline("property replace", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+ });
+ return linkedCodeInline;
+}
+
+bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray* array)
+{
+ ASSERT(array->indexingType() & IsArray);
+
+ if (!hasFreeRegister(stubInfo))
+ return false;
+
+ return array->indexingType() == ArrayWithInt32
+ || array->indexingType() == ArrayWithDouble
+ || array->indexingType() == ArrayWithContiguous;
+}
+
+bool InlineAccess::generateArrayLength(VM& vm, StructureStubInfo& stubInfo, JSArray* array)
+{
+ ASSERT(isCacheableArrayLength(stubInfo, array));
+
+ CCallHelpers jit(&vm);
+
+ GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ JSValueRegs value = stubInfo.valueRegs();
+ GPRReg scratch = getScratchRegister(stubInfo);
+
+ jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratch);
+ jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), scratch);
+ auto branchToSlowPath = jit.patchableBranch32(
+ CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType()));
+ jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
+ jit.load32(CCallHelpers::Address(value.payloadGPR(), ArrayStorage::lengthOffset()), value.payloadGPR());
+ jit.boxInt32(value.payloadGPR(), value);
+
+ bool linkedCodeInline = linkCodeInline("array length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+ });
+ return linkedCodeInline;
+}
+
+void InlineAccess::rewireStubAsJump(VM& vm, StructureStubInfo& stubInfo, CodeLocationLabel target)
+{
+ CCallHelpers jit(&vm);
+
+ auto jump = jit.jump();
+
+ // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
+ bool needsBranchCompaction = false;
+ LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
+ RELEASE_ASSERT(linkBuffer.isValid());
+ linkBuffer.link(jump, target);
+
+ FINALIZE_CODE(linkBuffer, ("InlineAccess: linking constant jump"));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/InlineAccess.h b/Source/JavaScriptCore/bytecode/InlineAccess.h
new file mode 100644
index 000000000..3910c5b3b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineAccess.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CodeLocation.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class JSArray;
+class Structure;
+class StructureStubInfo;
+class VM;
+
+class InlineAccess {
+public:
+
+ // This is the maximum of the inline and out-of-line self-access cases.
+ static constexpr size_t sizeForPropertyAccess()
+ {
+#if CPU(X86_64)
+ return 23;
+#elif CPU(X86)
+ return 27;
+#elif CPU(ARM64)
+ return 40;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+ return 48;
+#else
+ return 52;
+#endif
+#else
+#error "unsupported platform"
+#endif
+ }
+
+ // This is the maximum of the inline and out-of-line property-replace cases.
+ static constexpr size_t sizeForPropertyReplace()
+ {
+#if CPU(X86_64)
+ return 23;
+#elif CPU(X86)
+ return 27;
+#elif CPU(ARM64)
+ return 40;
+#elif CPU(ARM)
+ return 48;
+#else
+#error "unsupported platform"
+#endif
+ }
+
+ // FIXME: Make this constexpr when GCC is able to compile std::max() inside a constexpr function.
+ // https://bugs.webkit.org/show_bug.cgi?id=159436
+ //
+ // This is the maximum of the size for array-length access and the size for regular self access.
+ ALWAYS_INLINE static size_t sizeForLengthAccess()
+ {
+#if CPU(X86_64)
+ size_t size = 26;
+#elif CPU(X86)
+ size_t size = 27;
+#elif CPU(ARM64)
+ size_t size = 32;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+ size_t size = 30;
+#else
+ size_t size = 32;
+#endif
+#else
+#error "unsupported platform"
+#endif
+ return std::max(size, sizeForPropertyAccess());
+ }
+
+ static bool generateSelfPropertyAccess(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+ static bool canGenerateSelfPropertyReplace(StructureStubInfo&, PropertyOffset);
+ static bool generateSelfPropertyReplace(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+ static bool isCacheableArrayLength(StructureStubInfo&, JSArray*);
+ static bool generateArrayLength(VM&, StructureStubInfo&, JSArray*);
+ static void rewireStubAsJump(VM&, StructureStubInfo&, CodeLocationLabel);
+
+ // This is helpful when determining the size of an IC on
+ // various platforms. When adding a new type of IC, implement
+ // its placeholder code here and log the size. That way we
+ // can choose sizes intelligently on each platform.
+ NO_RETURN_DUE_TO_CRASH static void dumpCacheSizesAndCrash(VM&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
new file mode 100644
index 000000000..97ce84d63
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "InlineCallFrame.h"
+
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+JSFunction* InlineCallFrame::calleeConstant() const
+{
+ if (calleeRecovery.isConstant())
+ return jsCast<JSFunction*>(calleeRecovery.constant());
+ return nullptr;
+}
+
+JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
+{
+ return jsCast<JSFunction*>(calleeRecovery.recover(exec));
+}
+
+CodeBlockHash InlineCallFrame::hash() const
+{
+ return baselineCodeBlock->hash();
+}
+
+CString InlineCallFrame::hashAsStringIfPossible() const
+{
+ return baselineCodeBlock->hashAsStringIfPossible();
+}
+
+CString InlineCallFrame::inferredName() const
+{
+ return jsCast<FunctionExecutable*>(baselineCodeBlock->ownerExecutable())->inferredName().utf8();
+}
+
+void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
+{
+ out.print(inferredName(), "#", hashAsStringIfPossible());
+}
+
+void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ out.print(briefFunctionInformation(), ":<", RawPointer(baselineCodeBlock.get()));
+ if (isStrictMode())
+ out.print(" (StrictMode)");
+ out.print(", bc#", directCaller.bytecodeIndex, ", ", static_cast<Kind>(kind));
+ if (isClosureCall)
+ out.print(", closure call");
+ else
+ out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
+ out.print(", numArgs+this = ", arguments.size());
+ out.print(", stackOffset = ", stackOffset);
+ out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>");
+}
+
+void InlineCallFrame::dump(PrintStream& out) const
+{
+ dumpInContext(out, 0);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::InlineCallFrame::Kind kind)
+{
+ switch (kind) {
+ case JSC::InlineCallFrame::Call:
+ out.print("Call");
+ return;
+ case JSC::InlineCallFrame::Construct:
+ out.print("Construct");
+ return;
+ case JSC::InlineCallFrame::TailCall:
+ out.print("TailCall");
+ return;
+ case JSC::InlineCallFrame::CallVarargs:
+ out.print("CallVarargs");
+ return;
+ case JSC::InlineCallFrame::ConstructVarargs:
+ out.print("ConstructVarargs");
+ return;
+ case JSC::InlineCallFrame::TailCallVarargs:
+ out.print("TailCallVarargs");
+ return;
+ case JSC::InlineCallFrame::GetterCall:
+ out.print("GetterCall");
+ return;
+ case JSC::InlineCallFrame::SetterCall:
+ out.print("SetterCall");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrame.h b/Source/JavaScriptCore/bytecode/InlineCallFrame.h
new file mode 100644
index 000000000..cd2a5fe11
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrame.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "CodeBlockHash.h"
+#include "CodeOrigin.h"
+#include "ValueRecovery.h"
+#include "WriteBarrier.h"
+#include <wtf/HashMap.h>
+#include <wtf/PrintStream.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+struct InlineCallFrame;
+class ExecState;
+class JSFunction;
+
+struct InlineCallFrame {
+ enum Kind {
+ Call,
+ Construct,
+ TailCall,
+ CallVarargs,
+ ConstructVarargs,
+ TailCallVarargs,
+
+ // For these, the stackOffset incorporates the argument count plus the true return PC
+ // slot.
+ GetterCall,
+ SetterCall
+ };
+
+ static CallMode callModeFor(Kind kind)
+ {
+ switch (kind) {
+ case Call:
+ case CallVarargs:
+ case GetterCall:
+ case SetterCall:
+ return CallMode::Regular;
+ case TailCall:
+ case TailCallVarargs:
+ return CallMode::Tail;
+ case Construct:
+ case ConstructVarargs:
+ return CallMode::Construct;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static Kind kindFor(CallMode callMode)
+ {
+ switch (callMode) {
+ case CallMode::Regular:
+ return Call;
+ case CallMode::Construct:
+ return Construct;
+ case CallMode::Tail:
+ return TailCall;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static Kind varargsKindFor(CallMode callMode)
+ {
+ switch (callMode) {
+ case CallMode::Regular:
+ return CallVarargs;
+ case CallMode::Construct:
+ return ConstructVarargs;
+ case CallMode::Tail:
+ return TailCallVarargs;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static CodeSpecializationKind specializationKindFor(Kind kind)
+ {
+ switch (kind) {
+ case Call:
+ case CallVarargs:
+ case TailCall:
+ case TailCallVarargs:
+ case GetterCall:
+ case SetterCall:
+ return CodeForCall;
+ case Construct:
+ case ConstructVarargs:
+ return CodeForConstruct;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static bool isVarargs(Kind kind)
+ {
+ switch (kind) {
+ case CallVarargs:
+ case TailCallVarargs:
+ case ConstructVarargs:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isTail(Kind kind)
+ {
+ switch (kind) {
+ case TailCall:
+ case TailCallVarargs:
+ return true;
+ default:
+ return false;
+ }
+ }
+ bool isTail() const
+ {
+ return isTail(static_cast<Kind>(kind));
+ }
+
+ static CodeOrigin* computeCallerSkippingTailCalls(InlineCallFrame* inlineCallFrame, Kind* callerCallKind = nullptr)
+ {
+ CodeOrigin* codeOrigin;
+ bool tailCallee;
+ int callKind;
+ do {
+ tailCallee = inlineCallFrame->isTail();
+ callKind = inlineCallFrame->kind;
+ codeOrigin = &inlineCallFrame->directCaller;
+ inlineCallFrame = codeOrigin->inlineCallFrame;
+ } while (inlineCallFrame && tailCallee);
+
+ if (tailCallee)
+ return nullptr;
+
+ if (callerCallKind)
+ *callerCallKind = static_cast<Kind>(callKind);
+
+ return codeOrigin;
+ }
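+
+ // For example, if A calls B and B tail-calls C, then B's frame was replaced
+ // by C's, so C's logical caller is the call site in A; callerCallKind, if
+ // requested, is the kind of that originating call. If the chain of tail calls
+ // extends past the outermost inlined frame, the true caller cannot be
+ // reconstructed and null is returned.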
+
+ CodeOrigin* getCallerSkippingTailCalls(Kind* callerCallKind = nullptr)
+ {
+ return computeCallerSkippingTailCalls(this, callerCallKind);
+ }
+
+ InlineCallFrame* getCallerInlineFrameSkippingTailCalls()
+ {
+ CodeOrigin* caller = getCallerSkippingTailCalls();
+ return caller ? caller->inlineCallFrame : nullptr;
+ }
+
+ Vector<ValueRecovery> arguments; // Includes 'this'.
+ WriteBarrier<CodeBlock> baselineCodeBlock;
+ ValueRecovery calleeRecovery;
+ CodeOrigin directCaller;
+
+ signed stackOffset : 28;
+ unsigned kind : 3; // real type is Kind
+ bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
+ VirtualRegister argumentCountRegister; // Only set when we inline a varargs call.
+
+ // There is really no good notion of a "default" set of values for
+ // InlineCallFrame's fields. This constructor is here just to reduce confusion if
+ // we forget to initialize the fields explicitly.
+ InlineCallFrame()
+ : stackOffset(0)
+ , kind(Call)
+ , isClosureCall(false)
+ {
+ }
+
+ bool isVarargs() const
+ {
+ return isVarargs(static_cast<Kind>(kind));
+ }
+
+ CodeSpecializationKind specializationKind() const { return specializationKindFor(static_cast<Kind>(kind)); }
+
+ JSFunction* calleeConstant() const;
+
+ // Get the callee given a machine call frame to which this InlineCallFrame belongs.
+ JSFunction* calleeForCallFrame(ExecState*) const;
+
+ CString inferredName() const;
+ CodeBlockHash hash() const;
+ CString hashAsStringIfPossible() const;
+
+ void setStackOffset(signed offset)
+ {
+ stackOffset = offset;
+ RELEASE_ASSERT(static_cast<signed>(stackOffset) == offset);
+ }
+
+ ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
+ ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
+
+ bool isStrictMode() const { return baselineCodeBlock->isStrictMode(); }
+
+ void dumpBriefFunctionInformation(PrintStream&) const;
+ void dump(PrintStream&) const;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+
+ MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
+
+};
+
+inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+{
+ RELEASE_ASSERT(inlineCallFrame);
+ return inlineCallFrame->baselineCodeBlock.get();
+}
+
+inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+{
+ if (codeOrigin.inlineCallFrame)
+ return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+ return baselineCodeBlock;
+}
+
+template <typename Function>
+inline void CodeOrigin::walkUpInlineStack(const Function& function)
+{
+ CodeOrigin codeOrigin = *this;
+ while (true) {
+ function(codeOrigin);
+ if (!codeOrigin.inlineCallFrame)
+ break;
+ codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+ }
+}
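+
+// Example: computing the inline depth of a code origin.
+//
+//     unsigned depth = 0;
+//     codeOrigin.walkUpInlineStack([&] (CodeOrigin) { ++depth; });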
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::InlineCallFrame::Kind);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
index be5edb34c..402cfd06d 100644
--- a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp
@@ -26,6 +26,9 @@
#include "config.h"
#include "InlineCallFrameSet.h"
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
+
namespace JSC {
InlineCallFrameSet::InlineCallFrameSet() { }
diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
index 0a8b2e79c..6c6184173 100644
--- a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
+++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h
@@ -23,17 +23,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef InlineCallFrameSet_h
-#define InlineCallFrameSet_h
+#pragma once
#include "CodeOrigin.h"
#include <wtf/Bag.h>
-#include <wtf/Noncopyable.h>
+#include <wtf/RefCounted.h>
namespace JSC {
-class InlineCallFrameSet {
- WTF_MAKE_NONCOPYABLE(InlineCallFrameSet);
+class InlineCallFrameSet : public RefCounted<InlineCallFrameSet> {
public:
InlineCallFrameSet();
~InlineCallFrameSet();
@@ -45,12 +43,9 @@ public:
typedef Bag<InlineCallFrame>::iterator iterator;
iterator begin() { return m_frames.begin(); }
iterator end() { return m_frames.end(); }
-
+
private:
Bag<InlineCallFrame> m_frames;
};
} // namespace JSC
-
-#endif // InlineCallFrameSet_h
-
diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h
index 00bd8155b..a86739f47 100644
--- a/Source/JavaScriptCore/bytecode/Instruction.h
+++ b/Source/JavaScriptCore/bytecode/Instruction.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,15 +26,18 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Instruction_h
-#define Instruction_h
+#pragma once
+#include "BasicBlockLocation.h"
#include "MacroAssembler.h"
-#include "Opcode.h"
+#include "PutByIdFlags.h"
+#include "SymbolTable.h"
+#include "TypeLocation.h"
#include "PropertySlot.h"
#include "SpecialPointer.h"
#include "Structure.h"
#include "StructureChain.h"
+#include "ToThisStatus.h"
#include "VirtualRegister.h"
#include <wtf/VectorTraits.h>
@@ -43,10 +46,16 @@ namespace JSC {
class ArrayAllocationProfile;
class ArrayProfile;
class ObjectAllocationProfile;
-class VariableWatchpointSet;
+class WatchpointSet;
struct LLIntCallLinkInfo;
struct ValueProfile;
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+typedef void* Opcode;
+#else
+typedef OpcodeID Opcode;
+#endif
+
struct Instruction {
Instruction()
{
@@ -70,6 +79,18 @@ struct Instruction {
u.jsCell.clear();
u.operand = operand;
}
+ Instruction(unsigned unsignedValue)
+ {
+        // We have to initialize one of the pointer members to ensure that
+        // the entire struct is initialized on 64-bit platforms.
+ u.jsCell.clear();
+ u.unsignedValue = unsignedValue;
+ }
+
+ Instruction(PutByIdFlags flags)
+ {
+ u.putByIdFlags = flags;
+ }
Instruction(VM& vm, JSCell* owner, Structure* structure)
{
@@ -94,30 +115,36 @@ struct Instruction {
Instruction(ArrayProfile* profile) { u.arrayProfile = profile; }
Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; }
Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; }
- Instruction(WriteBarrier<Unknown>* registerPointer) { u.registerPointer = registerPointer; }
+ Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; }
Instruction(Special::Pointer pointer) { u.specialPointer = pointer; }
- Instruction(StringImpl* uid) { u.uid = uid; }
+ Instruction(UniquedStringImpl* uid) { u.uid = uid; }
Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; }
union {
Opcode opcode;
int operand;
+ unsigned unsignedValue;
WriteBarrierBase<Structure> structure;
+ StructureID structureID;
+ WriteBarrierBase<SymbolTable> symbolTable;
WriteBarrierBase<StructureChain> structureChain;
WriteBarrierBase<JSCell> jsCell;
- WriteBarrier<Unknown>* registerPointer;
+ WriteBarrier<Unknown>* variablePointer;
Special::Pointer specialPointer;
PropertySlot::GetValueFunc getterFunc;
LLIntCallLinkInfo* callLinkInfo;
- StringImpl* uid;
+ UniquedStringImpl* uid;
ValueProfile* profile;
ArrayProfile* arrayProfile;
ArrayAllocationProfile* arrayAllocationProfile;
ObjectAllocationProfile* objectAllocationProfile;
- VariableWatchpointSet* watchpointSet;
- WriteBarrierBase<JSActivation> activation;
+ WatchpointSet* watchpointSet;
void* pointer;
bool* predicatePointer;
+ ToThisStatus toThisStatus;
+ TypeLocation* location;
+ BasicBlockLocation* basicBlockLocation;
+ PutByIdFlags putByIdFlags;
} u;
private:
@@ -132,5 +159,3 @@ namespace WTF {
template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { };
} // namespace WTF
-
-#endif // Instruction_h
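
The Instruction changes above extend a one-word union: a bytecode stream is an array of these words, with the opcode in the first slot of each instruction and operands, profile pointers, and cache fields in the following slots, so the interpreter can index them directly. A hedged standalone sketch of that layout; the enum and union members here are simplified stand-ins, not the real set.

    #include <cstdio>
    #include <vector>

    enum OpcodeSketch { op_add, op_ret };

    // One machine word per slot, as in JSC's Instruction.
    union InstructionSketch {
        OpcodeSketch opcode;
        int operand;
        void* pointer; // caches and profiles in the real engine
        InstructionSketch(OpcodeSketch op) : opcode(op) { }
        InstructionSketch(int op) : operand(op) { }
    };

    int main()
    {
        // "add dst, lhs, rhs" encoded as four consecutive words.
        std::vector<InstructionSketch> stream { op_add, 0, 1, 2 };
        if (stream[0].opcode == op_add)
            std::printf("add r%d, r%d, r%d\n",
                stream[1].operand, stream[2].operand, stream[3].operand);
        return 0;
    }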
diff --git a/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h
new file mode 100644
index 000000000..a4865233b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/InternalFunctionAllocationProfile.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSGlobalObject.h"
+#include "ObjectPrototype.h"
+#include "SlotVisitor.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+class InternalFunctionAllocationProfile {
+public:
+ Structure* structure() { return m_structure.get(); }
+ Structure* createAllocationStructureFromBase(VM&, JSGlobalObject*, JSCell* owner, JSObject* prototype, Structure* base);
+
+ void clear() { m_structure.clear(); }
+ void visitAggregate(SlotVisitor& visitor) { visitor.append(m_structure); }
+
+private:
+ WriteBarrier<Structure> m_structure;
+};
+
+inline Structure* InternalFunctionAllocationProfile::createAllocationStructureFromBase(VM& vm, JSGlobalObject* globalObject, JSCell* owner, JSObject* prototype, Structure* baseStructure)
+{
+ ASSERT(!m_structure || m_structure.get()->classInfo() != baseStructure->classInfo());
+
+ Structure* structure;
+ if (prototype == baseStructure->storedPrototype())
+ structure = baseStructure;
+ else
+ structure = vm.prototypeMap.emptyStructureForPrototypeFromBaseStructure(globalObject, prototype, baseStructure);
+
+ // Ensure that if another thread sees the structure, it will see it properly created.
+ WTF::storeStoreFence();
+
+ m_structure.set(vm, owner, structure);
+ return m_structure.get();
+}
+
+} // namespace JSC
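
createAllocationStructureFromBase above publishes m_structure behind a storeStoreFence so a concurrent reader never observes the pointer before the Structure's fields. The reader pairs this with a load-load fence, the same pattern ObjectAllocationProfile::structure() uses later in this diff. A standalone sketch of that publish/consume pairing, using std::atomic_thread_fence in place of the WTF fences and a plain pointer in place of WriteBarrier:

    #include <atomic>

    struct ProfileData { int inlineCapacity { 0 }; };

    std::atomic<ProfileData*> g_published { nullptr };

    void publish(ProfileData* data)
    {
        data->inlineCapacity = 8;                            // initialize first...
        std::atomic_thread_fence(std::memory_order_release); // ~WTF::storeStoreFence
        g_published.store(data, std::memory_order_relaxed);  // ...then publish
    }

    ProfileData* consume()
    {
        ProfileData* data = g_published.load(std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_acquire); // ~WTF::loadLoadFence
        return data; // if non-null, it is fully initialized
    }

    int main()
    {
        static ProfileData data;
        publish(&data);
        return consume() == &data ? 0 : 1;
    }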
diff --git a/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp
new file mode 100644
index 000000000..92d6a5580
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "IntrinsicGetterAccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "HeapInlines.h"
+
+namespace JSC {
+
+IntrinsicGetterAccessCase::IntrinsicGetterAccessCase(VM& vm, JSCell* owner, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, JSFunction* intrinsicFunction)
+ : Base(vm, owner, IntrinsicGetter, offset, structure, conditionSet)
+{
+ m_intrinsicFunction.set(vm, owner, intrinsicFunction);
+}
+
+std::unique_ptr<AccessCase> IntrinsicGetterAccessCase::create(VM& vm, JSCell* owner, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, JSFunction* intrinsicFunction)
+{
+ return std::unique_ptr<AccessCase>(new IntrinsicGetterAccessCase(vm, owner, offset, structure, conditionSet, intrinsicFunction));
+}
+
+IntrinsicGetterAccessCase::~IntrinsicGetterAccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> IntrinsicGetterAccessCase::clone() const
+{
+ std::unique_ptr<IntrinsicGetterAccessCase> result(new IntrinsicGetterAccessCase(*this));
+ result->resetState();
+ return WTFMove(result);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h
new file mode 100644
index 000000000..1021c18e6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/IntrinsicGetterAccessCase.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+
+namespace JSC {
+
+class IntrinsicGetterAccessCase : public AccessCase {
+public:
+ typedef AccessCase Base;
+ friend class AccessCase;
+
+ JSFunction* intrinsicFunction() const { return m_intrinsicFunction.get(); }
+ Intrinsic intrinsic() const { return m_intrinsicFunction->intrinsic(); }
+
+ static bool canEmitIntrinsicGetter(JSFunction*, Structure*);
+ void emitIntrinsicGetter(AccessGenerationState&);
+
+ static std::unique_ptr<AccessCase> create(VM&, JSCell*, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, JSFunction* intrinsicFunction);
+
+ std::unique_ptr<AccessCase> clone() const override;
+
+ ~IntrinsicGetterAccessCase();
+
+private:
+ IntrinsicGetterAccessCase(VM&, JSCell*, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, JSFunction* intrinsicFunction);
+
+ WriteBarrier<JSFunction> m_intrinsicFunction;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.cpp b/Source/JavaScriptCore/bytecode/JumpTable.cpp
index ef7098b65..e22ad03c9 100644
--- a/Source/JavaScriptCore/bytecode/JumpTable.cpp
+++ b/Source/JavaScriptCore/bytecode/JumpTable.cpp
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.h b/Source/JavaScriptCore/bytecode/JumpTable.h
index 55d6855a5..333542517 100644
--- a/Source/JavaScriptCore/bytecode/JumpTable.h
+++ b/Source/JavaScriptCore/bytecode/JumpTable.h
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -27,8 +27,7 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JumpTable_h
-#define JumpTable_h
+#pragma once
#include "MacroAssembler.h"
#include <wtf/HashMap.h>
@@ -94,6 +93,12 @@ namespace JSC {
}
#if ENABLE(JIT)
+ void ensureCTITable()
+ {
+ ASSERT(ctiOffsets.isEmpty() || ctiOffsets.size() == branchOffsets.size());
+ ctiOffsets.grow(branchOffsets.size());
+ }
+
inline CodeLocationLabel ctiForValue(int32_t value)
{
if (value >= min && static_cast<uint32_t>(value - min) < ctiOffsets.size())
@@ -112,5 +117,3 @@ namespace JSC {
};
} // namespace JSC
-
-#endif // JumpTable_h
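
ensureCTITable above sizes the compiled-code table to match branchOffsets, and ctiForValue then performs a dense range-checked lookup. A standalone sketch of that lookup shape, with plain integer offsets standing in for CodeLocationLabel and zero meaning "fall back to the default target":

    #include <cstdint>
    #include <vector>

    struct JumpTableSketch {
        int32_t min { 0 };
        std::vector<int32_t> branchOffsets; // dense, indexed by (value - min)

        int32_t offsetForValue(int32_t value, int32_t defaultOffset) const
        {
            if (value >= min && static_cast<uint32_t>(value - min) < branchOffsets.size()) {
                int32_t offset = branchOffsets[value - min];
                return offset ? offset : defaultOffset;
            }
            return defaultOffset;
        }
    };

    int main()
    {
        JumpTableSketch table { 3, { 100, 0, 200 } }; // cases 3 and 5; 4 uses default
        return table.offsetForValue(5, 7) == 200 ? 0 : 1;
    }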
diff --git a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
index bfb951018..c2cf4d1dc 100644
--- a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
+++ b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntCallLinkInfo_h
-#define LLIntCallLinkInfo_h
+#pragma once
#include "JSFunction.h"
#include "MacroAssemblerCodeRef.h"
@@ -45,7 +44,7 @@ struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> {
remove();
}
- bool isLinked() { return callee; }
+ bool isLinked() { return !!callee; }
void unlink()
{
@@ -61,6 +60,3 @@ struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> {
};
} // namespace JSC
-
-#endif // LLIntCallLinkInfo_h
-
diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
new file mode 100644
index 000000000..9a5ac0112
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
+
+#include "CodeBlock.h"
+#include "Instruction.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, Instruction* getByIdInstruction)
+ : m_key(key)
+ , m_getByIdInstruction(getByIdInstruction)
+{
+ RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint());
+ RELEASE_ASSERT(!key.watchingRequiresReplacementWatchpoint());
+}
+
+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::install()
+{
+ RELEASE_ASSERT(m_key.isWatchable());
+
+ m_key.object()->structure()->addTransitionWatchpoint(this);
+}
+
+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::fireInternal(const FireDetail&)
+{
+ if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+ install();
+ return;
+ }
+
+ CodeBlock::clearLLIntGetByIdCache(m_getByIdInstruction);
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
new file mode 100644
index 000000000..8a73c6c79
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Instruction.h"
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class LLIntPrototypeLoadAdaptiveStructureWatchpoint : public Watchpoint {
+public:
+ LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*);
+
+ void install();
+
+protected:
+ void fireInternal(const FireDetail&) override;
+
+private:
+ ObjectPropertyCondition m_key;
+ Instruction* m_getByIdInstruction;
+};
+
+} // namespace JSC
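
The watchpoint above is "adaptive": when the watched structure transitions, it re-checks the property condition and, if the condition still holds on the new structure, re-installs itself instead of invalidating anything; only when adaptation fails does it clear the LLInt get_by_id cache. A simplified standalone sketch of that lifecycle, with the condition re-check and installation stubbed out:

    struct ConditionSketch {
        bool stillWatchable; // the real code re-evaluates against the new structure
    };

    struct AdaptiveWatchpointSketch {
        ConditionSketch key;
        bool cacheValid { true };

        void install() { /* add self to the new structure's transition watchpoint set */ }

        void fire()
        {
            if (key.stillWatchable) {
                install(); // adapt: keep watching
                return;
            }
            cacheValid = false; // ~CodeBlock::clearLLIntGetByIdCache
        }
    };

    int main()
    {
        AdaptiveWatchpointSketch w { { false }, true };
        w.fire();
        return w.cacheValid ? 1 : 0; // adaptation failed, cache dropped
    }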
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
index a8ad779ac..0929d6fb4 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
@@ -26,14 +26,14 @@
#include "config.h"
#include "LazyOperandValueProfile.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder() { }
CompressedLazyOperandValueProfileHolder::~CompressedLazyOperandValueProfileHolder() { }
-void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const ConcurrentJITLocker& locker)
+void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const ConcurrentJSLocker& locker)
{
if (!m_data)
return;
@@ -43,10 +43,10 @@ void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const Co
}
LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add(
- const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key)
+ const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key)
{
if (!m_data)
- m_data = adoptPtr(new LazyOperandValueProfile::List());
+ m_data = std::make_unique<LazyOperandValueProfile::List>();
else {
for (unsigned i = 0; i < m_data->size(); ++i) {
if (m_data->at(i).key() == key)
@@ -62,7 +62,7 @@ LazyOperandValueProfileParser::LazyOperandValueProfileParser() { }
LazyOperandValueProfileParser::~LazyOperandValueProfileParser() { }
void LazyOperandValueProfileParser::initialize(
- const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder)
+ const ConcurrentJSLocker&, CompressedLazyOperandValueProfileHolder& holder)
{
ASSERT(m_map.isEmpty());
@@ -87,7 +87,7 @@ LazyOperandValueProfile* LazyOperandValueProfileParser::getIfPresent(
}
SpeculatedType LazyOperandValueProfileParser::prediction(
- const ConcurrentJITLocker& locker, const LazyOperandValueProfileKey& key) const
+ const ConcurrentJSLocker& locker, const LazyOperandValueProfileKey& key) const
{
LazyOperandValueProfile* profile = getIfPresent(key);
if (!profile)
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
index 95ef941cd..9c3b06842 100644
--- a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
@@ -23,15 +23,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LazyOperandValueProfile_h
-#define LazyOperandValueProfile_h
+#pragma once
-#include "ConcurrentJITLock.h"
+#include "ConcurrentJSLock.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include <wtf/HashMap.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
#include <wtf/SegmentedVector.h>
namespace JSC {
@@ -154,14 +152,14 @@ public:
CompressedLazyOperandValueProfileHolder();
~CompressedLazyOperandValueProfileHolder();
- void computeUpdatedPredictions(const ConcurrentJITLocker&);
+ void computeUpdatedPredictions(const ConcurrentJSLocker&);
LazyOperandValueProfile* add(
- const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key);
+ const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key);
private:
friend class LazyOperandValueProfileParser;
- OwnPtr<LazyOperandValueProfile::List> m_data;
+ std::unique_ptr<LazyOperandValueProfile::List> m_data;
};
class LazyOperandValueProfileParser {
@@ -171,19 +169,15 @@ public:
~LazyOperandValueProfileParser();
void initialize(
- const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder);
+ const ConcurrentJSLocker&, CompressedLazyOperandValueProfileHolder& holder);
LazyOperandValueProfile* getIfPresent(
const LazyOperandValueProfileKey& key) const;
SpeculatedType prediction(
- const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key) const;
+ const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key) const;
private:
HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*> m_map;
};
} // namespace JSC
-
-#endif // LazyOperandValueProfile_h
-
-
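
CompressedLazyOperandValueProfileHolder::add above allocates its backing list only on first use and reuses an existing entry when the key is already present. A standalone sketch of that lazily-allocated holder; note that JSC backs it with a SegmentedVector so returned pointers stay stable, which std::vector (used here purely for brevity) does not guarantee across later growth.

    #include <memory>
    #include <vector>

    struct KeySketch {
        int bytecodeOffset;
        int operand;
        bool operator==(const KeySketch& other) const
        {
            return bytecodeOffset == other.bytecodeOffset && operand == other.operand;
        }
    };

    struct ProfileSketch { KeySketch key; };

    class LazyHolderSketch {
    public:
        ProfileSketch* add(const KeySketch& key)
        {
            if (!m_data)
                m_data = std::make_unique<std::vector<ProfileSketch>>(); // first use
            else {
                for (ProfileSketch& profile : *m_data) {
                    if (profile.key == key)
                        return &profile; // reuse the existing entry
                }
            }
            m_data->push_back(ProfileSketch { key });
            return &m_data->back();
        }

    private:
        std::unique_ptr<std::vector<ProfileSketch>> m_data; // null until needed
    };

    int main()
    {
        LazyHolderSketch holder;
        ProfileSketch* first = holder.add({ 4, 1 });
        return holder.add({ 4, 1 }) == first ? 0 : 1; // same entry both times
    }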
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
index 1ac5bb5a0..f479e5f85 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +28,10 @@
#if ENABLE(DFG_JIT)
+#include "ArithProfile.h"
+#include "CCallHelpers.h"
#include "CodeBlock.h"
+#include "JSCInlines.h"
namespace JSC {
@@ -43,28 +46,32 @@ MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand(
return result;
}
-EncodedJSValue* MethodOfGettingAValueProfile::getSpecFailBucket(unsigned index) const
+void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueRegs regs) const
{
switch (m_kind) {
case None:
- return 0;
+ return;
case Ready:
- return u.profile->specFailBucket(index);
+ jit.storeValue(regs, u.profile->specFailBucket(0));
+ return;
case LazyOperand: {
LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
- ConcurrentJITLocker locker(u.lazyOperand.codeBlock->m_lock);
+ ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
LazyOperandValueProfile* profile =
u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key);
- return profile->specFailBucket(index);
+ jit.storeValue(regs, profile->specFailBucket(0));
+ return;
}
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
- }
+ case ArithProfileReady: {
+ u.arithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
+ return;
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
index c6fe6c5f0..98e39db1d 100644
--- a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MethodOfGettingAValueProfile_h
-#define MethodOfGettingAValueProfile_h
-
-#include <wtf/Platform.h>
+#pragma once
// This is guarded by ENABLE_DFG_JIT only because it uses some value profiles
// that are currently only used if the DFG is enabled (i.e. they are not
// available in the profile-only configuration). Hopefully someday all of
// these #if's will disappear...
// these #if's will disappear...
#if ENABLE(DFG_JIT)
+#include "GPRInfo.h"
#include "JSCJSValue.h"
namespace JSC {
+class CCallHelpers;
class CodeBlock;
class LazyOperandValueProfileKey;
+struct ArithProfile;
struct ValueProfile;
class MethodOfGettingAValueProfile {
@@ -49,7 +49,7 @@ public:
{
}
- explicit MethodOfGettingAValueProfile(ValueProfile* profile)
+ MethodOfGettingAValueProfile(ValueProfile* profile)
{
if (profile) {
m_kind = Ready;
@@ -58,31 +58,34 @@ public:
m_kind = None;
}
+ MethodOfGettingAValueProfile(ArithProfile* profile)
+ {
+ if (profile) {
+ m_kind = ArithProfileReady;
+ u.arithProfile = profile;
+ } else
+ m_kind = None;
+ }
+
static MethodOfGettingAValueProfile fromLazyOperand(
CodeBlock*, const LazyOperandValueProfileKey&);
- bool operator!() const { return m_kind == None; }
-
- // This logically has a pointer to a "There exists X such that
- // ValueProfileBase<X>". But since C++ does not have existential
- // templates, I cannot return it. So instead, for any methods that
- // users of this class would like to call, we'll just have to provide
- // a method here that does it through an indirection. Or we could
- // possibly just make ValueProfile less template-based. But last I
- // tried that, it felt more yucky than this class.
+ explicit operator bool() const { return m_kind != None; }
- EncodedJSValue* getSpecFailBucket(unsigned index) const;
+ void emitReportValue(CCallHelpers&, JSValueRegs) const;
private:
enum Kind {
None,
Ready,
+ ArithProfileReady,
LazyOperand
};
Kind m_kind;
union {
ValueProfile* profile;
+ ArithProfile* arithProfile;
struct {
CodeBlock* codeBlock;
unsigned bytecodeOffset;
@@ -94,6 +97,3 @@ private:
} // namespace JSC
#endif // ENABLE(DFG_JIT)
-
-#endif // MethodOfGettingAValueProfile_h
-
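
The rewrite above turns MethodOfGettingAValueProfile from a bucket-pointer getter into a JIT emitter: emitReportValue dispatches on the tagged-union kind and stores the observed value into whichever profile backs it. A standalone sketch of that dispatch, with direct stores standing in for the emitted code; the LazyOperand case, which locks the CodeBlock, is omitted for brevity.

    #include <cassert>

    struct ValueProfileSketch { long bucket[1] { 0 }; };
    struct ArithProfileSketch { long lastResult { 0 }; };

    class MethodOfGettingAValueProfileSketch {
    public:
        MethodOfGettingAValueProfileSketch() { }
        MethodOfGettingAValueProfileSketch(ValueProfileSketch* profile)
        {
            if (profile) { m_kind = Ready; u.profile = profile; }
        }
        MethodOfGettingAValueProfileSketch(ArithProfileSketch* profile)
        {
            if (profile) { m_kind = ArithProfileReady; u.arithProfile = profile; }
        }

        explicit operator bool() const { return m_kind != None; }

        void reportValue(long value) const
        {
            switch (m_kind) {
            case None:
                return;
            case Ready:
                u.profile->bucket[0] = value; // ~storeValue into specFailBucket(0)
                return;
            case ArithProfileReady:
                u.arithProfile->lastResult = value; // ~emitObserveResult
                return;
            }
            assert(false);
        }

    private:
        enum Kind { None, Ready, ArithProfileReady } m_kind { None };
        union {
            ValueProfileSketch* profile;
            ArithProfileSketch* arithProfile;
        } u { nullptr };
    };

    int main()
    {
        ArithProfileSketch arith;
        MethodOfGettingAValueProfileSketch method(&arith);
        if (method)
            method.reportValue(42);
        return arith.lastResult == 42 ? 0 : 1;
    }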
diff --git a/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp
new file mode 100644
index 000000000..3c168c6c9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ModuleNamespaceAccessCase.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "HeapInlines.h"
+#include "JSModuleEnvironment.h"
+#include "JSModuleNamespaceObject.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+ModuleNamespaceAccessCase::ModuleNamespaceAccessCase(VM& vm, JSCell* owner, JSModuleNamespaceObject* moduleNamespaceObject, JSModuleEnvironment* moduleEnvironment, ScopeOffset scopeOffset)
+ : Base(vm, owner, ModuleNamespaceLoad, invalidOffset, nullptr, ObjectPropertyConditionSet())
+ , m_scopeOffset(scopeOffset)
+{
+ m_moduleNamespaceObject.set(vm, owner, moduleNamespaceObject);
+ m_moduleEnvironment.set(vm, owner, moduleEnvironment);
+}
+
+std::unique_ptr<AccessCase> ModuleNamespaceAccessCase::create(VM& vm, JSCell* owner, JSModuleNamespaceObject* moduleNamespaceObject, JSModuleEnvironment* moduleEnvironment, ScopeOffset scopeOffset)
+{
+ return std::unique_ptr<AccessCase>(new ModuleNamespaceAccessCase(vm, owner, moduleNamespaceObject, moduleEnvironment, scopeOffset));
+}
+
+ModuleNamespaceAccessCase::~ModuleNamespaceAccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> ModuleNamespaceAccessCase::clone() const
+{
+ std::unique_ptr<ModuleNamespaceAccessCase> result(new ModuleNamespaceAccessCase(*this));
+ result->resetState();
+ return WTFMove(result);
+}
+
+void ModuleNamespaceAccessCase::emit(AccessGenerationState& state, MacroAssembler::JumpList& fallThrough)
+{
+ CCallHelpers& jit = *state.jit;
+ JSValueRegs valueRegs = state.valueRegs;
+ GPRReg baseGPR = state.baseGPR;
+
+ fallThrough.append(
+ jit.branchPtr(
+ CCallHelpers::NotEqual,
+ baseGPR,
+ CCallHelpers::TrustedImmPtr(m_moduleNamespaceObject.get())));
+
+ jit.loadValue(&m_moduleEnvironment->variableAt(m_scopeOffset), valueRegs);
+ state.failAndIgnore.append(jit.branchIfEmpty(valueRegs));
+ state.succeed();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h
new file mode 100644
index 000000000..333075f2e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleNamespaceAccessCase.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 Yusuke Suzuki <utatane.tea@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+
+namespace JSC {
+
+class JSModuleEnvironment;
+class JSModuleNamespaceObject;
+
+class ModuleNamespaceAccessCase : public AccessCase {
+public:
+ using Base = AccessCase;
+ friend class AccessCase;
+
+ JSModuleNamespaceObject* moduleNamespaceObject() const { return m_moduleNamespaceObject.get(); }
+ JSModuleEnvironment* moduleEnvironment() const { return m_moduleEnvironment.get(); }
+ ScopeOffset scopeOffset() const { return m_scopeOffset; }
+
+ static std::unique_ptr<AccessCase> create(VM&, JSCell* owner, JSModuleNamespaceObject*, JSModuleEnvironment*, ScopeOffset);
+
+ std::unique_ptr<AccessCase> clone() const override;
+
+ void emit(AccessGenerationState&, MacroAssembler::JumpList& fallThrough);
+
+ ~ModuleNamespaceAccessCase();
+
+private:
+ ModuleNamespaceAccessCase(VM&, JSCell* owner, JSModuleNamespaceObject*, JSModuleEnvironment*, ScopeOffset);
+
+ WriteBarrier<JSModuleNamespaceObject> m_moduleNamespaceObject;
+ WriteBarrier<JSModuleEnvironment> m_moduleEnvironment;
+ ScopeOffset m_scopeOffset;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
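
ModuleNamespaceAccessCase::emit above compiles a two-step fast path: guard that the base equals the known namespace object, then load the binding straight out of the module environment, bailing to the slow path if the slot is still empty (a TDZ binding). A standalone sketch of that check-then-load shape, with stand-in types and std::optional modeling the fall-through:

    #include <optional>

    struct ValueSketch {
        long raw { 0 };
        bool isEmpty() const { return !raw; } // ~branchIfEmpty
    };

    struct ModuleEnvironmentSketch { ValueSketch slots[4]; };

    struct ModuleNamespaceCaseSketch {
        const void* expectedBase { nullptr };
        ModuleEnvironmentSketch* environment { nullptr };
        unsigned scopeOffset { 0 };

        std::optional<long> tryGet(const void* base) const
        {
            if (base != expectedBase)
                return std::nullopt;                 // ~fallThrough branch
            ValueSketch value = environment->slots[scopeOffset];
            if (value.isEmpty())
                return std::nullopt;                 // ~failAndIgnore branch
            return value.raw;                        // ~state.succeed()
        }
    };

    int main()
    {
        ModuleEnvironmentSketch env;
        env.slots[1].raw = 7;
        int base = 0;
        ModuleNamespaceCaseSketch accessCase { &base, &env, 1 };
        return accessCase.tryGet(&base).value_or(-1) == 7 ? 0 : 1;
    }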
diff --git a/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp
new file mode 100644
index 000000000..3d54c3ac8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ModuleProgramCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo ModuleProgramCodeBlock::s_info = {
+ "ModuleProgramCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(ModuleProgramCodeBlock)
+};
+
+void ModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<ModuleProgramCodeBlock*>(cell)->~ModuleProgramCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h
new file mode 100644
index 000000000..62674ea92
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ModuleProgramCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+
+namespace JSC {
+
+class ModuleProgramCodeBlock : public GlobalCodeBlock {
+public:
+ typedef GlobalCodeBlock Base;
+ DECLARE_INFO;
+
+ static ModuleProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+ {
+ ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+ ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
+ }
+
+ static ModuleProgramCodeBlock* create(VM* vm, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+ {
+ ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+ ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), firstLineColumnOffset);
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
+ }
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+private:
+ ModuleProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+ : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ ModuleProgramCodeBlock(VM* vm, Structure* structure, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+ : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), 0, firstLineColumnOffset)
+ {
+ }
+
+ static void destroy(JSCell*);
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
index 9a9db0bc7..301a3580c 100644
--- a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
+++ b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ObjectAllocationProfile_h
-#define ObjectAllocationProfile_h
+#pragma once
#include "VM.h"
#include "JSGlobalObject.h"
@@ -39,18 +38,21 @@ class ObjectAllocationProfile {
public:
static ptrdiff_t offsetOfAllocator() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_allocator); }
static ptrdiff_t offsetOfStructure() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_structure); }
+ static ptrdiff_t offsetOfInlineCapacity() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_inlineCapacity); }
ObjectAllocationProfile()
: m_allocator(0)
+ , m_inlineCapacity(0)
{
}
- bool isNull() { return !m_allocator; }
+ bool isNull() { return !m_structure; }
- void initialize(VM& vm, JSCell* owner, JSObject* prototype, unsigned inferredInlineCapacity)
+ void initialize(VM& vm, JSGlobalObject* globalObject, JSCell* owner, JSObject* prototype, unsigned inferredInlineCapacity)
{
ASSERT(!m_allocator);
ASSERT(!m_structure);
+ ASSERT(!m_inlineCapacity);
unsigned inlineCapacity = 0;
if (inferredInlineCapacity < JSFinalObject::defaultInlineCapacity()) {
@@ -80,33 +82,46 @@ public:
ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
- MarkedAllocator* allocator = &vm.heap.allocatorForObjectWithoutDestructor(allocationSize);
- ASSERT(allocator->cellSize());
-
+ MarkedAllocator* allocator = vm.cellSpace.allocatorFor(allocationSize);
+
// Take advantage of extra inline capacity available in the size class.
- size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
- inlineCapacity += slop;
- if (inlineCapacity > JSFinalObject::maxInlineCapacity())
- inlineCapacity = JSFinalObject::maxInlineCapacity();
+ if (allocator) {
+ size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
+ inlineCapacity += slop;
+ if (inlineCapacity > JSFinalObject::maxInlineCapacity())
+ inlineCapacity = JSFinalObject::maxInlineCapacity();
+ }
+
+ Structure* structure = vm.prototypeMap.emptyObjectStructureForPrototype(globalObject, prototype, inlineCapacity);
+
+    // Ensure that if another thread sees the structure, it will see it properly created.
+ WTF::storeStoreFence();
m_allocator = allocator;
- m_structure.set(vm, owner,
- vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity));
+ m_structure.set(vm, owner, structure);
+ m_inlineCapacity = inlineCapacity;
}
- Structure* structure() { return m_structure.get(); }
- unsigned inlineCapacity() { return m_structure->inlineCapacity(); }
+ Structure* structure()
+ {
+ Structure* structure = m_structure.get();
+        // Ensure that if we see the structure, it has been properly created.
+ WTF::loadLoadFence();
+ return structure;
+ }
+ unsigned inlineCapacity() { return m_inlineCapacity; }
void clear()
{
m_allocator = 0;
m_structure.clear();
+ m_inlineCapacity = 0;
ASSERT(isNull());
}
void visitAggregate(SlotVisitor& visitor)
{
- visitor.append(&m_structure);
+ visitor.append(m_structure);
}
private:
@@ -117,14 +132,14 @@ private:
return 0;
size_t count = 0;
- PropertyNameArray propertyNameArray(&vm);
- prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, ExcludeDontEnumProperties);
+ PropertyNameArray propertyNameArray(&vm, PropertyNameMode::StringsAndSymbols);
+ prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, EnumerationMode());
PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArray.data()->propertyNameVector();
for (size_t i = 0; i < propertyNameVector.size(); ++i) {
JSValue value = prototype->getDirect(vm, propertyNameVector[i]);
// Functions are common, and are usually class-level objects that are not overridden.
- if (jsDynamicCast<JSFunction*>(value))
+ if (jsDynamicCast<JSFunction*>(vm, value))
continue;
++count;
@@ -135,8 +150,7 @@ private:
MarkedAllocator* m_allocator; // Precomputed to make things easier for generated code.
WriteBarrier<Structure> m_structure;
+ unsigned m_inlineCapacity;
};
} // namespace JSC
-
-#endif // ObjectAllocationProfile_h
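
The reworked initialize() above rounds the object up to its allocator size class and folds the leftover bytes back into inline property capacity (the "slop"), now guarded because the allocator lookup can return null for oversized requests. A worked standalone sketch of that arithmetic, with illustrative constants rather than JSC's real sizes:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t wordSize = 8;        // sizeof(WriteBarrier<Unknown>) on 64-bit
        const size_t headerSize = 16;     // hypothetical object header
        const size_t maxInlineCapacity = 64;

        size_t inlineCapacity = 6;
        size_t allocationSize = headerSize + inlineCapacity * wordSize; // 64 bytes
        size_t cellSize = 80;             // hypothetical size class that fits it

        // Spare bytes in the size class become extra inline slots, capped.
        size_t slop = (cellSize - allocationSize) / wordSize;           // 2 slots
        inlineCapacity = std::min(inlineCapacity + slop, maxInlineCapacity);

        std::printf("inline capacity grows from 6 to %zu\n", inlineCapacity); // 8
        return 0;
    }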
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp
new file mode 100644
index 000000000..3aad09409
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ObjectPropertyCondition.h"
+
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+void ObjectPropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!*this) {
+ out.print("<invalid>");
+ return;
+ }
+
+ out.print("<", inContext(JSValue(m_object), context), ": ", inContext(m_condition, context), ">");
+}
+
+void ObjectPropertyCondition::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint(
+ Structure* structure) const
+{
+ return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint() const
+{
+ if (!*this)
+ return false;
+
+ return structureEnsuresValidityAssumingImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+ return m_condition.validityRequiresImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint() const
+{
+ if (!*this)
+ return false;
+
+ return validityRequiresImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(Structure* structure) const
+{
+ return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure, m_object);
+}
+
+bool ObjectPropertyCondition::isStillValidAssumingImpurePropertyWatchpoint() const
+{
+ if (!*this)
+ return false;
+
+ return isStillValidAssumingImpurePropertyWatchpoint(m_object->structure());
+}
+
+
+bool ObjectPropertyCondition::isStillValid(Structure* structure) const
+{
+ return m_condition.isStillValid(structure, m_object);
+}
+
+bool ObjectPropertyCondition::isStillValid() const
+{
+ if (!*this)
+ return false;
+
+ return isStillValid(m_object->structure());
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity(Structure* structure) const
+{
+ return m_condition.isStillValid(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity() const
+{
+ if (!*this)
+ return false;
+
+ return structureEnsuresValidity(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+ Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+ return m_condition.isWatchableAssumingImpurePropertyWatchpoint(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+ PropertyCondition::WatchabilityEffort effort) const
+{
+ if (!*this)
+ return false;
+
+ return isWatchableAssumingImpurePropertyWatchpoint(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(
+ Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+ return m_condition.isWatchable(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(PropertyCondition::WatchabilityEffort effort) const
+{
+ if (!*this)
+ return false;
+
+ return isWatchable(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isStillLive() const
+{
+ if (!*this)
+ return false;
+
+ if (!Heap::isMarked(m_object))
+ return false;
+
+ return m_condition.isStillLive();
+}
+
+void ObjectPropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+ if (!*this)
+ return;
+
+ tracked.check(m_object);
+ m_condition.validateReferences(tracked);
+}
+
+ObjectPropertyCondition ObjectPropertyCondition::attemptToMakeEquivalenceWithoutBarrier(VM& vm) const
+{
+ PropertyCondition result = condition().attemptToMakeEquivalenceWithoutBarrier(vm, object());
+ if (!result)
+ return ObjectPropertyCondition();
+ return ObjectPropertyCondition(object(), result);
+}
+
+} // namespace JSC
+
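
Each parameterless validity check above follows the same shape: bail if the condition is invalid, snapshot the object's current structure once, and forward to the Structure*-taking overload. Concurrent compilers call the explicit-Structure* forms directly so the structure they validate is the one they optimize against. A standalone sketch of that snapshot-and-forward pattern, with stand-in types:

    struct StructureSketch { unsigned id; };

    struct ObjectSketch { StructureSketch* structure; };

    struct ConditionSketch {
        unsigned requiredStructureID;
        bool isStillValid(StructureSketch* s) const { return s->id == requiredStructureID; }
    };

    struct ObjectPropertyConditionSketch {
        ObjectSketch* object { nullptr };
        ConditionSketch condition { 0 };

        explicit operator bool() const { return object != nullptr; }

        bool isStillValid(StructureSketch* structure) const
        {
            return condition.isStillValid(structure); // validate a given snapshot
        }

        bool isStillValid() const
        {
            if (!*this)
                return false;
            return isStillValid(object->structure); // snapshot, then forward
        }
    };

    int main()
    {
        StructureSketch s { 1 };
        ObjectSketch o { &s };
        ObjectPropertyConditionSketch cond { &o, { 1 } };
        return cond.isStillValid() ? 0 : 1;
    }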
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h
new file mode 100644
index 000000000..377e07ba2
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSObject.h"
+#include "PropertyCondition.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class ObjectPropertyCondition {
+public:
+ ObjectPropertyCondition()
+ : m_object(nullptr)
+ {
+ }
+
+ ObjectPropertyCondition(WTF::HashTableDeletedValueType token)
+ : m_object(nullptr)
+ , m_condition(token)
+ {
+ }
+
+ ObjectPropertyCondition(JSObject* object, const PropertyCondition& condition)
+ : m_object(object)
+ , m_condition(condition)
+ {
+ }
+
+ static ObjectPropertyCondition presenceWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::presenceWithoutBarrier(uid, offset, attributes);
+ return result;
+ }
+
+ static ObjectPropertyCondition presence(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyOffset offset,
+ unsigned attributes)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return presenceWithoutBarrier(object, uid, offset, attributes);
+ }
+
+ // NOTE: The prototype is the storedPrototype, not the prototypeForLookup.
+ static ObjectPropertyCondition absenceWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::absenceWithoutBarrier(uid, prototype);
+ return result;
+ }
+
+ static ObjectPropertyCondition absence(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceWithoutBarrier(object, uid, prototype);
+ }
+
+ static ObjectPropertyCondition absenceOfSetterWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::absenceOfSetterWithoutBarrier(uid, prototype);
+ return result;
+ }
+
+ static ObjectPropertyCondition absenceOfSetter(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceOfSetterWithoutBarrier(object, uid, prototype);
+ }
+
+ static ObjectPropertyCondition equivalenceWithoutBarrier(
+ JSObject* object, UniquedStringImpl* uid, JSValue value)
+ {
+ ObjectPropertyCondition result;
+ result.m_object = object;
+ result.m_condition = PropertyCondition::equivalenceWithoutBarrier(uid, value);
+ return result;
+ }
+
+ static ObjectPropertyCondition equivalence(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSValue value)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return equivalenceWithoutBarrier(object, uid, value);
+ }
+
+ explicit operator bool() const { return !!m_condition; }
+
+ JSObject* object() const { return m_object; }
+ PropertyCondition condition() const { return m_condition; }
+
+ PropertyCondition::Kind kind() const { return condition().kind(); }
+ UniquedStringImpl* uid() const { return condition().uid(); }
+ bool hasOffset() const { return condition().hasOffset(); }
+ PropertyOffset offset() const { return condition().offset(); }
+    bool hasAttributes() const { return condition().hasAttributes(); }
+ unsigned attributes() const { return condition().attributes(); }
+ bool hasPrototype() const { return condition().hasPrototype(); }
+ JSObject* prototype() const { return condition().prototype(); }
+ bool hasRequiredValue() const { return condition().hasRequiredValue(); }
+ JSValue requiredValue() const { return condition().requiredValue(); }
+
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<JSObject*>::hash(m_object) ^ m_condition.hash();
+ }
+
+ bool operator==(const ObjectPropertyCondition& other) const
+ {
+ return m_object == other.m_object
+ && m_condition == other.m_condition;
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return !m_object && m_condition.isHashTableDeletedValue();
+ }
+
+ // Two conditions are compatible if they are identical or if they speak of different uids or
+ // different objects. If false is returned, you have to decide how to resolve the conflict -
+ // for example if there is a Presence and an Equivalence then in some cases you'll want the
+ // more general of the two while in other cases you'll want the more specific of the two. This
+ // will also return false for contradictions, like Presence and Absence on the same
+ // object/uid. By convention, invalid conditions aren't compatible with anything.
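+    // For example (illustrative; o1 and o2 stand for arbitrary distinct objects):
+    //   Presence(o1, "x") is compatible with Presence(o2, "x") (different objects);
+    //   Presence(o1, "x") is compatible with Absence(o1, "y") (different uids);
+    //   Presence(o1, "x") is not compatible with Absence(o1, "x") (a contradiction).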
+ bool isCompatibleWith(const ObjectPropertyCondition& other) const
+ {
+ if (!*this || !other)
+ return false;
+ return *this == other || uid() != other.uid() || object() != other.object();
+ }
+
+    // These validity-checking methods can optionally take a Structure* instead of loading the
+ // Structure* from the object. If you're in the concurrent JIT, then you must use the forms
+ // that take an explicit Structure* because you want the compiler to optimize for the same
+ // structure that you validated (i.e. avoid a TOCTOU race).
+
+ // Checks if the object's structure claims that the property won't be intercepted. Validity
+ // does not require watchpoints on the object.
+ bool structureEnsuresValidityAssumingImpurePropertyWatchpoint(Structure*) const;
+ bool structureEnsuresValidityAssumingImpurePropertyWatchpoint() const;
+
+ // Returns true if we need an impure property watchpoint to ensure validity even if
+ // isStillValidAccordingToStructure() returned true.
+ bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+ bool validityRequiresImpurePropertyWatchpoint() const;
+
+ // Checks if the condition still holds setting aside the need for an impure property watchpoint.
+ // Validity might still require watchpoints on the object.
+ bool isStillValidAssumingImpurePropertyWatchpoint(Structure*) const;
+ bool isStillValidAssumingImpurePropertyWatchpoint() const;
+
+ // Checks if the condition still holds. May conservatively return false, if the object and
+ // structure alone don't guarantee the condition. Note that this may return true if the
+ // condition still requires some watchpoints on the object in addition to checking the
+ // structure. If you want to check if the condition holds by using the structure alone,
+ // use structureEnsuresValidity().
+ bool isStillValid(Structure*) const;
+ bool isStillValid() const;
+
+ // Shorthand for condition().isStillValid(structure).
+ bool structureEnsuresValidity(Structure*) const;
+ bool structureEnsuresValidity() const;
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure and possibly an impure property watchpoint.
+ bool isWatchableAssumingImpurePropertyWatchpoint(
+ Structure*,
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+ bool isWatchableAssumingImpurePropertyWatchpoint(
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure.
+ bool isWatchable(
+ Structure*,
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+ bool isWatchable(
+ PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+
+ bool watchingRequiresStructureTransitionWatchpoint() const
+ {
+ return condition().watchingRequiresStructureTransitionWatchpoint();
+ }
+ bool watchingRequiresReplacementWatchpoint() const
+ {
+ return condition().watchingRequiresReplacementWatchpoint();
+ }
+
+ // This means that the objects involved in this are still live.
+ bool isStillLive() const;
+
+ void validateReferences(const TrackedReferences&) const;
+
+ bool isValidValueForPresence(VM& vm, JSValue value) const
+ {
+ return condition().isValidValueForPresence(vm, value);
+ }
+
+ ObjectPropertyCondition attemptToMakeEquivalenceWithoutBarrier(VM&) const;
+
+private:
+ JSObject* m_object;
+ PropertyCondition m_condition;
+};
+
+struct ObjectPropertyConditionHash {
+ static unsigned hash(const ObjectPropertyCondition& key) { return key.hash(); }
+ static bool equal(
+ const ObjectPropertyCondition& a, const ObjectPropertyCondition& b)
+ {
+ return a == b;
+ }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::ObjectPropertyCondition> {
+ typedef JSC::ObjectPropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::ObjectPropertyCondition> : SimpleClassHashTraits<JSC::ObjectPropertyCondition> { };
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp
new file mode 100644
index 000000000..e2e4a8fbb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ObjectPropertyConditionSet.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forObject(JSObject* object) const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.object() == object)
+ return condition;
+ }
+ return ObjectPropertyCondition();
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forConditionKind(
+ PropertyCondition::Kind kind) const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.kind() == kind)
+ return condition;
+ }
+ return ObjectPropertyCondition();
+}
+
+unsigned ObjectPropertyConditionSet::numberOfConditionsWithKind(PropertyCondition::Kind kind) const
+{
+ unsigned result = 0;
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.kind() == kind)
+ result++;
+ }
+ return result;
+}
+
+bool ObjectPropertyConditionSet::hasOneSlotBaseCondition() const
+{
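+    // "!=" on the two bools acts as logical XOR: true when exactly one of the two kinds
+    // has exactly one condition in the set.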
+ return (numberOfConditionsWithKind(PropertyCondition::Presence) == 1) != (numberOfConditionsWithKind(PropertyCondition::Equivalence) == 1);
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::slotBaseCondition() const
+{
+ ObjectPropertyCondition result;
+ unsigned numFound = 0;
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.kind() == PropertyCondition::Presence
+ || condition.kind() == PropertyCondition::Equivalence) {
+ result = condition;
+ numFound++;
+ }
+ }
+ RELEASE_ASSERT(numFound == 1);
+ return result;
+}
+
+ObjectPropertyConditionSet ObjectPropertyConditionSet::mergedWith(
+ const ObjectPropertyConditionSet& other) const
+{
+ if (!isValid() || !other.isValid())
+ return invalid();
+
+ Vector<ObjectPropertyCondition> result;
+
+ if (!isEmpty())
+ result.appendVector(m_data->vector);
+
+ for (const ObjectPropertyCondition& newCondition : other) {
+ bool foundMatch = false;
+ for (const ObjectPropertyCondition& existingCondition : *this) {
+ if (newCondition == existingCondition) {
+ foundMatch = true;
+ continue;
+ }
+ if (!newCondition.isCompatibleWith(existingCondition))
+ return invalid();
+ }
+ if (!foundMatch)
+ result.append(newCondition);
+ }
+
+ return create(result);
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidity() const
+{
+ if (!isValid())
+ return false;
+
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (!condition.structureEnsuresValidity())
+ return false;
+ }
+ return true;
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidityAssumingImpurePropertyWatchpoint() const
+{
+ if (!isValid())
+ return false;
+
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint())
+ return false;
+ }
+ return true;
+}
+
+bool ObjectPropertyConditionSet::needImpurePropertyWatchpoint() const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (condition.validityRequiresImpurePropertyWatchpoint())
+ return true;
+ }
+ return false;
+}
+
+bool ObjectPropertyConditionSet::areStillLive() const
+{
+ for (const ObjectPropertyCondition& condition : *this) {
+ if (!condition.isStillLive())
+ return false;
+ }
+ return true;
+}
+
+void ObjectPropertyConditionSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!isValid()) {
+ out.print("<invalid>");
+ return;
+ }
+
+ out.print("[");
+ if (m_data)
+ out.print(listDumpInContext(m_data->vector, context));
+ out.print("]");
+}
+
+void ObjectPropertyConditionSet::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+bool ObjectPropertyConditionSet::isValidAndWatchable() const
+{
+ if (!isValid())
+ return false;
+
+    for (const ObjectPropertyCondition& condition : m_data->vector) {
+ if (!condition.isWatchable())
+ return false;
+ }
+ return true;
+}
+
+namespace {
+
+bool verbose = false;
+
+ObjectPropertyCondition generateCondition(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyCondition::Kind conditionKind)
+{
+ Structure* structure = object->structure();
+ if (verbose)
+ dataLog("Creating condition ", conditionKind, " for ", pointerDump(structure), "\n");
+
+ ObjectPropertyCondition result;
+ switch (conditionKind) {
+ case PropertyCondition::Presence: {
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (offset == invalidOffset)
+ return ObjectPropertyCondition();
+ result = ObjectPropertyCondition::presence(vm, owner, object, uid, offset, attributes);
+ break;
+ }
+ case PropertyCondition::Absence: {
+ result = ObjectPropertyCondition::absence(
+ vm, owner, object, uid, object->structure()->storedPrototypeObject());
+ break;
+ }
+ case PropertyCondition::AbsenceOfSetter: {
+ result = ObjectPropertyCondition::absenceOfSetter(
+ vm, owner, object, uid, object->structure()->storedPrototypeObject());
+ break;
+ }
+ case PropertyCondition::Equivalence: {
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (offset == invalidOffset)
+ return ObjectPropertyCondition();
+ JSValue value = object->getDirect(offset);
+ result = ObjectPropertyCondition::equivalence(vm, owner, object, uid, value);
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return ObjectPropertyCondition();
+ }
+
+ if (!result.isStillValidAssumingImpurePropertyWatchpoint()) {
+ if (verbose)
+ dataLog("Failed to create condition: ", result, "\n");
+ return ObjectPropertyCondition();
+ }
+
+ if (verbose)
+ dataLog("New condition: ", result, "\n");
+ return result;
+}
+
+enum Concurrency {
+ MainThread,
+ Concurrent
+};
+template<typename Functor>
+ObjectPropertyConditionSet generateConditions(
+ VM& vm, JSGlobalObject* globalObject, Structure* structure, JSObject* prototype, const Functor& functor,
+ Concurrency concurrency = MainThread)
+{
+ Vector<ObjectPropertyCondition> conditions;
+
+ for (;;) {
+ if (verbose)
+ dataLog("Considering structure: ", pointerDump(structure), "\n");
+
+ if (structure->isProxy()) {
+ if (verbose)
+ dataLog("It's a proxy, so invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+
+ JSValue value = structure->prototypeForLookup(globalObject);
+
+ if (value.isNull()) {
+ if (!prototype) {
+ if (verbose)
+ dataLog("Reached end of prototype chain as expected, done.\n");
+ break;
+ }
+ if (verbose)
+ dataLog("Unexpectedly reached end of prototype chain, so invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+
+ JSObject* object = jsCast<JSObject*>(value);
+ structure = object->structure(vm);
+
+ if (structure->isDictionary()) {
+ if (concurrency == MainThread) {
+ if (structure->hasBeenFlattenedBefore()) {
+ if (verbose)
+ dataLog("Dictionary has been flattened before, so invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+
+ if (verbose)
+ dataLog("Flattening ", pointerDump(structure));
+ structure->flattenDictionaryStructure(vm, object);
+ } else {
+ if (verbose)
+ dataLog("Cannot flatten dictionary when not on main thread, so invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+ }
+
+ if (!functor(conditions, object)) {
+ if (verbose)
+ dataLog("Functor failed, invalid.\n");
+ return ObjectPropertyConditionSet::invalid();
+ }
+
+ if (object == prototype) {
+ if (verbose)
+ dataLog("Reached desired prototype, done.\n");
+ break;
+ }
+ }
+
+ if (verbose)
+ dataLog("Returning conditions: ", listDump(conditions), "\n");
+ return ObjectPropertyConditionSet::create(conditions);
+}
+
+} // anonymous namespace
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, PropertyCondition::AbsenceOfSetter);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, prototype,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ PropertyCondition::Kind kind =
+ object == prototype ? PropertyCondition::Presence : PropertyCondition::Absence;
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, kind);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+ VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, exec->lexicalGlobalObject(), headStructure, prototype,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ if (object == prototype)
+ return true;
+ ObjectPropertyCondition result =
+ generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypeEquivalenceConcurrently(
+ VM& vm, JSGlobalObject* globalObject, Structure* headStructure, JSObject* prototype, UniquedStringImpl* uid)
+{
+ return generateConditions(vm, globalObject, headStructure, prototype,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ PropertyCondition::Kind kind =
+ object == prototype ? PropertyCondition::Equivalence : PropertyCondition::Absence;
+ ObjectPropertyCondition result = generateCondition(vm, nullptr, object, uid, kind);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ }, Concurrent);
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertyMissConcurrently(
+ VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, globalObject, headStructure, nullptr,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ ObjectPropertyCondition result = generateCondition(vm, nullptr, object, uid, PropertyCondition::Absence);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ }, Concurrent);
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+ VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid)
+{
+ return generateConditions(
+ vm, globalObject, headStructure, nullptr,
+ [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+ ObjectPropertyCondition result =
+ generateCondition(vm, nullptr, object, uid, PropertyCondition::AbsenceOfSetter);
+ if (!result)
+ return false;
+ conditions.append(result);
+ return true;
+ }, Concurrent);
+}
+
+ObjectPropertyCondition generateConditionForSelfEquivalence(
+ VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid)
+{
+ return generateCondition(vm, owner, object, uid, PropertyCondition::Equivalence);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h
new file mode 100644
index 000000000..2b15965f6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ObjectPropertyCondition.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// An object property condition set is used to represent the set of additional conditions
+// that need to be met for some heap access to be valid. The set can have the following
+// interesting states:
+//
+// Empty: There are no special conditions that need to be met.
+// Invalid: The heap access is never valid.
+// Non-empty: The heap access is valid if all the ObjectPropertyConditions in the set are valid.
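+//
+// For example (illustrative), caching a load of "f" found on the second prototype of a
+// chain base -> protoA -> protoB would typically use the set
+// { Absence(protoA, "f"), Presence(protoB, "f") }; see
+// generateConditionsForPrototypePropertyHit() below.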
+
+class ObjectPropertyConditionSet {
+public:
+ ObjectPropertyConditionSet() { }
+
+ static ObjectPropertyConditionSet invalid()
+ {
+ ObjectPropertyConditionSet result;
+ result.m_data = adoptRef(new Data());
+ return result;
+ }
+
+ static ObjectPropertyConditionSet create(const Vector<ObjectPropertyCondition>& vector)
+ {
+ if (vector.isEmpty())
+ return ObjectPropertyConditionSet();
+
+ ObjectPropertyConditionSet result;
+ result.m_data = adoptRef(new Data());
+ result.m_data->vector = vector;
+ return result;
+ }
+
+ bool isValid() const
+ {
+ return !m_data || !m_data->vector.isEmpty();
+ }
+
+ bool isValidAndWatchable() const;
+
+ bool isEmpty() const
+ {
+ return !m_data;
+ }
+
+ typedef const ObjectPropertyCondition* iterator;
+
+ iterator begin() const
+ {
+ if (!m_data)
+ return nullptr;
+ return m_data->vector.begin();
+ }
+ iterator end() const
+ {
+ if (!m_data)
+ return nullptr;
+ return m_data->vector.end();
+ }
+
+ ObjectPropertyCondition forObject(JSObject*) const;
+ ObjectPropertyCondition forConditionKind(PropertyCondition::Kind) const;
+
+ unsigned numberOfConditionsWithKind(PropertyCondition::Kind) const;
+
+ bool hasOneSlotBaseCondition() const;
+
+ // If this is a condition set for a prototype hit, then this is guaranteed to return the
+ // condition on the prototype itself. This allows you to get the object, offset, and
+    // attributes for the prototype. This will RELEASE_ASSERT that there is exactly one
+    // Presence or Equivalence condition in the set, and it will return that condition.
+ ObjectPropertyCondition slotBaseCondition() const;
+
+ // Attempt to create a new condition set by merging this one with the other one. This will
+    // fail if any of the conditions are incompatible with each other. If it fails, it returns
+ // invalid().
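+    // E.g. (illustrative): merging { Absence(p1, "f") } with { Presence(p2, "f") } yields a
+    // set containing both conditions, while merging { Presence(p1, "f") } with
+    // { Absence(p1, "f") } yields invalid(), since the two contradict.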
+ ObjectPropertyConditionSet mergedWith(const ObjectPropertyConditionSet& other) const;
+
+ bool structuresEnsureValidity() const;
+ bool structuresEnsureValidityAssumingImpurePropertyWatchpoint() const;
+
+ bool needImpurePropertyWatchpoint() const;
+ bool areStillLive() const;
+
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
+
+ // Helpers for using this in a union.
+ void* releaseRawPointer()
+ {
+ return static_cast<void*>(m_data.leakRef());
+ }
+ static ObjectPropertyConditionSet adoptRawPointer(void* rawPointer)
+ {
+ ObjectPropertyConditionSet result;
+ result.m_data = adoptRef(static_cast<Data*>(rawPointer));
+ return result;
+ }
+ static ObjectPropertyConditionSet fromRawPointer(void* rawPointer)
+ {
+ ObjectPropertyConditionSet result;
+ result.m_data = static_cast<Data*>(rawPointer);
+ return result;
+ }
+
+ // FIXME: Everything below here should be private, but cannot be because of a bug in VS.
+
+ // Internally, this represents Invalid using a pointer to a Data that has an empty vector.
+
+ // FIXME: This could be made more compact by having it internally use a vector that just has
+ // the non-uid portion of ObjectPropertyCondition, and then requiring that the callers of all
+ // of the APIs supply the uid.
+
+ class Data : public ThreadSafeRefCounted<Data> {
+ WTF_MAKE_NONCOPYABLE(Data);
+ WTF_MAKE_FAST_ALLOCATED;
+
+ public:
+ Data() { }
+
+ Vector<ObjectPropertyCondition> vector;
+ };
+
+private:
+ RefPtr<Data> m_data;
+};
+
+ObjectPropertyCondition generateConditionForSelfEquivalence(
+ VM&, JSCell* owner, JSObject* object, UniquedStringImpl* uid);
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+ VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid);
+
+ObjectPropertyConditionSet generateConditionsForPrototypeEquivalenceConcurrently(
+ VM&, JSGlobalObject*, Structure* headStructure, JSObject* prototype,
+ UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertyMissConcurrently(
+ VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+ VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid);
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/Opcode.cpp b/Source/JavaScriptCore/bytecode/Opcode.cpp
index 26f53511a..0d16dfc2f 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.cpp
+++ b/Source/JavaScriptCore/bytecode/Opcode.cpp
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,6 +30,8 @@
#include "config.h"
#include "Opcode.h"
+#include <wtf/PrintStream.h>
+
#if ENABLE(OPCODE_STATS)
#include <array>
#include <wtf/DataLog.h>
@@ -185,3 +187,14 @@ void OpcodeStats::resetLastInstruction()
#endif
} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, OpcodeID opcode)
+{
+ out.print(opcodeNames[opcode]);
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index e8636e785..41c8509a2 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -27,9 +27,9 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Opcode_h
-#define Opcode_h
+#pragma once
+#include "Bytecodes.h"
#include "LLIntOpcode.h"
#include <algorithm>
@@ -40,158 +40,8 @@
namespace JSC {
#define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \
- macro(op_enter, 1) \
- macro(op_create_activation, 2) \
- macro(op_touch_entry, 1) \
- macro(op_init_lazy_reg, 2) \
- macro(op_create_arguments, 2) \
- macro(op_create_this, 4) \
- macro(op_get_callee, 3) \
- macro(op_to_this, 3) \
- \
- macro(op_new_object, 4) \
- macro(op_new_array, 5) \
- macro(op_new_array_with_size, 4) \
- macro(op_new_array_buffer, 5) \
- macro(op_new_regexp, 3) \
- macro(op_mov, 3) \
- macro(op_captured_mov, 4) \
- \
- macro(op_not, 3) \
- macro(op_eq, 4) \
- macro(op_eq_null, 3) \
- macro(op_neq, 4) \
- macro(op_neq_null, 3) \
- macro(op_stricteq, 4) \
- macro(op_nstricteq, 4) \
- macro(op_less, 4) \
- macro(op_lesseq, 4) \
- macro(op_greater, 4) \
- macro(op_greatereq, 4) \
- \
- macro(op_inc, 2) \
- macro(op_dec, 2) \
- macro(op_to_number, 3) \
- macro(op_negate, 3) \
- macro(op_add, 5) \
- macro(op_mul, 5) \
- macro(op_div, 5) \
- macro(op_mod, 4) \
- macro(op_sub, 5) \
- \
- macro(op_lshift, 4) \
- macro(op_rshift, 4) \
- macro(op_urshift, 4) \
- macro(op_unsigned, 3) \
- macro(op_bitand, 5) \
- macro(op_bitxor, 5) \
- macro(op_bitor, 5) \
- \
- macro(op_check_has_instance, 5) \
- macro(op_instanceof, 4) \
- macro(op_typeof, 3) \
- macro(op_is_undefined, 3) \
- macro(op_is_boolean, 3) \
- macro(op_is_number, 3) \
- macro(op_is_string, 3) \
- macro(op_is_object, 3) \
- macro(op_is_function, 3) \
- macro(op_in, 4) \
- \
- macro(op_init_global_const_nop, 5) \
- macro(op_init_global_const, 5) \
- macro(op_get_by_id, 9) /* has value profiling */ \
- macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \
- macro(op_get_by_id_self, 9) /* has value profiling */ \
- macro(op_get_by_id_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_self, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_getter_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_self, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_proto, 9) /* has value profiling */ \
- macro(op_get_by_id_custom_chain, 9) /* has value profiling */ \
- macro(op_get_by_id_generic, 9) /* has value profiling */ \
- macro(op_get_array_length, 9) /* has value profiling */ \
- macro(op_get_string_length, 9) /* has value profiling */ \
- macro(op_get_arguments_length, 4) \
- macro(op_put_by_id, 9) \
- macro(op_put_by_id_out_of_line, 9) \
- macro(op_put_by_id_transition, 9) \
- macro(op_put_by_id_transition_direct, 9) \
- macro(op_put_by_id_transition_direct_out_of_line, 9) \
- macro(op_put_by_id_transition_normal, 9) \
- macro(op_put_by_id_transition_normal_out_of_line, 9) \
- macro(op_put_by_id_replace, 9) \
- macro(op_put_by_id_generic, 9) \
- macro(op_del_by_id, 4) \
- macro(op_get_by_val, 6) /* has value profiling */ \
- macro(op_get_argument_by_val, 6) /* must be the same size as op_get_by_val */ \
- macro(op_get_by_pname, 7) \
- macro(op_put_by_val, 5) \
- macro(op_put_by_val_direct, 5) \
- macro(op_del_by_val, 4) \
- macro(op_put_by_index, 4) \
- macro(op_put_getter_setter, 5) \
- \
- macro(op_jmp, 2) \
- macro(op_jtrue, 3) \
- macro(op_jfalse, 3) \
- macro(op_jeq_null, 3) \
- macro(op_jneq_null, 3) \
- macro(op_jneq_ptr, 4) \
- macro(op_jless, 4) \
- macro(op_jlesseq, 4) \
- macro(op_jgreater, 4) \
- macro(op_jgreatereq, 4) \
- macro(op_jnless, 4) \
- macro(op_jnlesseq, 4) \
- macro(op_jngreater, 4) \
- macro(op_jngreatereq, 4) \
- \
- macro(op_loop_hint, 1) \
- \
- macro(op_switch_imm, 4) \
- macro(op_switch_char, 4) \
- macro(op_switch_string, 4) \
- \
- macro(op_new_func, 4) \
- macro(op_new_captured_func, 4) \
- macro(op_new_func_exp, 3) \
- macro(op_call, 8) /* has value profiling */ \
- macro(op_call_eval, 8) /* has value profiling */ \
- macro(op_call_varargs, 8) /* has value profiling */ \
- macro(op_tear_off_activation, 2) \
- macro(op_tear_off_arguments, 3) \
- macro(op_ret, 2) \
- macro(op_ret_object_or_this, 3) \
- \
- macro(op_construct, 8) \
- macro(op_strcat, 4) \
- macro(op_to_primitive, 3) \
- \
- macro(op_get_pnames, 6) \
- macro(op_next_pname, 7) \
- \
- macro(op_resolve_scope, 6) \
- macro(op_get_from_scope, 8) /* has value profiling */ \
- macro(op_put_to_scope, 7) \
- \
- macro(op_push_with_scope, 2) \
- macro(op_pop_scope, 1) \
- macro(op_push_name_scope, 4) \
- \
- macro(op_catch, 2) \
- macro(op_throw, 2) \
- macro(op_throw_static_error, 3) \
- \
- macro(op_debug, 3) \
- macro(op_profile_will_call, 2) \
- macro(op_profile_did_call, 2) \
- \
- extension__ \
- \
- macro(op_end, 2) // end must be the last opcode in the list
+ FOR_EACH_BYTECODE_ID(macro) \
+ extension__
#define FOR_EACH_CORE_OPCODE_ID(macro) \
FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ )
@@ -204,11 +54,15 @@ namespace JSC {
#define OPCODE_ID_ENUM(opcode, length) opcode,
- typedef enum { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) } OpcodeID;
+ enum OpcodeID : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) };
#undef OPCODE_ID_ENUM
const int maxOpcodeLength = 9;
-const int numOpcodeIDs = op_end + 1;
+#if !ENABLE(JIT)
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_CLOOP_BYTECODE_HELPER_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#else
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#endif
#define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS);
@@ -220,10 +74,19 @@ const int numOpcodeIDs = op_end + 1;
const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) };
#undef OPCODE_ID_LENGTH_MAP
-#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= op_end, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
+#if COMPILER(GCC)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+#endif
+
+#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= numOpcodeIDs, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
#undef VERIFY_OPCODE_ID
+#if COMPILER(GCC)
+#pragma GCC diagnostic pop
+#endif
+
#if ENABLE(COMPUTED_GOTO_OPCODES)
typedef void* Opcode;
#else
@@ -271,6 +134,70 @@ inline size_t opcodeLength(OpcodeID opcode)
return 0;
}
+inline bool isBranch(OpcodeID opcodeID)
+{
+ switch (opcodeID) {
+ case op_jmp:
+ case op_jtrue:
+ case op_jfalse:
+ case op_jeq_null:
+ case op_jneq_null:
+ case op_jneq_ptr:
+ case op_jless:
+ case op_jlesseq:
+ case op_jgreater:
+ case op_jgreatereq:
+ case op_jnless:
+ case op_jnlesseq:
+ case op_jngreater:
+ case op_jngreatereq:
+ case op_switch_imm:
+ case op_switch_char:
+ case op_switch_string:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isUnconditionalBranch(OpcodeID opcodeID)
+{
+ switch (opcodeID) {
+ case op_jmp:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isTerminal(OpcodeID opcodeID)
+{
+ switch (opcodeID) {
+ case op_ret:
+ case op_end:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool isThrow(OpcodeID opcodeID)
+{
+ switch (opcodeID) {
+ case op_throw:
+ case op_throw_static_error:
+ return true;
+ default:
+ return false;
+ }
+}
+
} // namespace JSC
-#endif // Opcode_h
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::OpcodeID);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h
index f21e05f5f..102879814 100644
--- a/Source/JavaScriptCore/bytecode/Operands.h
+++ b/Source/JavaScriptCore/bytecode/Operands.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013, 2015, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Operands_h
-#define Operands_h
+#pragma once
#include "CallFrame.h"
#include "JSObject.h"
@@ -37,32 +36,37 @@ namespace JSC {
template<typename T> struct OperandValueTraits;
-template<typename T>
-struct OperandValueTraits {
- static T defaultValue() { return T(); }
- static bool isEmptyForDump(const T& value) { return !value; }
-};
-
enum OperandKind { ArgumentOperand, LocalOperand };
enum OperandsLikeTag { OperandsLike };
-template<typename T, typename Traits = OperandValueTraits<T>>
+template<typename T>
class Operands {
public:
Operands() { }
explicit Operands(size_t numArguments, size_t numLocals)
{
- m_arguments.fill(Traits::defaultValue(), numArguments);
- m_locals.fill(Traits::defaultValue(), numLocals);
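+        // When T needs initialization, resize() value-initializes the new slots itself;
+        // otherwise we stamp them with T() explicitly via fill().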
+ if (WTF::VectorTraits<T>::needsInitialization) {
+ m_arguments.resize(numArguments);
+ m_locals.resize(numLocals);
+ } else {
+ m_arguments.fill(T(), numArguments);
+ m_locals.fill(T(), numLocals);
+ }
+ }
+
+ explicit Operands(size_t numArguments, size_t numLocals, const T& initialValue)
+ {
+ m_arguments.fill(initialValue, numArguments);
+ m_locals.fill(initialValue, numLocals);
}
- template<typename U, typename OtherTraits>
- explicit Operands(OperandsLikeTag, const Operands<U, OtherTraits>& other)
+ template<typename U>
+ explicit Operands(OperandsLikeTag, const Operands<U>& other)
{
- m_arguments.fill(Traits::defaultValue(), other.numberOfArguments());
- m_locals.fill(Traits::defaultValue(), other.numberOfLocals());
+ m_arguments.fill(T(), other.numberOfArguments());
+ m_locals.fill(T(), other.numberOfLocals());
}
size_t numberOfArguments() const { return m_arguments.size(); }
@@ -103,8 +107,21 @@ public:
size_t oldSize = m_locals.size();
m_locals.resize(size);
+ if (!WTF::VectorTraits<T>::needsInitialization) {
+ for (size_t i = oldSize; i < m_locals.size(); ++i)
+ m_locals[i] = T();
+ }
+ }
+
+ void ensureLocals(size_t size, const T& ensuredValue)
+ {
+ if (size <= m_locals.size())
+ return;
+
+ size_t oldSize = m_locals.size();
+ m_locals.resize(size);
for (size_t i = oldSize; i < m_locals.size(); ++i)
- m_locals[i] = Traits::defaultValue();
+ m_locals[i] = ensuredValue;
}
void setLocal(size_t idx, const T& value)
@@ -117,19 +134,19 @@ public:
T getLocal(size_t idx)
{
if (idx >= m_locals.size())
- return Traits::defaultValue();
+ return T();
return m_locals[idx];
}
void setArgumentFirstTime(size_t idx, const T& value)
{
- ASSERT(m_arguments[idx] == Traits::defaultValue());
+ ASSERT(m_arguments[idx] == T());
argument(idx) = value;
}
void setLocalFirstTime(size_t idx, const T& value)
{
- ASSERT(idx >= m_locals.size() || m_locals[idx] == Traits::defaultValue());
+ ASSERT(idx >= m_locals.size() || m_locals[idx] == T());
setLocal(idx, value);
}
@@ -149,6 +166,7 @@ public:
}
const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
+ const T& operand(VirtualRegister operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
bool hasOperand(int operand) const
{
@@ -209,6 +227,10 @@ public:
return virtualRegisterForArgument(index).offset();
return virtualRegisterForLocal(index - numberOfArguments()).offset();
}
+ VirtualRegister virtualRegisterForIndex(size_t index) const
+ {
+ return VirtualRegister(operandForIndex(index));
+ }
size_t indexForOperand(int operand) const
{
if (operandIsArgument(operand))
@@ -240,7 +262,7 @@ public:
void clear()
{
- fill(Traits::defaultValue());
+ fill(T());
}
bool operator==(const Operands& other) const
@@ -252,11 +274,7 @@ public:
}
void dumpInContext(PrintStream& out, DumpContext* context) const;
-
- void dump(PrintStream& out) const
- {
- dumpInContext(out, 0);
- }
+ void dump(PrintStream& out) const;
private:
Vector<T, 8> m_arguments;
@@ -264,6 +282,3 @@ private:
};
} // namespace JSC
-
-#endif // Operands_h
-
diff --git a/Source/JavaScriptCore/bytecode/OperandsInlines.h b/Source/JavaScriptCore/bytecode/OperandsInlines.h
index 74ad60bc1..65fedda07 100644
--- a/Source/JavaScriptCore/bytecode/OperandsInlines.h
+++ b/Source/JavaScriptCore/bytecode/OperandsInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,31 +23,43 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef OperandsInlines_h
-#define OperandsInlines_h
+#pragma once
#include "Operands.h"
#include <wtf/CommaPrinter.h>
namespace JSC {
-template<typename T, typename Traits>
-void Operands<T, Traits>::dumpInContext(PrintStream& out, DumpContext* context) const
+template<typename T>
+void Operands<T>::dumpInContext(PrintStream& out, DumpContext* context) const
{
CommaPrinter comma(" ");
for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) {
- if (Traits::isEmptyForDump(argument(argumentIndex)))
+ if (!argument(argumentIndex))
continue;
out.print(comma, "arg", argumentIndex, ":", inContext(argument(argumentIndex), context));
}
for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) {
- if (Traits::isEmptyForDump(local(localIndex)))
+ if (!local(localIndex))
continue;
out.print(comma, "loc", localIndex, ":", inContext(local(localIndex), context));
}
}
-} // namespace JSC
-
-#endif // OperandsInlines_h
+template<typename T>
+void Operands<T>::dump(PrintStream& out) const
+{
+ CommaPrinter comma(" ");
+ for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) {
+ if (!argument(argumentIndex))
+ continue;
+ out.print(comma, "arg", argumentIndex, ":", argument(argumentIndex));
+ }
+ for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) {
+ if (!local(localIndex))
+ continue;
+ out.print(comma, "loc", localIndex, ":", local(localIndex));
+ }
+}
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
new file mode 100644
index 000000000..f062bc5c0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
@@ -0,0 +1,677 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PolymorphicAccess.h"
+
+#if ENABLE(JIT)
+
+#include "BinarySwitch.h"
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+#include "Heap.h"
+#include "JITOperations.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "StructureStubClearingWatchpoint.h"
+#include "StructureStubInfo.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+void AccessGenerationResult::dump(PrintStream& out) const
+{
+ out.print(m_kind);
+ if (m_code)
+ out.print(":", m_code);
+}
+
+Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
+{
+ return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+ watchpoints, jit->codeBlock(), stubInfo, condition);
+}
+
+void AccessGenerationState::restoreScratch()
+{
+ allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
+}
+
+void AccessGenerationState::succeed()
+{
+ restoreScratch();
+ success.append(jit->jump());
+}
+
+const RegisterSet& AccessGenerationState::liveRegistersForCall()
+{
+ if (!m_calculatedRegistersForCallAndExceptionHandling)
+ calculateLiveRegistersForCallAndExceptionHandling();
+ return m_liveRegistersForCall;
+}
+
+const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
+{
+ if (!m_calculatedRegistersForCallAndExceptionHandling)
+ calculateLiveRegistersForCallAndExceptionHandling();
+ return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+}
+
+static RegisterSet calleeSaveRegisters()
+{
+ RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
+ result.filter(RegisterSet::registersToNotSaveForCCall());
+ return result;
+}
+
+const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
+{
+ if (!m_calculatedRegistersForCallAndExceptionHandling) {
+ m_calculatedRegistersForCallAndExceptionHandling = true;
+
+ m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
+ m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
+ if (m_needsToRestoreRegistersIfException)
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
+
+ m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
+ m_liveRegistersForCall.exclude(calleeSaveRegisters());
+ }
+ return m_liveRegistersForCall;
+}
+
+auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
+{
+ RegisterSet liveRegisters = liveRegistersForCall();
+ liveRegisters.merge(extra);
+
+ unsigned extraStackPadding = 0;
+ unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
+ return SpillState {
+ WTFMove(liveRegisters),
+ numberOfStackBytesUsedForRegisterPreservation
+ };
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
+{
+ // Even if we're a getter, we don't want to ignore the result value like we normally do
+ // because the getter threw, and therefore, didn't return a value that means anything.
+ // Instead, we want to restore that register to what it was upon entering the getter
+ // inline cache. The subtlety here is if the base and the result are the same register,
+ // and the getter threw, we want OSR exit to see the original base value, not the result
+ // of the getter call.
+ RegisterSet dontRestore = spillState.spilledRegisters;
+ // As an optimization here, we only need to restore what is live for exception handling.
+ // We can construct the dontRestore set to accomplish this goal by having it contain only
+ // what is live for call but not live for exception handling. By ignoring things that are
+ // only live at the call but not the exception handler, we will only restore things live
+ // at the exception handler.
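+    // E.g. (illustrative registers): if { r1, r2, r3 } were spilled but only r2 is live at
+    // the exception handler, dontRestore becomes { r1, r3 } and only r2 is reloaded.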
+ dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
+ restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
+{
+ unsigned extraStackPadding = 0;
+ ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
+}
+
+CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
+{
+ if (!m_calculatedRegistersForCallAndExceptionHandling)
+ calculateLiveRegistersForCallAndExceptionHandling();
+
+ if (!m_calculatedCallSiteIndex) {
+ m_calculatedCallSiteIndex = true;
+
+ if (m_needsToRestoreRegistersIfException)
+ m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
+ else
+ m_callSiteIndex = originalCallSiteIndex();
+ }
+
+ return m_callSiteIndex;
+}
+
+const HandlerInfo& AccessGenerationState::originalExceptionHandler()
+{
+ if (!m_calculatedRegistersForCallAndExceptionHandling)
+ calculateLiveRegistersForCallAndExceptionHandling();
+
+ RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+ HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
+ RELEASE_ASSERT(exceptionHandler);
+ return *exceptionHandler;
+}
+
+CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
+
+void AccessGenerationState::emitExplicitExceptionHandler()
+{
+ restoreScratch();
+ jit->copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+ if (needsToRestoreRegistersIfException()) {
+ // To the JIT that produces the original exception handling
+ // call site, they will expect the OSR exit to be arrived
+ // at from genericUnwind. Therefore we must model what genericUnwind
+ // does here. I.e, set callFrameForCatch and copy callee saves.
+
+ jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
+ CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
+
+ // We don't need to insert a new exception handler in the table
+ // because we're doing a manual exception check here. i.e, we'll
+ // never arrive here from genericUnwind().
+ HandlerInfo originalHandler = originalExceptionHandler();
+ jit->addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
+ });
+ } else {
+ jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
+ CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
+ jit->addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
+ });
+ jit->jumpToExceptionHandler();
+ }
+}
+
+
+PolymorphicAccess::PolymorphicAccess() { }
+PolymorphicAccess::~PolymorphicAccess() { }
+
+AccessGenerationResult PolymorphicAccess::addCases(
+ VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+ Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ // This method will add the originalCasesToAdd to the list one at a time while preserving the
+ // invariants:
+ // - If a newly added case canReplace() any existing case, then the existing case is removed before
+ // the new case is added. Removal doesn't change order of the list. Any number of existing cases
+ // can be removed via the canReplace() rule.
+ // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
+ // cascade through the cases in reverse order, you will get the most recent cases first.
+ // - If this method fails (returns null, doesn't add the cases), then both the previous case list
+ // and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
+ // add more things after failure.
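+    //
+    // E.g. (illustrative): if the list is [case1, case2] and a newly added case canReplace()
+    // case1, the resulting list is [case2, newCase].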
+
+ // First ensure that the originalCasesToAdd doesn't contain duplicates.
+ Vector<std::unique_ptr<AccessCase>> casesToAdd;
+ for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
+ std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
+
+ // Add it only if it is not replaced by the subsequent cases in the list.
+ bool found = false;
+ for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
+ if (originalCasesToAdd[j]->canReplace(*myCase)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ casesToAdd.append(WTFMove(myCase));
+ }
+
+ if (verbose)
+ dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
+
+    // If there aren't any cases to add, then fail on the grounds that there's no point in generating a
+ // new stub that will be identical to the old one. Returning null should tell the caller to just
+ // keep doing what they were doing before.
+ if (casesToAdd.isEmpty())
+ return AccessGenerationResult::MadeNoChanges;
+
+ // Now add things to the new list. Note that at this point, we will still have old cases that
+ // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
+ for (auto& caseToAdd : casesToAdd) {
+ commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
+ m_list.append(WTFMove(caseToAdd));
+ }
+
+ if (verbose)
+ dataLog("After addCases: m_list: ", listDump(m_list), "\n");
+
+ return AccessGenerationResult::Buffered;
+}
+
+AccessGenerationResult PolymorphicAccess::addCase(
+ VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+ std::unique_ptr<AccessCase> newAccess)
+{
+ Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
+ newAccesses.append(WTFMove(newAccess));
+ return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
+}
+
+bool PolymorphicAccess::visitWeak(VM& vm) const
+{
+ for (unsigned i = 0; i < size(); ++i) {
+ if (!at(i).visitWeak(vm))
+ return false;
+ }
+ if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
+ for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
+ if (!Heap::isMarked(weakReference.get()))
+ return false;
+ }
+ }
+ return true;
+}
+
+bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
+{
+ bool result = true;
+ for (unsigned i = 0; i < size(); ++i)
+ result &= at(i).propagateTransitions(visitor);
+ return result;
+}
+
+void PolymorphicAccess::dump(PrintStream& out) const
+{
+ out.print(RawPointer(this), ":[");
+ CommaPrinter comma;
+ for (auto& entry : m_list)
+ out.print(comma, *entry);
+ out.print("]");
+}
+
+void PolymorphicAccess::commit(
+ VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
+ StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
+{
+ // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
+ // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
+ // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
+ // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
+ // Those common kinds of JSC object accesses don't hit this case.
+
+ for (WatchpointSet* set : accessCase.commit(vm, ident)) {
+ Watchpoint* watchpoint =
+ WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+ watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
+
+ set->add(watchpoint);
+ }
+}
+
+AccessGenerationResult PolymorphicAccess::regenerate(
+ VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ if (verbose)
+ dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
+
+ AccessGenerationState state;
+
+ state.access = this;
+ state.stubInfo = &stubInfo;
+ state.ident = &ident;
+
+ state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ state.valueRegs = stubInfo.valueRegs();
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ state.allocator = &allocator;
+ allocator.lock(state.baseGPR);
+ allocator.lock(state.valueRegs);
+#if USE(JSVALUE32_64)
+ allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+
+ state.scratchGPR = allocator.allocateScratchGPR();
+
+ CCallHelpers jit(&vm, codeBlock);
+ state.jit = &jit;
+
+ state.preservedReusedRegisterState =
+ allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
+
+ // Regenerating is our opportunity to figure out what our list of cases should look like. We
+ // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
+ // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
+ // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
+ // from the code of the current stub (aka previous).
+ ListType cases;
+ unsigned srcIndex = 0;
+ unsigned dstIndex = 0;
+ while (srcIndex < m_list.size()) {
+ std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
+
+ // If the case had been generated, then we have to keep the original in m_list in case we
+ // fail to regenerate. That case may have data structures that are used by the code that it
+ // had generated. If the case had not been generated, then we want to remove it from m_list.
+ bool isGenerated = someCase->state() == AccessCase::Generated;
+
+ [&] () {
+ if (!someCase->couldStillSucceed())
+ return;
+
+ // Figure out if this is replaced by any later case.
+ for (unsigned j = srcIndex; j < m_list.size(); ++j) {
+ if (m_list[j]->canReplace(*someCase))
+ return;
+ }
+
+ if (isGenerated)
+ cases.append(someCase->clone());
+ else
+ cases.append(WTFMove(someCase));
+ }();
+
+ if (isGenerated)
+ m_list[dstIndex++] = WTFMove(someCase);
+ }
+ m_list.resize(dstIndex);
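+    // m_list now holds only the already-generated cases, compacted but in their original order.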
+
+ if (verbose)
+ dataLog("Optimized cases: ", listDump(cases), "\n");
+
+ // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
+ // won't change that set anymore.
+
+ bool allGuardedByStructureCheck = true;
+ bool hasJSGetterSetterCall = false;
+ for (auto& newCase : cases) {
+ commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
+ allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
+ if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
+ hasJSGetterSetterCall = true;
+ }
+
+ if (cases.isEmpty()) {
+ // This is super unlikely, but we make it legal anyway.
+ state.failAndRepatch.append(jit.jump());
+ } else if (!allGuardedByStructureCheck || cases.size() == 1) {
+ // If there are any proxies in the list, we cannot just use a binary switch over the structure.
+        // We need to resort to a cascade. A cascade also happens to be optimal if we have just
+        // one case.
+ CCallHelpers::JumpList fallThrough;
+
+ // Cascade through the list, preferring newer entries.
+ for (unsigned i = cases.size(); i--;) {
+ fallThrough.link(&jit);
+ fallThrough.clear();
+ cases[i]->generateWithGuard(state, fallThrough);
+ }
+ state.failAndRepatch.append(fallThrough);
+ } else {
+ jit.load32(
+ CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
+ state.scratchGPR);
+
+ Vector<int64_t> caseValues(cases.size());
+ for (unsigned i = 0; i < cases.size(); ++i)
+ caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
+
+ BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
+ while (binarySwitch.advance(jit))
+ cases[binarySwitch.caseIndex()]->generate(state);
+ state.failAndRepatch.append(binarySwitch.fallThrough());
+ }
+
+ if (!state.failAndIgnore.empty()) {
+ state.failAndIgnore.link(&jit);
+
+        // Make sure that the inline cache optimization code knows that we are taking the slow
+        // path because of something that isn't patchable. The slow path will decrement "countdown"
+        // and will only patch things if the countdown reaches zero. We increment "countdown" here
+        // to ensure that the slow path does not try to patch.
+#if CPU(X86) || CPU(X86_64)
+ jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
+ jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
+#else
+ jit.load8(&stubInfo.countdown, state.scratchGPR);
+ jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
+ jit.store8(state.scratchGPR, &stubInfo.countdown);
+#endif
+ }
+
+ CCallHelpers::JumpList failure;
+ if (allocator.didReuseRegisters()) {
+ state.failAndRepatch.link(&jit);
+ state.restoreScratch();
+ } else
+ failure = state.failAndRepatch;
+ failure.append(jit.jump());
+
+ CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
+ CallSiteIndex callSiteIndexForExceptionHandling;
+ if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
+ // Emit the exception handler.
+        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
+        // Note also that it is not reachable from custom getters/setters, which have their own
+        // exception handling logic that doesn't go through genericUnwind.
+ MacroAssembler::Label makeshiftCatchHandler = jit.label();
+
+ int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
+ AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
+ ASSERT(!spillStateForJSGetterSetter.isEmpty());
+ stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
+ stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;
+
+ jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
+ jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
+ state.restoreScratch();
+ CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
+
+ HandlerInfo oldHandler = state.originalExceptionHandler();
+ CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
+
+ HandlerInfo handlerToRegister = oldHandler;
+ handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
+ handlerToRegister.start = newExceptionHandlingCallSite.bits();
+ handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
+ codeBlock->appendExceptionHandler(handlerToRegister);
+ });
+
+        // We set these fields so that the stub will remove itself from the CodeBlock's
+        // exception handler table when it is deallocated.
+ codeBlockThatOwnsExceptionHandlers = codeBlock;
+ ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
+ callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
+ }
+
+ LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
+ if (linkBuffer.didFailToAllocate()) {
+ if (verbose)
+ dataLog("Did fail to allocate.\n");
+ return AccessGenerationResult::GaveUp;
+ }
+
+ CodeLocationLabel successLabel = stubInfo.doneLocation();
+
+ linkBuffer.link(state.success, successLabel);
+
+ linkBuffer.link(failure, stubInfo.slowPathStartLocation());
+
+ if (verbose)
+ dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
+
+ MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
+ codeBlock, linkBuffer,
+ ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
+
+ bool doesCalls = false;
+ Vector<JSCell*> cellsToMark;
+ for (auto& entry : cases)
+ doesCalls |= entry->doesCalls(&cellsToMark);
+
+ m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
+ m_watchpoints = WTFMove(state.watchpoints);
+ if (!state.weakReferences.isEmpty())
+ m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
+ if (verbose)
+ dataLog("Returning: ", code.code(), "\n");
+
+ m_list = WTFMove(cases);
+
+ AccessGenerationResult::Kind resultKind;
+ if (m_list.size() >= Options::maxAccessVariantListSize())
+ resultKind = AccessGenerationResult::GeneratedFinalCode;
+ else
+ resultKind = AccessGenerationResult::GeneratedNewCode;
+
+ return AccessGenerationResult(resultKind, code.code());
+}
+
+void PolymorphicAccess::aboutToDie()
+{
+ if (m_stubRoutine)
+ m_stubRoutine->aboutToDie();
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
+{
+ switch (kind) {
+ case AccessGenerationResult::MadeNoChanges:
+ out.print("MadeNoChanges");
+ return;
+ case AccessGenerationResult::GaveUp:
+ out.print("GaveUp");
+ return;
+ case AccessGenerationResult::Buffered:
+ out.print("Buffered");
+ return;
+ case AccessGenerationResult::GeneratedNewCode:
+ out.print("GeneratedNewCode");
+ return;
+ case AccessGenerationResult::GeneratedFinalCode:
+ out.print("GeneratedFinalCode");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, AccessCase::AccessType type)
+{
+ switch (type) {
+ case AccessCase::Load:
+ out.print("Load");
+ return;
+ case AccessCase::Transition:
+ out.print("Transition");
+ return;
+ case AccessCase::Replace:
+ out.print("Replace");
+ return;
+ case AccessCase::Miss:
+ out.print("Miss");
+ return;
+ case AccessCase::GetGetter:
+ out.print("GetGetter");
+ return;
+ case AccessCase::Getter:
+ out.print("Getter");
+ return;
+ case AccessCase::Setter:
+ out.print("Setter");
+ return;
+ case AccessCase::CustomValueGetter:
+ out.print("CustomValueGetter");
+ return;
+ case AccessCase::CustomAccessorGetter:
+ out.print("CustomAccessorGetter");
+ return;
+ case AccessCase::CustomValueSetter:
+ out.print("CustomValueSetter");
+ return;
+ case AccessCase::CustomAccessorSetter:
+ out.print("CustomAccessorSetter");
+ return;
+ case AccessCase::IntrinsicGetter:
+ out.print("IntrinsicGetter");
+ return;
+ case AccessCase::InHit:
+ out.print("InHit");
+ return;
+ case AccessCase::InMiss:
+ out.print("InMiss");
+ return;
+ case AccessCase::ArrayLength:
+ out.print("ArrayLength");
+ return;
+ case AccessCase::StringLength:
+ out.print("StringLength");
+ return;
+ case AccessCase::DirectArgumentsLength:
+ out.print("DirectArgumentsLength");
+ return;
+ case AccessCase::ScopedArgumentsLength:
+ out.print("ScopedArgumentsLength");
+ return;
+ case AccessCase::ModuleNamespaceLoad:
+ out.print("ModuleNamespaceLoad");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, AccessCase::State state)
+{
+ switch (state) {
+ case AccessCase::Primordial:
+ out.print("Primordial");
+ return;
+ case AccessCase::Committed:
+ out.print("Committed");
+ return;
+ case AccessCase::Generated:
+ out.print("Generated");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
+
+
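As a concrete illustration of the dispatch choice in PolymorphicAccess::regenerate() above, here is a minimal standalone sketch of the two stub shapes it can emit, with plain integers standing in for structure IDs. This is an illustration under assumed semantics, not JSC code; all names are hypothetical.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Cascade: test each case's guard, newest first, falling through to the
    // next guard on mismatch. Used when some case is not guarded by a
    // structure check (e.g. proxies), or when there is only one case.
    bool cascadeHit(const std::vector<int32_t>& caseIds, int32_t structureId)
    {
        for (std::size_t i = caseIds.size(); i--;) {
            if (caseIds[i] == structureId)
                return true; // this case's generated code would run
        }
        return false; // failAndRepatch
    }

    // Binary switch: when every case is guarded by a structure check, load the
    // structure ID once and dispatch in O(log n) comparisons.
    bool binarySwitchHit(std::vector<int32_t> caseIds, int32_t structureId)
    {
        std::sort(caseIds.begin(), caseIds.end());
        return std::binary_search(caseIds.begin(), caseIds.end(), structureId);
    }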
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.h b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h
new file mode 100644
index 000000000..d1852c7b5
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+#include "CodeOrigin.h"
+#include "JITStubRoutine.h"
+#include "JSFunctionInlines.h"
+#include "MacroAssembler.h"
+#include "ObjectPropertyConditionSet.h"
+#include "ScratchRegisterAllocator.h"
+#include "Structure.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+class CodeBlock;
+class PolymorphicAccess;
+class StructureStubInfo;
+class WatchpointsOnStructureStubInfo;
+class ScratchRegisterAllocator;
+
+class AccessGenerationResult {
+public:
+ enum Kind {
+ MadeNoChanges,
+ GaveUp,
+ Buffered,
+ GeneratedNewCode,
+ GeneratedFinalCode // Generated so much code that we never want to generate code again.
+ };
+
+ AccessGenerationResult()
+ {
+ }
+
+ AccessGenerationResult(Kind kind)
+ : m_kind(kind)
+ {
+ RELEASE_ASSERT(kind != GeneratedNewCode);
+ RELEASE_ASSERT(kind != GeneratedFinalCode);
+ }
+
+ AccessGenerationResult(Kind kind, MacroAssemblerCodePtr code)
+ : m_kind(kind)
+ , m_code(code)
+ {
+ RELEASE_ASSERT(kind == GeneratedNewCode || kind == GeneratedFinalCode);
+ RELEASE_ASSERT(code);
+ }
+
+ bool operator==(const AccessGenerationResult& other) const
+ {
+ return m_kind == other.m_kind && m_code == other.m_code;
+ }
+
+ bool operator!=(const AccessGenerationResult& other) const
+ {
+ return !(*this == other);
+ }
+
+ explicit operator bool() const
+ {
+ return *this != AccessGenerationResult();
+ }
+
+ Kind kind() const { return m_kind; }
+
+ const MacroAssemblerCodePtr& code() const { return m_code; }
+
+ bool madeNoChanges() const { return m_kind == MadeNoChanges; }
+ bool gaveUp() const { return m_kind == GaveUp; }
+ bool buffered() const { return m_kind == Buffered; }
+ bool generatedNewCode() const { return m_kind == GeneratedNewCode; }
+ bool generatedFinalCode() const { return m_kind == GeneratedFinalCode; }
+
+ // If we gave up on this attempt to generate code, or if we generated the "final" code, then we
+ // should give up after this.
+ bool shouldGiveUpNow() const { return gaveUp() || generatedFinalCode(); }
+
+ bool generatedSomeCode() const { return generatedNewCode() || generatedFinalCode(); }
+
+ void dump(PrintStream&) const;
+
+private:
+ Kind m_kind;
+ MacroAssemblerCodePtr m_code;
+};
+
+class PolymorphicAccess {
+ WTF_MAKE_NONCOPYABLE(PolymorphicAccess);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ PolymorphicAccess();
+ ~PolymorphicAccess();
+
+    // If this fails (returns GaveUp), it will leave the old stub intact, but you should not
+    // call this method again for that PolymorphicAccess instance.
+ AccessGenerationResult addCases(
+ VM&, CodeBlock*, StructureStubInfo&, const Identifier&, Vector<std::unique_ptr<AccessCase>, 2>);
+
+ AccessGenerationResult addCase(
+ VM&, CodeBlock*, StructureStubInfo&, const Identifier&, std::unique_ptr<AccessCase>);
+
+ AccessGenerationResult regenerate(VM&, CodeBlock*, StructureStubInfo&, const Identifier&);
+
+ bool isEmpty() const { return m_list.isEmpty(); }
+ unsigned size() const { return m_list.size(); }
+ const AccessCase& at(unsigned i) const { return *m_list[i]; }
+ const AccessCase& operator[](unsigned i) const { return *m_list[i]; }
+
+ // If this returns false then we are requesting a reset of the owning StructureStubInfo.
+ bool visitWeak(VM&) const;
+
+    // This returns true if it has marked everything that it will ever mark. This can be used
+    // as an optimization to avoid calling this method again during the fixpoint.
+ bool propagateTransitions(SlotVisitor&) const;
+
+ void aboutToDie();
+
+ void dump(PrintStream& out) const;
+ bool containsPC(void* pc) const
+ {
+ if (!m_stubRoutine)
+ return false;
+
+ uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+ return m_stubRoutine->startAddress() <= pcAsInt && pcAsInt <= m_stubRoutine->endAddress();
+ }
+
+private:
+ friend class AccessCase;
+ friend class CodeBlock;
+ friend struct AccessGenerationState;
+
+ typedef Vector<std::unique_ptr<AccessCase>, 2> ListType;
+
+ void commit(
+ VM&, std::unique_ptr<WatchpointsOnStructureStubInfo>&, CodeBlock*, StructureStubInfo&,
+ const Identifier&, AccessCase&);
+
+ MacroAssemblerCodePtr regenerate(
+ VM&, CodeBlock*, StructureStubInfo&, const Identifier&, ListType& cases);
+
+ ListType m_list;
+ RefPtr<JITStubRoutine> m_stubRoutine;
+ std::unique_ptr<WatchpointsOnStructureStubInfo> m_watchpoints;
+ std::unique_ptr<Vector<WriteBarrier<JSCell>>> m_weakReferences;
+};
+
+struct AccessGenerationState {
+ AccessGenerationState()
+ : m_calculatedRegistersForCallAndExceptionHandling(false)
+ , m_needsToRestoreRegistersIfException(false)
+ , m_calculatedCallSiteIndex(false)
+ {
+ }
+ CCallHelpers* jit { nullptr };
+ ScratchRegisterAllocator* allocator;
+ ScratchRegisterAllocator::PreservedState preservedReusedRegisterState;
+ PolymorphicAccess* access { nullptr };
+ StructureStubInfo* stubInfo { nullptr };
+ MacroAssembler::JumpList success;
+ MacroAssembler::JumpList failAndRepatch;
+ MacroAssembler::JumpList failAndIgnore;
+ GPRReg baseGPR { InvalidGPRReg };
+ JSValueRegs valueRegs;
+ GPRReg scratchGPR { InvalidGPRReg };
+ const Identifier* ident;
+ std::unique_ptr<WatchpointsOnStructureStubInfo> watchpoints;
+ Vector<WriteBarrier<JSCell>> weakReferences;
+
+ Watchpoint* addWatchpoint(const ObjectPropertyCondition& = ObjectPropertyCondition());
+
+ void restoreScratch();
+ void succeed();
+
+ struct SpillState {
+ SpillState() = default;
+ SpillState(RegisterSet&& regs, unsigned usedStackBytes)
+ : spilledRegisters(WTFMove(regs))
+ , numberOfStackBytesUsedForRegisterPreservation(usedStackBytes)
+ {
+ }
+
+ RegisterSet spilledRegisters { };
+ unsigned numberOfStackBytesUsedForRegisterPreservation { std::numeric_limits<unsigned>::max() };
+
+ bool isEmpty() const { return numberOfStackBytesUsedForRegisterPreservation == std::numeric_limits<unsigned>::max(); }
+ };
+
+ const RegisterSet& calculateLiveRegistersForCallAndExceptionHandling();
+
+ SpillState preserveLiveRegistersToStackForCall(const RegisterSet& extra = RegisterSet());
+
+ void restoreLiveRegistersFromStackForCallWithThrownException(const SpillState&);
+ void restoreLiveRegistersFromStackForCall(const SpillState&, const RegisterSet& dontRestore = RegisterSet());
+
+ const RegisterSet& liveRegistersForCall();
+
+ CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal();
+ CallSiteIndex callSiteIndexForExceptionHandling()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+ RELEASE_ASSERT(m_calculatedCallSiteIndex);
+ return m_callSiteIndex;
+ }
+
+ const HandlerInfo& originalExceptionHandler();
+
+ bool needsToRestoreRegistersIfException() const { return m_needsToRestoreRegistersIfException; }
+ CallSiteIndex originalCallSiteIndex() const;
+
+ void emitExplicitExceptionHandler();
+
+ void setSpillStateForJSGetterSetter(SpillState& spillState)
+ {
+ if (!m_spillStateForJSGetterSetter.isEmpty()) {
+ ASSERT(m_spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation == spillState.numberOfStackBytesUsedForRegisterPreservation);
+ ASSERT(m_spillStateForJSGetterSetter.spilledRegisters == spillState.spilledRegisters);
+ }
+ m_spillStateForJSGetterSetter = spillState;
+ }
+ SpillState spillStateForJSGetterSetter() const { return m_spillStateForJSGetterSetter; }
+
+private:
+ const RegisterSet& liveRegistersToPreserveAtExceptionHandlingCallSite();
+
+ RegisterSet m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+ RegisterSet m_liveRegistersForCall;
+ CallSiteIndex m_callSiteIndex { CallSiteIndex(std::numeric_limits<unsigned>::max()) };
+ SpillState m_spillStateForJSGetterSetter;
+ bool m_calculatedRegistersForCallAndExceptionHandling : 1;
+ bool m_needsToRestoreRegistersIfException : 1;
+ bool m_calculatedCallSiteIndex : 1;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::AccessGenerationResult::Kind);
+void printInternal(PrintStream&, JSC::AccessCase::AccessType);
+void printInternal(PrintStream&, JSC::AccessCase::State);
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
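A sketch of how a caller might consume AccessGenerationResult (hypothetical glue code, not the real repatching logic): only GeneratedNewCode and GeneratedFinalCode carry code, and shouldGiveUpNow() folds GaveUp and GeneratedFinalCode together. This assumes MacroAssemblerCodePtr's executableAddress() accessor; FakeStubInfo and both field names are invented for the example.

    // Hypothetical caller sketch: repatch on generated code, and stop
    // regenerating once shouldGiveUpNow() is true.
    struct FakeStubInfo {
        void* jumpTarget { nullptr }; // where the inline cache's jump points
        bool cacheDisabled { false };
    };

    void handleResult(const AccessGenerationResult& result, FakeStubInfo& stubInfo)
    {
        if (result.generatedSomeCode())
            stubInfo.jumpTarget = result.code().executableAddress();
        if (result.shouldGiveUpNow())
            stubInfo.cacheDisabled = true;
    }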
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
deleted file mode 100644
index 61d97354f..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PolymorphicAccessStructureList_h
-#define PolymorphicAccessStructureList_h
-
-#include "JITStubRoutine.h"
-#include "Structure.h"
-#include "StructureChain.h"
-#include <wtf/Platform.h>
-
-#define POLYMORPHIC_LIST_CACHE_SIZE 8
-
-namespace JSC {
-
-// *Sigh*. If the JIT is enabled we need to track the stubRoutine (of type CodeLocationLabel).
-// If the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't
-// currently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best
-// solution for now - will need to do something smarter if/when we actually want mixed-mode operation.
-
-#if ENABLE(JIT)
-// Structure used by the op_get_by_id_self_list and op_get_by_id_proto_list instructions to hold data off the main opcode stream.
-struct PolymorphicAccessStructureList {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- struct PolymorphicStubInfo {
- bool isChain;
- bool isDirect;
- RefPtr<JITStubRoutine> stubRoutine;
- WriteBarrier<Structure> base;
- union {
- WriteBarrierBase<Structure> proto;
- WriteBarrierBase<StructureChain> chain;
- } u;
-
- PolymorphicStubInfo()
- {
- u.proto.clear();
- }
-
- void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool _isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(vm, owner, _base);
- u.proto.clear();
- isChain = false;
- isDirect = _isDirect;
- }
-
- void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, Structure* _proto, bool _isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(vm, owner, _base);
- u.proto.set(vm, owner, _proto);
- isChain = false;
- isDirect = _isDirect;
- }
-
- void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool _isDirect)
- {
- stubRoutine = _stubRoutine;
- base.set(vm, owner, _base);
- u.chain.set(vm, owner, _chain);
- isChain = true;
- isDirect = _isDirect;
- }
- } list[POLYMORPHIC_LIST_CACHE_SIZE];
-
- PolymorphicAccessStructureList()
- {
- }
-
- PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect)
- {
- list[0].set(vm, owner, stubRoutine, firstBase, isDirect);
- }
-
- PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect)
- {
- list[0].set(vm, owner, stubRoutine, firstBase, firstProto, isDirect);
- }
-
- PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect)
- {
- list[0].set(vm, owner, stubRoutine, firstBase, firstChain, isDirect);
- }
-
- bool visitWeak(int count)
- {
- for (int i = 0; i < count; ++i) {
- PolymorphicStubInfo& info = list[i];
- if (!info.base) {
- // We're being marked during initialisation of an entry
- ASSERT(!info.u.proto);
- continue;
- }
-
- if (!Heap::isMarked(info.base.get()))
- return false;
- if (info.u.proto && !info.isChain
- && !Heap::isMarked(info.u.proto.get()))
- return false;
- if (info.u.chain && info.isChain
- && !Heap::isMarked(info.u.chain.get()))
- return false;
- }
-
- return true;
- }
-};
-
-#endif // ENABLE(JIT)
-
-} // namespace JSC
-
-#endif // PolymorphicAccessStructureList_h
-
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
deleted file mode 100644
index 6a6ec8141..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "PolymorphicPutByIdList.h"
-
-#if ENABLE(JIT)
-
-#include "StructureStubInfo.h"
-
-namespace JSC {
-
-PutByIdAccess PutByIdAccess::fromStructureStubInfo(
- StructureStubInfo& stubInfo,
- MacroAssemblerCodePtr initialSlowPath)
-{
- PutByIdAccess result;
-
- switch (stubInfo.accessType) {
- case access_put_by_id_replace:
- result.m_type = Replace;
- result.m_oldStructure.copyFrom(stubInfo.u.putByIdReplace.baseObjectStructure);
- result.m_stubRoutine = JITStubRoutine::createSelfManagedRoutine(initialSlowPath);
- break;
-
- case access_put_by_id_transition_direct:
- case access_put_by_id_transition_normal:
- result.m_type = Transition;
- result.m_oldStructure.copyFrom(stubInfo.u.putByIdTransition.previousStructure);
- result.m_newStructure.copyFrom(stubInfo.u.putByIdTransition.structure);
- result.m_chain.copyFrom(stubInfo.u.putByIdTransition.chain);
- result.m_stubRoutine = stubInfo.stubRoutine;
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- return result;
-}
-
-bool PutByIdAccess::visitWeak() const
-{
- switch (m_type) {
- case Replace:
- if (!Heap::isMarked(m_oldStructure.get()))
- return false;
- break;
- case Transition:
- if (!Heap::isMarked(m_oldStructure.get()))
- return false;
- if (!Heap::isMarked(m_newStructure.get()))
- return false;
- if (!Heap::isMarked(m_chain.get()))
- return false;
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return false;
- }
- return true;
-}
-
-PolymorphicPutByIdList::PolymorphicPutByIdList(
- PutKind putKind,
- StructureStubInfo& stubInfo,
- MacroAssemblerCodePtr initialSlowPath)
- : m_kind(putKind)
-{
- m_list.append(PutByIdAccess::fromStructureStubInfo(stubInfo, initialSlowPath));
-}
-
-PolymorphicPutByIdList* PolymorphicPutByIdList::from(
- PutKind putKind,
- StructureStubInfo& stubInfo,
- MacroAssemblerCodePtr initialSlowPath)
-{
- if (stubInfo.accessType == access_put_by_id_list)
- return stubInfo.u.putByIdList.list;
-
- ASSERT(stubInfo.accessType == access_put_by_id_replace
- || stubInfo.accessType == access_put_by_id_transition_normal
- || stubInfo.accessType == access_put_by_id_transition_direct);
-
- PolymorphicPutByIdList* result =
- new PolymorphicPutByIdList(putKind, stubInfo, initialSlowPath);
-
- stubInfo.initPutByIdList(result);
-
- return result;
-}
-
-PolymorphicPutByIdList::~PolymorphicPutByIdList() { }
-
-bool PolymorphicPutByIdList::isFull() const
-{
- ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
- return size() == POLYMORPHIC_LIST_CACHE_SIZE;
-}
-
-bool PolymorphicPutByIdList::isAlmostFull() const
-{
- ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
- return size() >= POLYMORPHIC_LIST_CACHE_SIZE - 1;
-}
-
-void PolymorphicPutByIdList::addAccess(const PutByIdAccess& putByIdAccess)
-{
- ASSERT(!isFull());
- // Make sure that the resizing optimizes for space, not time.
- m_list.resize(m_list.size() + 1);
- m_list.last() = putByIdAccess;
-}
-
-bool PolymorphicPutByIdList::visitWeak() const
-{
- for (unsigned i = 0; i < size(); ++i) {
- if (!at(i).visitWeak())
- return false;
- }
- return true;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
deleted file mode 100644
index d9fe2e7cf..000000000
--- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PolymorphicPutByIdList_h
-#define PolymorphicPutByIdList_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(JIT)
-
-#include "CodeOrigin.h"
-#include "MacroAssembler.h"
-#include "Opcode.h"
-#include "PutKind.h"
-#include "Structure.h"
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class CodeBlock;
-struct StructureStubInfo;
-
-class PutByIdAccess {
-public:
- enum AccessType {
- Invalid,
- Transition,
- Replace
- };
-
- PutByIdAccess()
- : m_type(Invalid)
- {
- }
-
- static PutByIdAccess transition(
- VM& vm,
- JSCell* owner,
- Structure* oldStructure,
- Structure* newStructure,
- StructureChain* chain,
- PassRefPtr<JITStubRoutine> stubRoutine)
- {
- PutByIdAccess result;
- result.m_type = Transition;
- result.m_oldStructure.set(vm, owner, oldStructure);
- result.m_newStructure.set(vm, owner, newStructure);
- result.m_chain.set(vm, owner, chain);
- result.m_stubRoutine = stubRoutine;
- return result;
- }
-
- static PutByIdAccess replace(
- VM& vm,
- JSCell* owner,
- Structure* structure,
- PassRefPtr<JITStubRoutine> stubRoutine)
- {
- PutByIdAccess result;
- result.m_type = Replace;
- result.m_oldStructure.set(vm, owner, structure);
- result.m_stubRoutine = stubRoutine;
- return result;
- }
-
- static PutByIdAccess fromStructureStubInfo(
- StructureStubInfo&,
- MacroAssemblerCodePtr initialSlowPath);
-
- bool isSet() const { return m_type != Invalid; }
- bool operator!() const { return !isSet(); }
-
- AccessType type() const { return m_type; }
-
- bool isTransition() const { return m_type == Transition; }
- bool isReplace() const { return m_type == Replace; }
-
- Structure* oldStructure() const
- {
- // Using this instead of isSet() to make this assertion robust against the possibility
- // of additional access types being added.
- ASSERT(isTransition() || isReplace());
-
- return m_oldStructure.get();
- }
-
- Structure* structure() const
- {
- ASSERT(isReplace());
- return m_oldStructure.get();
- }
-
- Structure* newStructure() const
- {
- ASSERT(isTransition());
- return m_newStructure.get();
- }
-
- StructureChain* chain() const
- {
- ASSERT(isTransition());
- return m_chain.get();
- }
-
- PassRefPtr<JITStubRoutine> stubRoutine() const
- {
- ASSERT(isTransition() || isReplace());
- return m_stubRoutine;
- }
-
- bool visitWeak() const;
-
-private:
- friend class CodeBlock;
-
- AccessType m_type;
- WriteBarrier<Structure> m_oldStructure;
- WriteBarrier<Structure> m_newStructure;
- WriteBarrier<StructureChain> m_chain;
- RefPtr<JITStubRoutine> m_stubRoutine;
-};
-
-class PolymorphicPutByIdList {
- WTF_MAKE_FAST_ALLOCATED;
-public:
-    // Initialize from a stub info; this places one element in the list, created by
-    // converting the stub info's put-by-id access information into our PutByIdAccess.
- PolymorphicPutByIdList(
- PutKind,
- StructureStubInfo&,
- MacroAssemblerCodePtr initialSlowPath);
-
- // Either creates a new polymorphic put list, or returns the one that is already
- // in place.
- static PolymorphicPutByIdList* from(
- PutKind,
- StructureStubInfo&,
- MacroAssemblerCodePtr initialSlowPath);
-
- ~PolymorphicPutByIdList();
-
- MacroAssemblerCodePtr currentSlowPathTarget() const
- {
- return m_list.last().stubRoutine()->code().code();
- }
-
- void addAccess(const PutByIdAccess&);
-
- bool isEmpty() const { return m_list.isEmpty(); }
- unsigned size() const { return m_list.size(); }
- bool isFull() const;
- bool isAlmostFull() const; // True if adding an element would make isFull() true.
- const PutByIdAccess& at(unsigned i) const { return m_list[i]; }
- const PutByIdAccess& operator[](unsigned i) const { return m_list[i]; }
-
- PutKind kind() const { return m_kind; }
-
- bool visitWeak() const;
-
-private:
- friend class CodeBlock;
-
- Vector<PutByIdAccess, 2> m_list;
- PutKind m_kind;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // PolymorphicPutByIdList_h
-
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
index ede8a3643..9c06e7ec2 100644
--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
@@ -26,85 +26,48 @@
#include "config.h"
#include "PreciseJumpTargets.h"
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
+#include "PreciseJumpTargetsInlines.h"
+
namespace JSC {
-template <size_t vectorSize>
-static void getJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out)
+template <size_t vectorSize, typename Block, typename Instruction>
+static void getJumpTargetsForBytecodeOffset(Block* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out)
{
- OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
- Instruction* current = instructionsBegin + bytecodeOffset;
- switch (opcodeID) {
- case op_jmp:
- out.append(bytecodeOffset + current[1].u.operand);
- break;
- case op_jtrue:
- case op_jfalse:
- case op_jeq_null:
- case op_jneq_null:
- out.append(bytecodeOffset + current[2].u.operand);
- break;
- case op_jneq_ptr:
- case op_jless:
- case op_jlesseq:
- case op_jgreater:
- case op_jgreatereq:
- case op_jnless:
- case op_jnlesseq:
- case op_jngreater:
- case op_jngreatereq:
- out.append(bytecodeOffset + current[3].u.operand);
- break;
- case op_switch_imm:
- case op_switch_char: {
- SimpleJumpTable& table = codeBlock->switchJumpTable(current[1].u.operand);
- for (unsigned i = table.branchOffsets.size(); i--;)
- out.append(bytecodeOffset + table.branchOffsets[i]);
- out.append(bytecodeOffset + current[2].u.operand);
- break;
- }
- case op_switch_string: {
- StringJumpTable& table = codeBlock->stringSwitchJumpTable(current[1].u.operand);
- StringJumpTable::StringOffsetTable::iterator iter = table.offsetTable.begin();
- StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
- for (; iter != end; ++iter)
- out.append(bytecodeOffset + iter->value.branchOffset);
- out.append(bytecodeOffset + current[2].u.operand);
- break;
- }
- case op_get_pnames:
- out.append(bytecodeOffset + current[5].u.operand);
- break;
- case op_next_pname:
- out.append(bytecodeOffset + current[6].u.operand);
- break;
- case op_check_has_instance:
- out.append(bytecodeOffset + current[4].u.operand);
- break;
- case op_loop_hint:
+ OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+ extractStoredJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) {
+ out.append(bytecodeOffset + relativeOffset);
+ });
+    // op_loop_hint does not have its jump target stored in the bytecode instructions.
+ if (opcodeID == op_loop_hint)
out.append(bytecodeOffset);
- break;
- default:
- break;
- }
}
-void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
+enum class ComputePreciseJumpTargetsMode {
+ FollowCodeBlockClaim,
+ ForceCompute,
+};
+
+template<ComputePreciseJumpTargetsMode Mode, typename Block, typename Instruction, size_t vectorSize>
+void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, vectorSize>& out)
{
ASSERT(out.isEmpty());
// We will derive a superset of the jump targets that the code block thinks it has.
// So, if the code block claims there are none, then we are done.
- if (!codeBlock->numberOfJumpTargets())
+ if (Mode == ComputePreciseJumpTargetsMode::FollowCodeBlockClaim && !codeBlock->numberOfJumpTargets())
return;
- for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;)
+ for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;) {
out.append(codeBlock->exceptionHandler(i).target);
-
+ out.append(codeBlock->exceptionHandler(i).start);
+ out.append(codeBlock->exceptionHandler(i).end);
+ }
+
Interpreter* interpreter = codeBlock->vm()->interpreter;
- Instruction* instructionsBegin = codeBlock->instructions().begin();
- unsigned instructionCount = codeBlock->instructions().size();
for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
- OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
+ OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out);
bytecodeOffset += opcodeLengths[opcodeID];
}
@@ -123,13 +86,37 @@ void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
lastValue = value;
}
out.resize(toIndex);
+ out.shrinkToFit();
}
-void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
+void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out)
{
- Interpreter* interpreter = codeBlock->vm()->interpreter;
- Instruction* instructionsBegin = codeBlock->instructions().begin();
- getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out);
+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions().begin(), codeBlock->instructions().size(), out);
+}
+
+void computePreciseJumpTargets(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out)
+{
+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out)
+{
+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out)
+{
+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
+{
+ getJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, bytecodeOffset, out);
+}
+
+void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
+{
+ getJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, bytecodeOffset, out);
}
} // namespace JSC
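The compaction loop in computePreciseJumpTargetsInternal above (the lastValue/toIndex walk) deduplicates in place, which presumes `out` was sorted just before the visible hunk. A minimal standalone equivalent of that sort-then-dedup pass, as a sketch:

    #include <algorithm>
    #include <vector>

    // Sketch: collected jump targets are sorted, then adjacent duplicates are
    // removed, yielding a sorted, unique list of bytecode offsets.
    std::vector<unsigned> sortedUniqueTargets(std::vector<unsigned> targets)
    {
        std::sort(targets.begin(), targets.end());
        targets.erase(std::unique(targets.begin(), targets.end()), targets.end());
        targets.shrink_to_fit(); // mirrors out.shrinkToFit() above
        return targets;
    }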
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
index fb60f9b9b..bcc9346cd 100644
--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
@@ -23,17 +23,23 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef PreciseJumpTargets_h
-#define PreciseJumpTargets_h
+#pragma once
#include "CodeBlock.h"
namespace JSC {
+class UnlinkedCodeBlock;
+struct UnlinkedInstruction;
+
+// Returns a sorted list of bytecode indices that are the destinations of jumps.
void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out);
-void findJumpTargetsForBytecodeOffset(CodeBlock*, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
+void computePreciseJumpTargets(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out);
+void computePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out);
-} // namespace JSC
+void recomputePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out);
-#endif // PreciseJumpTargets_h
+void findJumpTargetsForBytecodeOffset(CodeBlock*, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
+void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h
new file mode 100644
index 000000000..19fdcdceb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "InterpreterInlines.h"
+#include "Opcode.h"
+#include "PreciseJumpTargets.h"
+
+namespace JSC {
+
+template<typename Block, typename Instruction, typename Function>
+inline void extractStoredJumpTargetsForBytecodeOffset(Block* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Function function)
+{
+ OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+ Instruction* current = instructionsBegin + bytecodeOffset;
+ switch (opcodeID) {
+ case op_jmp:
+ function(current[1].u.operand);
+ break;
+ case op_jtrue:
+ case op_jfalse:
+ case op_jeq_null:
+ case op_jneq_null:
+ function(current[2].u.operand);
+ break;
+ case op_jneq_ptr:
+ case op_jless:
+ case op_jlesseq:
+ case op_jgreater:
+ case op_jgreatereq:
+ case op_jnless:
+ case op_jnlesseq:
+ case op_jngreater:
+ case op_jngreatereq:
+ function(current[3].u.operand);
+ break;
+ case op_switch_imm:
+ case op_switch_char: {
+ auto& table = codeBlock->switchJumpTable(current[1].u.operand);
+ for (unsigned i = table.branchOffsets.size(); i--;)
+ function(table.branchOffsets[i]);
+ function(current[2].u.operand);
+ break;
+ }
+ case op_switch_string: {
+ auto& table = codeBlock->stringSwitchJumpTable(current[1].u.operand);
+ auto iter = table.offsetTable.begin();
+ auto end = table.offsetTable.end();
+ for (; iter != end; ++iter)
+ function(iter->value.branchOffset);
+ function(current[2].u.operand);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+} // namespace JSC
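The Function parameter above is any callable taking an int32_t& (the jump's stored relative offset); the caller in PreciseJumpTargets.cpp rebases each offset against the jump's own bytecode offset. A usage sketch, with codeBlock, interpreter, instructionsBegin, bytecodeOffset, and out assumed in scope:

    // Collect absolute jump targets from the relative offsets stored in the
    // instruction stream (mirrors getJumpTargetsForBytecodeOffset above).
    extractStoredJumpTargetsForBytecodeOffset(
        codeBlock, interpreter, instructionsBegin, bytecodeOffset,
        [&](int32_t& relativeOffset) {
            out.append(bytecodeOffset + relativeOffset);
        });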
diff --git a/Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp
new file mode 100644
index 000000000..b4fac570f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ProgramCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo ProgramCodeBlock::s_info = {
+ "ProgramCodeBlock", &Base::s_info, 0,
+ CREATE_METHOD_TABLE(ProgramCodeBlock)
+};
+
+void ProgramCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<ProgramCodeBlock*>(cell)->~ProgramCodeBlock();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.h
new file mode 100644
index 000000000..8504ac4a0
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProgramCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
+
+namespace JSC {
+
+class ProgramCodeBlock : public GlobalCodeBlock {
+public:
+ typedef GlobalCodeBlock Base;
+ DECLARE_INFO;
+
+ static ProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ProgramCodeBlock& other)
+ {
+ ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+ ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), CopyParsedBlock, other);
+ instance->finishCreation(*vm, CopyParsedBlock, other);
+ return instance;
+ }
+
+ static ProgramCodeBlock* create(VM* vm, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+ {
+ ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+ ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), firstLineColumnOffset);
+ instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+ return instance;
+ }
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+ }
+
+private:
+ ProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ProgramCodeBlock& other)
+ : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+ {
+ }
+
+ ProgramCodeBlock(VM* vm, Structure* structure, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned firstLineColumnOffset)
+ : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, WTFMove(sourceProvider), 0, firstLineColumnOffset)
+ {
+ }
+
+ static void destroy(JSCell*);
+};
+
+} // namespace JSC
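The create() functions above follow JSC's two-phase cell-construction idiom: allocate cell storage, placement-new the object with a minimal constructor, then call finishCreation() once the cell is well-formed, so the GC never observes a half-built object. A generic standalone sketch of the pattern (hypothetical types, not JSC's allocator):

    #include <cstddef>
    #include <new>
    #include <utility>

    struct FakeHeap {
        void* allocateCell(std::size_t bytes) { return ::operator new(bytes); }
    };

    // Two-phase construction: the constructor sets up only trivially safe
    // state; finishCreation() runs afterwards on the fully formed object.
    template<typename T, typename... Args>
    T* createCell(FakeHeap& heap, Args&&... args)
    {
        void* storage = heap.allocateCell(sizeof(T));
        T* instance = new (storage) T(std::forward<Args>(args)...);
        instance->finishCreation();
        return instance;
    }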
diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.cpp b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
new file mode 100644
index 000000000..a8388df39
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PropertyCondition.h"
+
+#include "GetterSetter.h"
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+static bool verbose = false;
+
+void PropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ if (!*this) {
+ out.print("<invalid>");
+ return;
+ }
+
+ out.print(m_kind, " of ", m_uid);
+ switch (m_kind) {
+ case Presence:
+ out.print(" at ", offset(), " with attributes ", attributes());
+ return;
+ case Absence:
+ case AbsenceOfSetter:
+ out.print(" with prototype ", inContext(JSValue(prototype()), context));
+ return;
+ case Equivalence:
+ out.print(" with ", inContext(requiredValue(), context));
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void PropertyCondition::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(
+ Structure* structure, JSObject* base) const
+{
+ if (verbose) {
+ dataLog(
+ "Determining validity of ", *this, " with structure ", pointerDump(structure), " and base ",
+ JSValue(base), " assuming impure property watchpoints are set.\n");
+ }
+
+ if (!*this) {
+ if (verbose)
+ dataLog("Invalid because unset.\n");
+ return false;
+ }
+
+ if (!structure->propertyAccessesAreCacheable()) {
+ if (verbose)
+ dataLog("Invalid because accesses are not cacheable.\n");
+ return false;
+ }
+
+ switch (m_kind) {
+ case Presence: {
+ unsigned currentAttributes;
+ PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+ if (currentOffset != offset() || currentAttributes != attributes()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because we need offset, attributes to be ", offset(), ", ", attributes(),
+ " but they are ", currentOffset, ", ", currentAttributes, "\n");
+ }
+ return false;
+ }
+ return true;
+ }
+
+ case Absence: {
+ if (structure->isDictionary()) {
+ if (verbose)
+ dataLog("Invalid because it's a dictionary.\n");
+ return false;
+ }
+
+ PropertyOffset currentOffset = structure->getConcurrently(uid());
+ if (currentOffset != invalidOffset) {
+ if (verbose)
+ dataLog("Invalid because the property exists at offset: ", currentOffset, "\n");
+ return false;
+ }
+
+ if (structure->storedPrototypeObject() != prototype()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+ "it should have been ", JSValue(prototype()), "\n");
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ case AbsenceOfSetter: {
+ if (structure->isDictionary()) {
+ if (verbose)
+ dataLog("Invalid because it's a dictionary.\n");
+ return false;
+ }
+
+ unsigned currentAttributes;
+ PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+ if (currentOffset != invalidOffset) {
+ if (currentAttributes & (Accessor | CustomAccessor)) {
+ if (verbose) {
+ dataLog(
+ "Invalid because we expected not to have a setter, but we have one at offset ",
+ currentOffset, " with attributes ", currentAttributes, "\n");
+ }
+ return false;
+ }
+ }
+
+ if (structure->storedPrototypeObject() != prototype()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+ "it should have been ", JSValue(prototype()), "\n");
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ case Equivalence: {
+ if (!base || base->structure() != structure) {
+ // Conservatively return false, since we cannot verify this one without having the
+ // object.
+ if (verbose) {
+ dataLog(
+ "Invalid because we don't have a base or the base has the wrong structure: ",
+ RawPointer(base), "\n");
+ }
+ return false;
+ }
+
+ // FIXME: This is somewhat racy, and maybe more risky than we want.
+ // https://bugs.webkit.org/show_bug.cgi?id=134641
+
+ PropertyOffset currentOffset = structure->getConcurrently(uid());
+ if (currentOffset == invalidOffset) {
+ if (verbose) {
+ dataLog(
+ "Invalid because the base no long appears to have ", uid(), " on its structure: ",
+ RawPointer(base), "\n");
+ }
+ return false;
+ }
+
+ JSValue currentValue = base->getDirect(currentOffset);
+ if (currentValue != requiredValue()) {
+ if (verbose) {
+ dataLog(
+ "Invalid because the value is ", currentValue, " but we require ", requiredValue(),
+ "\n");
+ }
+ return false;
+ }
+
+ return true;
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+}
+
+bool PropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+ if (!*this)
+ return false;
+
+ switch (m_kind) {
+ case Presence:
+ case Absence:
+ case Equivalence:
+ return structure->needImpurePropertyWatchpoint();
+ default:
+ return false;
+ }
+}
+
+bool PropertyCondition::isStillValid(Structure* structure, JSObject* base) const
+{
+ if (!isStillValidAssumingImpurePropertyWatchpoint(structure, base))
+ return false;
+
+ // Currently we assume that an impure property can cause a property to appear, and can also
+ // "shadow" an existing JS property on the same object. Hence it affects both presence and
+ // absence. It doesn't affect AbsenceOfSetter because impure properties aren't ever setters.
+ switch (m_kind) {
+ case Absence:
+ if (structure->typeInfo().getOwnPropertySlotIsImpure() || structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
+ return false;
+ break;
+ case Presence:
+ case Equivalence:
+ if (structure->typeInfo().getOwnPropertySlotIsImpure())
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool PropertyCondition::isWatchableWhenValid(
+ Structure* structure, WatchabilityEffort effort) const
+{
+ if (structure->transitionWatchpointSetHasBeenInvalidated())
+ return false;
+
+ switch (m_kind) {
+ case Equivalence: {
+ PropertyOffset offset = structure->getConcurrently(uid());
+
+ // This method should only be called when some variant of isValid returned true, which
+ // implies that we already confirmed that the structure knows of the property. We should
+ // also have verified that the Structure is a cacheable dictionary, which means we
+ // shouldn't have a TOCTOU race either.
+ RELEASE_ASSERT(offset != invalidOffset);
+
+ WatchpointSet* set = nullptr;
+ switch (effort) {
+ case MakeNoChanges:
+ set = structure->propertyReplacementWatchpointSet(offset);
+ break;
+ case EnsureWatchability:
+ set = structure->ensurePropertyReplacementWatchpointSet(
+ *Heap::heap(structure)->vm(), offset);
+ break;
+ }
+
+ if (!set || !set->isStillValid())
+ return false;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool PropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+ Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+ return isStillValidAssumingImpurePropertyWatchpoint(structure, base)
+ && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isWatchable(
+ Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+ return isStillValid(structure, base)
+ && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isStillLive() const
+{
+ if (hasPrototype() && prototype() && !Heap::isMarked(prototype()))
+ return false;
+
+ if (hasRequiredValue()
+ && requiredValue()
+ && requiredValue().isCell()
+ && !Heap::isMarked(requiredValue().asCell()))
+ return false;
+
+ return true;
+}
+
+void PropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+ if (hasPrototype())
+ tracked.check(prototype());
+
+ if (hasRequiredValue())
+ tracked.check(requiredValue());
+}
+
+bool PropertyCondition::isValidValueForAttributes(VM& vm, JSValue value, unsigned attributes)
+{
+ bool attributesClaimAccessor = !!(attributes & Accessor);
+ bool valueClaimsAccessor = !!jsDynamicCast<GetterSetter*>(vm, value);
+ return attributesClaimAccessor == valueClaimsAccessor;
+}
+
+bool PropertyCondition::isValidValueForPresence(VM& vm, JSValue value) const
+{
+ return isValidValueForAttributes(vm, value, attributes());
+}
+
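+// Attempts to strengthen a presence-style fact into an Equivalence condition by
+// reading the value currently stored at offset() on the base object. Returns an
+// invalid PropertyCondition if the offset is stale for the base's current
+// structure, or if the stored value's accessor-ness contradicts the recorded
+// attributes (see isValidValueForAttributes()).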
+PropertyCondition PropertyCondition::attemptToMakeEquivalenceWithoutBarrier(VM& vm, JSObject* base) const
+{
+ Structure* structure = base->structure();
+ if (!structure->isValidOffset(offset()))
+ return PropertyCondition();
+ JSValue value = base->getDirect(offset());
+ if (!isValidValueForPresence(vm, value))
+ return PropertyCondition();
+ return equivalenceWithoutBarrier(uid(), value);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::PropertyCondition::Kind condition)
+{
+ switch (condition) {
+ case JSC::PropertyCondition::Presence:
+ out.print("Presence");
+ return;
+ case JSC::PropertyCondition::Absence:
+ out.print("Absence");
+ return;
+ case JSC::PropertyCondition::AbsenceOfSetter:
+ out.print("Absence");
+ return;
+ case JSC::PropertyCondition::Equivalence:
+ out.print("Equivalence");
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.h b/Source/JavaScriptCore/bytecode/PropertyCondition.h
new file mode 100644
index 000000000..163e8f3fb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PropertyCondition.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSObject.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class PropertyCondition {
+public:
+ enum Kind {
+ Presence,
+ Absence,
+ AbsenceOfSetter,
+ Equivalence // An adaptive watchpoint on this will be a pair of watchpoints, and when the structure transitions, we will set the replacement watchpoint on the new structure.
+ };
+
+ PropertyCondition()
+ : m_uid(nullptr)
+ , m_kind(Presence)
+ {
+ memset(&u, 0, sizeof(u));
+ }
+
+ PropertyCondition(WTF::HashTableDeletedValueType)
+ : m_uid(nullptr)
+ , m_kind(Absence)
+ {
+ memset(&u, 0, sizeof(u));
+ }
+
+ static PropertyCondition presenceWithoutBarrier(UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = Presence;
+ result.u.presence.offset = offset;
+ result.u.presence.attributes = attributes;
+ return result;
+ }
+
+ static PropertyCondition presence(
+ VM&, JSCell*, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+ {
+ return presenceWithoutBarrier(uid, offset, attributes);
+ }
+
+ // NOTE: The prototype is the storedPrototype not the prototypeForLookup.
+ static PropertyCondition absenceWithoutBarrier(UniquedStringImpl* uid, JSObject* prototype)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = Absence;
+ result.u.absence.prototype = prototype;
+ return result;
+ }
+
+ static PropertyCondition absence(
+ VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceWithoutBarrier(uid, prototype);
+ }
+
+ static PropertyCondition absenceOfSetterWithoutBarrier(
+ UniquedStringImpl* uid, JSObject* prototype)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = AbsenceOfSetter;
+ result.u.absence.prototype = prototype;
+ return result;
+ }
+
+ static PropertyCondition absenceOfSetter(
+ VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+ {
+ if (owner)
+ vm.heap.writeBarrier(owner);
+ return absenceOfSetterWithoutBarrier(uid, prototype);
+ }
+
+ static PropertyCondition equivalenceWithoutBarrier(
+ UniquedStringImpl* uid, JSValue value)
+ {
+ PropertyCondition result;
+ result.m_uid = uid;
+ result.m_kind = Equivalence;
+ result.u.equivalence.value = JSValue::encode(value);
+ return result;
+ }
+
+ static PropertyCondition equivalence(
+ VM& vm, JSCell* owner, UniquedStringImpl* uid, JSValue value)
+ {
+ if (value.isCell() && owner)
+ vm.heap.writeBarrier(owner);
+ return equivalenceWithoutBarrier(uid, value);
+ }
+
+ explicit operator bool() const { return m_uid || m_kind != Presence; }
+
+ Kind kind() const { return m_kind; }
+ UniquedStringImpl* uid() const { return m_uid; }
+
+ bool hasOffset() const { return !!*this && m_kind == Presence; }
+ PropertyOffset offset() const
+ {
+ ASSERT(hasOffset());
+ return u.presence.offset;
+ }
+ bool hasAttributes() const { return !!*this && m_kind == Presence; }
+ unsigned attributes() const
+ {
+ ASSERT(hasAttributes());
+ return u.presence.attributes;
+ }
+
+ bool hasPrototype() const { return !!*this && (m_kind == Absence || m_kind == AbsenceOfSetter); }
+ JSObject* prototype() const
+ {
+ ASSERT(hasPrototype());
+ return u.absence.prototype;
+ }
+
+ bool hasRequiredValue() const { return !!*this && m_kind == Equivalence; }
+ JSValue requiredValue() const
+ {
+ ASSERT(hasRequiredValue());
+ return JSValue::decode(u.equivalence.value);
+ }
+
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
+
+ unsigned hash() const
+ {
+ unsigned result = WTF::PtrHash<UniquedStringImpl*>::hash(m_uid) + static_cast<unsigned>(m_kind);
+ switch (m_kind) {
+ case Presence:
+ result ^= u.presence.offset;
+ result ^= u.presence.attributes;
+ break;
+ case Absence:
+ case AbsenceOfSetter:
+ result ^= WTF::PtrHash<JSObject*>::hash(u.absence.prototype);
+ break;
+ case Equivalence:
+ result ^= EncodedJSValueHash::hash(u.equivalence.value);
+ break;
+ }
+ return result;
+ }
+
+ bool operator==(const PropertyCondition& other) const
+ {
+ if (m_uid != other.m_uid)
+ return false;
+ if (m_kind != other.m_kind)
+ return false;
+ switch (m_kind) {
+ case Presence:
+ return u.presence.offset == other.u.presence.offset
+ && u.presence.attributes == other.u.presence.attributes;
+ case Absence:
+ case AbsenceOfSetter:
+ return u.absence.prototype == other.u.absence.prototype;
+ case Equivalence:
+ return u.equivalence.value == other.u.equivalence.value;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return !m_uid && m_kind == Absence;
+ }
+
+ // Two conditions are compatible if they are identical or if they speak of different uids. If
+ // false is returned, you have to decide how to resolve the conflict - for example if there is
+ // a Presence and an Equivalence then in some cases you'll want the more general of the two
+ // while in other cases you'll want the more specific of the two. This will also return false
+ // for contradictions, like Presence and Absence on the same uid. By convention, invalid
+ // conditions aren't compatible with anything.
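+ // Illustrative examples: two Presence conditions on "x" are compatible only if
+ // identical (same offset and attributes); Presence of "x" vs. Absence of "x" is a
+ // contradiction, hence not compatible; Presence of "x" vs. Equivalence of "y" is
+ // compatible because the uids differ.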
+ bool isCompatibleWith(const PropertyCondition& other) const
+ {
+ if (!*this || !other)
+ return false;
+ return *this == other || uid() != other.uid();
+ }
+
+ // Checks if the object's structure claims that the property won't be intercepted.
+ bool isStillValidAssumingImpurePropertyWatchpoint(Structure*, JSObject* base = nullptr) const;
+
+ // Returns true if we need an impure property watchpoint to ensure validity even if
+ // isStillValidAssumingImpurePropertyWatchpoint() returned true.
+ bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+
+ // Checks if the condition is still valid right now for the given object and structure.
+ // May conservatively return false, if the object and structure alone don't guarantee the
+ // condition. This happens for an Absence condition on an object that may have impure
+ // properties. If the object is not supplied, then a "true" return indicates that checking if
+ // an object has the given structure guarantees the condition still holds. If an object is
+ // supplied, then you may need to use some other watchpoints on the object to guarantee the
+ // condition in addition to the structure check.
+ bool isStillValid(Structure*, JSObject* base = nullptr) const;
+
+ // In some cases, the condition is not watchable, but could be made watchable by enabling the
+ // appropriate watchpoint. For example, replacement watchpoints are enabled only when some
+ // access is cached on the property in some structure. This is mainly to save space for
+ // dictionary properties or properties that never get very hot. But, it's always safe to
+ // enable watching, provided that this is called from the main thread.
+ enum WatchabilityEffort {
+ // This is the default. It means that we don't change the state of any Structure or
+ // object, and implies that if the property happens not to be watchable then we don't make
+ // it watchable. This is mandatory if calling from a JIT thread. This is also somewhat
+ // preferable when first deciding whether to watch a condition for the first time (i.e.
+ // not from a watchpoint fire that causes us to see if we should adapt), since a
+ // watchpoint not being initialized for watching implies that maybe we don't know enough
+ // yet to make it profitable to watch -- as in, the thing being watched may not have
+ // stabilized yet. We prefer to only assume that a condition will hold if it has been
+ // known to hold for a while already.
+ MakeNoChanges,
+
+ // Do what it takes to ensure that the property can be watched, if doing so has no
+ // user-observable effect. For now this just means that we will ensure that a property
+ // replacement watchpoint is enabled if it hadn't been enabled already. Do not use this
+ // from JIT threads, since the act of enabling watchpoints is not thread-safe.
+ EnsureWatchability
+ };
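+
+ // Illustrative call sites (hypothetical), consistent with the comments above:
+ // condition.isWatchable(structure); // JIT thread: defaults to MakeNoChanges
+ // condition.isWatchable(structure, base, EnsureWatchability); // main thread only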
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure and possibly an impure property watchpoint.
+ bool isWatchableAssumingImpurePropertyWatchpoint(
+ Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+
+ // This means that it's still valid and we could enforce validity by setting a transition
+ // watchpoint on the structure.
+ bool isWatchable(
+ Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+
+ bool watchingRequiresStructureTransitionWatchpoint() const
+ {
+ // Currently, this is required for all of our conditions.
+ return !!*this;
+ }
+ bool watchingRequiresReplacementWatchpoint() const
+ {
+ return !!*this && m_kind == Equivalence;
+ }
+
+ // This means that the objects involved in this are still live.
+ bool isStillLive() const;
+
+ void validateReferences(const TrackedReferences&) const;
+
+ static bool isValidValueForAttributes(VM&, JSValue, unsigned attributes);
+
+ bool isValidValueForPresence(VM&, JSValue) const;
+
+ PropertyCondition attemptToMakeEquivalenceWithoutBarrier(VM&, JSObject* base) const;
+
+private:
+ bool isWatchableWhenValid(Structure*, WatchabilityEffort) const;
+
+ UniquedStringImpl* m_uid;
+ Kind m_kind;
+ union {
+ struct {
+ PropertyOffset offset;
+ unsigned attributes;
+ } presence;
+ struct {
+ JSObject* prototype;
+ } absence;
+ struct {
+ EncodedJSValue value;
+ } equivalence;
+ } u;
+};
+
+struct PropertyConditionHash {
+ static unsigned hash(const PropertyCondition& key) { return key.hash(); }
+ static bool equal(
+ const PropertyCondition& a, const PropertyCondition& b)
+ {
+ return a == b;
+ }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::PropertyCondition::Kind);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::PropertyCondition> {
+ typedef JSC::PropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::PropertyCondition> : SimpleClassHashTraits<JSC::PropertyCondition> { };
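+
+// With the DefaultHash and HashTraits specializations above, PropertyCondition can
+// key WTF hash tables directly, e.g. (illustrative):
+// HashMap<JSC::PropertyCondition, unsigned> seenConditions;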
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp
new file mode 100644
index 000000000..63879c4de
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ProxyableAccessCase.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ProxyableAccessCase::ProxyableAccessCase(VM& vm, JSCell* owner, AccessType accessType, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
+ : Base(vm, owner, accessType, offset, structure, conditionSet)
+ , m_viaProxy(viaProxy)
+ , m_additionalSet(additionalSet)
+{
+}
+
+std::unique_ptr<AccessCase> ProxyableAccessCase::create(VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure, const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
+{
+ ASSERT(type == Load || type == Miss || type == GetGetter);
+ return std::unique_ptr<AccessCase>(new ProxyableAccessCase(vm, owner, type, offset, structure, conditionSet, viaProxy, additionalSet));
+}
+
+ProxyableAccessCase::~ProxyableAccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> ProxyableAccessCase::clone() const
+{
+ std::unique_ptr<ProxyableAccessCase> result(new ProxyableAccessCase(*this));
+ result->resetState();
+ return WTFMove(result);
+}
+
+void ProxyableAccessCase::dumpImpl(PrintStream& out, CommaPrinter& comma) const
+{
+ Base::dumpImpl(out, comma);
+ out.print(comma, "viaProxy = ", viaProxy());
+ out.print(comma, "additionalSet = ", RawPointer(additionalSet()));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/ProxyableAccessCase.h b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.h
new file mode 100644
index 000000000..578be2228
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ProxyableAccessCase.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "AccessCase.h"
+
+namespace JSC {
+
+class ProxyableAccessCase : public AccessCase {
+public:
+ typedef AccessCase Base;
+
+ bool viaProxy() const override { return m_viaProxy; }
+ WatchpointSet* additionalSet() const override { return m_additionalSet.get(); }
+
+ static std::unique_ptr<AccessCase> create(VM&, JSCell*, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+ bool viaProxy = false, WatchpointSet* additionalSet = nullptr);
+
+ void dumpImpl(PrintStream&, CommaPrinter&) const override;
+ std::unique_ptr<AccessCase> clone() const override;
+
+ ~ProxyableAccessCase();
+
+protected:
+ ProxyableAccessCase(VM&, JSCell*, AccessType, PropertyOffset, Structure*, const ObjectPropertyConditionSet&, bool viaProxy, WatchpointSet* additionalSet);
+
+private:
+ bool m_viaProxy;
+ RefPtr<WatchpointSet> m_additionalSet;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp
new file mode 100644
index 000000000..f28090049
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PutByIdFlags.h"
+
+#include "InferredType.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/PrintStream.h>
+#include <wtf/StringPrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, PutByIdFlags flags)
+{
+ CommaPrinter comma("|");
+ if (flags & PutByIdIsDirect)
+ out.print(comma, "IsDirect");
+
+ InferredType::Kind kind = InferredType::kindForFlags(flags);
+ out.print(comma, kind);
+ if (InferredType::hasStructure(kind))
+ out.print(":", bitwise_cast<int32_t>(decodeStructureID(flags)));
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdFlags.h b/Source/JavaScriptCore/bytecode/PutByIdFlags.h
new file mode 100644
index 000000000..7decfb2eb
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdFlags.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "StructureIDTable.h"
+
+namespace JSC {
+
+enum PutByIdFlags : int32_t {
+ PutByIdNone = 0,
+
+ // This flag indicates that the put_by_id is direct. That means that we store the property without
+ // checking if the prototype chain has a setter.
+ PutByIdIsDirect = 0x1,
+ PutByIdPersistentFlagsMask = 0x1,
+
+ // NOTE: The values below must be in sync with what is in LowLevelInterpreter.asm.
+
+ // Determining the required inferred type involves first checking the primary type mask, and then
+ // using that to figure out the meaning of the secondary mask:
+ // switch (flags & PutByIdPrimaryTypeMask) {
+ // case PutByIdPrimaryTypeSecondary:
+ // switch (flags & PutByIdSecondaryTypeMask) {
+ // ...
+ // }
+ // break;
+ // case PutByIdPrimaryTypeObjectWithStructure:
+ // case PutByIdPrimaryTypeObjectWithStructureOrOther:
+ // StructureID structureID = decodeStructureID(flags);
+ // break;
+ // }
+ PutByIdPrimaryTypeMask = 0x6,
+ PutByIdPrimaryTypeSecondary = 0x0, // Need to check the secondary type mask for the type.
+ PutByIdPrimaryTypeObjectWithStructure = 0x2, // Secondary type has structure ID.
+ PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4, // Secondary type has structure ID.
+
+ PutByIdSecondaryTypeMask = -0x8,
+ PutByIdSecondaryTypeBottom = 0x0,
+ PutByIdSecondaryTypeBoolean = 0x8,
+ PutByIdSecondaryTypeOther = 0x10,
+ PutByIdSecondaryTypeInt32 = 0x18,
+ PutByIdSecondaryTypeNumber = 0x20,
+ PutByIdSecondaryTypeString = 0x28,
+ PutByIdSecondaryTypeSymbol = 0x30,
+ PutByIdSecondaryTypeObject = 0x38,
+ PutByIdSecondaryTypeObjectOrOther = 0x40,
+ PutByIdSecondaryTypeTop = 0x48
+};
+
+inline PutByIdFlags encodeStructureID(StructureID id)
+{
+#if USE(JSVALUE64)
+ return static_cast<PutByIdFlags>(static_cast<PutByIdFlags>(id) << 3);
+#else
+ PutByIdFlags result = bitwise_cast<PutByIdFlags>(id);
+ ASSERT(!(result & ~PutByIdSecondaryTypeMask));
+ return result;
+#endif
+}
+
+inline StructureID decodeStructureID(PutByIdFlags flags)
+{
+#if USE(JSVALUE64)
+ return static_cast<StructureID>(flags >> 3);
+#else
+ return bitwise_cast<StructureID>(flags & PutByIdSecondaryTypeMask);
+#endif
+}
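+
+// Illustrative round trip on JSVALUE64 (hypothetical id that fits above the low
+// flag bits): the StructureID occupies the bits above the three low flag bits, so
+// OR-ing in flag values does not disturb it:
+// PutByIdFlags flags = static_cast<PutByIdFlags>(
+// encodeStructureID(id) | PutByIdIsDirect | PutByIdPrimaryTypeObjectWithStructure);
+// decodeStructureID(flags) == id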
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::PutByIdFlags);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 17cf70897..fdadf7022 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,206 +27,393 @@
#include "PutByIdStatus.h"
#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "GetterSetterAccessCase.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
#include "Structure.h"
#include "StructureChain.h"
+#include "StructureStubInfo.h"
+#include <wtf/ListDump.h>
namespace JSC {
-PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
+bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
+{
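+ // First try to fold the new variant into an existing one; if no merge is
+ // possible, it can only be appended when its old structure set is disjoint from
+ // every existing variant's, since overlapping sets would make variant selection
+ // ambiguous.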
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].attemptToMerge(variant))
+ return true;
+ }
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
+ return false;
+ }
+ m_variants.append(variant);
+ return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool PutByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+ return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
+
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
+
+ VM& vm = *profiledBlock->vm();
+
Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
- Structure* structure = instruction[4].u.structure.get();
- if (!structure)
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+ StructureID structureID = instruction[4].u.structureID;
+ if (!structureID)
+ return PutByIdStatus(NoInformation);
- if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) {
- PropertyOffset offset = structure->getConcurrently(*profiledBlock->vm(), uid);
+ Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+ StructureID newStructureID = instruction[6].u.structureID;
+ if (!newStructureID) {
+ PropertyOffset offset = structure->getConcurrently(uid);
if (!isValidOffset(offset))
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(NoInformation);
- return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
+ return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
}
+
+ Structure* newStructure = vm.heap.structureIDTable().get(newStructureID);
ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());
- ASSERT(instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line)
- || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line));
-
- Structure* newStructure = instruction[6].u.structure.get();
- StructureChain* chain = instruction[7].u.structureChain.get();
- ASSERT(newStructure);
- ASSERT(chain);
-
- PropertyOffset offset = newStructure->getConcurrently(*profiledBlock->vm(), uid);
+ PropertyOffset offset = newStructure->getConcurrently(uid);
if (!isValidOffset(offset))
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(NoInformation);
- return PutByIdStatus(
- SimpleTransition, structure, newStructure,
- chain ? adoptRef(new IntendedStructureChain(profiledBlock, structure, chain)) : 0,
- offset);
-#else
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif
+ ObjectPropertyConditionSet conditionSet;
+ if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) {
+ conditionSet =
+ generateConditionsForPropertySetterMissConcurrently(
+ *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid);
+ if (!conditionSet.isValid())
+ return PutByIdStatus(NoInformation);
+ }
+
+ return PutByIdVariant::transition(
+ structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid));
}
-PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
- ConcurrentJITLocker locker(profiledBlock->m_lock);
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-#if ENABLE(JIT)
- if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(DFG_JIT)
+ if (hasExitSite(locker, profiledBlock, bytecodeIndex))
+ return PutByIdStatus(TakesSlowPath);
StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
- if (!stubInfo || !stubInfo->seen)
+ PutByIdStatus result = computeForStubInfo(
+ locker, profiledBlock, stubInfo, uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+ if (!result)
return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
- if (stubInfo->resetByGC)
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+ return result;
+#else // ENABLE(JIT)
+ UNUSED_PARAM(map);
+ return PutByIdStatus(NoInformation);
+#endif // ENABLE(JIT)
+}
- switch (stubInfo->accessType) {
- case access_unset:
- // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(JIT)
+PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* baselineBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+ return computeForStubInfo(
+ locker, baselineBlock, stubInfo, uid,
+ CallLinkStatus::computeExitSiteData(locker, baselineBlock, codeOrigin.bytecodeIndex));
+}
+
+PutByIdStatus PutByIdStatus::computeForStubInfo(
+ const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
+ UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
+{
+ if (!stubInfo || !stubInfo->everConsidered)
+ return PutByIdStatus();
+
+ if (stubInfo->tookSlowPath)
+ return PutByIdStatus(TakesSlowPath);
+
+ switch (stubInfo->cacheType) {
+ case CacheType::Unset:
+ // This means that we attempted to cache but failed for some reason.
+ return PutByIdStatus(TakesSlowPath);
- case access_put_by_id_replace: {
+ case CacheType::PutByIdReplace: {
PropertyOffset offset =
- stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(
- *profiledBlock->vm(), uid);
+ stubInfo->u.byIdSelf.baseObjectStructure->getConcurrently(uid);
if (isValidOffset(offset)) {
- return PutByIdStatus(
- SimpleReplace,
- stubInfo->u.putByIdReplace.baseObjectStructure.get(),
- 0, 0,
- offset);
+ return PutByIdVariant::replace(
+ stubInfo->u.byIdSelf.baseObjectStructure.get(), offset, InferredType::Top);
}
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(TakesSlowPath);
}
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct: {
- ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
- PropertyOffset offset =
- stubInfo->u.putByIdTransition.structure->getConcurrently(
- *profiledBlock->vm(), uid);
- if (isValidOffset(offset)) {
- return PutByIdStatus(
- SimpleTransition,
- stubInfo->u.putByIdTransition.previousStructure.get(),
- stubInfo->u.putByIdTransition.structure.get(),
- stubInfo->u.putByIdTransition.chain ? adoptRef(new IntendedStructureChain(
- profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
- stubInfo->u.putByIdTransition.chain.get())) : 0,
- offset);
+ case CacheType::Stub: {
+ PolymorphicAccess* list = stubInfo->u.stub;
+
+ PutByIdStatus result;
+ result.m_state = Simple;
+
+ State slowPathState = TakesSlowPath;
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const AccessCase& access = list->at(i);
+ if (access.doesCalls())
+ slowPathState = MakesCalls;
}
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const AccessCase& access = list->at(i);
+ if (access.viaProxy())
+ return PutByIdStatus(slowPathState);
+
+ PutByIdVariant variant;
+
+ switch (access.type()) {
+ case AccessCase::Replace: {
+ Structure* structure = access.structure();
+ PropertyOffset offset = structure->getConcurrently(uid);
+ if (!isValidOffset(offset))
+ return PutByIdStatus(slowPathState);
+ variant = PutByIdVariant::replace(
+ structure, offset, structure->inferredTypeDescriptorFor(uid));
+ break;
+ }
+
+ case AccessCase::Transition: {
+ PropertyOffset offset =
+ access.newStructure()->getConcurrently(uid);
+ if (!isValidOffset(offset))
+ return PutByIdStatus(slowPathState);
+ ObjectPropertyConditionSet conditionSet = access.conditionSet();
+ if (!conditionSet.structuresEnsureValidity())
+ return PutByIdStatus(slowPathState);
+ variant = PutByIdVariant::transition(
+ access.structure(), access.newStructure(), conditionSet, offset,
+ access.newStructure()->inferredTypeDescriptorFor(uid));
+ break;
+ }
+
+ case AccessCase::Setter: {
+ Structure* structure = access.structure();
+
+ ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+ structure, access.conditionSet(), uid);
+
+ switch (complexGetStatus.kind()) {
+ case ComplexGetStatus::ShouldSkip:
+ continue;
+
+ case ComplexGetStatus::TakesSlowPath:
+ return PutByIdStatus(slowPathState);
+
+ case ComplexGetStatus::Inlineable: {
+ std::unique_ptr<CallLinkStatus> callLinkStatus =
+ std::make_unique<CallLinkStatus>();
+ if (CallLinkInfo* callLinkInfo = access.as<GetterSetterAccessCase>().callLinkInfo()) {
+ *callLinkStatus = CallLinkStatus::computeFor(
+ locker, profiledBlock, *callLinkInfo, callExitSiteData);
+ }
+
+ variant = PutByIdVariant::setter(
+ structure, complexGetStatus.offset(), complexGetStatus.conditionSet(),
+ WTFMove(callLinkStatus));
+ } }
+ break;
+ }
+
+ case AccessCase::CustomValueSetter:
+ case AccessCase::CustomAccessorSetter:
+ return PutByIdStatus(MakesCalls);
+
+ default:
+ return PutByIdStatus(slowPathState);
+ }
+
+ if (!result.appendVariant(variant))
+ return PutByIdStatus(slowPathState);
+ }
+
+ return result;
}
default:
- // FIXME: We should handle polymorphic PutById. We probably have some interesting things
- // we could do about it.
- return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+ return PutByIdStatus(TakesSlowPath);
}
-#else // ENABLE(JIT)
- UNUSED_PARAM(map);
- return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif // ENABLE(JIT)
}
+#endif
-PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, StringImpl* uid, bool isDirect)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
- if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
- return PutByIdStatus(TakesSlowPath);
+#if ENABLE(DFG_JIT)
+ if (dfgBlock) {
+ CallLinkStatus::ExitSiteData exitSiteData;
+ {
+ ConcurrentJSLocker locker(baselineBlock->m_lock);
+ if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
+ return PutByIdStatus(TakesSlowPath);
+ exitSiteData = CallLinkStatus::computeExitSiteData(
+ locker, baselineBlock, codeOrigin.bytecodeIndex);
+ }
+
+ PutByIdStatus result;
+ {
+ ConcurrentJSLocker locker(dfgBlock->m_lock);
+ result = computeForStubInfo(
+ locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+ }
+
+ // We use TakesSlowPath in some cases where the stub was unset. That's weird and
+ // it would be better not to do that. But it means that we have to defend
+ // ourselves here.
+ if (result.isSimple())
+ return result;
+ }
+#else
+ UNUSED_PARAM(dfgBlock);
+ UNUSED_PARAM(dfgMap);
+#endif
+
+ return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
- if (!structure)
+PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
+{
+ if (parseIndex(*uid))
return PutByIdStatus(TakesSlowPath);
+
+ if (set.isEmpty())
+ return PutByIdStatus();
- if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
- return PutByIdStatus(TakesSlowPath);
+ PutByIdStatus result;
+ result.m_state = Simple;
+ for (unsigned i = 0; i < set.size(); ++i) {
+ Structure* structure = set[i];
+
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ return PutByIdStatus(TakesSlowPath);
- if (!structure->propertyAccessesAreCacheable())
- return PutByIdStatus(TakesSlowPath);
+ if (!structure->propertyAccessesAreCacheable())
+ return PutByIdStatus(TakesSlowPath);
+
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (isValidOffset(offset)) {
+ if (attributes & CustomAccessor)
+ return PutByIdStatus(MakesCalls);
+
+ if (attributes & (Accessor | ReadOnly))
+ return PutByIdStatus(TakesSlowPath);
+
+ WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
+ if (!replaceSet || replaceSet->isStillValid()) {
+ // When this executes, it'll create, and fire, this replacement watchpoint set.
+ // That means that this has probably never executed or that something fishy is
+ // going on. Also, we cannot create or fire the watchpoint set from the concurrent
+ // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
+ // So, better leave this alone and take slow path.
+ return PutByIdStatus(TakesSlowPath);
+ }
+
+ PutByIdVariant variant =
+ PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
+ if (!result.appendVariant(variant))
+ return PutByIdStatus(TakesSlowPath);
+ continue;
+ }
+
+ // Our hypothesis is that we're doing a transition. Before we prove that this is really
+ // true, we want to do some sanity checks.
- unsigned attributes;
- JSCell* specificValue;
- PropertyOffset offset = structure->getConcurrently(vm, uid, attributes, specificValue);
- if (isValidOffset(offset)) {
- if (attributes & (Accessor | ReadOnly))
+ // Don't cache put transitions on dictionaries.
+ if (structure->isDictionary())
return PutByIdStatus(TakesSlowPath);
- if (specificValue) {
- // We need the PutById slow path to verify that we're storing the right value into
- // the specialized slot.
+
+ // If the structure corresponds to something that isn't an object, then give up, since
+ // we don't want to be adding properties to strings.
+ if (!structure->typeInfo().isObject())
return PutByIdStatus(TakesSlowPath);
+
+ ObjectPropertyConditionSet conditionSet;
+ if (!isDirect) {
+ conditionSet = generateConditionsForPropertySetterMissConcurrently(
+ globalObject->vm(), globalObject, structure, uid);
+ if (!conditionSet.isValid())
+ return PutByIdStatus(TakesSlowPath);
}
- return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
- }
- // Our hypothesis is that we're doing a transition. Before we prove that this is really
- // true, we want to do some sanity checks.
+ // We only optimize if there is already a structure that the transition is cached to.
+ Structure* transition =
+ Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
+ if (!transition)
+ return PutByIdStatus(TakesSlowPath);
+ ASSERT(isValidOffset(offset));
- // Don't cache put transitions on dictionaries.
- if (structure->isDictionary())
- return PutByIdStatus(TakesSlowPath);
+ bool didAppend = result.appendVariant(
+ PutByIdVariant::transition(
+ structure, transition, conditionSet, offset,
+ transition->inferredTypeDescriptorFor(uid)));
+ if (!didAppend)
+ return PutByIdStatus(TakesSlowPath);
+ }
+
+ return result;
+}
- // If the structure corresponds to something that isn't an object, then give up, since
- // we don't want to be adding properties to strings.
- if (structure->typeInfo().type() == StringType)
- return PutByIdStatus(TakesSlowPath);
+bool PutByIdStatus::makesCalls() const
+{
+ if (m_state == MakesCalls)
+ return true;
+
+ if (m_state != Simple)
+ return false;
+
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].makesCalls())
+ return true;
+ }
- RefPtr<IntendedStructureChain> chain;
- if (!isDirect) {
- chain = adoptRef(new IntendedStructureChain(globalObject, structure));
+ return false;
+}
+
+void PutByIdStatus::dump(PrintStream& out) const
+{
+ switch (m_state) {
+ case NoInformation:
+ out.print("(NoInformation)");
+ return;
- // If the prototype chain has setters or read-only properties, then give up.
- if (chain->mayInterceptStoreTo(vm, uid))
- return PutByIdStatus(TakesSlowPath);
+ case Simple:
+ out.print("(", listDump(m_variants), ")");
+ return;
- // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
- // then give up. The dictionary case would only happen if this structure has not been
- // used in an optimized put_by_id transition. And really the only reason why we would
- // bail here is that I don't really feel like having the optimizing JIT go and flatten
- // dictionaries if we have evidence to suggest that those objects were never used as
- // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
- // the other checks below will fail.
- if (!chain->isNormalized())
- return PutByIdStatus(TakesSlowPath);
+ case TakesSlowPath:
+ out.print("(TakesSlowPath)");
+ return;
+ case MakesCalls:
+ out.print("(MakesCalls)");
+ return;
}
- // We only optimize if there is already a structure that the transition is cached to.
- // Among other things, this allows us to guard against a transition with a specific
- // value.
- //
- // - If we're storing a value that could be specific: this would only be a problem if
- // the existing transition did have a specific value already, since if it didn't,
- // then we would behave "as if" we were not storing a specific value. If it did
- // have a specific value, then we'll know - the fact that we pass 0 for
- // specificValue will tell us.
- //
- // - If we're not storing a value that could be specific: again, this would only be a
- // problem if the existing transition did have a specific value, which we check for
- // by passing 0 for the specificValue.
- Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, 0, offset);
- if (!transition)
- return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above.
- ASSERT(!transition->transitionDidInvolveSpecificValue());
- ASSERT(isValidOffset(offset));
-
- return PutByIdStatus(SimpleTransition, structure, transition, chain.release(), offset);
+ RELEASE_ASSERT_NOT_REACHED();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
index c0a1bc35c..1dd95cde4 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,12 +23,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef PutByIdStatus_h
-#define PutByIdStatus_h
+#pragma once
-#include "IntendedStructureChain.h"
-#include "PropertyOffset.h"
-#include "StructureStubInfo.h"
+#include "CallLinkStatus.h"
+#include "ExitingJITType.h"
+#include "PutByIdVariant.h"
#include <wtf/text/StringImpl.h>
namespace JSC {
@@ -38,86 +37,79 @@ class VM;
class JSGlobalObject;
class Structure;
class StructureChain;
+class StructureStubInfo;
+
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
class PutByIdStatus {
public:
enum State {
// It's uncached so we have no information.
NoInformation,
- // It's cached as a direct store into an object property for cases where the object
- // already has the property.
- SimpleReplace,
- // It's cached as a transition from one structure that lacks the property to one that
- // includes the property, and a direct store to this new property.
- SimpleTransition,
+ // It's cached as a simple store of some kind.
+ Simple,
// It's known to often take slow path.
- TakesSlowPath
+ TakesSlowPath,
+ // It's known to take paths that make calls.
+ MakesCalls
};
PutByIdStatus()
: m_state(NoInformation)
- , m_oldStructure(0)
- , m_newStructure(0)
- , m_structureChain(0)
- , m_offset(invalidOffset)
{
}
explicit PutByIdStatus(State state)
: m_state(state)
- , m_oldStructure(0)
- , m_newStructure(0)
- , m_structureChain(0)
- , m_offset(invalidOffset)
{
- ASSERT(m_state == NoInformation || m_state == TakesSlowPath);
+ ASSERT(m_state == NoInformation || m_state == TakesSlowPath || m_state == MakesCalls);
}
- PutByIdStatus(
- State state,
- Structure* oldStructure,
- Structure* newStructure,
- PassRefPtr<IntendedStructureChain> structureChain,
- PropertyOffset offset)
- : m_state(state)
- , m_oldStructure(oldStructure)
- , m_newStructure(newStructure)
- , m_structureChain(structureChain)
- , m_offset(offset)
+ PutByIdStatus(const PutByIdVariant& variant)
+ : m_state(Simple)
{
- ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == !m_oldStructure);
- ASSERT((m_state != SimpleTransition) == !m_newStructure);
- ASSERT(!((m_state != SimpleTransition) && m_structureChain));
- ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == (m_offset == invalidOffset));
+ m_variants.append(variant);
}
- static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, StringImpl* uid);
- static PutByIdStatus computeFor(VM&, JSGlobalObject*, Structure*, StringImpl* uid, bool isDirect);
+ static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+ static PutByIdStatus computeFor(JSGlobalObject*, const StructureSet&, UniquedStringImpl* uid, bool isDirect);
+
+ static PutByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(JIT)
+ static PutByIdStatus computeForStubInfo(const ConcurrentJSLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
State state() const { return m_state; }
bool isSet() const { return m_state != NoInformation; }
bool operator!() const { return m_state == NoInformation; }
- bool isSimpleReplace() const { return m_state == SimpleReplace; }
- bool isSimpleTransition() const { return m_state == SimpleTransition; }
- bool takesSlowPath() const { return m_state == TakesSlowPath; }
+ bool isSimple() const { return m_state == Simple; }
+ bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; }
+ bool makesCalls() const;
- Structure* oldStructure() const { return m_oldStructure; }
- Structure* newStructure() const { return m_newStructure; }
- IntendedStructureChain* structureChain() const { return m_structureChain.get(); }
- PropertyOffset offset() const { return m_offset; }
+ size_t numVariants() const { return m_variants.size(); }
+ const Vector<PutByIdVariant, 1>& variants() const { return m_variants; }
+ const PutByIdVariant& at(size_t index) const { return m_variants[index]; }
+ const PutByIdVariant& operator[](size_t index) const { return at(index); }
+
+ void dump(PrintStream&) const;
private:
- static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, StringImpl* uid);
+#if ENABLE(DFG_JIT)
+ static bool hasExitSite(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+ static PutByIdStatus computeForStubInfo(
+ const ConcurrentJSLocker&, CodeBlock*, StructureStubInfo*, UniquedStringImpl* uid,
+ CallLinkStatus::ExitSiteData);
+#endif
+ static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+
+ bool appendVariant(const PutByIdVariant&);
State m_state;
- Structure* m_oldStructure;
- Structure* m_newStructure;
- RefPtr<IntendedStructureChain> m_structureChain;
- PropertyOffset m_offset;
+ Vector<PutByIdVariant, 1> m_variants;
};
} // namespace JSC
-
-#endif // PutByIdStatus_h
-
diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp
new file mode 100644
index 000000000..9904c625b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PutByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+PutByIdVariant::PutByIdVariant(const PutByIdVariant& other)
+ : PutByIdVariant()
+{
+ *this = other;
+}
+
+PutByIdVariant& PutByIdVariant::operator=(const PutByIdVariant& other)
+{
+ m_kind = other.m_kind;
+ m_oldStructure = other.m_oldStructure;
+ m_newStructure = other.m_newStructure;
+ m_conditionSet = other.m_conditionSet;
+ m_offset = other.m_offset;
+ m_requiredType = other.m_requiredType;
+ if (other.m_callLinkStatus)
+ m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+ else
+ m_callLinkStatus = nullptr;
+ return *this;
+}
+
+PutByIdVariant PutByIdVariant::replace(
+ const StructureSet& structure, PropertyOffset offset, const InferredType::Descriptor& requiredType)
+{
+ PutByIdVariant result;
+ result.m_kind = Replace;
+ result.m_oldStructure = structure;
+ result.m_offset = offset;
+ result.m_requiredType = requiredType;
+ return result;
+}
+
+PutByIdVariant PutByIdVariant::transition(
+ const StructureSet& oldStructure, Structure* newStructure,
+ const ObjectPropertyConditionSet& conditionSet, PropertyOffset offset,
+ const InferredType::Descriptor& requiredType)
+{
+ PutByIdVariant result;
+ result.m_kind = Transition;
+ result.m_oldStructure = oldStructure;
+ result.m_newStructure = newStructure;
+ result.m_conditionSet = conditionSet;
+ result.m_offset = offset;
+ result.m_requiredType = requiredType;
+ return result;
+}
+
+PutByIdVariant PutByIdVariant::setter(
+ const StructureSet& structure, PropertyOffset offset,
+ const ObjectPropertyConditionSet& conditionSet,
+ std::unique_ptr<CallLinkStatus> callLinkStatus)
+{
+ PutByIdVariant result;
+ result.m_kind = Setter;
+ result.m_oldStructure = structure;
+ result.m_conditionSet = conditionSet;
+ result.m_offset = offset;
+ result.m_callLinkStatus = WTFMove(callLinkStatus);
+ result.m_requiredType = InferredType::Top;
+ return result;
+}
+
+Structure* PutByIdVariant::oldStructureForTransition() const
+{
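+    // After a merge with a Replace case, the old-structure set may contain
+    // m_newStructure itself; the transition source is the other structure.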
+ ASSERT(kind() == Transition);
+ ASSERT(m_oldStructure.size() <= 2);
+ for (unsigned i = m_oldStructure.size(); i--;) {
+ Structure* structure = m_oldStructure[i];
+ if (structure != m_newStructure)
+ return structure;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+
+ return nullptr;
+}
+
+bool PutByIdVariant::writesStructures() const
+{
+ switch (kind()) {
+ case Transition:
+ case Setter:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool PutByIdVariant::reallocatesStorage() const
+{
+ switch (kind()) {
+ case Transition:
+ return oldStructureForTransition()->outOfLineCapacity() != newStructure()->outOfLineCapacity();
+ case Setter:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool PutByIdVariant::makesCalls() const
+{
+ return kind() == Setter;
+}
+
+bool PutByIdVariant::attemptToMerge(const PutByIdVariant& other)
+{
+ if (m_offset != other.m_offset)
+ return false;
+
+ if (m_requiredType != other.m_requiredType)
+ return false;
+
+ switch (m_kind) {
+ case Replace: {
+ switch (other.m_kind) {
+ case Replace: {
+ ASSERT(m_conditionSet.isEmpty());
+ ASSERT(other.m_conditionSet.isEmpty());
+
+ m_oldStructure.merge(other.m_oldStructure);
+ return true;
+ }
+
+ case Transition: {
+ PutByIdVariant newVariant = other;
+ if (newVariant.attemptToMergeTransitionWithReplace(*this)) {
+ *this = newVariant;
+ return true;
+ }
+ return false;
+ }
+
+ default:
+ return false;
+ }
+ }
+
+ case Transition:
+ switch (other.m_kind) {
+ case Replace:
+ return attemptToMergeTransitionWithReplace(other);
+
+ default:
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+bool PutByIdVariant::attemptToMergeTransitionWithReplace(const PutByIdVariant& replace)
+{
+ ASSERT(m_kind == Transition);
+ ASSERT(replace.m_kind == Replace);
+ ASSERT(m_offset == replace.m_offset);
+ ASSERT(!replace.writesStructures());
+ ASSERT(!replace.reallocatesStorage());
+ ASSERT(replace.conditionSet().isEmpty());
+
+ // This sort of merging only works when we have one path along which we add a new field which
+ // transitions to structure S while the other path was already on structure S. This doesn't
+ // work if we need to reallocate anything or if the replace path is polymorphic.
+
+ if (reallocatesStorage())
+ return false;
+
+ if (replace.m_oldStructure.onlyStructure() != m_newStructure)
+ return false;
+
+ m_oldStructure.merge(m_newStructure);
+ return true;
+}
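+
+// Worked example (with hypothetical structures S1, S2): one access-site case
+// is a Transition {S1} -> S2 that adds a property at some offset without
+// growing out-of-line storage, and another is a Replace on {S2} at the same
+// offset. The replace side is monomorphic on exactly m_newStructure, so the
+// code above folds S2 into the old-structure set, leaving a single merged
+// Transition {S1, S2} -> S2.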
+
+void PutByIdVariant::dump(PrintStream& out) const
+{
+ dumpInContext(out, 0);
+}
+
+void PutByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ switch (kind()) {
+ case NotSet:
+ out.print("<empty>");
+ return;
+
+ case Replace:
+ out.print(
+ "<Replace: ", inContext(structure(), context), ", offset = ", offset(), ", ",
+ inContext(requiredType(), context), ">");
+ return;
+
+ case Transition:
+ out.print(
+ "<Transition: ", inContext(oldStructure(), context), " -> ",
+ pointerDumpInContext(newStructure(), context), ", [",
+ inContext(m_conditionSet, context), "], offset = ", offset(), ", ",
+ inContext(requiredType(), context), ">");
+ return;
+
+ case Setter:
+ out.print(
+ "<Setter: ", inContext(structure(), context), ", [",
+ inContext(m_conditionSet, context), "]");
+ out.print(", offset = ", m_offset);
+ out.print(", call = ", *m_callLinkStatus);
+ out.print(">");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.h b/Source/JavaScriptCore/bytecode/PutByIdVariant.h
new file mode 100644
index 000000000..bda17bbf9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+
+class CallLinkStatus;
+
+class PutByIdVariant {
+public:
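+    // Replace stores to an existing property without changing the structure;
+    // Transition adds a property, moving oldStructure -> newStructure; Setter
+    // invokes a JavaScript setter and can therefore make calls.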
+ enum Kind {
+ NotSet,
+ Replace,
+ Transition,
+ Setter
+ };
+
+ PutByIdVariant()
+ : m_kind(NotSet)
+ , m_newStructure(nullptr)
+ , m_offset(invalidOffset)
+ {
+ }
+
+ PutByIdVariant(const PutByIdVariant&);
+ PutByIdVariant& operator=(const PutByIdVariant&);
+
+ static PutByIdVariant replace(const StructureSet&, PropertyOffset, const InferredType::Descriptor&);
+
+ static PutByIdVariant transition(
+ const StructureSet& oldStructure, Structure* newStructure,
+ const ObjectPropertyConditionSet&, PropertyOffset, const InferredType::Descriptor&);
+
+ static PutByIdVariant setter(
+ const StructureSet&, PropertyOffset, const ObjectPropertyConditionSet&,
+ std::unique_ptr<CallLinkStatus>);
+
+ Kind kind() const { return m_kind; }
+
+ bool isSet() const { return kind() != NotSet; }
+ bool operator!() const { return !isSet(); }
+
+ const StructureSet& structure() const
+ {
+ ASSERT(kind() == Replace || kind() == Setter);
+ return m_oldStructure;
+ }
+
+ const StructureSet& structureSet() const
+ {
+ return structure();
+ }
+
+ const StructureSet& oldStructure() const
+ {
+ ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+ return m_oldStructure;
+ }
+
+ StructureSet& oldStructure()
+ {
+ ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+ return m_oldStructure;
+ }
+
+ Structure* oldStructureForTransition() const;
+
+ Structure* newStructure() const
+ {
+ ASSERT(kind() == Transition);
+ return m_newStructure;
+ }
+
+ InferredType::Descriptor requiredType() const
+ {
+ return m_requiredType;
+ }
+
+ bool writesStructures() const;
+ bool reallocatesStorage() const;
+ bool makesCalls() const;
+
+ const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+
+ // We don't support intrinsics for Setters (it would be sweet if we did) but we need this for templated helpers.
+ Intrinsic intrinsic() const { return NoIntrinsic; }
+
+ // This is needed for templated helpers.
+ bool isPropertyUnset() const { return false; }
+
+ PropertyOffset offset() const
+ {
+ ASSERT(isSet());
+ return m_offset;
+ }
+
+ CallLinkStatus* callLinkStatus() const
+ {
+ ASSERT(kind() == Setter);
+ return m_callLinkStatus.get();
+ }
+
+ bool attemptToMerge(const PutByIdVariant& other);
+
+ void dump(PrintStream&) const;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+
+private:
+ bool attemptToMergeTransitionWithReplace(const PutByIdVariant& replace);
+
+ Kind m_kind;
+ StructureSet m_oldStructure;
+ Structure* m_newStructure;
+ ObjectPropertyConditionSet m_conditionSet;
+ PropertyOffset m_offset;
+ InferredType::Descriptor m_requiredType;
+ std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PutKind.h b/Source/JavaScriptCore/bytecode/PutKind.h
index 7a1dd642e..611279f60 100644
--- a/Source/JavaScriptCore/bytecode/PutKind.h
+++ b/Source/JavaScriptCore/bytecode/PutKind.h
@@ -23,14 +23,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef PutKind_h
-#define PutKind_h
+#pragma once
namespace JSC {
enum PutKind { Direct, NotDirect };
} // namespace JSC
-
-#endif // PutKind_h
-
diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
index 121caf2c2..fcb86c0d0 100644
--- a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
+++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ReduceWhitespace_h
-#define ReduceWhitespace_h
+#pragma once
#include <wtf/text/CString.h>
@@ -34,5 +33,3 @@ namespace JSC {
CString reduceWhitespace(const CString&);
} // namespace JSC
-
-#endif // ReduceWhitespace_h
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
deleted file mode 100644
index d18dbc1ff..000000000
--- a/Source/JavaScriptCore/bytecode/SamplingTool.cpp
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "SamplingTool.h"
-
-#include "CodeBlock.h"
-#include "Interpreter.h"
-#include "Opcode.h"
-
-#if !OS(WINDOWS)
-#include <unistd.h>
-#endif
-
-namespace JSC {
-
-#if ENABLE(SAMPLING_FLAGS)
-
-void SamplingFlags::sample()
-{
- uint32_t mask = static_cast<uint32_t>(1 << 31);
- unsigned index;
-
- for (index = 0; index < 32; ++index) {
- if (mask & s_flags)
- break;
- mask >>= 1;
- }
-
- s_flagCounts[32 - index]++;
-}
-
-void SamplingFlags::start()
-{
- for (unsigned i = 0; i <= 32; ++i)
- s_flagCounts[i] = 0;
-}
-void SamplingFlags::stop()
-{
- uint64_t total = 0;
- for (unsigned i = 0; i <= 32; ++i)
- total += s_flagCounts[i];
-
- if (total) {
- dataLogF("\nSamplingFlags: sample counts with flags set: (%lld total)\n", total);
- for (unsigned i = 0; i <= 32; ++i) {
- if (s_flagCounts[i])
- dataLogF(" [ %02d ] : %lld\t\t(%03.2f%%)\n", i, s_flagCounts[i], (100.0 * s_flagCounts[i]) / total);
- }
- dataLogF("\n");
- } else
- dataLogF("\nSamplingFlags: no samples.\n\n");
-}
-uint64_t SamplingFlags::s_flagCounts[33];
-
-#else
-void SamplingFlags::start() {}
-void SamplingFlags::stop() {}
-#endif
-
-#if ENABLE(SAMPLING_REGIONS)
-volatile uintptr_t SamplingRegion::s_currentOrReserved;
-Spectrum<const char*>* SamplingRegion::s_spectrum;
-unsigned long SamplingRegion::s_noneOfTheAbove;
-unsigned SamplingRegion::s_numberOfSamplesSinceDump;
-
-SamplingRegion::Locker::Locker()
-{
- uintptr_t previous;
- while (true) {
- previous = s_currentOrReserved;
- if (previous & 1) {
-#if OS(UNIX)
- sched_yield();
-#endif
- continue;
- }
- if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous | 1))
- break;
- }
-}
-
-SamplingRegion::Locker::~Locker()
-{
- // We don't need the CAS, but we do it out of an
- // abundance of caution (and because it gives us a memory fence, which is
- // never bad).
- uintptr_t previous;
- do {
- previous = s_currentOrReserved;
- } while (!WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous & ~1));
-}
-
-void SamplingRegion::sample()
-{
- // Make sure we lock s_current.
- Locker locker;
-
- // Create a spectrum if we don't have one already.
- if (!s_spectrum)
- s_spectrum = new Spectrum<const char*>();
-
- ASSERT(s_currentOrReserved & 1);
-
- // Walk the region stack, and record each region we see.
- SamplingRegion* region = bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1);
- if (region) {
- for (; region; region = region->m_previous)
- s_spectrum->add(region->m_name);
- } else
- s_noneOfTheAbove++;
-
- if (s_numberOfSamplesSinceDump++ == SamplingThread::s_hertz) {
- s_numberOfSamplesSinceDump = 0;
- dumpInternal();
- }
-}
-
-void SamplingRegion::dump()
-{
- Locker locker;
-
- dumpInternal();
-}
-
-void SamplingRegion::dumpInternal()
-{
- if (!s_spectrum) {
- dataLogF("\nSamplingRegion: was never sampled.\n\n");
- return;
- }
-
- Vector<Spectrum<const char*>::KeyAndCount> list = s_spectrum->buildList();
-
- unsigned long total = s_noneOfTheAbove;
- for (unsigned i = list.size(); i--;)
- total += list[i].count;
-
- dataLogF("\nSamplingRegion: sample counts for regions: (%lu samples)\n", total);
-
- for (unsigned i = list.size(); i--;)
- dataLogF(" %3.2lf%% %s\n", (100.0 * list[i].count) / total, list[i].key);
-}
-#else // ENABLE(SAMPLING_REGIONS)
-void SamplingRegion::dump() { }
-#endif // ENABLE(SAMPLING_REGIONS)
-
-/*
- Start with flag 16 set.
- By doing this the monitoring of lower valued flags will be masked out
-    until flag 16 is explicitly cleared.
-*/
-uint32_t SamplingFlags::s_flags = 1 << 15;
-
-
-#if OS(WINDOWS)
-
-static void sleepForMicroseconds(unsigned us)
-{
- unsigned ms = us / 1000;
- if (us && !ms)
- ms = 1;
- Sleep(ms);
-}
-
-#else
-
-static void sleepForMicroseconds(unsigned us)
-{
- usleep(us);
-}
-
-#endif
-
-static inline unsigned hertz2us(unsigned hertz)
-{
- return 1000000 / hertz;
-}
-
-
-SamplingTool* SamplingTool::s_samplingTool = 0;
-
-
-bool SamplingThread::s_running = false;
-unsigned SamplingThread::s_hertz = 10000;
-ThreadIdentifier SamplingThread::s_samplingThread;
-
-void SamplingThread::threadStartFunc(void*)
-{
- while (s_running) {
- sleepForMicroseconds(hertz2us(s_hertz));
-
-#if ENABLE(SAMPLING_FLAGS)
- SamplingFlags::sample();
-#endif
-#if ENABLE(SAMPLING_REGIONS)
- SamplingRegion::sample();
-#endif
-#if ENABLE(OPCODE_SAMPLING)
- SamplingTool::sample();
-#endif
- }
-}
-
-
-void SamplingThread::start(unsigned hertz)
-{
- ASSERT(!s_running);
- s_running = true;
- s_hertz = hertz;
-
- s_samplingThread = createThread(threadStartFunc, 0, "JavaScriptCore::Sampler");
-}
-
-void SamplingThread::stop()
-{
- ASSERT(s_running);
- s_running = false;
- waitForThreadCompletion(s_samplingThread);
-}
-
-
-void ScriptSampleRecord::sample(CodeBlock* codeBlock, Instruction* vPC)
-{
- if (!m_samples) {
- m_size = codeBlock->instructions().size();
- m_samples = static_cast<int*>(calloc(m_size, sizeof(int)));
- m_codeBlock = codeBlock;
- }
-
- ++m_sampleCount;
-
-    unsigned offset = vPC - codeBlock->instructions().begin();
-    // Since we don't read and write codeBlock and vPC atomically, this check
-    // can fail if we sample mid op_call / op_ret.
-    if (offset < m_size) {
-        m_samples[offset]++;
- m_opcodeSampleCount++;
- }
-}
-
-void SamplingTool::doRun()
-{
- Sample sample(m_sample, m_codeBlock);
- ++m_sampleCount;
-
- if (sample.isNull())
- return;
-
- if (!sample.inHostFunction()) {
- unsigned opcodeID = m_interpreter->getOpcodeID(sample.vPC()[0].u.opcode);
-
- ++m_opcodeSampleCount;
- ++m_opcodeSamples[opcodeID];
-
- if (sample.inCTIFunction())
- m_opcodeSamplesInCTIFunctions[opcodeID]++;
- }
-
-#if ENABLE(CODEBLOCK_SAMPLING)
- if (CodeBlock* codeBlock = sample.codeBlock()) {
- MutexLocker locker(m_scriptSampleMapMutex);
- ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable());
- ASSERT(record);
- record->sample(codeBlock, sample.vPC());
- }
-#endif
-}
-
-void SamplingTool::sample()
-{
- s_samplingTool->doRun();
-}
-
-void SamplingTool::notifyOfScope(VM& vm, ScriptExecutable* script)
-{
-#if ENABLE(CODEBLOCK_SAMPLING)
- MutexLocker locker(m_scriptSampleMapMutex);
- m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(vm, script)));
-#else
- UNUSED_PARAM(vm);
- UNUSED_PARAM(script);
-#endif
-}
-
-void SamplingTool::setup()
-{
- s_samplingTool = this;
-}
-
-#if ENABLE(OPCODE_SAMPLING)
-
-struct OpcodeSampleInfo {
- OpcodeID opcode;
- long long count;
- long long countInCTIFunctions;
-};
-
-struct LineCountInfo {
- unsigned line;
- unsigned count;
-};
-
-static int compareOpcodeIndicesSampling(const void* left, const void* right)
-{
- const OpcodeSampleInfo* leftSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(left);
- const OpcodeSampleInfo* rightSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(right);
-
- return (leftSampleInfo->count < rightSampleInfo->count) ? 1 : (leftSampleInfo->count > rightSampleInfo->count) ? -1 : 0;
-}
-
-#if ENABLE(CODEBLOCK_SAMPLING)
-static int compareLineCountInfoSampling(const void* left, const void* right)
-{
- const LineCountInfo* leftLineCount = reinterpret_cast<const LineCountInfo*>(left);
- const LineCountInfo* rightLineCount = reinterpret_cast<const LineCountInfo*>(right);
-
- return (leftLineCount->line > rightLineCount->line) ? 1 : (leftLineCount->line < rightLineCount->line) ? -1 : 0;
-}
-
-static int compareScriptSampleRecords(const void* left, const void* right)
-{
- const ScriptSampleRecord* const leftValue = *static_cast<const ScriptSampleRecord* const *>(left);
- const ScriptSampleRecord* const rightValue = *static_cast<const ScriptSampleRecord* const *>(right);
-
- return (leftValue->m_sampleCount < rightValue->m_sampleCount) ? 1 : (leftValue->m_sampleCount > rightValue->m_sampleCount) ? -1 : 0;
-}
-#endif
-
-void SamplingTool::dump(ExecState* exec)
-{
- // Tidies up SunSpider output by removing short scripts - such a small number of samples would likely not be useful anyhow.
- if (m_sampleCount < 10)
- return;
-
- // (1) Build and sort 'opcodeSampleInfo' array.
-
- OpcodeSampleInfo opcodeSampleInfo[numOpcodeIDs];
- for (int i = 0; i < numOpcodeIDs; ++i) {
- opcodeSampleInfo[i].opcode = static_cast<OpcodeID>(i);
- opcodeSampleInfo[i].count = m_opcodeSamples[i];
- opcodeSampleInfo[i].countInCTIFunctions = m_opcodeSamplesInCTIFunctions[i];
- }
-
- qsort(opcodeSampleInfo, numOpcodeIDs, sizeof(OpcodeSampleInfo), compareOpcodeIndicesSampling);
-
- // (2) Print Opcode sampling results.
-
- dataLogF("\nBytecode samples [*]\n");
- dataLogF(" sample %% of %% of | cti cti %%\n");
- dataLogF("opcode count VM total | count of self\n");
- dataLogF("------------------------------------------------------- | ----------------\n");
-
- for (int i = 0; i < numOpcodeIDs; ++i) {
- long long count = opcodeSampleInfo[i].count;
- if (!count)
- continue;
-
- OpcodeID opcodeID = opcodeSampleInfo[i].opcode;
-
- const char* opcodeName = opcodeNames[opcodeID];
- const char* opcodePadding = padOpcodeName(opcodeID, 28);
- double percentOfVM = (static_cast<double>(count) * 100) / m_opcodeSampleCount;
- double percentOfTotal = (static_cast<double>(count) * 100) / m_sampleCount;
- long long countInCTIFunctions = opcodeSampleInfo[i].countInCTIFunctions;
- double percentInCTIFunctions = (static_cast<double>(countInCTIFunctions) * 100) / count;
-        dataLogF("%s:%s%-6lld %.3f%%\t%.3f%%\t | %-6lld %.3f%%\n", opcodeName, opcodePadding, count, percentOfVM, percentOfTotal, countInCTIFunctions, percentInCTIFunctions);
- }
-
- dataLogF("\n[*] Samples inside host code are not charged to any Bytecode.\n\n");
- dataLogF("\tSamples inside VM:\t\t%lld / %lld (%.3f%%)\n", m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_opcodeSampleCount) * 100) / m_sampleCount);
- dataLogF("\tSamples inside host code:\t%lld / %lld (%.3f%%)\n\n", m_sampleCount - m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_sampleCount - m_opcodeSampleCount) * 100) / m_sampleCount);
- dataLogF("\tsample count:\tsamples inside this opcode\n");
- dataLogF("\t%% of VM:\tsample count / all opcode samples\n");
- dataLogF("\t%% of total:\tsample count / all samples\n");
- dataLogF("\t--------------\n");
- dataLogF("\tcti count:\tsamples inside a CTI function called by this opcode\n");
- dataLogF("\tcti %% of self:\tcti count / sample count\n");
-
-#if ENABLE(CODEBLOCK_SAMPLING)
-
- // (3) Build and sort 'codeBlockSamples' array.
-
- int scopeCount = m_scopeSampleMap->size();
- Vector<ScriptSampleRecord*> codeBlockSamples(scopeCount);
- ScriptSampleRecordMap::iterator iter = m_scopeSampleMap->begin();
- for (int i = 0; i < scopeCount; ++i, ++iter)
- codeBlockSamples[i] = iter->value.get();
-
- qsort(codeBlockSamples.begin(), scopeCount, sizeof(ScriptSampleRecord*), compareScriptSampleRecords);
-
- // (4) Print data from 'codeBlockSamples' array.
-
- dataLogF("\nCodeBlock samples\n\n");
-
- for (int i = 0; i < scopeCount; ++i) {
- ScriptSampleRecord* record = codeBlockSamples[i];
- CodeBlock* codeBlock = record->m_codeBlock;
-
- double blockPercent = (record->m_sampleCount * 100.0) / m_sampleCount;
-
- if (blockPercent >= 1) {
- //Instruction* code = codeBlock->instructions().begin();
- dataLogF("#%d: %s:%d: %d / %lld (%.3f%%)\n", i + 1, record->m_executable->sourceURL().utf8().data(), codeBlock->lineNumberForBytecodeOffset(0), record->m_sampleCount, m_sampleCount, blockPercent);
- if (i < 10) {
- HashMap<unsigned,unsigned> lineCounts;
- codeBlock->dump(exec);
-
- dataLogF(" Opcode and line number samples [*]\n\n");
- for (unsigned op = 0; op < record->m_size; ++op) {
- int count = record->m_samples[op];
- if (count) {
- dataLogF(" [% 4d] has sample count: % 4d\n", op, count);
- unsigned line = codeBlock->lineNumberForBytecodeOffset(op);
- lineCounts.set(line, (lineCounts.contains(line) ? lineCounts.get(line) : 0) + count);
- }
- }
- dataLogF("\n");
-
- int linesCount = lineCounts.size();
- Vector<LineCountInfo> lineCountInfo(linesCount);
- int lineno = 0;
- for (HashMap<unsigned,unsigned>::iterator iter = lineCounts.begin(); iter != lineCounts.end(); ++iter, ++lineno) {
- lineCountInfo[lineno].line = iter->key;
- lineCountInfo[lineno].count = iter->value;
- }
-
- qsort(lineCountInfo.begin(), linesCount, sizeof(LineCountInfo), compareLineCountInfoSampling);
-
- for (lineno = 0; lineno < linesCount; ++lineno) {
- dataLogF(" Line #%d has sample count %d.\n", lineCountInfo[lineno].line, lineCountInfo[lineno].count);
- }
- dataLogF("\n");
- dataLogF(" [*] Samples inside host code are charged to the calling Bytecode.\n");
- dataLogF(" Samples on a call / return boundary are not charged to a specific opcode or line.\n\n");
- dataLogF(" Samples on a call / return boundary: %d / %d (%.3f%%)\n\n", record->m_sampleCount - record->m_opcodeSampleCount, record->m_sampleCount, (static_cast<double>(record->m_sampleCount - record->m_opcodeSampleCount) * 100) / record->m_sampleCount);
- }
- }
- }
-#else
- UNUSED_PARAM(exec);
-#endif
-}
-
-#else
-
-void SamplingTool::dump(ExecState*)
-{
-}
-
-#endif
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h
deleted file mode 100644
index 1dfb8ecca..000000000
--- a/Source/JavaScriptCore/bytecode/SamplingTool.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SamplingTool_h
-#define SamplingTool_h
-
-#include "Strong.h"
-#include "Opcode.h"
-#include "SamplingCounter.h"
-#include <wtf/Assertions.h>
-#include <wtf/Atomics.h>
-#include <wtf/HashMap.h>
-#include <wtf/MainThread.h>
-#include <wtf/Spectrum.h>
-#include <wtf/Threading.h>
-
-namespace JSC {
-
- class ScriptExecutable;
-
- class SamplingFlags {
- public:
- JS_EXPORT_PRIVATE static void start();
- JS_EXPORT_PRIVATE static void stop();
-
-#if ENABLE(SAMPLING_FLAGS)
- static void setFlag(unsigned flag)
- {
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- s_flags |= 1u << (flag - 1);
- }
-
- static void clearFlag(unsigned flag)
- {
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- s_flags &= ~(1u << (flag - 1));
- }
-
- static void sample();
-
- class ScopedFlag {
- public:
- ScopedFlag(int flag)
- : m_flag(flag)
- {
- setFlag(flag);
- }
-
- ~ScopedFlag()
- {
- clearFlag(m_flag);
- }
-
- private:
- int m_flag;
- };
-
- static const void* addressOfFlags()
- {
- return &s_flags;
- }
-
-#endif
- private:
- JS_EXPORTDATA static uint32_t s_flags;
-#if ENABLE(SAMPLING_FLAGS)
- static uint64_t s_flagCounts[33];
-#endif
- };
-
-#if ENABLE(SAMPLING_REGIONS)
- class SamplingRegion {
- public:
- // Create a scoped sampling region using a C string constant name that describes
- // what you are doing. This must be a string constant that persists for the
- // lifetime of the process and is immutable.
- SamplingRegion(const char* name)
- {
- if (!isMainThread()) {
- m_name = 0;
- return;
- }
-
- m_name = name;
- exchangeCurrent(this, &m_previous);
- ASSERT(!m_previous || m_previous > this);
- }
-
- ~SamplingRegion()
- {
- if (!m_name)
- return;
-
- ASSERT(bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1) == this);
- exchangeCurrent(m_previous);
- }
-
- static void sample();
-
- JS_EXPORT_PRIVATE static void dump();
-
- private:
- const char* m_name;
- SamplingRegion* m_previous;
-
- static void exchangeCurrent(SamplingRegion* current, SamplingRegion** previousPtr = 0)
- {
- uintptr_t previous;
- while (true) {
- previous = s_currentOrReserved;
-
- // If it's reserved (i.e. sampling thread is reading it), loop around.
- if (previous & 1) {
-#if OS(UNIX)
- sched_yield();
-#endif
- continue;
- }
-
- // If we're going to CAS, then make sure previous is set.
- if (previousPtr)
- *previousPtr = bitwise_cast<SamplingRegion*>(previous);
-
- if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, bitwise_cast<uintptr_t>(current)))
- break;
- }
- }
-
- static void dumpInternal();
-
- class Locker {
- public:
- Locker();
- ~Locker();
- };
-
- static volatile uintptr_t s_currentOrReserved;
-
- // rely on identity hashing of string constants
- static Spectrum<const char*>* s_spectrum;
-
- static unsigned long s_noneOfTheAbove;
-
- static unsigned s_numberOfSamplesSinceDump;
- };
-#else // ENABLE(SAMPLING_REGIONS)
- class SamplingRegion {
- public:
- SamplingRegion(const char*) { }
- JS_EXPORT_PRIVATE void dump();
- };
-#endif // ENABLE(SAMPLING_REGIONS)
-
- class CodeBlock;
- class ExecState;
- class Interpreter;
- class ScopeNode;
- struct Instruction;
-
- struct ScriptSampleRecord {
- ScriptSampleRecord(VM& vm, ScriptExecutable* executable)
- : m_executable(vm, executable)
- , m_codeBlock(0)
- , m_sampleCount(0)
- , m_opcodeSampleCount(0)
- , m_samples(0)
- , m_size(0)
- {
- }
-
- ~ScriptSampleRecord()
- {
- if (m_samples)
- free(m_samples);
- }
-
- void sample(CodeBlock*, Instruction*);
-
- Strong<ScriptExecutable> m_executable;
- CodeBlock* m_codeBlock;
- int m_sampleCount;
- int m_opcodeSampleCount;
- int* m_samples;
- unsigned m_size;
- };
-
- typedef HashMap<ScriptExecutable*, std::unique_ptr<ScriptSampleRecord>> ScriptSampleRecordMap;
-
- class SamplingThread {
- public:
- // Sampling thread state.
- static bool s_running;
- static unsigned s_hertz;
- static ThreadIdentifier s_samplingThread;
-
- JS_EXPORT_PRIVATE static void start(unsigned hertz=10000);
- JS_EXPORT_PRIVATE static void stop();
-
- static void threadStartFunc(void*);
- };
-
- class SamplingTool {
- public:
- friend struct CallRecord;
-
-#if ENABLE(OPCODE_SAMPLING)
- class CallRecord {
- WTF_MAKE_NONCOPYABLE(CallRecord);
- public:
- CallRecord(SamplingTool* samplingTool, bool isHostCall = false)
- : m_samplingTool(samplingTool)
- , m_savedSample(samplingTool->m_sample)
- , m_savedCodeBlock(samplingTool->m_codeBlock)
- {
-            if (isHostCall)
- samplingTool->m_sample |= 0x1;
- }
-
- ~CallRecord()
- {
- m_samplingTool->m_sample = m_savedSample;
- m_samplingTool->m_codeBlock = m_savedCodeBlock;
- }
-
- private:
- SamplingTool* m_samplingTool;
- intptr_t m_savedSample;
- CodeBlock* m_savedCodeBlock;
- };
-#else
- class CallRecord {
- WTF_MAKE_NONCOPYABLE(CallRecord);
- public:
- CallRecord(SamplingTool*, bool = false)
- {
- }
- };
-#endif
-
- SamplingTool(Interpreter* interpreter)
- : m_interpreter(interpreter)
- , m_codeBlock(0)
- , m_sample(0)
- , m_sampleCount(0)
- , m_opcodeSampleCount(0)
-#if ENABLE(CODEBLOCK_SAMPLING)
- , m_scopeSampleMap(adoptPtr(new ScriptSampleRecordMap))
-#endif
- {
- memset(m_opcodeSamples, 0, sizeof(m_opcodeSamples));
- memset(m_opcodeSamplesInCTIFunctions, 0, sizeof(m_opcodeSamplesInCTIFunctions));
- }
-
- JS_EXPORT_PRIVATE void setup();
- void dump(ExecState*);
-
- void notifyOfScope(VM&, ScriptExecutable* scope);
-
- void sample(CodeBlock* codeBlock, Instruction* vPC)
- {
- ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
- m_codeBlock = codeBlock;
- m_sample = reinterpret_cast<intptr_t>(vPC);
- }
-
- CodeBlock** codeBlockSlot() { return &m_codeBlock; }
- intptr_t* sampleSlot() { return &m_sample; }
-
- void* encodeSample(Instruction* vPC, bool inCTIFunction = false, bool inHostFunction = false)
- {
- ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
- return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(vPC) | (static_cast<intptr_t>(inCTIFunction) << 1) | static_cast<intptr_t>(inHostFunction));
- }
-
- static void sample();
-
- private:
- class Sample {
- public:
- Sample(volatile intptr_t sample, CodeBlock* volatile codeBlock)
- : m_sample(sample)
- , m_codeBlock(codeBlock)
- {
- }
-
- bool isNull() { return !m_sample; }
- CodeBlock* codeBlock() { return m_codeBlock; }
- Instruction* vPC() { return reinterpret_cast<Instruction*>(m_sample & ~0x3); }
- bool inHostFunction() { return m_sample & 0x1; }
- bool inCTIFunction() { return m_sample & 0x2; }
-
- private:
- intptr_t m_sample;
- CodeBlock* m_codeBlock;
- };
-
- void doRun();
- static SamplingTool* s_samplingTool;
-
- Interpreter* m_interpreter;
-
- // State tracked by the main thread, used by the sampling thread.
- CodeBlock* m_codeBlock;
- intptr_t m_sample;
-
- // Gathered sample data.
- long long m_sampleCount;
- long long m_opcodeSampleCount;
- unsigned m_opcodeSamples[numOpcodeIDs];
- unsigned m_opcodeSamplesInCTIFunctions[numOpcodeIDs];
-
-#if ENABLE(CODEBLOCK_SAMPLING)
- Mutex m_scriptSampleMapMutex;
- OwnPtr<ScriptSampleRecordMap> m_scopeSampleMap;
-#endif
- };
-
-} // namespace JSC
-
-#endif // SamplingTool_h
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
index 7789653f0..dc5a363b6 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
@@ -28,6 +28,7 @@
#include "CodeBlock.h"
#include "JSGlobalObject.h"
+#include "JSCInlines.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.h b/Source/JavaScriptCore/bytecode/SpecialPointer.h
index c18a6e904..21329ec43 100644
--- a/Source/JavaScriptCore/bytecode/SpecialPointer.h
+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SpecialPointer_h
-#define SpecialPointer_h
+#pragma once
namespace JSC {
@@ -41,6 +40,11 @@ enum Pointer {
};
} // namespace Special
+enum class LinkTimeConstant {
+ ThrowTypeErrorFunction,
+};
+const unsigned LinkTimeConstantCount = 1;
+
inline bool pointerIsFunction(Special::Pointer pointer)
{
ASSERT_UNUSED(pointer, pointer < Special::TableSize);
@@ -57,6 +61,3 @@ void* actualPointerFor(JSGlobalObject*, Special::Pointer);
void* actualPointerFor(CodeBlock*, Special::Pointer);
} // namespace JSC
-
-#endif // SpecialPointer_h
-
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
index 3917cca0f..45846e8c7 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -29,13 +29,17 @@
#include "config.h"
#include "SpeculatedType.h"
-#include "Arguments.h"
+#include "DirectArguments.h"
#include "JSArray.h"
+#include "JSCInlines.h"
#include "JSFunction.h"
-#include "Operations.h"
+#include "JSMap.h"
+#include "JSSet.h"
+#include "ProxyObject.h"
+#include "RegExpObject.h"
+#include "ScopedArguments.h"
#include "StringObject.h"
#include "ValueProfile.h"
-#include <wtf/BoundsCheckedPointer.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -127,8 +131,13 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
else
isTop = false;
- if (value & SpecArguments)
- myOut.print("Arguments");
+ if (value & SpecDirectArguments)
+ myOut.print("Directarguments");
+ else
+ isTop = false;
+
+ if (value & SpecScopedArguments)
+ myOut.print("Scopedarguments");
else
isTop = false;
@@ -136,6 +145,31 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
myOut.print("Stringobject");
else
isTop = false;
+
+ if (value & SpecRegExpObject)
+ myOut.print("Regexpobject");
+ else
+ isTop = false;
+
+ if (value & SpecMapObject)
+ myOut.print("Mapobject");
+ else
+ isTop = false;
+
+ if (value & SpecSetObject)
+ myOut.print("Setobject");
+ else
+ isTop = false;
+
+ if (value & SpecProxyObject)
+ myOut.print("Proxyobject");
+ else
+ isTop = false;
+
+ if (value & SpecDerivedArray)
+ myOut.print("Derivedarray");
+ else
+ isTop = false;
}
if ((value & SpecString) == SpecString)
@@ -151,21 +185,35 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
else
isTop = false;
}
+
+ if (value & SpecSymbol)
+ myOut.print("Symbol");
+ else
+ isTop = false;
}
- if (value & SpecInt32)
+    if ((value & SpecInt32Only) == SpecInt32Only)
myOut.print("Int32");
- else
- isTop = false;
+ else {
+ if (value & SpecBoolInt32)
+ myOut.print("Boolint32");
+ else
+ isTop = false;
+
+ if (value & SpecNonBoolInt32)
+ myOut.print("Nonboolint32");
+ else
+ isTop = false;
+ }
- if (value & SpecInt52)
+ if (value & SpecInt52Only)
myOut.print("Int52");
- if ((value & SpecDouble) == SpecDouble)
- myOut.print("Double");
+ if ((value & SpecBytecodeDouble) == SpecBytecodeDouble)
+ myOut.print("Bytecodedouble");
else {
- if (value & SpecInt52AsDouble)
- myOut.print("Int52asdouble");
+ if (value & SpecAnyIntAsDouble)
+ myOut.print("AnyIntAsDouble");
else
isTop = false;
@@ -174,12 +222,15 @@ void dumpSpeculation(PrintStream& out, SpeculatedType value)
else
isTop = false;
- if (value & SpecDoubleNaN)
- myOut.print("Doublenan");
+ if (value & SpecDoublePureNaN)
+ myOut.print("Doublepurenan");
else
isTop = false;
}
+ if (value & SpecDoubleImpureNaN)
+ out.print("Doubleimpurenan");
+
if (value & SpecBoolean)
myOut.print("Bool");
else
@@ -229,24 +280,30 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
return "<Float32array>";
if (isFloat64ArraySpeculation(prediction))
return "<Float64array>";
- if (isArgumentsSpeculation(prediction))
- return "<Arguments>";
+ if (isDirectArgumentsSpeculation(prediction))
+ return "<DirectArguments>";
+ if (isScopedArgumentsSpeculation(prediction))
+ return "<ScopedArguments>";
if (isStringObjectSpeculation(prediction))
return "<StringObject>";
+ if (isRegExpObjectSpeculation(prediction))
+ return "<RegExpObject>";
if (isStringOrStringObjectSpeculation(prediction))
return "<StringOrStringObject>";
if (isObjectSpeculation(prediction))
return "<Object>";
if (isCellSpeculation(prediction))
return "<Cell>";
+ if (isBoolInt32Speculation(prediction))
+ return "<BoolInt32>";
if (isInt32Speculation(prediction))
return "<Int32>";
- if (isInt52AsDoubleSpeculation(prediction))
- return "<Int52AsDouble>";
+ if (isAnyIntAsDoubleSpeculation(prediction))
+ return "<AnyIntAsDouble>";
if (isInt52Speculation(prediction))
return "<Int52>";
- if (isMachineIntSpeculation(prediction))
- return "<MachineInt>";
+ if (isAnyIntSpeculation(prediction))
+ return "<AnyInt>";
if (isDoubleSpeculation(prediction))
return "<Double>";
if (isFullNumberSpeculation(prediction))
@@ -255,6 +312,8 @@ static const char* speculationToAbbreviatedString(SpeculatedType prediction)
return "<Boolean>";
if (isOtherSpeculation(prediction))
return "<Other>";
+ if (isMiscSpeculation(prediction))
+ return "<Misc>";
return "";
}
@@ -294,23 +353,47 @@ SpeculatedType speculationFromTypedArrayType(TypedArrayType type)
SpeculatedType speculationFromClassInfo(const ClassInfo* classInfo)
{
+ if (classInfo == JSString::info())
+ return SpecString;
+
+ if (classInfo == Symbol::info())
+ return SpecSymbol;
+
if (classInfo == JSFinalObject::info())
return SpecFinalObject;
if (classInfo == JSArray::info())
return SpecArray;
- if (classInfo == Arguments::info())
- return SpecArguments;
+ if (classInfo == DirectArguments::info())
+ return SpecDirectArguments;
+
+ if (classInfo == ScopedArguments::info())
+ return SpecScopedArguments;
if (classInfo == StringObject::info())
return SpecStringObject;
+
+ if (classInfo == RegExpObject::info())
+ return SpecRegExpObject;
+
+ if (classInfo == JSMap::info())
+ return SpecMapObject;
+
+ if (classInfo == JSSet::info())
+ return SpecSetObject;
+
+ if (classInfo == ProxyObject::info())
+ return SpecProxyObject;
if (classInfo->isSubClassOf(JSFunction::info()))
return SpecFunction;
if (isTypedView(classInfo->typedArrayStorageType))
return speculationFromTypedArrayType(classInfo->typedArrayStorageType);
+
+ if (classInfo->isSubClassOf(JSArray::info()))
+ return SpecDerivedArray;
if (classInfo->isSubClassOf(JSObject::info()))
return SpecObjectOther;
@@ -322,14 +405,19 @@ SpeculatedType speculationFromStructure(Structure* structure)
{
if (structure->typeInfo().type() == StringType)
return SpecString;
+ if (structure->typeInfo().type() == SymbolType)
+ return SpecSymbol;
+ if (structure->typeInfo().type() == DerivedArrayType)
+ return SpecDerivedArray;
return speculationFromClassInfo(structure->classInfo());
}
SpeculatedType speculationFromCell(JSCell* cell)
{
- if (JSString* string = jsDynamicCast<JSString*>(cell)) {
+ if (cell->isString()) {
+ JSString* string = jsCast<JSString*>(cell);
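+        // (The cell type was just checked via isString(), so the unchecked
+        // jsCast suffices here, avoiding jsDynamicCast's slower dynamic check.)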
if (const StringImpl* impl = string->tryGetValueImpl()) {
- if (impl->isIdentifier())
+ if (impl->isAtomic())
return SpecStringIdent;
}
return SpecStringVar;
@@ -341,14 +429,17 @@ SpeculatedType speculationFromValue(JSValue value)
{
if (value.isEmpty())
return SpecEmpty;
- if (value.isInt32())
- return SpecInt32;
+ if (value.isInt32()) {
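+        // asInt32() & ~1 is nonzero exactly when the value is neither 0 nor 1.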
+ if (value.asInt32() & ~1)
+ return SpecNonBoolInt32;
+ return SpecBoolInt32;
+ }
if (value.isDouble()) {
double number = value.asNumber();
if (number != number)
- return SpecDoubleNaN;
- if (value.isMachineInt())
- return SpecInt52AsDouble;
+ return SpecDoublePureNaN;
+ if (value.isAnyInt())
+ return SpecAnyIntAsDouble;
return SpecNonIntAsDouble;
}
if (value.isCell())
@@ -391,5 +482,163 @@ TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType type)
return NotTypedArray;
}
+SpeculatedType speculationFromJSType(JSType type)
+{
+ switch (type) {
+ case StringType:
+ return SpecString;
+ case SymbolType:
+ return SpecSymbol;
+ case ArrayType:
+ return SpecArray;
+ case DerivedArrayType:
+ return SpecDerivedArray;
+ case RegExpObjectType:
+ return SpecRegExpObject;
+ case ProxyObjectType:
+ return SpecProxyObject;
+ case JSMapType:
+ return SpecMapObject;
+ case JSSetType:
+ return SpecSetObject;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ return SpecNone;
+}
+
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType type)
+{
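+    // Strict equality cannot distinguish different encodings of the same value:
+    // an integer may be speculated as Int32/Int52 or as a boxed double, and a
+    // string as atomic (an identifier) or not. Widen each such group to its union.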
+ if (type & (SpecAnyInt | SpecAnyIntAsDouble))
+ type |= (SpecAnyInt | SpecAnyIntAsDouble);
+ if (type & SpecString)
+ type |= SpecString;
+ return type;
+}
+
+bool valuesCouldBeEqual(SpeculatedType a, SpeculatedType b)
+{
+ a = leastUpperBoundOfStrictlyEquivalentSpeculations(a);
+ b = leastUpperBoundOfStrictlyEquivalentSpeculations(b);
+
+ // Anything could be equal to a string.
+ if (a & SpecString)
+ return true;
+ if (b & SpecString)
+ return true;
+
+ // If both sides are definitely only objects, then equality is fairly sane.
+ if (isObjectSpeculation(a) && isObjectSpeculation(b))
+ return !!(a & b);
+
+ // If either side could be an object or not, then we could call toString or
+ // valueOf, which could return anything.
+ if (a & SpecObject)
+ return true;
+ if (b & SpecObject)
+ return true;
+
+ // Neither side is an object or string, so the world is relatively sane.
+ return !!(a & b);
+}
+
+SpeculatedType typeOfDoubleSum(SpeculatedType a, SpeculatedType b)
+{
+ SpeculatedType result = a | b;
+ // Impure NaN could become pure NaN during addition because addition may clear bits.
+ if (result & SpecDoubleImpureNaN)
+ result |= SpecDoublePureNaN;
+ // Values could overflow, or fractions could become integers.
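+    // (SpecDoubleReal = SpecAnyIntAsDouble | SpecNonIntAsDouble, so this widens
+    // either real-double bit to both.)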
+ if (result & SpecDoubleReal)
+ result |= SpecDoubleReal;
+ return result;
+}
+
+SpeculatedType typeOfDoubleDifference(SpeculatedType a, SpeculatedType b)
+{
+ return typeOfDoubleSum(a, b);
+}
+
+SpeculatedType typeOfDoubleProduct(SpeculatedType a, SpeculatedType b)
+{
+ return typeOfDoubleSum(a, b);
+}
+
+static SpeculatedType polluteDouble(SpeculatedType value)
+{
+ // Impure NaN could become pure NaN because the operation could clear some bits.
+ if (value & SpecDoubleImpureNaN)
+ value |= SpecDoubleNaN;
+ // Values could overflow, fractions could become integers, or an error could produce
+ // PureNaN.
+ if (value & SpecDoubleReal)
+ value |= SpecDoubleReal | SpecDoublePureNaN;
+ return value;
+}
+
+SpeculatedType typeOfDoubleQuotient(SpeculatedType a, SpeculatedType b)
+{
+ return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleMinMax(SpeculatedType a, SpeculatedType b)
+{
+ SpeculatedType result = a | b;
+    // Impure NaN could become pure NaN, because the min/max operation may clear bits.
+ if (result & SpecDoubleImpureNaN)
+ result |= SpecDoublePureNaN;
+ return result;
+}
+
+SpeculatedType typeOfDoubleNegation(SpeculatedType value)
+{
+ // Changing bits can make pure NaN impure and vice versa:
+ // 0xefff000000000000 (pure) - 0xffff000000000000 (impure)
+ if (value & SpecDoubleNaN)
+ value |= SpecDoubleNaN;
+ // We could get negative zero, which mixes SpecAnyIntAsDouble and SpecNotIntAsDouble.
+ // We could also overflow a large negative int into something that is no longer
+ // representable as an int.
+ if (value & SpecDoubleReal)
+ value |= SpecDoubleReal;
+ return value;
+}
+
+SpeculatedType typeOfDoubleAbs(SpeculatedType value)
+{
+ return typeOfDoubleNegation(value);
+}
+
+SpeculatedType typeOfDoubleRounding(SpeculatedType value)
+{
+    // A pure double NaN can become impure when converted to float and back,
+    // and vice versa.
+ if (value & SpecDoubleNaN)
+ value |= SpecDoubleNaN;
+ // We might lose bits, which leads to a value becoming integer-representable.
+ if (value & SpecNonIntAsDouble)
+ value |= SpecAnyIntAsDouble;
+ return value;
+}
+
+SpeculatedType typeOfDoublePow(SpeculatedType xValue, SpeculatedType yValue)
+{
+    // Math.pow() always returns NaN if the exponent is NaN, unlike std::pow().
+ // We always set a pure NaN in that case.
+ if (yValue & SpecDoubleNaN)
+ xValue |= SpecDoublePureNaN;
+ return polluteDouble(xValue);
+}
+
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType a, SpeculatedType b)
+{
+ return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType value)
+{
+ return polluteDouble(value);
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.h b/Source/JavaScriptCore/bytecode/SpeculatedType.h
index eaf0af37a..e23fd2c5f 100644
--- a/Source/JavaScriptCore/bytecode/SpeculatedType.h
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,8 +26,7 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SpeculatedType_h
-#define SpeculatedType_h
+#pragma once
#include "JSCJSValue.h"
#include "TypedArrayType.h"
@@ -37,49 +36,62 @@ namespace JSC {
class Structure;
-typedef uint32_t SpeculatedType;
-static const SpeculatedType SpecNone = 0x00000000; // We don't know anything yet.
-static const SpeculatedType SpecFinalObject = 0x00000001; // It's definitely a JSFinalObject.
-static const SpeculatedType SpecArray = 0x00000002; // It's definitely a JSArray.
-static const SpeculatedType SpecFunction = 0x00000008; // It's definitely a JSFunction or one of its subclasses.
-static const SpeculatedType SpecInt8Array = 0x00000010; // It's definitely an Int8Array or one of its subclasses.
-static const SpeculatedType SpecInt16Array = 0x00000020; // It's definitely an Int16Array or one of its subclasses.
-static const SpeculatedType SpecInt32Array = 0x00000040; // It's definitely an Int32Array or one of its subclasses.
-static const SpeculatedType SpecUint8Array = 0x00000080; // It's definitely an Uint8Array or one of its subclasses.
-static const SpeculatedType SpecUint8ClampedArray = 0x00000100; // It's definitely an Uint8ClampedArray or one of its subclasses.
-static const SpeculatedType SpecUint16Array = 0x00000200; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecUint32Array = 0x00000400; // It's definitely an Uint32Array or one of its subclasses.
-static const SpeculatedType SpecFloat32Array = 0x00000800; // It's definitely an Uint16Array or one of its subclasses.
-static const SpeculatedType SpecFloat64Array = 0x00001000; // It's definitely an Uint16Array or one of its subclasses.
+typedef uint64_t SpeculatedType;
+static const SpeculatedType SpecNone = 0; // We don't know anything yet.
+static const SpeculatedType SpecFinalObject = 1ull << 0; // It's definitely a JSFinalObject.
+static const SpeculatedType SpecArray = 1ull << 1; // It's definitely a JSArray.
+static const SpeculatedType SpecFunction = 1ull << 2; // It's definitely a JSFunction.
+static const SpeculatedType SpecInt8Array = 1ull << 3; // It's definitely an Int8Array or one of its subclasses.
+static const SpeculatedType SpecInt16Array = 1ull << 4; // It's definitely an Int16Array or one of its subclasses.
+static const SpeculatedType SpecInt32Array = 1ull << 5; // It's definitely an Int32Array or one of its subclasses.
+static const SpeculatedType SpecUint8Array = 1ull << 6; // It's definitely an Uint8Array or one of its subclasses.
+static const SpeculatedType SpecUint8ClampedArray = 1ull << 7; // It's definitely an Uint8ClampedArray or one of its subclasses.
+static const SpeculatedType SpecUint16Array = 1ull << 8; // It's definitely an Uint16Array or one of its subclasses.
+static const SpeculatedType SpecUint32Array = 1ull << 9; // It's definitely an Uint32Array or one of its subclasses.
+static const SpeculatedType SpecFloat32Array = 1ull << 10; // It's definitely a Float32Array or one of its subclasses.
+static const SpeculatedType SpecFloat64Array = 1ull << 11; // It's definitely a Float64Array or one of its subclasses.
static const SpeculatedType SpecTypedArrayView = SpecInt8Array | SpecInt16Array | SpecInt32Array | SpecUint8Array | SpecUint8ClampedArray | SpecUint16Array | SpecUint32Array | SpecFloat32Array | SpecFloat64Array;
-static const SpeculatedType SpecArguments = 0x00002000; // It's definitely an Arguments object.
-static const SpeculatedType SpecStringObject = 0x00004000; // It's definitely a StringObject.
-static const SpeculatedType SpecObjectOther = 0x00008000; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
-static const SpeculatedType SpecObject = 0x0000ffff; // Bitmask used for testing for any kind of object prediction.
-static const SpeculatedType SpecStringIdent = 0x00010000; // It's definitely a JSString, and it's an identifier.
-static const SpeculatedType SpecStringVar = 0x00020000; // It's definitely a JSString, and it's not an identifier.
-static const SpeculatedType SpecString = 0x00030000; // It's definitely a JSString.
-static const SpeculatedType SpecCellOther = 0x00040000; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString.
-static const SpeculatedType SpecCell = 0x0007ffff; // It's definitely a JSCell.
-static const SpeculatedType SpecInt32 = 0x00800000; // It's definitely an Int32.
-static const SpeculatedType SpecInt52 = 0x01000000; // It's definitely an Int52 and we intend it to unbox it.
-static const SpeculatedType SpecMachineInt = 0x01800000; // It's something that we can do machine int arithmetic on.
-static const SpeculatedType SpecInt52AsDouble = 0x02000000; // It's definitely an Int52 and it's inside a double.
-static const SpeculatedType SpecInteger = 0x03800000; // It's definitely some kind of integer.
-static const SpeculatedType SpecNonIntAsDouble = 0x04000000; // It's definitely not an Int52 but it's a real number and it's a double.
-static const SpeculatedType SpecDoubleReal = 0x06000000; // It's definitely a non-NaN double.
-static const SpeculatedType SpecDoubleNaN = 0x08000000; // It's definitely a NaN.
-static const SpeculatedType SpecDouble = 0x0e000000; // It's either a non-NaN or a NaN double.
-static const SpeculatedType SpecBytecodeRealNumber = 0x06800000; // It's either an Int32 or a DoubleReal.
-static const SpeculatedType SpecFullRealNumber = 0x07800000; // It's either an Int32 or a DoubleReal, or a Int52.
-static const SpeculatedType SpecBytecodeNumber = 0x0e800000; // It's either an Int32 or a Double.
-static const SpeculatedType SpecFullNumber = 0x0f800000; // It's either an Int32, Int52, or a Double.
-static const SpeculatedType SpecBoolean = 0x10000000; // It's definitely a Boolean.
-static const SpeculatedType SpecOther = 0x20000000; // It's definitely none of the above.
-static const SpeculatedType SpecHeapTop = 0x3effffff; // It can be any of the above, except for SpecInt52.
-static const SpeculatedType SpecEmpty = 0x40000000; // It's definitely an empty value marker.
-static const SpeculatedType SpecBytecodeTop = 0x7effffff; // It can be any of the above, except for SpecInt52.
-static const SpeculatedType SpecFullTop = 0x7fffffff; // It can be any of the above plus anything the DFG chooses.
+static const SpeculatedType SpecDirectArguments = 1ull << 12; // It's definitely a DirectArguments object.
+static const SpeculatedType SpecScopedArguments = 1ull << 13; // It's definitely a ScopedArguments object.
+static const SpeculatedType SpecStringObject = 1ull << 14; // It's definitely a StringObject.
+static const SpeculatedType SpecRegExpObject = 1ull << 15; // It's definitely a RegExpObject (and not any subclass of RegExpObject).
+static const SpeculatedType SpecMapObject = 1ull << 16; // It's definitely a Map object or one of its subclasses.
+static const SpeculatedType SpecSetObject = 1ull << 17; // It's definitely a Set object or one of its subclasses.
+static const SpeculatedType SpecProxyObject = 1ull << 18; // It's definitely a Proxy object or one of its subclasses.
+static const SpeculatedType SpecDerivedArray = 1ull << 19; // It's definitely a DerivedArray object.
+static const SpeculatedType SpecObjectOther = 1ull << 20; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
+static const SpeculatedType SpecObject = SpecFinalObject | SpecArray | SpecFunction | SpecTypedArrayView | SpecDirectArguments | SpecScopedArguments | SpecStringObject | SpecRegExpObject | SpecMapObject | SpecSetObject | SpecProxyObject | SpecDerivedArray | SpecObjectOther; // Bitmask used for testing for any kind of object prediction.
+static const SpeculatedType SpecStringIdent = 1ull << 21; // It's definitely a JSString, and it's an identifier.
+static const SpeculatedType SpecStringVar = 1ull << 22; // It's definitely a JSString, and it's not an identifier.
+static const SpeculatedType SpecString = SpecStringIdent | SpecStringVar; // It's definitely a JSString.
+static const SpeculatedType SpecSymbol = 1ull << 23; // It's definitely a Symbol.
+static const SpeculatedType SpecCellOther = 1ull << 24; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString or a Symbol. FIXME: This shouldn't be part of heap-top or bytecode-top. https://bugs.webkit.org/show_bug.cgi?id=133078
+static const SpeculatedType SpecCell = SpecObject | SpecString | SpecSymbol | SpecCellOther; // It's definitely a JSCell.
+static const SpeculatedType SpecBoolInt32 = 1ull << 25; // It's definitely an Int32 with value 0 or 1.
+static const SpeculatedType SpecNonBoolInt32 = 1ull << 26; // It's definitely an Int32 with value other than 0 or 1.
+static const SpeculatedType SpecInt32Only = SpecBoolInt32 | SpecNonBoolInt32; // It's definitely an Int32.
+static const SpeculatedType SpecInt52Only = 1ull << 27; // It's definitely an Int52 and we intend to unbox it. It's also definitely not an Int32.
+static const SpeculatedType SpecAnyInt = SpecInt32Only | SpecInt52Only; // It's something that we can do machine int arithmetic on.
+static const SpeculatedType SpecAnyIntAsDouble = 1ull << 28; // It's definitely an Int52 and it's inside a double.
+static const SpeculatedType SpecNonIntAsDouble = 1ull << 29; // It's definitely not an Int52 but it's a real number and it's a double.
+static const SpeculatedType SpecDoubleReal = SpecNonIntAsDouble | SpecAnyIntAsDouble; // It's definitely a non-NaN double.
+static const SpeculatedType SpecDoublePureNaN = 1ull << 30; // It's definitely a NaN that is safe to tag (i.e. pure).
+static const SpeculatedType SpecDoubleImpureNaN = 1ull << 31; // It's definitely a NaN that is unsafe to tag (i.e. impure).
+static const SpeculatedType SpecDoubleNaN = SpecDoublePureNaN | SpecDoubleImpureNaN; // It's definitely some kind of NaN.
+static const SpeculatedType SpecBytecodeDouble = SpecDoubleReal | SpecDoublePureNaN; // It's either a non-NaN or a NaN double, but it's definitely not impure NaN.
+static const SpeculatedType SpecFullDouble = SpecDoubleReal | SpecDoubleNaN; // It's either a non-NaN or a NaN double.
+static const SpeculatedType SpecBytecodeRealNumber = SpecInt32Only | SpecDoubleReal; // It's either an Int32 or a DoubleReal.
+static const SpeculatedType SpecFullRealNumber = SpecAnyInt | SpecDoubleReal; // It's either an Int32, an Int52, or a DoubleReal.
+static const SpeculatedType SpecBytecodeNumber = SpecInt32Only | SpecBytecodeDouble; // It's either an Int32 or a Double, and the Double cannot be an impure NaN.
+static const SpeculatedType SpecFullNumber = SpecAnyInt | SpecFullDouble; // It's either an Int32, Int52, or a Double, and the Double can be impure NaN.
+static const SpeculatedType SpecBoolean = 1ull << 32; // It's definitely a Boolean.
+static const SpeculatedType SpecOther = 1ull << 33; // It's definitely either Null or Undefined.
+static const SpeculatedType SpecMisc = SpecBoolean | SpecOther; // It's definitely either a Boolean, Null, or Undefined.
+static const SpeculatedType SpecHeapTop = SpecCell | SpecBytecodeNumber | SpecMisc; // It can be any of the above, except for SpecInt52Only and SpecDoubleImpureNaN.
+static const SpeculatedType SpecPrimitive = SpecString | SpecSymbol | SpecBytecodeNumber | SpecMisc; // It's any non-Object JSValue.
+static const SpeculatedType SpecEmpty = 1ull << 34; // It's definitely an empty value marker.
+static const SpeculatedType SpecBytecodeTop = SpecHeapTop | SpecEmpty; // It can be any of the above, except for SpecInt52Only and SpecDoubleImpureNaN. Corresponds to what could be found in a bytecode local.
+static const SpeculatedType SpecFullTop = SpecBytecodeTop | SpecFullNumber; // It can be anything that bytecode could see plus exotic encodings of numbers.
typedef bool (*SpeculatedTypeChecker)(SpeculatedType);
@@ -94,6 +106,16 @@ inline bool isCellSpeculation(SpeculatedType value)
return !!(value & SpecCell) && !(value & ~SpecCell);
}
+inline bool isCellOrOtherSpeculation(SpeculatedType value)
+{
+ return !!value && !(value & ~(SpecCell | SpecOther));
+}
+
+inline bool isNotCellSpeculation(SpeculatedType value)
+{
+ return !(value & SpecCell) && value;
+}
+
inline bool isObjectSpeculation(SpeculatedType value)
{
return !!(value & SpecObject) && !(value & ~SpecObject);
@@ -119,11 +141,31 @@ inline bool isStringIdentSpeculation(SpeculatedType value)
return value == SpecStringIdent;
}
+inline bool isNotStringVarSpeculation(SpeculatedType value)
+{
+ return !(value & SpecStringVar);
+}
+
inline bool isStringSpeculation(SpeculatedType value)
{
return !!value && (value & SpecString) == value;
}
+inline bool isNotStringSpeculation(SpeculatedType value)
+{
+ return value && !(value & SpecString);
+}
+
+inline bool isStringOrOtherSpeculation(SpeculatedType value)
+{
+ return !!value && (value & (SpecString | SpecOther)) == value;
+}
+
+inline bool isSymbolSpeculation(SpeculatedType value)
+{
+ return value == SpecSymbol;
+}
+
inline bool isArraySpeculation(SpeculatedType value)
{
return value == SpecArray;
@@ -134,6 +176,16 @@ inline bool isFunctionSpeculation(SpeculatedType value)
return value == SpecFunction;
}
+inline bool isProxyObjectSpeculation(SpeculatedType value)
+{
+ return value == SpecProxyObject;
+}
+
+inline bool isDerivedArraySpeculation(SpeculatedType value)
+{
+ return value == SpecDerivedArray;
+}
+
inline bool isInt8ArraySpeculation(SpeculatedType value)
{
return value == SpecInt8Array;
@@ -179,9 +231,14 @@ inline bool isFloat64ArraySpeculation(SpeculatedType value)
return value == SpecFloat64Array;
}
-inline bool isArgumentsSpeculation(SpeculatedType value)
+inline bool isDirectArgumentsSpeculation(SpeculatedType value)
+{
+ return value == SpecDirectArguments;
+}
+
+inline bool isScopedArgumentsSpeculation(SpeculatedType value)
{
- return !!value && (value & SpecArguments) == value;
+ return value == SpecScopedArguments;
}
inline bool isActionableIntMutableArraySpeculation(SpeculatedType value)
@@ -210,13 +267,14 @@ inline bool isActionableTypedMutableArraySpeculation(SpeculatedType value)
inline bool isActionableMutableArraySpeculation(SpeculatedType value)
{
return isArraySpeculation(value)
- || isArgumentsSpeculation(value)
|| isActionableTypedMutableArraySpeculation(value);
}
inline bool isActionableArraySpeculation(SpeculatedType value)
{
return isStringSpeculation(value)
+ || isDirectArgumentsSpeculation(value)
+ || isScopedArgumentsSpeculation(value)
|| isActionableMutableArraySpeculation(value);
}
@@ -235,49 +293,59 @@ inline bool isStringOrStringObjectSpeculation(SpeculatedType value)
return !!value && !(value & ~(SpecString | SpecStringObject));
}
+inline bool isRegExpObjectSpeculation(SpeculatedType value)
+{
+ return value == SpecRegExpObject;
+}
+
+inline bool isBoolInt32Speculation(SpeculatedType value)
+{
+ return value == SpecBoolInt32;
+}
+
inline bool isInt32Speculation(SpeculatedType value)
{
- return value == SpecInt32;
+ return value && !(value & ~SpecInt32Only);
}
-inline bool isInt32SpeculationForArithmetic(SpeculatedType value)
+inline bool isNotInt32Speculation(SpeculatedType value)
{
- return !(value & (SpecDouble | SpecInt52));
+ return value && !(value & SpecInt32Only);
}
-inline bool isInt32SpeculationExpectingDefined(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculation(SpeculatedType value)
{
- return isInt32Speculation(value & ~SpecOther);
+ return value && !(value & ~(SpecBoolean | SpecInt32Only));
}
-inline bool isInt52Speculation(SpeculatedType value)
+inline bool isInt32SpeculationForArithmetic(SpeculatedType value)
{
- return value == SpecInt52;
+ return !(value & (SpecFullDouble | SpecInt52Only));
}
-inline bool isMachineIntSpeculation(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculationForArithmetic(SpeculatedType value)
{
- return !!value && (value & SpecMachineInt) == value;
+ return !(value & (SpecFullDouble | SpecInt52Only));
}
-inline bool isMachineIntSpeculationExpectingDefined(SpeculatedType value)
+inline bool isInt32OrBooleanSpeculationExpectingDefined(SpeculatedType value)
{
- return isMachineIntSpeculation(value & ~SpecOther);
+ return isInt32OrBooleanSpeculation(value & ~SpecOther);
}
-inline bool isMachineIntSpeculationForArithmetic(SpeculatedType value)
+inline bool isInt52Speculation(SpeculatedType value)
{
- return !(value & SpecDouble);
+ return value == SpecInt52Only;
}
-inline bool isInt52AsDoubleSpeculation(SpeculatedType value)
+inline bool isAnyIntSpeculation(SpeculatedType value)
{
- return value == SpecInt52AsDouble;
+ return !!value && (value & SpecAnyInt) == value;
}
-inline bool isIntegerSpeculation(SpeculatedType value)
+inline bool isAnyIntAsDoubleSpeculation(SpeculatedType value)
{
- return !!value && (value & SpecInteger) == value;
+ return value == SpecAnyIntAsDouble;
}
inline bool isDoubleRealSpeculation(SpeculatedType value)
@@ -287,12 +355,12 @@ inline bool isDoubleRealSpeculation(SpeculatedType value)
inline bool isDoubleSpeculation(SpeculatedType value)
{
- return !!value && (value & SpecDouble) == value;
+ return !!value && (value & SpecFullDouble) == value;
}
inline bool isDoubleSpeculationForArithmetic(SpeculatedType value)
{
- return !!(value & SpecDouble);
+ return !!(value & SpecFullDouble);
}
inline bool isBytecodeRealNumberSpeculation(SpeculatedType value)
@@ -315,14 +383,14 @@ inline bool isFullNumberSpeculation(SpeculatedType value)
return !!(value & SpecFullNumber) && !(value & ~SpecFullNumber);
}
-inline bool isBytecodeNumberSpeculationExpectingDefined(SpeculatedType value)
+inline bool isFullNumberOrBooleanSpeculation(SpeculatedType value)
{
- return isBytecodeNumberSpeculation(value & ~SpecOther);
+ return value && !(value & ~(SpecFullNumber | SpecBoolean));
}
-inline bool isFullNumberSpeculationExpectingDefined(SpeculatedType value)
+inline bool isFullNumberOrBooleanSpeculationExpectingDefined(SpeculatedType value)
{
- return isFullNumberSpeculation(value & ~SpecOther);
+ return isFullNumberOrBooleanSpeculation(value & ~SpecOther);
}
inline bool isBooleanSpeculation(SpeculatedType value)
@@ -330,11 +398,21 @@ inline bool isBooleanSpeculation(SpeculatedType value)
return value == SpecBoolean;
}
+inline bool isNotBooleanSpeculation(SpeculatedType value)
+{
+ return value && !(value & SpecBoolean);
+}
+
inline bool isOtherSpeculation(SpeculatedType value)
{
return value == SpecOther;
}
+inline bool isMiscSpeculation(SpeculatedType value)
+{
+ return !!value && !(value & ~SpecMisc);
+}
+
inline bool isOtherOrEmptySpeculation(SpeculatedType value)
{
return !value || value == SpecOther;
@@ -345,6 +423,16 @@ inline bool isEmptySpeculation(SpeculatedType value)
return value == SpecEmpty;
}
+inline bool isUntypedSpeculationForArithmetic(SpeculatedType value)
+{
+ return !!(value & ~(SpecFullNumber | SpecBoolean));
+}
+
+inline bool isUntypedSpeculationForBitOps(SpeculatedType value)
+{
+ return !!(value & ~(SpecFullNumber | SpecBoolean | SpecOther));
+}
+
void dumpSpeculation(PrintStream&, SpeculatedType);
void dumpSpeculationAbbreviated(PrintStream&, SpeculatedType);
@@ -378,10 +466,30 @@ SpeculatedType speculationFromClassInfo(const ClassInfo*);
SpeculatedType speculationFromStructure(Structure*);
SpeculatedType speculationFromCell(JSCell*);
SpeculatedType speculationFromValue(JSValue);
+SpeculatedType speculationFromJSType(JSType);
SpeculatedType speculationFromTypedArrayType(TypedArrayType); // only valid for typed views.
TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType);
-} // namespace JSC
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType);
+
+bool valuesCouldBeEqual(SpeculatedType, SpeculatedType);
-#endif // SpeculatedType_h
+// Precise computation of the type of the result of a double computation after we
+// already know that the inputs are doubles and that the result must be a double. Use
+// the closest one of these that applies.
+SpeculatedType typeOfDoubleSum(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleDifference(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleProduct(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleQuotient(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleMinMax(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleNegation(SpeculatedType);
+SpeculatedType typeOfDoubleAbs(SpeculatedType);
+SpeculatedType typeOfDoubleRounding(SpeculatedType);
+SpeculatedType typeOfDoublePow(SpeculatedType, SpeculatedType);
+
+// This conservatively models the behavior of arbitrary double operations.
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType);
+
+} // namespace JSC
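
For context, the constants above form a bitset lattice over disjoint value classes: each `1ull << n` bit names one class, unions are bitwise-or, and every is...Speculation predicate asks whether a non-empty observed mask lies wholly inside a class. A minimal standalone sketch of the containment idiom (names mirror the patch; this is an illustration, not JSC source):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t SpeculatedType;

    static const SpeculatedType SpecBoolInt32    = 1ull << 25;
    static const SpeculatedType SpecNonBoolInt32 = 1ull << 26;
    static const SpeculatedType SpecInt32Only    = SpecBoolInt32 | SpecNonBoolInt32;
    static const SpeculatedType SpecBoolean      = 1ull << 32;

    // "Non-empty and contained in mask" -- the idiom behind isInt32Speculation() etc.
    static bool isSubset(SpeculatedType value, SpeculatedType mask)
    {
        return !!value && !(value & ~mask);
    }

    int main()
    {
        assert(isSubset(SpecBoolInt32, SpecInt32Only));                // definitely an Int32
        assert(!isSubset(SpecInt32Only | SpecBoolean, SpecInt32Only)); // might be a Boolean
        assert(!isSubset(0, SpecInt32Only));                           // bottom proves nothing
        return 0;
    }
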
diff --git a/Source/JavaScriptCore/bytecode/StructureSet.cpp b/Source/JavaScriptCore/bytecode/StructureSet.cpp
new file mode 100644
index 000000000..2ccb8f0ba
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/StructureSet.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "StructureSet.h"
+
+#include "TrackedReferences.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+void StructureSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+ CommaPrinter comma;
+ out.print("[");
+ forEach([&] (Structure* structure) { out.print(comma, inContext(*structure, context)); });
+ out.print("]");
+}
+
+void StructureSet::dump(PrintStream& out) const
+{
+ dumpInContext(out, nullptr);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/StructureSet.h b/Source/JavaScriptCore/bytecode/StructureSet.h
index 4cdcd01cb..8654ca500 100644
--- a/Source/JavaScriptCore/bytecode/StructureSet.h
+++ b/Source/JavaScriptCore/bytecode/StructureSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,165 +23,46 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef StructureSet_h
-#define StructureSet_h
+#pragma once
#include "ArrayProfile.h"
+#include "DumpContext.h"
#include "SpeculatedType.h"
#include "Structure.h"
-#include "DumpContext.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/Vector.h>
+#include <wtf/TinyPtrSet.h>
namespace JSC {
-namespace DFG {
-class StructureAbstractValue;
-}
+class TrackedReferences;
-class StructureSet {
+class StructureSet : public TinyPtrSet<Structure*> {
public:
- StructureSet() { }
-
- StructureSet(Structure* structure)
- {
- m_structures.append(structure);
- }
-
- void clear()
- {
- m_structures.clear();
- }
-
- void add(Structure* structure)
- {
- ASSERT(!contains(structure));
- m_structures.append(structure);
- }
-
- bool addAll(const StructureSet& other)
- {
- bool changed = false;
- for (size_t i = 0; i < other.size(); ++i) {
- if (contains(other[i]))
- continue;
- add(other[i]);
- changed = true;
- }
- return changed;
- }
+ // I really want to do this:
+ // using TinyPtrSet::TinyPtrSet;
+ //
+ // But I can't, because MSVC (Windows) doesn't support inheriting constructors here.
- void remove(Structure* structure)
+ StructureSet()
{
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (m_structures[i] != structure)
- continue;
-
- m_structures[i] = m_structures.last();
- m_structures.removeLast();
- return;
- }
}
- bool contains(Structure* structure) const
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (m_structures[i] == structure)
- return true;
- }
- return false;
- }
-
- bool containsOnly(Structure* structure) const
- {
- if (size() != 1)
- return false;
- return singletonStructure() == structure;
- }
-
- bool isSubsetOf(const StructureSet& other) const
- {
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (!other.contains(m_structures[i]))
- return false;
- }
- return true;
- }
-
- bool isSupersetOf(const StructureSet& other) const
- {
- return other.isSubsetOf(*this);
- }
-
- size_t size() const { return m_structures.size(); }
-
- // Call this if you know that the structure set must consist of exactly
- // one structure.
- Structure* singletonStructure() const
- {
- ASSERT(m_structures.size() == 1);
- return m_structures[0];
- }
-
- Structure* at(size_t i) const { return m_structures.at(i); }
-
- Structure* operator[](size_t i) const { return at(i); }
-
- Structure* last() const { return m_structures.last(); }
-
- SpeculatedType speculationFromStructures() const
- {
- SpeculatedType result = SpecNone;
-
- for (size_t i = 0; i < m_structures.size(); ++i)
- mergeSpeculation(result, speculationFromStructure(m_structures[i]));
-
- return result;
- }
-
- ArrayModes arrayModesFromStructures() const
- {
- ArrayModes result = 0;
-
- for (size_t i = 0; i < m_structures.size(); ++i)
- mergeArrayModes(result, asArrayModes(m_structures[i]->indexingType()));
-
- return result;
- }
-
- bool operator==(const StructureSet& other) const
+ StructureSet(Structure* structure)
+ : TinyPtrSet(structure)
{
- if (m_structures.size() != other.m_structures.size())
- return false;
-
- for (size_t i = 0; i < m_structures.size(); ++i) {
- if (!other.contains(m_structures[i]))
- return false;
- }
-
- return true;
}
- void dumpInContext(PrintStream& out, DumpContext* context) const
+ ALWAYS_INLINE StructureSet(const StructureSet& other)
+ : TinyPtrSet(other)
{
- CommaPrinter comma;
- out.print("[");
- for (size_t i = 0; i < m_structures.size(); ++i)
- out.print(comma, inContext(*m_structures[i], context));
- out.print("]");
}
- void dump(PrintStream& out) const
+ Structure* onlyStructure() const
{
- dumpInContext(out, 0);
+ return onlyEntry();
}
-private:
- friend class DFG::StructureAbstractValue;
-
- Vector<Structure*, 2> m_structures;
+ void dumpInContext(PrintStream&, DumpContext*) const;
+ void dump(PrintStream&) const;
};
} // namespace JSC
-
-#endif // StructureSet_h
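
The rewrite replaces the hand-rolled Vector-backed set with WTF::TinyPtrSet, which stores a single Structure* inline and only allocates when the set grows. A hypothetical usage sketch, assuming TinyPtrSet's usual semantics (add() returns true when the pointer is newly inserted; onlyStructure() requires a singleton set); "set" and "structure" are placeholders:

    void record(JSC::StructureSet& set, JSC::Structure* structure)
    {
        if (set.add(structure)) {
            // Newly seen structure: callers such as considerCaching() use this
            // signal to decide whether to buffer another AccessCase.
        }
        if (set.size() == 1) {
            JSC::Structure* only = set.onlyStructure(); // replaces singletonStructure()
            (void)only;
        }
    }
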
diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
index 5cfb3d1e8..f27e507b7 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,49 +29,67 @@
#if ENABLE(JIT)
#include "CodeBlock.h"
+#include "JSCInlines.h"
#include "StructureStubInfo.h"
namespace JSC {
-StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint() { }
+StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint()
+{
+ for (auto current = WTFMove(m_next); current; current = WTFMove(current->m_next)) { }
+}
StructureStubClearingWatchpoint* StructureStubClearingWatchpoint::push(
+ const ObjectPropertyCondition& key,
WatchpointsOnStructureStubInfo& holder,
- OwnPtr<StructureStubClearingWatchpoint>& head)
+ std::unique_ptr<StructureStubClearingWatchpoint>& head)
{
- head = adoptPtr(new StructureStubClearingWatchpoint(holder, head.release()));
+ head = std::make_unique<StructureStubClearingWatchpoint>(key, holder, WTFMove(head));
return head.get();
}
-void StructureStubClearingWatchpoint::fireInternal()
+void StructureStubClearingWatchpoint::fireInternal(const FireDetail&)
{
- // This will implicitly cause my own demise: stub reset removes all watchpoints.
- // That works, because deleting a watchpoint removes it from the set's list, and
- // the set's list traversal for firing is robust against the set changing.
- m_holder.codeBlock()->resetStub(*m_holder.stubInfo());
+ if (!m_key || !m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+ // This will implicitly cause my own demise: stub reset removes all watchpoints.
+ // That works, because deleting a watchpoint removes it from the set's list, and
+ // the set's list traversal for firing is robust against the set changing.
+ ConcurrentJSLocker locker(m_holder.codeBlock()->m_lock);
+ m_holder.stubInfo()->reset(m_holder.codeBlock());
+ return;
+ }
+
+ if (m_key.kind() == PropertyCondition::Presence) {
+ // If this was a presence condition, let's watch the property for replacements. This is profitable
+ // for the DFG, which will want the replacement set to be valid in order to do constant folding.
+ VM& vm = *Heap::heap(m_key.object())->vm();
+ m_key.object()->structure()->startWatchingPropertyForReplacements(vm, m_key.offset());
+ }
+
+ m_key.object()->structure()->addTransitionWatchpoint(this);
}
WatchpointsOnStructureStubInfo::~WatchpointsOnStructureStubInfo()
{
}
-StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint()
+StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint(const ObjectPropertyCondition& key)
{
- return StructureStubClearingWatchpoint::push(*this, m_head);
+ return StructureStubClearingWatchpoint::push(key, *this, m_head);
}
StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
- RefPtr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock,
- StructureStubInfo* stubInfo)
+ std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock,
+ StructureStubInfo* stubInfo, const ObjectPropertyCondition& key)
{
if (!holderRef)
- holderRef = adoptRef(new WatchpointsOnStructureStubInfo(codeBlock, stubInfo));
+ holderRef = std::make_unique<WatchpointsOnStructureStubInfo>(codeBlock, stubInfo);
else {
ASSERT(holderRef->m_codeBlock == codeBlock);
ASSERT(holderRef->m_stubInfo == stubInfo);
}
- return holderRef->addWatchpoint();
+ return holderRef->addWatchpoint(key);
}
} // namespace JSC
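
Two idioms in this file are worth calling out: push() prepends by moving the old head into the new node, so the chain stays singly owned through std::unique_ptr, and the destructor unlinks iteratively so that destroying a long chain cannot recurse and overflow the stack. A self-contained sketch of both (a generic Node, not the actual watchpoint class):

    #include <memory>
    #include <utility>

    struct Node {
        explicit Node(std::unique_ptr<Node> next)
            : m_next(std::move(next))
        {
        }

        ~Node()
        {
            // Iterative teardown: each assignment destroys the previous node after
            // stealing its tail, so no recursive ~Node chain builds up.
            for (auto current = std::move(m_next); current; current = std::move(current->m_next)) { }
        }

        static Node* push(std::unique_ptr<Node>& head)
        {
            head = std::make_unique<Node>(std::move(head));
            return head.get();
        }

        std::unique_ptr<Node> m_next;
    };
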
diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
index 4c6bdecf4..665c56a98 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,60 +23,55 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef StructureStubClearingWatchpoint_h
-#define StructureStubClearingWatchpoint_h
+#pragma once
+#include "ObjectPropertyCondition.h"
#include "Watchpoint.h"
-#include <wtf/Platform.h>
#if ENABLE(JIT)
#include <wtf/FastMalloc.h>
#include <wtf/Noncopyable.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/RefPtr.h>
namespace JSC {
class CodeBlock;
+class StructureStubInfo;
class WatchpointsOnStructureStubInfo;
-struct StructureStubInfo;
class StructureStubClearingWatchpoint : public Watchpoint {
WTF_MAKE_NONCOPYABLE(StructureStubClearingWatchpoint);
WTF_MAKE_FAST_ALLOCATED;
public:
StructureStubClearingWatchpoint(
- WatchpointsOnStructureStubInfo& holder)
- : m_holder(holder)
- {
- }
-
- StructureStubClearingWatchpoint(
+ const ObjectPropertyCondition& key,
WatchpointsOnStructureStubInfo& holder,
- PassOwnPtr<StructureStubClearingWatchpoint> next)
- : m_holder(holder)
- , m_next(next)
+ std::unique_ptr<StructureStubClearingWatchpoint> next)
+ : m_key(key)
+ , m_holder(holder)
+ , m_next(WTFMove(next))
{
}
virtual ~StructureStubClearingWatchpoint();
static StructureStubClearingWatchpoint* push(
+ const ObjectPropertyCondition& key,
WatchpointsOnStructureStubInfo& holder,
- OwnPtr<StructureStubClearingWatchpoint>& head);
+ std::unique_ptr<StructureStubClearingWatchpoint>& head);
protected:
- virtual void fireInternal() override;
+ void fireInternal(const FireDetail&) override;
private:
+ ObjectPropertyCondition m_key;
WatchpointsOnStructureStubInfo& m_holder;
- OwnPtr<StructureStubClearingWatchpoint> m_next;
+ std::unique_ptr<StructureStubClearingWatchpoint> m_next;
};
-class WatchpointsOnStructureStubInfo : public RefCounted<WatchpointsOnStructureStubInfo> {
+class WatchpointsOnStructureStubInfo {
+ WTF_MAKE_NONCOPYABLE(WatchpointsOnStructureStubInfo);
+ WTF_MAKE_FAST_ALLOCATED;
public:
WatchpointsOnStructureStubInfo(CodeBlock* codeBlock, StructureStubInfo* stubInfo)
: m_codeBlock(codeBlock)
@@ -86,11 +81,11 @@ public:
~WatchpointsOnStructureStubInfo();
- StructureStubClearingWatchpoint* addWatchpoint();
+ StructureStubClearingWatchpoint* addWatchpoint(const ObjectPropertyCondition& key);
static StructureStubClearingWatchpoint* ensureReferenceAndAddWatchpoint(
- RefPtr<WatchpointsOnStructureStubInfo>& holderRef,
- CodeBlock*, StructureStubInfo*);
+ std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef,
+ CodeBlock*, StructureStubInfo*, const ObjectPropertyCondition& key);
CodeBlock* codeBlock() const { return m_codeBlock; }
StructureStubInfo* stubInfo() const { return m_stubInfo; }
@@ -98,12 +93,9 @@ public:
private:
CodeBlock* m_codeBlock;
StructureStubInfo* m_stubInfo;
- OwnPtr<StructureStubClearingWatchpoint> m_head;
+ std::unique_ptr<StructureStubClearingWatchpoint> m_head;
};
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // StructureStubClearingWatchpoint_h
-
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
index 91413dfbf..70b767c57 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,108 +27,264 @@
#include "StructureStubInfo.h"
#include "JSObject.h"
-#include "PolymorphicPutByIdList.h"
-
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
+#include "Repatch.h"
namespace JSC {
#if ENABLE(JIT)
+
+static const bool verbose = false;
+
+StructureStubInfo::StructureStubInfo(AccessType accessType)
+ : callSiteIndex(UINT_MAX)
+ , accessType(accessType)
+ , cacheType(CacheType::Unset)
+ , countdown(1) // For a totally clear stub, we'll patch it after the first execution.
+ , repatchCount(0)
+ , numberOfCoolDowns(0)
+ , bufferingCountdown(Options::repatchBufferingCountdown())
+ , resetByGC(false)
+ , tookSlowPath(false)
+ , everConsidered(false)
+{
+}
+
+StructureStubInfo::~StructureStubInfo()
+{
+}
+
+void StructureStubInfo::initGetByIdSelf(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+ cacheType = CacheType::GetByIdSelf;
+
+ u.byIdSelf.baseObjectStructure.set(
+ *codeBlock->vm(), codeBlock, baseObjectStructure);
+ u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initArrayLength()
+{
+ cacheType = CacheType::ArrayLength;
+}
+
+void StructureStubInfo::initPutByIdReplace(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+ cacheType = CacheType::PutByIdReplace;
+
+ u.byIdSelf.baseObjectStructure.set(
+ *codeBlock->vm(), codeBlock, baseObjectStructure);
+ u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess> stub)
+{
+ cacheType = CacheType::Stub;
+ u.stub = stub.release();
+}
+
void StructureStubInfo::deref()
{
- switch (accessType) {
- case access_get_by_id_self_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
- delete polymorphicStructures;
+ switch (cacheType) {
+ case CacheType::Stub:
+ delete u.stub;
return;
- }
- case access_get_by_id_proto_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
- delete polymorphicStructures;
+ case CacheType::Unset:
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
+ case CacheType::ArrayLength:
return;
}
- case access_put_by_id_list:
- delete u.putByIdList.list;
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void StructureStubInfo::aboutToDie()
+{
+ switch (cacheType) {
+ case CacheType::Stub:
+ u.stub->aboutToDie();
return;
- case access_in_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList;
- delete polymorphicStructures;
+ case CacheType::Unset:
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
+ case CacheType::ArrayLength:
return;
}
- case access_get_by_id_self:
- case access_get_by_id_proto:
- case access_get_by_id_chain:
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct:
- case access_put_by_id_replace:
- case access_unset:
- case access_get_by_id_generic:
- case access_put_by_id_generic:
- case access_get_array_length:
- case access_get_string_length:
- // These instructions don't have to release any allocated memory
- return;
- default:
- RELEASE_ASSERT_NOT_REACHED();
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+AccessGenerationResult StructureStubInfo::addAccessCase(
+ CodeBlock* codeBlock, const Identifier& ident, std::unique_ptr<AccessCase> accessCase)
+{
+ VM& vm = *codeBlock->vm();
+
+ if (verbose)
+ dataLog("Adding access case: ", accessCase, "\n");
+
+ if (!accessCase)
+ return AccessGenerationResult::GaveUp;
+
+ AccessGenerationResult result;
+
+ if (cacheType == CacheType::Stub) {
+ result = u.stub->addCase(vm, codeBlock, *this, ident, WTFMove(accessCase));
+
+ if (verbose)
+ dataLog("Had stub, result: ", result, "\n");
+
+ if (!result.buffered()) {
+ bufferedStructures.clear();
+ return result;
+ }
+ } else {
+ std::unique_ptr<PolymorphicAccess> access = std::make_unique<PolymorphicAccess>();
+
+ Vector<std::unique_ptr<AccessCase>, 2> accessCases;
+
+ std::unique_ptr<AccessCase> previousCase =
+ AccessCase::fromStructureStubInfo(vm, codeBlock, *this);
+ if (previousCase)
+ accessCases.append(WTFMove(previousCase));
+
+ accessCases.append(WTFMove(accessCase));
+
+ result = access->addCases(vm, codeBlock, *this, ident, WTFMove(accessCases));
+
+ if (verbose)
+ dataLog("Created stub, result: ", result, "\n");
+
+ if (!result.buffered()) {
+ bufferedStructures.clear();
+ return result;
+ }
+
+ initStub(codeBlock, WTFMove(access));
+ }
+
+ RELEASE_ASSERT(!result.generatedSomeCode());
+
+ // If we didn't buffer any cases then bail. If this made no changes then we'll just try again
+ // subject to cool-down.
+ if (!result.buffered()) {
+ if (verbose)
+ dataLog("Didn't buffer anything, bailing.\n");
+ bufferedStructures.clear();
+ return result;
}
+
+ // The buffering countdown tells us if we should be repatching now.
+ if (bufferingCountdown) {
+ if (verbose)
+ dataLog("Countdown is too high: ", bufferingCountdown, ".\n");
+ return result;
+ }
+
+ // Forget the buffered structures so that all future attempts to cache get fully handled by the
+ // PolymorphicAccess.
+ bufferedStructures.clear();
+
+ result = u.stub->regenerate(vm, codeBlock, *this, ident);
+
+ if (verbose)
+ dataLog("Regeneration result: ", result, "\n");
+
+ RELEASE_ASSERT(!result.buffered());
+
+ if (!result.generatedSomeCode())
+ return result;
+
+ // If we generated some code then we don't want to attempt to repatch in the future until we
+ // gather enough cases.
+ bufferingCountdown = Options::repatchBufferingCountdown();
+ return result;
}
-bool StructureStubInfo::visitWeakReferences()
+void StructureStubInfo::reset(CodeBlock* codeBlock)
{
+ bufferedStructures.clear();
+
+ if (cacheType == CacheType::Unset)
+ return;
+
+ if (Options::verboseOSR()) {
+ // This can be called from GC destructor calls, so we don't try to do a full dump
+ // of the CodeBlock.
+ dataLog("Clearing structure cache (kind ", static_cast<int>(accessType), ") in ", RawPointer(codeBlock), ".\n");
+ }
+
switch (accessType) {
- case access_get_by_id_self:
- if (!Heap::isMarked(u.getByIdSelf.baseObjectStructure.get()))
- return false;
+ case AccessType::TryGet:
+ resetGetByID(codeBlock, *this, GetByIDKind::Try);
break;
- case access_get_by_id_proto:
- if (!Heap::isMarked(u.getByIdProto.baseObjectStructure.get())
- || !Heap::isMarked(u.getByIdProto.prototypeStructure.get()))
- return false;
+ case AccessType::Get:
+ resetGetByID(codeBlock, *this, GetByIDKind::Normal);
break;
- case access_get_by_id_chain:
- if (!Heap::isMarked(u.getByIdChain.baseObjectStructure.get())
- || !Heap::isMarked(u.getByIdChain.chain.get()))
- return false;
+ case AccessType::Put:
+ resetPutByID(codeBlock, *this);
break;
- case access_get_by_id_self_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
- if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize))
- return false;
- break;
- }
- case access_get_by_id_proto_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
- if (!polymorphicStructures->visitWeak(u.getByIdProtoList.listSize))
- return false;
+ case AccessType::In:
+ resetIn(codeBlock, *this);
break;
}
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct:
- if (!Heap::isMarked(u.putByIdTransition.previousStructure.get())
- || !Heap::isMarked(u.putByIdTransition.structure.get())
- || !Heap::isMarked(u.putByIdTransition.chain.get()))
- return false;
- break;
- case access_put_by_id_replace:
- if (!Heap::isMarked(u.putByIdReplace.baseObjectStructure.get()))
- return false;
- break;
- case access_put_by_id_list:
- if (!u.putByIdList.list->visitWeak())
- return false;
+
+ deref();
+ cacheType = CacheType::Unset;
+}
+
+void StructureStubInfo::visitWeakReferences(CodeBlock* codeBlock)
+{
+ VM& vm = *codeBlock->vm();
+
+ bufferedStructures.genericFilter(
+ [&] (Structure* structure) -> bool {
+ return Heap::isMarked(structure);
+ });
+
+ switch (cacheType) {
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
+ if (Heap::isMarked(u.byIdSelf.baseObjectStructure.get()))
+ return;
break;
- case access_in_list: {
- PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList;
- if (!polymorphicStructures->visitWeak(u.inList.listSize))
- return false;
+ case CacheType::Stub:
+ if (u.stub->visitWeak(vm))
+ return;
break;
- }
default:
- // The rest of the instructions don't require references, so there is no need to
- // do anything.
- break;
+ return;
}
+
+ reset(codeBlock);
+ resetByGC = true;
+}
+
+bool StructureStubInfo::propagateTransitions(SlotVisitor& visitor)
+{
+ switch (cacheType) {
+ case CacheType::Unset:
+ case CacheType::ArrayLength:
+ return true;
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
+ return u.byIdSelf.baseObjectStructure->markIfCheap(visitor);
+ case CacheType::Stub:
+ return u.stub->propagateTransitions(visitor);
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
return true;
}
-#endif
+
+bool StructureStubInfo::containsPC(void* pc) const
+{
+ if (cacheType != CacheType::Stub)
+ return false;
+ return u.stub->containsPC(pc);
+}
+
+#endif // ENABLE(JIT)
} // namespace JSC
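
The shape of addAccessCase() above is easier to see stripped of its details: cases are buffered cheaply, and machine code is regenerated only once the buffering countdown expires. A hypothetical distillation (Outcome and the parameters are invented for illustration; the real countdown is decremented in considerCaching(), not here):

    enum class Outcome { GaveUp, Buffered, Generated };

    Outcome addCase(unsigned bufferingCountdown, bool caseWasBuffered)
    {
        if (!caseWasBuffered)
            return Outcome::GaveUp;   // clear bufferedStructures and bail
        if (bufferingCountdown)
            return Outcome::Buffered; // keep collecting cases, defer codegen
        // Countdown expired: regenerate once for everything buffered so far,
        // then reset the countdown so future cases buffer again.
        return Outcome::Generated;
    }
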
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
index 5463f3e95..b091e2157 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,266 +23,194 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef StructureStubInfo_h
-#define StructureStubInfo_h
-
-#include <wtf/Platform.h>
+#pragma once
+#include "CodeBlock.h"
#include "CodeOrigin.h"
#include "Instruction.h"
#include "JITStubRoutine.h"
#include "MacroAssembler.h"
-#include "Opcode.h"
-#include "PolymorphicAccessStructureList.h"
+#include "ObjectPropertyConditionSet.h"
+#include "Options.h"
#include "RegisterSet.h"
#include "Structure.h"
+#include "StructureSet.h"
#include "StructureStubClearingWatchpoint.h"
-#include <wtf/OwnPtr.h>
namespace JSC {
#if ENABLE(JIT)
-class PolymorphicPutByIdList;
+class AccessCase;
+class AccessGenerationResult;
+class PolymorphicAccess;
-enum AccessType {
- access_get_by_id_self,
- access_get_by_id_proto,
- access_get_by_id_chain,
- access_get_by_id_self_list,
- access_get_by_id_proto_list,
- access_put_by_id_transition_normal,
- access_put_by_id_transition_direct,
- access_put_by_id_replace,
- access_put_by_id_list,
- access_unset,
- access_get_by_id_generic,
- access_put_by_id_generic,
- access_get_array_length,
- access_get_string_length,
- access_in_list
+enum class AccessType : int8_t {
+ Get,
+ TryGet,
+ Put,
+ In
};
-inline bool isGetByIdAccess(AccessType accessType)
-{
- switch (accessType) {
- case access_get_by_id_self:
- case access_get_by_id_proto:
- case access_get_by_id_chain:
- case access_get_by_id_self_list:
- case access_get_by_id_proto_list:
- case access_get_by_id_generic:
- case access_get_array_length:
- case access_get_string_length:
- return true;
- default:
- return false;
- }
-}
-
-inline bool isPutByIdAccess(AccessType accessType)
-{
- switch (accessType) {
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct:
- case access_put_by_id_replace:
- case access_put_by_id_list:
- case access_put_by_id_generic:
- return true;
- default:
- return false;
- }
-}
-
-inline bool isInAccess(AccessType accessType)
-{
- switch (accessType) {
- case access_in_list:
- return true;
- default:
- return false;
- }
-}
-
-struct StructureStubInfo {
- StructureStubInfo()
- : accessType(access_unset)
- , seen(false)
- , resetByGC(false)
- {
- }
-
- void initGetByIdSelf(VM& vm, JSCell* owner, Structure* baseObjectStructure)
- {
- accessType = access_get_by_id_self;
-
- u.getByIdSelf.baseObjectStructure.set(vm, owner, baseObjectStructure);
- }
-
- void initGetByIdChain(VM& vm, JSCell* owner, Structure* baseObjectStructure, StructureChain* chain, unsigned count, bool isDirect)
- {
- accessType = access_get_by_id_chain;
-
- u.getByIdChain.baseObjectStructure.set(vm, owner, baseObjectStructure);
- u.getByIdChain.chain.set(vm, owner, chain);
- u.getByIdChain.count = count;
- u.getByIdChain.isDirect = isDirect;
- }
-
- void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize, bool didSelfPatching = false)
- {
- accessType = access_get_by_id_self_list;
-
- u.getByIdSelfList.structureList = structureList;
- u.getByIdSelfList.listSize = listSize;
- u.getByIdSelfList.didSelfPatching = didSelfPatching;
- }
+enum class CacheType : int8_t {
+ Unset,
+ GetByIdSelf,
+ PutByIdReplace,
+ Stub,
+ ArrayLength
+};
- void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize)
- {
- accessType = access_get_by_id_proto_list;
+class StructureStubInfo {
+ WTF_MAKE_NONCOPYABLE(StructureStubInfo);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ StructureStubInfo(AccessType);
+ ~StructureStubInfo();
- u.getByIdProtoList.structureList = structureList;
- u.getByIdProtoList.listSize = listSize;
- }
+ void initGetByIdSelf(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+ void initArrayLength();
+ void initPutByIdReplace(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+ void initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess>);
- // PutById*
+ AccessGenerationResult addAccessCase(CodeBlock*, const Identifier&, std::unique_ptr<AccessCase>);
- void initPutByIdTransition(VM& vm, JSCell* owner, Structure* previousStructure, Structure* structure, StructureChain* chain, bool isDirect)
- {
- if (isDirect)
- accessType = access_put_by_id_transition_direct;
- else
- accessType = access_put_by_id_transition_normal;
+ void reset(CodeBlock*);
- u.putByIdTransition.previousStructure.set(vm, owner, previousStructure);
- u.putByIdTransition.structure.set(vm, owner, structure);
- u.putByIdTransition.chain.set(vm, owner, chain);
- }
+ void deref();
+ void aboutToDie();
- void initPutByIdReplace(VM& vm, JSCell* owner, Structure* baseObjectStructure)
- {
- accessType = access_put_by_id_replace;
+ // Check if the stub has weak references that are dead. If it does, then it resets itself,
+ // either entirely or just enough to ensure that those dead pointers don't get used anymore.
+ void visitWeakReferences(CodeBlock*);
- u.putByIdReplace.baseObjectStructure.set(vm, owner, baseObjectStructure);
- }
+ // This returns true if it has marked everything that it will ever mark.
+ bool propagateTransitions(SlotVisitor&);
- void initPutByIdList(PolymorphicPutByIdList* list)
- {
- accessType = access_put_by_id_list;
- u.putByIdList.list = list;
- }
-
- void initInList(PolymorphicAccessStructureList* list, int listSize)
+ ALWAYS_INLINE bool considerCaching(CodeBlock* codeBlock, Structure* structure)
{
- accessType = access_in_list;
- u.inList.structureList = list;
- u.inList.listSize = listSize;
- }
+ // We never cache non-cells.
+ if (!structure)
+ return false;
- void reset()
- {
- deref();
- accessType = access_unset;
- stubRoutine.clear();
- watchpoints.clear();
- }
-
- void deref();
-
- bool visitWeakReferences();
+ // This method is called from the Optimize variants of IC slow paths. The first part of this
+ // method tries to determine if the Optimize variant should really behave like the
+ // non-Optimize variant and leave the IC untouched.
+ //
+ // If we determine that we should do something to the IC then the next order of business is
+ // to determine if this Structure would impact the IC at all. We know that it won't if we
+ // have already buffered something on its behalf. That's what the bufferedStructures set is
+ // for.
- bool seenOnce()
- {
- return seen;
+ everConsidered = true;
+ if (!countdown) {
+ // Check if we have been doing repatching too frequently. If so, then we should cool off
+ // for a while.
+ WTF::incrementWithSaturation(repatchCount);
+ if (repatchCount > Options::repatchCountForCoolDown()) {
+ // We've been repatching too much, so don't do it now.
+ repatchCount = 0;
+ // The amount of time we require for cool-down depends on the number of times we've
+ // had to cool down in the past. The relationship is exponential. The max value we
+ // allow here is 2^8 - 2 (the countdown is a uint8_t), since the slow paths may increment the count to indicate
+ // that they'd like to temporarily skip patching just this once.
+ countdown = WTF::leftShiftWithSaturation(
+ static_cast<uint8_t>(Options::initialCoolDownCount()),
+ numberOfCoolDowns,
+ static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() - 1));
+ WTF::incrementWithSaturation(numberOfCoolDowns);
+
+ // We may still have had something buffered. Trigger generation now.
+ bufferingCountdown = 0;
+ return true;
+ }
+
+ // We don't want to return false due to buffering indefinitely.
+ if (!bufferingCountdown) {
+ // Note that when this returns true, it's possible that we will not even get an
+ // AccessCase because this may cause Repatch.cpp to simply repatch
+ // in place.
+ return true;
+ }
+
+ bufferingCountdown--;
+
+ // Now protect the IC buffering. We want to proceed only if this is a structure that
+ // we don't already have a case buffered for. Note that if this returns true but the
+ // bufferingCountdown is not zero then we will buffer the access case for later without
+ // immediately generating code for it.
+ bool isNewlyAdded = bufferedStructures.add(structure);
+ if (isNewlyAdded) {
+ VM& vm = *codeBlock->vm();
+ vm.heap.writeBarrier(codeBlock);
+ }
+ return isNewlyAdded;
+ }
+ countdown--;
+ return false;
}
- void setSeen()
- {
- seen = true;
- }
-
- StructureStubClearingWatchpoint* addWatchpoint(CodeBlock* codeBlock)
- {
- return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
- watchpoints, codeBlock, this);
- }
-
- int8_t accessType;
- bool seen : 1;
- bool resetByGC : 1;
+ bool containsPC(void* pc) const;
CodeOrigin codeOrigin;
+ CallSiteIndex callSiteIndex;
+ union {
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ PropertyOffset offset;
+ } byIdSelf;
+ PolymorphicAccess* stub;
+ } u;
+
+ // Represents those structures that already have buffered AccessCases in the PolymorphicAccess.
+ // Note that it's always safe to clear this. If we clear it prematurely, then if we see the same
+ // structure again during this buffering countdown, we will create an AccessCase object for it.
+ // That's not so bad - we'll get rid of the redundant ones once we regenerate.
+ StructureSet bufferedStructures;
+
struct {
- int8_t registersFlushed;
- int8_t callFrameRegister;
+ CodeLocationLabel start; // This is either the start of the inline IC for *byId caches, or the location of the patchable jump for 'in' caches.
+ RegisterSet usedRegisters;
+ uint32_t inlineSize;
+ int32_t deltaFromStartToSlowPathCallLocation;
+ int32_t deltaFromStartToSlowPathStart;
+
int8_t baseGPR;
+ int8_t valueGPR;
#if USE(JSVALUE32_64)
int8_t valueTagGPR;
-#endif
- int8_t valueGPR;
- RegisterSet usedRegisters;
- int32_t deltaCallToDone;
- int32_t deltaCallToStorageLoad;
- int32_t deltaCallToJump;
- int32_t deltaCallToSlowCase;
- int32_t deltaCheckImmToCall;
-#if USE(JSVALUE64)
- int32_t deltaCallToLoadOrStore;
-#else
- int32_t deltaCallToTagLoadOrStore;
- int32_t deltaCallToPayloadLoadOrStore;
+ int8_t baseTagGPR;
#endif
} patch;
- union {
- struct {
- // It would be unwise to put anything here, as it will surely be overwritten.
- } unset;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- } getByIdSelf;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- WriteBarrierBase<Structure> prototypeStructure;
- bool isDirect;
- } getByIdProto;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- WriteBarrierBase<StructureChain> chain;
- unsigned count : 31;
- bool isDirect : 1;
- } getByIdChain;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize : 31;
- bool didSelfPatching : 1;
- } getByIdSelfList;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize;
- } getByIdProtoList;
- struct {
- WriteBarrierBase<Structure> previousStructure;
- WriteBarrierBase<Structure> structure;
- WriteBarrierBase<StructureChain> chain;
- } putByIdTransition;
- struct {
- WriteBarrierBase<Structure> baseObjectStructure;
- } putByIdReplace;
- struct {
- PolymorphicPutByIdList* list;
- } putByIdList;
- struct {
- PolymorphicAccessStructureList* structureList;
- int listSize;
- } inList;
- } u;
+ CodeLocationCall slowPathCallLocation() { return patch.start.callAtOffset(patch.deltaFromStartToSlowPathCallLocation); }
+ CodeLocationLabel doneLocation() { return patch.start.labelAtOffset(patch.inlineSize); }
+ CodeLocationLabel slowPathStartLocation() { return patch.start.labelAtOffset(patch.deltaFromStartToSlowPathStart); }
+ CodeLocationJump patchableJumpForIn()
+ {
+ ASSERT(accessType == AccessType::In);
+ return patch.start.jumpAtOffset(0);
+ }
+
+ JSValueRegs valueRegs() const
+ {
+ return JSValueRegs(
+#if USE(JSVALUE32_64)
+ static_cast<GPRReg>(patch.valueTagGPR),
+#endif
+ static_cast<GPRReg>(patch.valueGPR));
+ }
+
- RefPtr<JITStubRoutine> stubRoutine;
- CodeLocationCall callReturnLocation;
- RefPtr<WatchpointsOnStructureStubInfo> watchpoints;
+ AccessType accessType;
+ CacheType cacheType;
+ uint8_t countdown; // We repatch only when this is zero. If not zero, we decrement.
+ uint8_t repatchCount;
+ uint8_t numberOfCoolDowns;
+ uint8_t bufferingCountdown;
+ bool resetByGC : 1;
+ bool tookSlowPath : 1;
+ bool everConsidered : 1;
};
inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo)
@@ -290,14 +218,12 @@ inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStu
return structureStubInfo.codeOrigin;
}
-typedef HashMap<CodeOrigin, StructureStubInfo*> StubInfoMap;
-
#else
-typedef HashMap<int, void*> StubInfoMap;
+class StructureStubInfo;
#endif // ENABLE(JIT)
-} // namespace JSC
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
-#endif // StructureStubInfo_h
+} // namespace JSC
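
considerCaching() deserves a note: each episode of excessive repatching doubles the next cool-down period, saturating just below the uint8_t maximum (255 is reserved so slow paths can bump the counter to skip a single patch). A sketch of that saturating computation, standing in for WTF::leftShiftWithSaturation as used above:

    #include <algorithm>
    #include <cstdint>

    // Illustrative stand-in for WTF::leftShiftWithSaturation.
    uint8_t nextCoolDown(uint8_t initialCount, uint8_t numberOfCoolDowns)
    {
        // Clamp the shift so the intermediate value cannot wrap in 64 bits.
        unsigned shift = std::min<unsigned>(numberOfCoolDowns, 32);
        uint64_t value = static_cast<uint64_t>(initialCount) << shift;
        return static_cast<uint8_t>(std::min<uint64_t>(value, 254)); // 2^8 - 2
    }
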
diff --git a/Source/JavaScriptCore/bytecode/SuperSampler.cpp b/Source/JavaScriptCore/bytecode/SuperSampler.cpp
new file mode 100644
index 000000000..a4e21f9fa
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/SuperSampler.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SuperSampler.h"
+
+#include "MacroAssembler.h"
+#include "Options.h"
+#include <wtf/CurrentTime.h>
+#include <wtf/DataLog.h>
+#include <wtf/Lock.h>
+#include <wtf/Threading.h>
+
+namespace JSC {
+
+volatile uint32_t g_superSamplerCount;
+
+static StaticLock lock;
+static double in;
+static double out;
+
+void initializeSuperSampler()
+{
+ if (!Options::useSuperSampler())
+ return;
+
+ createThread(
+ "JSC Super Sampler",
+ [] () {
+ const int sleepQuantum = 10;
+ const int printingPeriod = 1000;
+ for (;;) {
+ for (int ms = 0; ms < printingPeriod; ms += sleepQuantum) {
+ {
+ LockHolder locker(lock);
+ if (g_superSamplerCount)
+ in++;
+ else
+ out++;
+ }
+ sleepMS(sleepQuantum);
+ }
+ printSuperSamplerState();
+ if (static_cast<int32_t>(g_superSamplerCount) < 0)
+ dataLog("WARNING: Super sampler undercount detected!\n");
+ }
+ });
+}
+
+void resetSuperSamplerState()
+{
+ LockHolder locker(lock);
+ in = 0;
+ out = 0;
+}
+
+void printSuperSamplerState()
+{
+ if (!Options::useSuperSampler())
+ return;
+
+ LockHolder locker(lock);
+ double percentage = 100.0 * in / (in + out);
+ if (percentage != percentage) // x != x holds only for NaN, i.e. when in + out == 0.
+ percentage = 0.0;
+ dataLog("Percent time behind super sampler flag: ", percentage, "\n");
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/SuperSampler.h b/Source/JavaScriptCore/bytecode/SuperSampler.h
new file mode 100644
index 000000000..c90f6d43a
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/SuperSampler.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+class MacroAssembler;
+
+extern volatile uint32_t g_superSamplerCount;
+
+void initializeSuperSampler();
+
+class SuperSamplerScope {
+public:
+ SuperSamplerScope(bool doSample = true)
+ : m_doSample(doSample)
+ {
+ if (m_doSample)
+ g_superSamplerCount++;
+ }
+
+ ~SuperSamplerScope()
+ {
+ if (m_doSample)
+ g_superSamplerCount--;
+ }
+
+private:
+ bool m_doSample;
+};
+
+JS_EXPORT_PRIVATE void resetSuperSamplerState();
+JS_EXPORT_PRIVATE void printSuperSamplerState();
+
+} // namespace JSC
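
SuperSamplerScope is a plain RAII counter: the sampler thread started by initializeSuperSampler() periodically reads g_superSamplerCount and attributes time to whichever state it finds. A typical (hypothetical) use:

    void hotOperation()
    {
        // Increments g_superSamplerCount on entry, decrements on every exit path.
        JSC::SuperSamplerScope scope(/* doSample */ true);

        // ... work whose time share shows up in printSuperSamplerState() ...
    }
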
diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.cpp b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp
new file mode 100644
index 000000000..23d1e0800
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ToThisStatus.h"
+
+namespace JSC {
+
+ToThisStatus merge(ToThisStatus a, ToThisStatus b)
+{
+ switch (a) {
+ case ToThisOK:
+ return b;
+ case ToThisConflicted:
+ return ToThisConflicted;
+ case ToThisClearedByGC:
+ if (b == ToThisConflicted)
+ return ToThisConflicted;
+ return ToThisClearedByGC;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return ToThisConflicted;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ToThisStatus status)
+{
+ switch (status) {
+ case ToThisOK:
+ out.print("OK");
+ return;
+ case ToThisConflicted:
+ out.print("Conflicted");
+ return;
+ case ToThisClearedByGC:
+ out.print("ClearedByGC");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
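
merge() is a join on a three-element lattice, ToThisOK < ToThisClearedByGC < ToThisConflicted: combining any two observations yields the stronger one. A small illustrative example:

    #include "ToThisStatus.h"

    void example()
    {
        using namespace JSC;
        ToThisStatus a = merge(ToThisOK, ToThisClearedByGC); // ToThisClearedByGC
        ToThisStatus b = merge(a, ToThisConflicted);         // ToThisConflicted
        ToThisStatus c = merge(ToThisOK, ToThisOK);          // ToThisOK
        (void)a; (void)b; (void)c;
    }
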
diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.h b/Source/JavaScriptCore/bytecode/ToThisStatus.h
new file mode 100644
index 000000000..ded012ae7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ToThisStatus.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+enum ToThisStatus {
+ ToThisOK,
+ ToThisConflicted,
+ ToThisClearedByGC
+};
+
+ToThisStatus merge(ToThisStatus, ToThisStatus);
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::ToThisStatus);
+
+} // namespace WTF
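
Declaring printInternal(PrintStream&, JSC::ToThisStatus) in namespace WTF, as
this header does, is the whole integration with WTF's logging: dataLog() and
friends resolve an unqualified printInternal overload per printed type. A
reduced model of that customization point (invented names, not the actual WTF
internals):

    #include <cstdio>

    namespace WTFSketch {

    struct PrintStream {
        void writeString(const char* s) { std::fputs(s, stdout); }
    };

    template<typename T>
    void print(PrintStream& out, const T& value)
    {
        printInternal(out, value); // found by argument-dependent lookup
    }

    } // namespace WTFSketch

    enum Direction { Up, Down };

    namespace WTFSketch {

    void printInternal(PrintStream& out, Direction d)
    {
        out.writeString(d == Up ? "Up" : "Down");
    }

    } // namespace WTFSketch

    int main()
    {
        WTFSketch::PrintStream out;
        WTFSketch::print(out, Down); // dispatches to the Direction overload
        return 0;
    }
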
diff --git a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp
index edf8e228d..ae213d54b 100644
--- a/Source/JavaScriptCore/bytecode/ProfiledCodeBlockJettisoningWatchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,38 +24,57 @@
*/
#include "config.h"
-#include "ProfiledCodeBlockJettisoningWatchpoint.h"
+#include "TrackedReferences.h"
-#include "CodeBlock.h"
-#include "DFGCommon.h"
-#include "DFGExitProfile.h"
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
namespace JSC {
-void ProfiledCodeBlockJettisoningWatchpoint::fireInternal()
+TrackedReferences::TrackedReferences()
{
- if (DFG::shouldShowDisassembly()) {
- dataLog(
- "Firing profiled watchpoint ", RawPointer(this), " on ", *m_codeBlock, " due to ",
- m_exitKind, " at ", m_codeOrigin, "\n");
- }
-
- // FIXME: Maybe this should call alternative().
- // https://bugs.webkit.org/show_bug.cgi?id=123677
- CodeBlock* machineBaselineCodeBlock = m_codeBlock->baselineAlternative();
- CodeBlock* sourceBaselineCodeBlock =
- baselineCodeBlockForOriginAndBaselineCodeBlock(
- m_codeOrigin, machineBaselineCodeBlock);
-
- if (sourceBaselineCodeBlock) {
- sourceBaselineCodeBlock->addFrequentExitSite(
- DFG::FrequentExitSite(m_codeOrigin.bytecodeIndex, m_exitKind));
- }
+}
+
+TrackedReferences::~TrackedReferences()
+{
+}
+
+void TrackedReferences::add(JSCell* cell)
+{
+ if (cell)
+ m_references.add(cell);
+}
+
+void TrackedReferences::add(JSValue value)
+{
+ if (value.isCell())
+ add(value.asCell());
+}
+
+void TrackedReferences::check(JSCell* cell) const
+{
+ if (!cell)
+ return;
- m_codeBlock->jettison(CountReoptimization);
+ if (m_references.contains(cell))
+ return;
- if (isOnList())
- remove();
+ dataLog("Found untracked reference: ", JSValue(cell), "\n");
+ dataLog("All tracked references: ", *this, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void TrackedReferences::check(JSValue value) const
+{
+ if (value.isCell())
+ check(value.asCell());
+}
+
+void TrackedReferences::dump(PrintStream& out) const
+{
+ CommaPrinter comma;
+ for (JSCell* cell : m_references)
+ out.print(comma, RawPointer(cell));
}
} // namespace JSC
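
TrackedReferences is an audit set for GC pointers: a validation pass add()s
every cell it claims to have accounted for, then check()s everything actually
referenced, and the first unrecorded reference dumps the whole set before
crashing. The same two-phase pattern in plain std types (an analogue for
illustration, not the JSC API):

    #include <cstdio>
    #include <cstdlib>
    #include <unordered_set>

    // TrackedPtrs plays the role of TrackedReferences; std::abort() plays
    // the role of RELEASE_ASSERT_NOT_REACHED().
    struct TrackedPtrs {
        std::unordered_set<const void*> refs;

        void add(const void* p)
        {
            if (p) // null is always acceptable, as in TrackedReferences::add()
                refs.insert(p);
        }

        void check(const void* p) const
        {
            if (!p || refs.count(p))
                return;
            std::fprintf(stderr, "Found untracked reference: %p\n", p);
            std::abort(); // fail fast: a validator should crash loudly
        }
    };

    int main()
    {
        int tracked = 0, stray = 0;
        TrackedPtrs audit;
        audit.add(&tracked);
        audit.check(&tracked); // passes: was recorded
        audit.check(&stray);   // aborts: never recorded
        return 0;
    }
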
diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.h b/Source/JavaScriptCore/bytecode/TrackedReferences.h
new file mode 100644
index 000000000..a1021675c
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/TrackedReferences.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "JSCell.h"
+#include <wtf/HashSet.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class TrackedReferences {
+public:
+ TrackedReferences();
+ ~TrackedReferences();
+
+ void add(JSCell*);
+ void add(JSValue);
+
+ void check(JSCell*) const;
+ void check(JSValue) const;
+
+ void dump(PrintStream&) const;
+
+private:
+ HashSet<JSCell*> m_references;
+};
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/TypeLocation.h b/Source/JavaScriptCore/bytecode/TypeLocation.h
new file mode 100644
index 000000000..bc75923fd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/TypeLocation.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "TypeSet.h"
+
+namespace JSC {
+
+enum TypeProfilerGlobalIDFlags {
+ TypeProfilerNeedsUniqueIDGeneration = -1,
+ TypeProfilerNoGlobalIDExists = -2,
+ TypeProfilerReturnStatement = -3
+};
+
+typedef intptr_t GlobalVariableID;
+
+class TypeLocation {
+public:
+ TypeLocation()
+ : m_lastSeenType(TypeNothing)
+ , m_divotForFunctionOffsetIfReturnStatement(UINT_MAX)
+ , m_instructionTypeSet(TypeSet::create())
+ , m_globalTypeSet(nullptr)
+ {
+ }
+
+ GlobalVariableID m_globalVariableID;
+ RuntimeType m_lastSeenType;
+ intptr_t m_sourceID;
+ unsigned m_divotStart;
+ unsigned m_divotEnd;
+ unsigned m_divotForFunctionOffsetIfReturnStatement;
+ RefPtr<TypeSet> m_instructionTypeSet;
+ RefPtr<TypeSet> m_globalTypeSet;
+};
+
+} // namespace JSC
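
A TypeLocation pairs a per-instruction TypeSet with an optional shared
per-variable one, and m_lastSeenType acts as a cheap filter: while the same
runtime type keeps recurring at a site, the profiler can skip set insertion
entirely. A reduced model of that shape (assumed behavior sketched with std
types; the real recording code lives elsewhere in JSC):

    #include <cassert>
    #include <cstdint>
    #include <set>

    enum class SeenType : uint8_t { Nothing, Int32, Number, String, Object };

    struct Location {
        SeenType lastSeen = SeenType::Nothing;     // mirrors m_lastSeenType
        std::set<SeenType> instructionTypes;       // per-bytecode set
        std::set<SeenType>* globalTypes = nullptr; // shared per-variable set

        void record(SeenType t)
        {
            if (t == lastSeen)
                return; // fast path: the repeated-type case pays nothing
            lastSeen = t;
            instructionTypes.insert(t);
            if (globalTypes)
                globalTypes->insert(t);
        }
    };

    int main()
    {
        Location site;
        site.record(SeenType::Int32);
        site.record(SeenType::Int32); // filtered by lastSeen
        assert(site.instructionTypes.size() == 1);
        return 0;
    }
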
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
index 1dfb5ac6a..53defbfb3 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,191 +28,52 @@
#include "UnlinkedCodeBlock.h"
#include "BytecodeGenerator.h"
+#include "BytecodeRewriter.h"
#include "ClassInfo.h"
#include "CodeCache.h"
-#include "Executable.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
+#include "JSCInlines.h"
#include "JSString.h"
-#include "Operations.h"
#include "Parser.h"
+#include "PreciseJumpTargetsInlines.h"
#include "SourceProvider.h"
#include "Structure.h"
#include "SymbolTable.h"
+#include "UnlinkedEvalCodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
#include "UnlinkedInstructionStream.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
#include <wtf/DataLog.h>
namespace JSC {
-const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) };
-const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) };
-const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) };
-const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) };
-const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
-const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
+const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) };
-static UnlinkedFunctionCodeBlock* generateFunctionCodeBlock(VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source, CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
-{
- RefPtr<FunctionBodyNode> body = parse<FunctionBodyNode>(&vm, source, executable->parameters(), executable->name(), executable->isInStrictContext() ? JSParseStrict : JSParseNormal, JSParseFunctionCode, error);
-
- if (!body) {
- ASSERT(error.m_type != ParserError::ErrorNone);
- return 0;
- }
-
- if (executable->forceUsesArguments())
- body->setUsesArguments();
- body->finishParsing(executable->parameters(), executable->name(), executable->functionNameIsInScopeToggle());
- executable->recordParse(body->features(), body->hasCapturedVariables());
-
- UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(body->needsActivation(), body->usesEval(), body->isStrictMode(), kind == CodeForConstruct));
- OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(vm, body.get(), result, debuggerMode, profilerMode)));
- error = generator->generate();
- body->destroyData();
- if (error.m_type != ParserError::ErrorNone)
- return 0;
- return result;
-}
-
-unsigned UnlinkedCodeBlock::addOrFindConstant(JSValue v)
-{
- unsigned numberOfConstants = numberOfConstantRegisters();
- for (unsigned i = 0; i < numberOfConstants; ++i) {
- if (getConstant(FirstConstantRegisterIndex + i) == v)
- return i;
- }
- return addConstant(v);
-}
-
-UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, FunctionBodyNode* node, bool isFromGlobalCode)
- : Base(*vm, structure)
- , m_numCapturedVariables(node->capturedVariableCount())
- , m_forceUsesArguments(node->usesArguments())
- , m_isInStrictContext(node->isStrictMode())
- , m_hasCapturedVariables(node->hasCapturedVariables())
- , m_isFromGlobalCode(isFromGlobalCode)
- , m_name(node->ident())
- , m_inferredName(node->inferredName())
- , m_parameters(node->parameters())
- , m_firstLineOffset(node->firstLine() - source.firstLine())
- , m_lineCount(node->lastLine() - node->firstLine())
- , m_unlinkedFunctionNameStart(node->functionNameStart() - source.startOffset())
- , m_unlinkedBodyStartColumn(node->startColumn())
- , m_unlinkedBodyEndColumn(m_lineCount ? node->endColumn() : node->endColumn() - node->startColumn())
- , m_startOffset(node->source().startOffset() - source.startOffset())
- , m_sourceLength(node->source().length())
- , m_features(node->features())
- , m_functionNameIsInScopeToggle(node->functionNameIsInScopeToggle())
-{
-}
-
-size_t UnlinkedFunctionExecutable::parameterCount() const
-{
- return m_parameters->size();
-}
-
-void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
-{
- UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
- ASSERT_GC_OBJECT_INHERITS(thisObject, info());
- COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
- ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
- Base::visitChildren(thisObject, visitor);
- visitor.append(&thisObject->m_codeBlockForCall);
- visitor.append(&thisObject->m_codeBlockForConstruct);
- visitor.append(&thisObject->m_nameValue);
- visitor.append(&thisObject->m_symbolTableForCall);
- visitor.append(&thisObject->m_symbolTableForConstruct);
-}
-
-FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& source, size_t lineOffset, size_t sourceOffset)
-{
- unsigned firstLine = lineOffset + m_firstLineOffset;
- unsigned startOffset = sourceOffset + m_startOffset;
- bool startColumnIsOnFirstSourceLine = !m_firstLineOffset;
- unsigned startColumn = m_unlinkedBodyStartColumn + (startColumnIsOnFirstSourceLine ? source.startColumn() : 1);
- bool endColumnIsOnStartLine = !m_lineCount;
- unsigned endColumn = m_unlinkedBodyEndColumn + (endColumnIsOnStartLine ? startColumn : 1);
- SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
- return FunctionExecutable::create(vm, code, this, firstLine, firstLine + m_lineCount, startColumn, endColumn);
-}
-
-UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(const Identifier& name, ExecState* exec, Debugger*, const SourceCode& source, JSObject** exception)
-{
- ParserError error;
- VM& vm = exec->vm();
- CodeCache* codeCache = vm.codeCache();
- UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(vm, name, source, error);
-
- if (exec->lexicalGlobalObject()->hasDebugger())
- exec->lexicalGlobalObject()->debugger()->sourceParsed(exec, source.provider(), error.m_line, error.m_message);
-
- if (error.m_type != ParserError::ErrorNone) {
- *exception = error.toErrorObject(exec->lexicalGlobalObject(), source);
- return 0;
- }
-
- return executable;
-}
-
-UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor(VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error)
-{
- switch (specializationKind) {
- case CodeForCall:
- if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForCall.get())
- return codeBlock;
- break;
- case CodeForConstruct:
- if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForConstruct.get())
- return codeBlock;
- break;
- }
-
- UnlinkedFunctionCodeBlock* result = generateFunctionCodeBlock(vm, this, source, specializationKind, debuggerMode, profilerMode, error);
-
- if (error.m_type != ParserError::ErrorNone)
- return 0;
-
- switch (specializationKind) {
- case CodeForCall:
- m_codeBlockForCall.set(vm, this, result);
- m_symbolTableForCall.set(vm, this, result->symbolTable());
- break;
- case CodeForConstruct:
- m_codeBlockForConstruct.set(vm, this, result);
- m_symbolTableForConstruct.set(vm, this, result->symbolTable());
- break;
- }
- return result;
-}
-
-String UnlinkedFunctionExecutable::paramString() const
-{
- FunctionParameters& parameters = *m_parameters;
- StringBuilder builder;
- for (size_t pos = 0; pos < parameters.size(); ++pos) {
- if (!builder.isEmpty())
- builder.appendLiteral(", ");
- parameters.at(pos)->toString(builder);
- }
- return builder.toString();
-}
-
-UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
+UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
: Base(*vm, structure)
, m_numVars(0)
- , m_numCalleeRegisters(0)
+ , m_numCalleeLocals(0)
, m_numParameters(0)
- , m_vm(vm)
- , m_argumentsRegister(VirtualRegister())
, m_globalObjectRegister(VirtualRegister())
- , m_needsFullScopeChain(info.m_needsActivation)
- , m_usesEval(info.m_usesEval)
- , m_isNumericCompareFunction(false)
- , m_isStrictMode(info.m_isStrictMode)
- , m_isConstructor(info.m_isConstructor)
+ , m_usesEval(info.usesEval())
+ , m_isStrictMode(info.isStrictMode())
+ , m_isConstructor(info.isConstructor())
, m_hasCapturedVariables(false)
- , m_firstLine(0)
+ , m_isBuiltinFunction(info.isBuiltinFunction())
+ , m_superBinding(static_cast<unsigned>(info.superBinding()))
+ , m_scriptMode(static_cast<unsigned>(info.scriptMode()))
+ , m_isArrowFunctionContext(info.isArrowFunctionContext())
+ , m_isClassContext(info.isClassContext())
+ , m_wasCompiledWithDebuggingOpcodes(debuggerMode == DebuggerMode::DebuggerOn || Options::forceDebuggerBytecodeGeneration())
+ , m_constructorKind(static_cast<unsigned>(info.constructorKind()))
+ , m_derivedContextType(static_cast<unsigned>(info.derivedContextType()))
+ , m_evalContextType(static_cast<unsigned>(info.evalContextType()))
, m_lineCount(0)
, m_endColumn(UINT_MAX)
+ , m_didOptimize(MixedTriState)
+ , m_parseMode(info.parseMode())
, m_features(0)
, m_codeType(codeType)
, m_arrayProfileCount(0)
@@ -220,32 +81,38 @@ UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType code
, m_objectAllocationProfileCount(0)
, m_valueProfileCount(0)
, m_llintCallLinkInfoCount(0)
-#if ENABLE(BYTECODE_COMMENTS)
- , m_bytecodeCommentIterator(0)
-#endif
{
-
+ for (auto& constantRegisterIndex : m_linkTimeConstants)
+ constantRegisterIndex = 0;
+ ASSERT(m_constructorKind == static_cast<unsigned>(info.constructorKind()));
}
void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
ASSERT_GC_OBJECT_INHERITS(thisObject, info());
- COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
- ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
Base::visitChildren(thisObject, visitor);
- visitor.append(&thisObject->m_symbolTable);
+ auto locker = holdLock(*thisObject);
for (FunctionExpressionVector::iterator ptr = thisObject->m_functionDecls.begin(), end = thisObject->m_functionDecls.end(); ptr != end; ++ptr)
- visitor.append(ptr);
+ visitor.append(*ptr);
for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr)
- visitor.append(ptr);
+ visitor.append(*ptr);
visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size());
+ if (thisObject->m_unlinkedInstructions)
+ visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes());
if (thisObject->m_rareData) {
for (size_t i = 0, end = thisObject->m_rareData->m_regexps.size(); i != end; i++)
- visitor.append(&thisObject->m_rareData->m_regexps[i]);
+ visitor.append(thisObject->m_rareData->m_regexps[i]);
}
}
+size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell)
+{
+ UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
+ size_t extraSize = thisObject->m_unlinkedInstructions ? thisObject->m_unlinkedInstructions->sizeInBytes() : 0;
+ return Base::estimatedSize(cell) + extraSize;
+}
+
int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
ASSERT(bytecodeOffset < instructions().count());
@@ -258,8 +125,8 @@ int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
return line;
}
-inline void UnlinkedCodeBlock::getLineAndColumn(ExpressionRangeInfo& info,
- unsigned& line, unsigned& column)
+inline void UnlinkedCodeBlock::getLineAndColumn(const ExpressionRangeInfo& info,
+ unsigned& line, unsigned& column) const
{
switch (info.mode) {
case ExpressionRangeInfo::FatLineMode:
@@ -292,6 +159,7 @@ static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& i
case DidReachBreakpoint: event = " DidReachBreakpoint"; break;
case WillLeaveCallFrame: event = " WillLeaveCallFrame"; break;
case WillExecuteStatement: event = " WillExecuteStatement"; break;
+ case WillExecuteExpression: event = " WillExecuteExpression"; break;
}
}
dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, opcodeNames[opcode], event);
@@ -315,7 +183,7 @@ void UnlinkedCodeBlock::dumpExpressionRangeInfo()
#endif
void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset,
- int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+ int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
{
ASSERT(bytecodeOffset < instructions().count());
@@ -328,7 +196,7 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset
return;
}
- Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo;
+ const Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo;
int low = 0;
int high = expressionInfo.size();
@@ -343,7 +211,7 @@ void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset
if (!low)
low = 1;
- ExpressionRangeInfo& info = expressionInfo[low - 1];
+ const ExpressionRangeInfo& info = expressionInfo[low - 1];
startOffset = info.startOffset;
endOffset = info.endOffset;
divot = info.divotPoint;
@@ -404,51 +272,145 @@ void UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset,
m_expressionInfo.append(info);
}
-void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+bool UnlinkedCodeBlock::typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot)
{
- UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell);
- ASSERT_GC_OBJECT_INHERITS(thisObject, info());
- COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
- ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren());
- Base::visitChildren(thisObject, visitor);
- for (size_t i = 0, end = thisObject->m_functionDeclarations.size(); i != end; i++)
- visitor.append(&thisObject->m_functionDeclarations[i].second);
+ static const bool verbose = false;
+ if (!m_rareData) {
+ if (verbose)
+ dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+ startDivot = UINT_MAX;
+ endDivot = UINT_MAX;
+ return false;
+ }
+
+ auto iter = m_rareData->m_typeProfilerInfoMap.find(bytecodeOffset);
+ if (iter == m_rareData->m_typeProfilerInfoMap.end()) {
+ if (verbose)
+ dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+ startDivot = UINT_MAX;
+ endDivot = UINT_MAX;
+ return false;
+ }
+
+ RareData::TypeProfilerExpressionRange& range = iter->value;
+ startDivot = range.m_startDivot;
+ endDivot = range.m_endDivot;
+ return true;
}
-UnlinkedCodeBlock::~UnlinkedCodeBlock()
+void UnlinkedCodeBlock::addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot)
{
+ createRareDataIfNecessary();
+ RareData::TypeProfilerExpressionRange range;
+ range.m_startDivot = startDivot;
+ range.m_endDivot = endDivot;
+ m_rareData->m_typeProfilerInfoMap.set(instructionOffset, range);
}
-void UnlinkedProgramCodeBlock::destroy(JSCell* cell)
+UnlinkedCodeBlock::~UnlinkedCodeBlock()
{
- jsCast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock();
}
-void UnlinkedEvalCodeBlock::destroy(JSCell* cell)
+void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions)
{
- jsCast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock();
+ ASSERT(instructions);
+ {
+ auto locker = holdLock(*this);
+ m_unlinkedInstructions = WTFMove(instructions);
+ }
+ Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes());
}
-void UnlinkedFunctionCodeBlock::destroy(JSCell* cell)
+const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const
{
- jsCast<UnlinkedFunctionCodeBlock*>(cell)->~UnlinkedFunctionCodeBlock();
+ ASSERT(m_unlinkedInstructions.get());
+ return *m_unlinkedInstructions;
}
-void UnlinkedFunctionExecutable::destroy(JSCell* cell)
+UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
{
- jsCast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable();
+ return handlerForIndex(bytecodeOffset, requiredHandler);
}
-void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions)
+UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
{
- m_unlinkedInstructions = std::move(instructions);
+ if (!m_rareData)
+ return nullptr;
+ return UnlinkedHandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
}
-const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const
+void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter)
{
- ASSERT(m_unlinkedInstructions.get());
- return *m_unlinkedInstructions;
+ // Before applying the changes, we adjust the jumps based on the original bytecode offset, the offset to the jump target, and
+ // the insertion information.
+
+ BytecodeGraph<UnlinkedCodeBlock>& graph = rewriter.graph();
+ UnlinkedInstruction* instructionsBegin = graph.instructions().begin();
+
+ for (int bytecodeOffset = 0, instructionCount = graph.instructions().size(); bytecodeOffset < instructionCount;) {
+ UnlinkedInstruction* current = instructionsBegin + bytecodeOffset;
+ OpcodeID opcodeID = current[0].u.opcode;
+ extractStoredJumpTargetsForBytecodeOffset(this, vm()->interpreter, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) {
+ relativeOffset = rewriter.adjustJumpTarget(bytecodeOffset, bytecodeOffset + relativeOffset);
+ });
+ bytecodeOffset += opcodeLength(opcodeID);
+ }
+
+ // Then, exception handlers should be adjusted.
+ if (m_rareData) {
+ for (UnlinkedHandlerInfo& handler : m_rareData->m_exceptionHandlers) {
+ handler.target = rewriter.adjustAbsoluteOffset(handler.target);
+ handler.start = rewriter.adjustAbsoluteOffset(handler.start);
+ handler.end = rewriter.adjustAbsoluteOffset(handler.end);
+ }
+
+ for (size_t i = 0; i < m_rareData->m_opProfileControlFlowBytecodeOffsets.size(); ++i)
+ m_rareData->m_opProfileControlFlowBytecodeOffsets[i] = rewriter.adjustAbsoluteOffset(m_rareData->m_opProfileControlFlowBytecodeOffsets[i]);
+
+ if (!m_rareData->m_typeProfilerInfoMap.isEmpty()) {
+ HashMap<unsigned, RareData::TypeProfilerExpressionRange> adjustedTypeProfilerInfoMap;
+ for (auto& entry : m_rareData->m_typeProfilerInfoMap)
+ adjustedTypeProfilerInfoMap.set(rewriter.adjustAbsoluteOffset(entry.key), entry.value);
+ m_rareData->m_typeProfilerInfoMap.swap(adjustedTypeProfilerInfoMap);
+ }
+ }
+
+ for (size_t i = 0; i < m_propertyAccessInstructions.size(); ++i)
+ m_propertyAccessInstructions[i] = rewriter.adjustAbsoluteOffset(m_propertyAccessInstructions[i]);
+
+ for (size_t i = 0; i < m_expressionInfo.size(); ++i)
+ m_expressionInfo[i].instructionOffset = rewriter.adjustAbsoluteOffset(m_expressionInfo[i].instructionOffset);
+
+ // Then, modify the unlinked instructions.
+ rewriter.applyModification();
+
+ // And recompute the jump target based on the modified unlinked instructions.
+ m_jumpTargets.clear();
+ recomputePreciseJumpTargets(this, graph.instructions().begin(), graph.instructions().size(), m_jumpTargets);
}
+void UnlinkedCodeBlock::shrinkToFit()
+{
+ auto locker = holdLock(*this);
+
+ m_jumpTargets.shrinkToFit();
+ m_identifiers.shrinkToFit();
+ m_bitVectors.shrinkToFit();
+ m_constantRegisters.shrinkToFit();
+ m_constantsSourceCodeRepresentation.shrinkToFit();
+ m_functionDecls.shrinkToFit();
+ m_functionExprs.shrinkToFit();
+ m_propertyAccessInstructions.shrinkToFit();
+ m_expressionInfo.shrinkToFit();
+
+ if (m_rareData) {
+ m_rareData->m_exceptionHandlers.shrinkToFit();
+ m_rareData->m_regexps.shrinkToFit();
+ m_rareData->m_constantBuffers.shrinkToFit();
+ m_rareData->m_switchJumpTables.shrinkToFit();
+ m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+ m_rareData->m_expressionInfoFatPositions.shrinkToFit();
+ }
}
+} // namespace JSC
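
applyModification() above is an offset-fixup pass: once the rewriter's
insertions are known, every stored relative jump is recomputed from its
remapped source and target, and every absolute offset (exception handlers,
the type-profiler map, property-access and expression-info tables) is
shifted. A toy model of the remapping arithmetic (insertions only; the real
BytecodeRewriter also handles removals and before/after insertion points):

    #include <cassert>
    #include <vector>

    struct Insertion { unsigned at; unsigned length; }; // sorted by 'at'

    unsigned adjustAbsoluteOffset(const std::vector<Insertion>& ins, unsigned offset)
    {
        unsigned adjusted = offset;
        for (const Insertion& i : ins) {
            if (i.at <= offset)
                adjusted += i.length; // everything inserted before us shifts us
        }
        return adjusted;
    }

    int adjustJumpTarget(const std::vector<Insertion>& ins, unsigned from, unsigned to)
    {
        // A relative jump is the difference of the two remapped endpoints.
        return int(adjustAbsoluteOffset(ins, to)) - int(adjustAbsoluteOffset(ins, from));
    }

    int main()
    {
        std::vector<Insertion> ins = { { 4, 3 } }; // 3 slots inserted at offset 4
        assert(adjustAbsoluteOffset(ins, 10) == 13);
        assert(adjustJumpTarget(ins, 2, 10) == 11); // was +8 before the insert
        return 0;
    }
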
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
index b9dae2d5c..f0574976c 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,41 +23,42 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef UnlinkedCodeBlock_h
-#define UnlinkedCodeBlock_h
+#pragma once
#include "BytecodeConventions.h"
#include "CodeSpecializationKind.h"
#include "CodeType.h"
+#include "ConstructAbility.h"
#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
#include "Identifier.h"
#include "JSCell.h"
#include "JSString.h"
+#include "LockDuringMarking.h"
#include "ParserModes.h"
#include "RegExp.h"
#include "SpecialPointer.h"
-#include "SymbolTable.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VariableEnvironment.h"
#include "VirtualRegister.h"
-
-#include <wtf/Compression.h>
-#include <wtf/RefCountedArray.h>
+#include <wtf/BitVector.h>
+#include <wtf/TriState.h>
#include <wtf/Vector.h>
namespace JSC {
+class BytecodeRewriter;
class Debugger;
-class FunctionBodyNode;
class FunctionExecutable;
-class FunctionParameters;
-class JSScope;
-struct ParserError;
+class ParserError;
class ScriptExecutable;
class SourceCode;
class SourceProvider;
-class SymbolTable;
class UnlinkedCodeBlock;
class UnlinkedFunctionCodeBlock;
+class UnlinkedFunctionExecutable;
class UnlinkedInstructionStream;
+struct ExecutableInfo;
typedef unsigned UnlinkedValueProfile;
typedef unsigned UnlinkedArrayProfile;
@@ -65,134 +66,12 @@ typedef unsigned UnlinkedArrayAllocationProfile;
typedef unsigned UnlinkedObjectAllocationProfile;
typedef unsigned UnlinkedLLIntCallLinkInfo;
-struct ExecutableInfo {
- ExecutableInfo(bool needsActivation, bool usesEval, bool isStrictMode, bool isConstructor)
- : m_needsActivation(needsActivation)
- , m_usesEval(usesEval)
- , m_isStrictMode(isStrictMode)
- , m_isConstructor(isConstructor)
- {
- }
- bool m_needsActivation;
- bool m_usesEval;
- bool m_isStrictMode;
- bool m_isConstructor;
-};
-
-class UnlinkedFunctionExecutable : public JSCell {
-public:
- friend class CodeCache;
- typedef JSCell Base;
- static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionBodyNode* node, bool isFromGlobalCode = false)
- {
- UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap)) UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, node, isFromGlobalCode);
- instance->finishCreation(*vm);
- return instance;
- }
-
- const Identifier& name() const { return m_name; }
- const Identifier& inferredName() const { return m_inferredName; }
- JSString* nameValue() const { return m_nameValue.get(); }
- SymbolTable* symbolTable(CodeSpecializationKind kind)
- {
- return (kind == CodeForCall) ? m_symbolTableForCall.get() : m_symbolTableForConstruct.get();
- }
- size_t parameterCount() const;
- bool isInStrictContext() const { return m_isInStrictContext; }
- FunctionNameIsInScopeToggle functionNameIsInScopeToggle() const { return m_functionNameIsInScopeToggle; }
-
- unsigned firstLineOffset() const { return m_firstLineOffset; }
- unsigned lineCount() const { return m_lineCount; }
- unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; }
- unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; }
- unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; }
- unsigned startOffset() const { return m_startOffset; }
- unsigned sourceLength() { return m_sourceLength; }
-
- String paramString() const;
-
- UnlinkedFunctionCodeBlock* codeBlockFor(VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&);
-
- static UnlinkedFunctionExecutable* fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, JSObject** exception);
-
- FunctionExecutable* link(VM&, const SourceCode&, size_t lineOffset, size_t sourceOffset);
-
- void clearCodeForRecompilation()
- {
- m_symbolTableForCall.clear();
- m_symbolTableForConstruct.clear();
- m_codeBlockForCall.clear();
- m_codeBlockForConstruct.clear();
- }
-
- FunctionParameters* parameters() { return m_parameters.get(); }
-
- void recordParse(CodeFeatures features, bool hasCapturedVariables)
- {
- m_features = features;
- m_hasCapturedVariables = hasCapturedVariables;
- }
-
- bool forceUsesArguments() const { return m_forceUsesArguments; }
-
- CodeFeatures features() const { return m_features; }
- bool hasCapturedVariables() const { return m_hasCapturedVariables; }
-
- static const bool needsDestruction = true;
- static const bool hasImmortalStructure = true;
- static void destroy(JSCell*);
-
-private:
- UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, FunctionBodyNode*, bool isFromGlobalCode);
- WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForCall;
- WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct;
-
- unsigned m_numCapturedVariables : 29;
- bool m_forceUsesArguments : 1;
- bool m_isInStrictContext : 1;
- bool m_hasCapturedVariables : 1;
- bool m_isFromGlobalCode : 1;
-
- Identifier m_name;
- Identifier m_inferredName;
- WriteBarrier<JSString> m_nameValue;
- WriteBarrier<SymbolTable> m_symbolTableForCall;
- WriteBarrier<SymbolTable> m_symbolTableForConstruct;
- RefPtr<FunctionParameters> m_parameters;
- unsigned m_firstLineOffset;
- unsigned m_lineCount;
- unsigned m_unlinkedFunctionNameStart;
- unsigned m_unlinkedBodyStartColumn;
- unsigned m_unlinkedBodyEndColumn;
- unsigned m_startOffset;
- unsigned m_sourceLength;
-
- CodeFeatures m_features;
-
- FunctionNameIsInScopeToggle m_functionNameIsInScopeToggle;
-
-protected:
- void finishCreation(VM& vm)
- {
- Base::finishCreation(vm);
- m_nameValue.set(vm, this, jsString(&vm, name().string()));
- }
-
- static void visitChildren(JSCell*, SlotVisitor&);
-
-public:
- static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
- {
- return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info());
- }
-
- static const unsigned StructureFlags = OverridesVisitChildren | JSCell::StructureFlags;
-
- DECLARE_EXPORT_INFO;
-};
-
struct UnlinkedStringJumpTable {
- typedef HashMap<RefPtr<StringImpl>, int32_t> StringOffsetTable;
+ struct OffsetLocation {
+ int32_t branchOffset;
+ };
+
+ typedef HashMap<RefPtr<StringImpl>, OffsetLocation> StringOffsetTable;
StringOffsetTable offsetTable;
inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset)
@@ -201,7 +80,7 @@ struct UnlinkedStringJumpTable {
StringOffsetTable::const_iterator loc = offsetTable.find(value);
if (loc == end)
return defaultOffset;
- return loc->value;
+ return loc->value.branchOffset;
}
};
@@ -218,13 +97,6 @@ struct UnlinkedSimpleJumpTable {
}
};
-struct UnlinkedHandlerInfo {
- uint32_t start;
- uint32_t end;
- uint32_t target;
- uint32_t scopeDepth;
-};
-
struct UnlinkedInstruction {
UnlinkedInstruction() { u.operand = 0; }
UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; }
@@ -239,31 +111,36 @@ struct UnlinkedInstruction {
class UnlinkedCodeBlock : public JSCell {
public:
typedef JSCell Base;
+ static const unsigned StructureFlags = Base::StructureFlags;
+
static const bool needsDestruction = true;
- static const bool hasImmortalStructure = true;
enum { CallFunction, ApplyFunction };
+ typedef UnlinkedInstruction Instruction;
+ typedef Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> UnpackedInstructions;
+
bool isConstructor() const { return m_isConstructor; }
bool isStrictMode() const { return m_isStrictMode; }
bool usesEval() const { return m_usesEval; }
-
- bool needsFullScopeChain() const { return m_needsFullScopeChain; }
- void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
+ SourceParseMode parseMode() const { return m_parseMode; }
+ bool isArrowFunction() const { return isArrowFunctionParseMode(parseMode()); }
+ DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+ EvalContextType evalContextType() const { return static_cast<EvalContextType>(m_evalContextType); }
+ bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+ bool isClassContext() const { return m_isClassContext; }
void addExpressionInfo(unsigned instructionOffset, int divot,
int startOffset, int endOffset, unsigned line, unsigned column);
+ void addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot);
+
bool hasExpressionInfo() { return m_expressionInfo.size(); }
+ const Vector<ExpressionRangeInfo>& expressionInfo() { return m_expressionInfo; }
// Special registers
void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
- void setActivationRegister(VirtualRegister activationRegister) { m_activationRegister = activationRegister; }
-
- void setArgumentsRegister(VirtualRegister argumentsRegister) { m_argumentsRegister = argumentsRegister; }
- bool usesArguments() const { return m_argumentsRegister.isValid(); }
- VirtualRegister argumentsRegister() const { return m_argumentsRegister; }
-
+ void setScopeRegister(VirtualRegister scopeRegister) { m_scopeRegister = scopeRegister; }
bool usesGlobalObject() const { return m_globalObjectRegister.isValid(); }
void setGlobalObjectRegister(VirtualRegister globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; }
@@ -277,8 +154,10 @@ public:
unsigned addRegExp(RegExp* r)
{
createRareDataIfNecessary();
+ VM& vm = *this->vm();
+ auto locker = lockDuringMarking(vm.heap, *this);
unsigned size = m_rareData->m_regexps.size();
- m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_vm, this, r));
+ m_rareData->m_regexps.append(WriteBarrier<RegExp>(vm, this, r));
return size;
}
unsigned numberOfRegExps() const
@@ -296,19 +175,47 @@ public:
const Identifier& identifier(int index) const { return m_identifiers[index]; }
const Vector<Identifier>& identifiers() const { return m_identifiers; }
- size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
- unsigned addConstant(JSValue v)
+ const Vector<BitVector>& bitVectors() const { return m_bitVectors; }
+ BitVector& bitVector(size_t i) { return m_bitVectors[i]; }
+ unsigned addBitVector(BitVector&& bitVector)
+ {
+ m_bitVectors.append(WTFMove(bitVector));
+ return m_bitVectors.size() - 1;
+ }
+
+ unsigned addConstant(JSValue v, SourceCodeRepresentation sourceCodeRepresentation = SourceCodeRepresentation::Other)
+ {
+ VM& vm = *this->vm();
+ auto locker = lockDuringMarking(vm.heap, *this);
+ unsigned result = m_constantRegisters.size();
+ m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantRegisters.last().set(vm, this, v);
+ m_constantsSourceCodeRepresentation.append(sourceCodeRepresentation);
+ return result;
+ }
+ unsigned addConstant(LinkTimeConstant type)
{
+ VM& vm = *this->vm();
+ auto locker = lockDuringMarking(vm.heap, *this);
unsigned result = m_constantRegisters.size();
+ ASSERT(result);
+ unsigned index = static_cast<unsigned>(type);
+ ASSERT(index < LinkTimeConstantCount);
+ m_linkTimeConstants[index] = result;
m_constantRegisters.append(WriteBarrier<Unknown>());
- m_constantRegisters.last().set(*m_vm, this, v);
+ m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
return result;
}
- unsigned addOrFindConstant(JSValue);
+ unsigned registerIndexForLinkTimeConstant(LinkTimeConstant type)
+ {
+ unsigned index = static_cast<unsigned>(type);
+ ASSERT(index < LinkTimeConstantCount);
+ return m_linkTimeConstants[index];
+ }
const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
const WriteBarrier<Unknown>& constantRegister(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
- ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+ const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
// Jumps
size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
@@ -316,38 +223,25 @@ public:
unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
- void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
- bool isNumericCompareFunction() const { return m_isNumericCompareFunction; }
+ UnlinkedHandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+ UnlinkedHandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
- void shrinkToFit()
- {
- m_jumpTargets.shrinkToFit();
- m_identifiers.shrinkToFit();
- m_constantRegisters.shrinkToFit();
- m_functionDecls.shrinkToFit();
- m_functionExprs.shrinkToFit();
- m_propertyAccessInstructions.shrinkToFit();
- m_expressionInfo.shrinkToFit();
-
-#if ENABLE(BYTECODE_COMMENTS)
- m_bytecodeComments.shrinkToFit();
-#endif
- if (m_rareData) {
- m_rareData->m_exceptionHandlers.shrinkToFit();
- m_rareData->m_regexps.shrinkToFit();
- m_rareData->m_constantBuffers.shrinkToFit();
- m_rareData->m_switchJumpTables.shrinkToFit();
- m_rareData->m_stringSwitchJumpTables.shrinkToFit();
- m_rareData->m_expressionInfoFatPositions.shrinkToFit();
- }
- }
+ bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+
+ ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+ SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+ JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+
+ void shrinkToFit();
void setInstructions(std::unique_ptr<UnlinkedInstructionStream>);
const UnlinkedInstructionStream& instructions() const;
+ int numCalleeLocals() const { return m_numCalleeLocals; }
+
int m_numVars;
int m_numCapturedVars;
- int m_numCalleeRegisters;
+ int m_numCalleeLocals;
// Jump Tables
@@ -361,18 +255,22 @@ public:
unsigned addFunctionDecl(UnlinkedFunctionExecutable* n)
{
+ VM& vm = *this->vm();
+ auto locker = lockDuringMarking(vm.heap, *this);
unsigned size = m_functionDecls.size();
m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>());
- m_functionDecls.last().set(*m_vm, this, n);
+ m_functionDecls.last().set(vm, this, n);
return size;
}
UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
size_t numberOfFunctionDecls() { return m_functionDecls.size(); }
unsigned addFunctionExpr(UnlinkedFunctionExecutable* n)
{
+ VM& vm = *this->vm();
+ auto locker = lockDuringMarking(vm.heap, *this);
unsigned size = m_functionExprs.size();
m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>());
- m_functionExprs.last().set(*m_vm, this, n);
+ m_functionExprs.last().set(vm, this, n);
return size;
}
UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
@@ -380,13 +278,9 @@ public:
// Exception handling support
size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
- void addExceptionHandler(const UnlinkedHandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); }
+ void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
- SymbolTable* symbolTable() const { return m_symbolTable.get(); }
-
- VM* vm() const { return m_vm; }
-
UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; }
unsigned numberOfArrayProfiles() { return m_arrayProfileCount; }
UnlinkedArrayAllocationProfile addArrayAllocationProfile() { return m_arrayAllocationProfileCount++; }
@@ -402,8 +296,7 @@ public:
CodeType codeType() const { return m_codeType; }
VirtualRegister thisRegister() const { return m_thisRegister; }
- VirtualRegister activationRegister() const { return m_activationRegister; }
-
+ VirtualRegister scopeRegister() const { return m_scopeRegister; }
void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
{
@@ -436,94 +329,125 @@ public:
return m_rareData->m_constantBuffers[index];
}
- bool hasRareData() const { return m_rareData; }
+ bool hasRareData() const { return m_rareData.get(); }
int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
- int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+ int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
+
+ bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot);
- void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount, unsigned endColumn)
+ void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned lineCount, unsigned endColumn)
{
m_features = features;
m_hasCapturedVariables = hasCapturedVariables;
- m_firstLine = firstLine;
m_lineCount = lineCount;
// For the UnlinkedCodeBlock, startColumn is always 0.
m_endColumn = endColumn;
}
+ const String& sourceURLDirective() const { return m_sourceURLDirective; }
+ const String& sourceMappingURLDirective() const { return m_sourceMappingURLDirective; }
+ void setSourceURLDirective(const String& sourceURL) { m_sourceURLDirective = sourceURL; }
+ void setSourceMappingURLDirective(const String& sourceMappingURL) { m_sourceMappingURLDirective = sourceMappingURL; }
+
CodeFeatures codeFeatures() const { return m_features; }
bool hasCapturedVariables() const { return m_hasCapturedVariables; }
- unsigned firstLine() const { return m_firstLine; }
unsigned lineCount() const { return m_lineCount; }
ALWAYS_INLINE unsigned startColumn() const { return 0; }
unsigned endColumn() const { return m_endColumn; }
+ void addOpProfileControlFlowBytecodeOffset(size_t offset)
+ {
+ createRareDataIfNecessary();
+ m_rareData->m_opProfileControlFlowBytecodeOffsets.append(offset);
+ }
+ const Vector<size_t>& opProfileControlFlowBytecodeOffsets() const
+ {
+ ASSERT(m_rareData);
+ return m_rareData->m_opProfileControlFlowBytecodeOffsets;
+ }
+ bool hasOpProfileControlFlowBytecodeOffsets() const
+ {
+ return m_rareData && !m_rareData->m_opProfileControlFlowBytecodeOffsets.isEmpty();
+ }
+
void dumpExpressionRangeInfo(); // For debugging purpose only.
+ bool wasCompiledWithDebuggingOpcodes() const { return m_wasCompiledWithDebuggingOpcodes; }
+
+ TriState didOptimize() const { return m_didOptimize; }
+ void setDidOptimize(TriState didOptimize) { m_didOptimize = didOptimize; }
+
protected:
- UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&);
+ UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&, DebuggerMode);
~UnlinkedCodeBlock();
void finishCreation(VM& vm)
{
Base::finishCreation(vm);
- if (codeType() == GlobalCode)
- return;
- m_symbolTable.set(vm, this, SymbolTable::create(vm));
}
private:
+ friend class BytecodeRewriter;
+ void applyModification(BytecodeRewriter&);
void createRareDataIfNecessary()
{
- if (!m_rareData)
- m_rareData = adoptPtr(new RareData);
+ if (!m_rareData) {
+ auto locker = lockDuringMarking(*heap(), *this);
+ m_rareData = std::make_unique<RareData>();
+ }
}
- void getLineAndColumn(ExpressionRangeInfo&, unsigned& line, unsigned& column);
-
- std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
+ void getLineAndColumn(const ExpressionRangeInfo&, unsigned& line, unsigned& column) const;
int m_numParameters;
- VM* m_vm;
+
+ std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
VirtualRegister m_thisRegister;
- VirtualRegister m_argumentsRegister;
- VirtualRegister m_activationRegister;
+ VirtualRegister m_scopeRegister;
VirtualRegister m_globalObjectRegister;
- bool m_needsFullScopeChain : 1;
- bool m_usesEval : 1;
- bool m_isNumericCompareFunction : 1;
- bool m_isStrictMode : 1;
- bool m_isConstructor : 1;
- bool m_hasCapturedVariables : 1;
- unsigned m_firstLine;
+ String m_sourceURLDirective;
+ String m_sourceMappingURLDirective;
+
+ unsigned m_usesEval : 1;
+ unsigned m_isStrictMode : 1;
+ unsigned m_isConstructor : 1;
+ unsigned m_hasCapturedVariables : 1;
+ unsigned m_isBuiltinFunction : 1;
+ unsigned m_superBinding : 1;
+ unsigned m_scriptMode: 1;
+ unsigned m_isArrowFunctionContext : 1;
+ unsigned m_isClassContext : 1;
+ unsigned m_wasCompiledWithDebuggingOpcodes : 1;
+ unsigned m_constructorKind : 2;
+ unsigned m_derivedContextType : 2;
+ unsigned m_evalContextType : 2;
unsigned m_lineCount;
unsigned m_endColumn;
+ TriState m_didOptimize;
+ SourceParseMode m_parseMode;
CodeFeatures m_features;
CodeType m_codeType;
Vector<unsigned> m_jumpTargets;
+ Vector<unsigned> m_propertyAccessInstructions;
+
// Constant Pools
Vector<Identifier> m_identifiers;
+ Vector<BitVector> m_bitVectors;
Vector<WriteBarrier<Unknown>> m_constantRegisters;
+ Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
typedef Vector<WriteBarrier<UnlinkedFunctionExecutable>> FunctionExpressionVector;
FunctionExpressionVector m_functionDecls;
FunctionExpressionVector m_functionExprs;
-
- WriteBarrier<SymbolTable> m_symbolTable;
-
- Vector<unsigned> m_propertyAccessInstructions;
-
-#if ENABLE(BYTECODE_COMMENTS)
- Vector<Comment> m_bytecodeComments;
- size_t m_bytecodeCommentIterator;
-#endif
+ std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants;
unsigned m_arrayProfileCount;
unsigned m_arrayAllocationProfileCount;
@@ -548,159 +472,25 @@ public:
Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables;
Vector<ExpressionRangeInfo::FatPosition> m_expressionInfoFatPositions;
+
+ struct TypeProfilerExpressionRange {
+ unsigned m_startDivot;
+ unsigned m_endDivot;
+ };
+ HashMap<unsigned, TypeProfilerExpressionRange> m_typeProfilerInfoMap;
+ Vector<size_t> m_opProfileControlFlowBytecodeOffsets;
};
private:
- OwnPtr<RareData> m_rareData;
+ std::unique_ptr<RareData> m_rareData;
Vector<ExpressionRangeInfo> m_expressionInfo;
protected:
-
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
static void visitChildren(JSCell*, SlotVisitor&);
+ static size_t estimatedSize(JSCell*);
public:
DECLARE_INFO;
};
-class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock {
-public:
- typedef UnlinkedCodeBlock Base;
-
-protected:
- UnlinkedGlobalCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
- : Base(vm, structure, codeType, info)
- {
- }
-
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
- DECLARE_INFO;
-};
-
-class UnlinkedProgramCodeBlock : public UnlinkedGlobalCodeBlock {
-private:
- friend class CodeCache;
- static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info)
- {
- UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(vm->heap)) UnlinkedProgramCodeBlock(vm, vm->unlinkedProgramCodeBlockStructure.get(), info);
- instance->finishCreation(*vm);
- return instance;
- }
-
-public:
- typedef UnlinkedGlobalCodeBlock Base;
- static void destroy(JSCell*);
-
- void addFunctionDeclaration(VM& vm, const Identifier& name, UnlinkedFunctionExecutable* functionExecutable)
- {
- m_functionDeclarations.append(std::make_pair(name, WriteBarrier<UnlinkedFunctionExecutable>(vm, this, functionExecutable)));
- }
-
- void addVariableDeclaration(const Identifier& name, bool isConstant)
- {
- m_varDeclarations.append(std::make_pair(name, isConstant));
- }
-
- typedef Vector<std::pair<Identifier, bool>> VariableDeclations;
- typedef Vector<std::pair<Identifier, WriteBarrier<UnlinkedFunctionExecutable>> > FunctionDeclations;
-
- const VariableDeclations& variableDeclarations() const { return m_varDeclarations; }
- const FunctionDeclations& functionDeclarations() const { return m_functionDeclarations; }
-
- static void visitChildren(JSCell*, SlotVisitor&);
-
-private:
- UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
- : Base(vm, structure, GlobalCode, info)
- {
- }
-
- VariableDeclations m_varDeclarations;
- FunctionDeclations m_functionDeclarations;
-
-public:
- static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
- {
- return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info());
- }
-
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
- DECLARE_INFO;
-};
-
-class UnlinkedEvalCodeBlock : public UnlinkedGlobalCodeBlock {
-private:
- friend class CodeCache;
-
- static UnlinkedEvalCodeBlock* create(VM* vm, const ExecutableInfo& info)
- {
- UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(vm->heap)) UnlinkedEvalCodeBlock(vm, vm->unlinkedEvalCodeBlockStructure.get(), info);
- instance->finishCreation(*vm);
- return instance;
- }
-
-public:
- typedef UnlinkedGlobalCodeBlock Base;
- static void destroy(JSCell*);
-
- const Identifier& variable(unsigned index) { return m_variables[index]; }
- unsigned numVariables() { return m_variables.size(); }
- void adoptVariables(Vector<Identifier, 0, UnsafeVectorOverflow>& variables)
- {
- ASSERT(m_variables.isEmpty());
- m_variables.swap(variables);
- }
-
-private:
- UnlinkedEvalCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info)
- : Base(vm, structure, EvalCode, info)
- {
- }
-
- Vector<Identifier, 0, UnsafeVectorOverflow> m_variables;
-
-public:
- static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
- {
- return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info());
- }
-
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
- DECLARE_INFO;
-};
-
-class UnlinkedFunctionCodeBlock : public UnlinkedCodeBlock {
-public:
- static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info)
- {
- UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info);
- instance->finishCreation(*vm);
- return instance;
- }
-
- typedef UnlinkedCodeBlock Base;
- static void destroy(JSCell*);
-
-private:
- UnlinkedFunctionCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info)
- : Base(vm, structure, codeType, info)
- {
- }
-
-public:
- static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
- {
- return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info());
- }
-
- static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags;
-
- DECLARE_INFO;
-};
-
}
-
-#endif // UnlinkedCodeBlock_h
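
Among this header's additions, addConstant(LinkTimeConstant) and
registerIndexForLinkTimeConstant() form a small registry: the unlinked block
reserves a constant-register slot per well-known value and remembers its
index in m_linkTimeConstants, so the later linking step can patch real values
in by index rather than by search. A toy model of that registry (invented
names, not the JSC enum or types):

    #include <array>
    #include <cassert>
    #include <vector>

    enum class LinkSlot { GlobalThis, ThrowTypeError, Count };

    struct ConstantPool {
        std::vector<long> constants; // stands in for m_constantRegisters
        std::array<unsigned, size_t(LinkSlot::Count)> slotIndex {};

        unsigned reserve(LinkSlot which)
        {
            unsigned index = unsigned(constants.size());
            constants.push_back(0);         // placeholder until link time
            slotIndex[size_t(which)] = index; // remember where it lives
            return index;
        }

        void link(LinkSlot which, long value)
        {
            constants[slotIndex[size_t(which)]] = value; // patched by index
        }
    };

    int main()
    {
        ConstantPool pool;
        pool.reserve(LinkSlot::ThrowTypeError);
        pool.link(LinkSlot::ThrowTypeError, 42);
        assert(pool.constants[0] == 42);
        return 0;
    }
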
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp
new file mode 100644
index 000000000..07f991688
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedEvalCodeBlock.h"
+
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
+
+void UnlinkedEvalCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h
new file mode 100644
index 000000000..3130ea448
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedEvalCodeBlock.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedEvalCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+ typedef UnlinkedGlobalCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ static UnlinkedEvalCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ {
+ UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(vm->heap)) UnlinkedEvalCodeBlock(vm, vm->unlinkedEvalCodeBlockStructure.get(), info, debuggerMode);
+ instance->finishCreation(*vm);
+ return instance;
+ }
+
+ static void destroy(JSCell*);
+
+ const Identifier& variable(unsigned index) { return m_variables[index]; }
+ unsigned numVariables() { return m_variables.size(); }
+ void adoptVariables(Vector<Identifier, 0, UnsafeVectorOverflow>& variables)
+ {
+ ASSERT(m_variables.isEmpty());
+ m_variables.swap(variables);
+ }
+
+private:
+ UnlinkedEvalCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ : Base(vm, structure, EvalCode, info, debuggerMode)
+ {
+ }
+
+ Vector<Identifier, 0, UnsafeVectorOverflow> m_variables;
+
+public:
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+ {
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info());
+ }
+
+ DECLARE_INFO;
+};
+
+}
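
The adoptVariables() member above is worth a note: the eval code block takes ownership of the parser's list of declared variable names by swapping buffers rather than copying each Identifier. A minimal standalone sketch of that adopt-by-swap idiom, using invented names (EvalScope, VariableList) rather than JSC types:

    #include <cassert>
    #include <string>
    #include <vector>

    using VariableList = std::vector<std::string>;

    class EvalScope {
    public:
        // Takes ownership of the caller's buffer in O(1) by swapping instead of
        // copying; the destination must still be empty, as in adoptVariables().
        void adoptVariables(VariableList& variables)
        {
            assert(m_variables.empty());
            m_variables.swap(variables);
        }

        size_t numVariables() const { return m_variables.size(); }

    private:
        VariableList m_variables;
    };

    int main()
    {
        VariableList parsed { "x", "y" };
        EvalScope scope;
        scope.adoptVariables(parsed); // parsed is left empty; scope now owns the names
        assert(scope.numVariables() == 2 && parsed.empty());
        return 0;
    }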
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp
new file mode 100644
index 000000000..151d56077
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
+
+void UnlinkedFunctionCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<UnlinkedFunctionCodeBlock*>(cell)->~UnlinkedFunctionCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h
new file mode 100644
index 000000000..b5482b65c
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionCodeBlock.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedFunctionCodeBlock final : public UnlinkedCodeBlock {
+public:
+ typedef UnlinkedCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ {
+ UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info, debuggerMode);
+ instance->finishCreation(*vm);
+ return instance;
+ }
+
+ static void destroy(JSCell*);
+
+private:
+ UnlinkedFunctionCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ : Base(vm, structure, codeType, info, debuggerMode)
+ {
+ }
+
+public:
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+ {
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info());
+ }
+
+ DECLARE_INFO;
+};
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp
new file mode 100644
index 000000000..2481db5dd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedFunctionExecutable.h"
+
+#include "BytecodeGenerator.h"
+#include "ClassInfo.h"
+#include "CodeCache.h"
+#include "Debugger.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
+#include "JSCInlines.h"
+#include "Parser.h"
+#include "SourceProvider.h"
+#include "Structure.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+namespace JSC {
+
+static_assert(sizeof(UnlinkedFunctionExecutable) <= 256, "UnlinkedFunctionExecutable should fit in a 256-byte cell.");
+
+const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) };
+
+static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock(
+ VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source,
+ CodeSpecializationKind kind, DebuggerMode debuggerMode,
+ UnlinkedFunctionKind functionKind, ParserError& error, SourceParseMode parseMode)
+{
+ JSParserBuiltinMode builtinMode = executable->isBuiltinFunction() ? JSParserBuiltinMode::Builtin : JSParserBuiltinMode::NotBuiltin;
+ JSParserStrictMode strictMode = executable->isInStrictContext() ? JSParserStrictMode::Strict : JSParserStrictMode::NotStrict;
+ JSParserScriptMode scriptMode = executable->scriptMode();
+ ASSERT(isFunctionParseMode(executable->parseMode()));
+ std::unique_ptr<FunctionNode> function = parse<FunctionNode>(
+ &vm, source, executable->name(), builtinMode, strictMode, scriptMode, executable->parseMode(), executable->superBinding(), error, nullptr);
+
+ if (!function) {
+ ASSERT(error.isValid());
+ return nullptr;
+ }
+
+ function->finishParsing(executable->name(), executable->functionMode());
+ executable->recordParse(function->features(), function->hasCapturedVariables());
+
+ bool isClassContext = executable->superBinding() == SuperBinding::Needed;
+
+ UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(function->usesEval(), function->isStrictMode(), kind == CodeForConstruct, functionKind == UnlinkedBuiltinFunction, executable->constructorKind(), scriptMode, executable->superBinding(), parseMode, executable->derivedContextType(), false, isClassContext, EvalContextType::FunctionEvalContext), debuggerMode);
+
+ error = BytecodeGenerator::generate(vm, function.get(), result, debuggerMode, executable->parentScopeTDZVariables());
+
+ if (error.isValid())
+ return nullptr;
+ return result;
+}
+
+UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& parentSource, SourceCode&& parentSourceOverride, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType)
+ : Base(*vm, structure)
+ , m_firstLineOffset(node->firstLine() - parentSource.firstLine().oneBasedInt())
+ , m_lineCount(node->lastLine() - node->firstLine())
+ , m_unlinkedFunctionNameStart(node->functionNameStart() - parentSource.startOffset())
+ , m_unlinkedBodyStartColumn(node->startColumn())
+ , m_unlinkedBodyEndColumn(m_lineCount ? node->endColumn() : node->endColumn() - node->startColumn())
+ , m_startOffset(node->source().startOffset() - parentSource.startOffset())
+ , m_sourceLength(node->source().length())
+ , m_parametersStartOffset(node->parametersStart())
+ , m_typeProfilingStartOffset(node->functionKeywordStart())
+ , m_typeProfilingEndOffset(node->startStartOffset() + node->source().length() - 1)
+ , m_parameterCount(node->parameterCount())
+ , m_features(0)
+ , m_sourceParseMode(node->parseMode())
+ , m_isInStrictContext(node->isInStrictContext())
+ , m_hasCapturedVariables(false)
+ , m_isBuiltinFunction(kind == UnlinkedBuiltinFunction)
+ , m_constructAbility(static_cast<unsigned>(constructAbility))
+ , m_constructorKind(static_cast<unsigned>(node->constructorKind()))
+ , m_functionMode(static_cast<unsigned>(node->functionMode()))
+ , m_scriptMode(static_cast<unsigned>(scriptMode))
+ , m_superBinding(static_cast<unsigned>(node->superBinding()))
+ , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+ , m_name(node->ident())
+ , m_ecmaName(node->ecmaName())
+ , m_inferredName(node->inferredName())
+ , m_parentSourceOverride(WTFMove(parentSourceOverride))
+ , m_classSource(node->classSource())
+{
+ // Make sure these bitfields are adequately wide.
+ ASSERT(m_constructAbility == static_cast<unsigned>(constructAbility));
+ ASSERT(m_constructorKind == static_cast<unsigned>(node->constructorKind()));
+ ASSERT(m_functionMode == static_cast<unsigned>(node->functionMode()));
+ ASSERT(m_scriptMode == static_cast<unsigned>(scriptMode));
+ ASSERT(m_superBinding == static_cast<unsigned>(node->superBinding()));
+ ASSERT(m_derivedContextType == static_cast<unsigned>(derivedContextType));
+
+ m_parentScopeTDZVariables.swap(parentScopeTDZVariables);
+}
+
+void UnlinkedFunctionExecutable::destroy(JSCell* cell)
+{
+ static_cast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable();
+}
+
+void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ Base::visitChildren(thisObject, visitor);
+ visitor.append(thisObject->m_unlinkedCodeBlockForCall);
+ visitor.append(thisObject->m_unlinkedCodeBlockForConstruct);
+}
+
+FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& passedParentSource, std::optional<int> overrideLineNumber, Intrinsic intrinsic)
+{
+ const SourceCode& parentSource = m_parentSourceOverride.isNull() ? passedParentSource : m_parentSourceOverride;
+ unsigned firstLine = parentSource.firstLine().oneBasedInt() + m_firstLineOffset;
+ unsigned startOffset = parentSource.startOffset() + m_startOffset;
+ unsigned lineCount = m_lineCount;
+
+ unsigned startColumn = linkedStartColumn(parentSource.startColumn().oneBasedInt());
+ unsigned endColumn = linkedEndColumn(startColumn);
+
+ SourceCode source(parentSource.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
+ FunctionOverrides::OverrideInfo overrideInfo;
+ bool hasFunctionOverride = false;
+
+ if (UNLIKELY(Options::functionOverrides())) {
+ hasFunctionOverride = FunctionOverrides::initializeOverrideFor(source, overrideInfo);
+ if (UNLIKELY(hasFunctionOverride)) {
+ firstLine = overrideInfo.firstLine;
+ lineCount = overrideInfo.lineCount;
+ startColumn = overrideInfo.startColumn;
+ endColumn = overrideInfo.endColumn;
+ source = overrideInfo.sourceCode;
+ }
+ }
+
+ FunctionExecutable* result = FunctionExecutable::create(vm, source, this, firstLine + lineCount, endColumn, intrinsic);
+ if (overrideLineNumber)
+ result->setOverrideLineNumber(*overrideLineNumber);
+
+ if (UNLIKELY(hasFunctionOverride)) {
+ result->overrideParameterAndTypeProfilingStartEndOffsets(
+ overrideInfo.parametersStartOffset,
+ overrideInfo.typeProfilingStartOffset,
+ overrideInfo.typeProfilingEndOffset);
+ }
+
+ return result;
+}
+
+UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(
+ const Identifier& name, ExecState& exec, const SourceCode& source,
+ JSObject*& exception, int overrideLineNumber)
+{
+ ParserError error;
+ VM& vm = exec.vm();
+ auto& globalObject = *exec.lexicalGlobalObject();
+ CodeCache* codeCache = vm.codeCache();
+ DebuggerMode debuggerMode = globalObject.hasInteractiveDebugger() ? DebuggerOn : DebuggerOff;
+ UnlinkedFunctionExecutable* executable = codeCache->getUnlinkedGlobalFunctionExecutable(vm, name, source, debuggerMode, error);
+
+ if (globalObject.hasDebugger())
+ globalObject.debugger()->sourceParsed(&exec, source.provider(), error.line(), error.message());
+
+ if (error.isValid()) {
+ exception = error.toErrorObject(&globalObject, source, overrideLineNumber);
+ return nullptr;
+ }
+
+ return executable;
+}
+
+UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::unlinkedCodeBlockFor(
+ VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind,
+ DebuggerMode debuggerMode, ParserError& error, SourceParseMode parseMode)
+{
+ switch (specializationKind) {
+ case CodeForCall:
+ if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForCall.get())
+ return codeBlock;
+ break;
+ case CodeForConstruct:
+ if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForConstruct.get())
+ return codeBlock;
+ break;
+ }
+
+ UnlinkedFunctionCodeBlock* result = generateUnlinkedFunctionCodeBlock(
+ vm, this, source, specializationKind, debuggerMode,
+ isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction,
+ error, parseMode);
+
+ if (error.isValid())
+ return nullptr;
+
+ switch (specializationKind) {
+ case CodeForCall:
+ m_unlinkedCodeBlockForCall.set(vm, this, result);
+ break;
+ case CodeForConstruct:
+ m_unlinkedCodeBlockForConstruct.set(vm, this, result);
+ break;
+ }
+ return result;
+}
+
+void UnlinkedFunctionExecutable::setInvalidTypeProfilingOffsets()
+{
+ m_typeProfilingStartOffset = std::numeric_limits<unsigned>::max();
+ m_typeProfilingEndOffset = std::numeric_limits<unsigned>::max();
+}
+
+} // namespace JSC
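
unlinkedCodeBlockFor() above is a memoization point: it generates at most one unlinked code block per specialization kind (CodeForCall vs. CodeForConstruct), returns the cached block on later requests, and caches nothing when generation fails. A hedged sketch of that shape, with placeholder types (Executable, CodeBlock, Kind) that are not JSC's:

    #include <memory>

    enum class Kind { Call, Construct };

    struct CodeBlock { /* generated bytecode would live here */ };

    class Executable {
    public:
        CodeBlock* codeBlockFor(Kind kind)
        {
            std::unique_ptr<CodeBlock>& slot = kind == Kind::Call ? m_forCall : m_forConstruct;
            if (!slot)
                slot = generate(kind); // the expensive step: parse + bytecode generation;
                                       // if it fails, the slot stays empty, as in the error path above
            return slot.get();
        }

    private:
        std::unique_ptr<CodeBlock> generate(Kind)
        {
            return std::make_unique<CodeBlock>();
        }

        std::unique_ptr<CodeBlock> m_forCall;
        std::unique_ptr<CodeBlock> m_forConstruct;
    };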
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h
new file mode 100644
index 000000000..9c258505b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeConventions.h"
+#include "CodeSpecializationKind.h"
+#include "CodeType.h"
+#include "ConstructAbility.h"
+#include "ExecutableInfo.h"
+#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
+#include "Identifier.h"
+#include "Intrinsic.h"
+#include "JSCell.h"
+#include "JSString.h"
+#include "ParserModes.h"
+#include "RegExp.h"
+#include "SpecialPointer.h"
+#include "VariableEnvironment.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+class FunctionMetadataNode;
+class FunctionExecutable;
+class ParserError;
+class SourceCode;
+class SourceProvider;
+class UnlinkedFunctionCodeBlock;
+
+enum UnlinkedFunctionKind {
+ UnlinkedNormalFunction,
+ UnlinkedBuiltinFunction,
+};
+
+class UnlinkedFunctionExecutable final : public JSCell {
+public:
+ friend class CodeCache;
+ friend class VM;
+
+ typedef JSCell Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType, SourceCode&& parentSourceOverride = SourceCode())
+ {
+ UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap))
+ UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, WTFMove(parentSourceOverride), node, unlinkedFunctionKind, constructAbility, scriptMode, parentScopeTDZVariables, derivedContextType);
+ instance->finishCreation(*vm);
+ return instance;
+ }
+
+ const Identifier& name() const { return m_name; }
+ const Identifier& ecmaName() const { return m_ecmaName; }
+ void setEcmaName(const Identifier& name) { m_ecmaName = name; }
+ const Identifier& inferredName() const { return m_inferredName; }
+ unsigned parameterCount() const { return m_parameterCount; } // Excluding 'this'!
+ SourceParseMode parseMode() const { return static_cast<SourceParseMode>(m_sourceParseMode); }
+
+ const SourceCode& classSource() const { return m_classSource; }
+ void setClassSource(const SourceCode& source) { m_classSource = source; }
+
+ bool isInStrictContext() const { return m_isInStrictContext; }
+ FunctionMode functionMode() const { return static_cast<FunctionMode>(m_functionMode); }
+ ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+ SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+
+ unsigned lineCount() const { return m_lineCount; }
+ unsigned linkedStartColumn(unsigned parentStartColumn) const { return m_unlinkedBodyStartColumn + (!m_firstLineOffset ? parentStartColumn : 1); }
+ unsigned linkedEndColumn(unsigned startColumn) const { return m_unlinkedBodyEndColumn + (!m_lineCount ? startColumn : 1); }
+
+ unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; }
+ unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; }
+ unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; }
+ unsigned startOffset() const { return m_startOffset; }
+ unsigned sourceLength() { return m_sourceLength; }
+ unsigned parametersStartOffset() const { return m_parametersStartOffset; }
+ unsigned typeProfilingStartOffset() const { return m_typeProfilingStartOffset; }
+ unsigned typeProfilingEndOffset() const { return m_typeProfilingEndOffset; }
+ void setInvalidTypeProfilingOffsets();
+
+ UnlinkedFunctionCodeBlock* unlinkedCodeBlockFor(
+ VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode,
+ ParserError&, SourceParseMode);
+
+ static UnlinkedFunctionExecutable* fromGlobalCode(
+ const Identifier&, ExecState&, const SourceCode&, JSObject*& exception,
+ int overrideLineNumber);
+
+ JS_EXPORT_PRIVATE FunctionExecutable* link(VM&, const SourceCode& parentSource, std::optional<int> overrideLineNumber = std::nullopt, Intrinsic = NoIntrinsic);
+
+ void clearCode()
+ {
+ m_unlinkedCodeBlockForCall.clear();
+ m_unlinkedCodeBlockForConstruct.clear();
+ }
+
+ void recordParse(CodeFeatures features, bool hasCapturedVariables)
+ {
+ m_features = features;
+ m_hasCapturedVariables = hasCapturedVariables;
+ }
+
+ CodeFeatures features() const { return m_features; }
+ bool hasCapturedVariables() const { return m_hasCapturedVariables; }
+
+ static const bool needsDestruction = true;
+ static void destroy(JSCell*);
+
+ bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+ ConstructAbility constructAbility() const { return static_cast<ConstructAbility>(m_constructAbility); }
+ JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+ bool isClassConstructorFunction() const { return constructorKind() != ConstructorKind::None; }
+ const VariableEnvironment* parentScopeTDZVariables() const { return &m_parentScopeTDZVariables; }
+
+ bool isArrowFunction() const { return isArrowFunctionParseMode(parseMode()); }
+
+ JSC::DerivedContextType derivedContextType() const { return static_cast<JSC::DerivedContextType>(m_derivedContextType); }
+
+ const String& sourceURLDirective() const { return m_sourceURLDirective; }
+ const String& sourceMappingURLDirective() const { return m_sourceMappingURLDirective; }
+ void setSourceURLDirective(const String& sourceURL) { m_sourceURLDirective = sourceURL; }
+ void setSourceMappingURLDirective(const String& sourceMappingURL) { m_sourceMappingURLDirective = sourceMappingURL; }
+
+private:
+ UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, SourceCode&& parentSourceOverride, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, JSParserScriptMode, VariableEnvironment&, JSC::DerivedContextType);
+
+ unsigned m_firstLineOffset;
+ unsigned m_lineCount;
+ unsigned m_unlinkedFunctionNameStart;
+ unsigned m_unlinkedBodyStartColumn;
+ unsigned m_unlinkedBodyEndColumn;
+ unsigned m_startOffset;
+ unsigned m_sourceLength;
+ unsigned m_parametersStartOffset;
+ unsigned m_typeProfilingStartOffset;
+ unsigned m_typeProfilingEndOffset;
+ unsigned m_parameterCount;
+ CodeFeatures m_features;
+ SourceParseMode m_sourceParseMode;
+ unsigned m_isInStrictContext : 1;
+ unsigned m_hasCapturedVariables : 1;
+ unsigned m_isBuiltinFunction : 1;
+ unsigned m_constructAbility : 1;
+ unsigned m_constructorKind : 2;
+ unsigned m_functionMode : 2; // FunctionMode
+ unsigned m_scriptMode : 1; // JSParserScriptMode
+ unsigned m_superBinding : 1;
+ unsigned m_derivedContextType : 2;
+
+ WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForCall;
+ WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForConstruct;
+
+ Identifier m_name;
+ Identifier m_ecmaName;
+ Identifier m_inferredName;
+ SourceCode m_parentSourceOverride;
+ SourceCode m_classSource;
+
+ String m_sourceURLDirective;
+ String m_sourceMappingURLDirective;
+
+ VariableEnvironment m_parentScopeTDZVariables;
+
+protected:
+ static void visitChildren(JSCell*, SlotVisitor&);
+
+public:
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+ {
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info());
+ }
+
+ DECLARE_EXPORT_INFO;
+};
+
+} // namespace JSC
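
One detail in the class above deserves a callout: enum-valued state is packed into narrow bitfields (m_constructAbility : 1, m_constructorKind : 2, and so on), and the constructor asserts that each stored value round-trips, which catches a field that has become too narrow for a newly added enumerator. A standalone sketch of that pattern, with illustrative names rather than JSC's:

    #include <cassert>

    enum class ConstructorKind : unsigned { None, Base, Extends }; // fits in 2 bits

    struct PackedFlags {
        unsigned m_constructorKind : 2;

        explicit PackedFlags(ConstructorKind kind)
            : m_constructorKind(static_cast<unsigned>(kind))
        {
            // If the field were too narrow, truncation would make the stored
            // value differ from the original and this assertion would fire.
            assert(m_constructorKind == static_cast<unsigned>(kind));
        }
    };

    int main()
    {
        PackedFlags flags(ConstructorKind::Extends);
        assert(static_cast<ConstructorKind>(flags.m_constructorKind) == ConstructorKind::Extends);
        return 0;
    }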
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h
new file mode 100644
index 000000000..343862e64
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedGlobalCodeBlock.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock {
+public:
+ typedef UnlinkedCodeBlock Base;
+
+protected:
+ UnlinkedGlobalCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ : Base(vm, structure, codeType, info, debuggerMode)
+ {
+ }
+};
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
index 2e07f4f47..e8762ff66 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
+++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp
@@ -26,82 +26,9 @@
#include "config.h"
#include "UnlinkedInstructionStream.h"
-namespace JSC {
+#include "Opcode.h"
-// Unlinked instructions are packed in a simple stream format.
-//
-// The first byte is always the opcode.
-// It's followed by an opcode-dependent number of argument values.
-// The first 3 bits of each value determines the format:
-//
-// 5-bit positive integer (1 byte total)
-// 5-bit negative integer (1 byte total)
-// 13-bit positive integer (2 bytes total)
-// 13-bit negative integer (2 bytes total)
-// 5-bit constant register index, based at 0x40000000 (1 byte total)
-// 13-bit constant register index, based at 0x40000000 (2 bytes total)
-// 32-bit raw value (5 bytes total)
-
-enum PackedValueType {
- Positive5Bit = 0,
- Negative5Bit,
- Positive13Bit,
- Negative13Bit,
- ConstantRegister5Bit,
- ConstantRegister13Bit,
- Full32Bit
-};
-
-UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
- : m_stream(stream)
- , m_index(0)
-{
-}
-
-inline unsigned char UnlinkedInstructionStream::Reader::read8()
-{
- return m_stream.m_data.data()[m_index++];
-}
-
-inline unsigned UnlinkedInstructionStream::Reader::read32()
-{
- const unsigned char* data = &m_stream.m_data.data()[m_index];
- unsigned char type = data[0] >> 5;
-
- switch (type) {
- case Positive5Bit:
- m_index++;
- return data[0];
- case Negative5Bit:
- m_index++;
- return 0xffffffe0 | data[0];
- case Positive13Bit:
- m_index += 2;
- return ((data[0] & 0x1F) << 8) | data[1];
- case Negative13Bit:
- m_index += 2;
- return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
- case ConstantRegister5Bit:
- m_index++;
- return 0x40000000 | (data[0] & 0x1F);
- case ConstantRegister13Bit:
- m_index += 2;
- return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
- default:
- ASSERT(type == Full32Bit);
- m_index += 5;
- return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
- }
-}
-
-const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
-{
- m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
- unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
- for (unsigned i = 1; i < opLength; ++i)
- m_unpackedBuffer[i].u.index = read32();
- return m_unpackedBuffer;
-}
+namespace JSC {
static void append8(unsigned char*& ptr, unsigned char value)
{
@@ -150,7 +77,7 @@ static void append32(unsigned char*& ptr, unsigned value)
*(ptr++) = (value >> 24) & 0xff;
}
-UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction>& instructions)
+UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions)
: m_instructionCount(instructions.size())
{
Vector<unsigned char> buffer;
@@ -177,6 +104,11 @@ UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstru
m_data = RefCountedArray<unsigned char>(buffer);
}
+size_t UnlinkedInstructionStream::sizeInBytes() const
+{
+ return m_data.size() * sizeof(unsigned char);
+}
+
#ifndef NDEBUG
const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const
{
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
index 5a919a29e..ef139adf7 100644
--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
+++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h
@@ -24,19 +24,21 @@
*/
-#ifndef UnlinkedInstructionStream_h
-#define UnlinkedInstructionStream_h
+#pragma once
+#include "Opcode.h"
#include "UnlinkedCodeBlock.h"
#include <wtf/RefCountedArray.h>
namespace JSC {
class UnlinkedInstructionStream {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction>&);
+ explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>&);
unsigned count() const { return m_instructionCount; }
+ size_t sizeInBytes() const;
class Reader {
public:
@@ -69,6 +71,79 @@ private:
unsigned m_instructionCount;
};
-} // namespace JSC
+// Unlinked instructions are packed in a simple stream format.
+//
+// The first byte is always the opcode.
+// It's followed by an opcode-dependent number of argument values.
+// The first 3 bits of each value determine the format:
+//
+// 5-bit positive integer (1 byte total)
+// 5-bit negative integer (1 byte total)
+// 13-bit positive integer (2 bytes total)
+// 13-bit negative integer (2 bytes total)
+// 5-bit constant register index, based at 0x40000000 (1 byte total)
+// 13-bit constant register index, based at 0x40000000 (2 bytes total)
+// 32-bit raw value (5 bytes total)
+
+enum PackedValueType {
+ Positive5Bit = 0,
+ Negative5Bit,
+ Positive13Bit,
+ Negative13Bit,
+ ConstantRegister5Bit,
+ ConstantRegister13Bit,
+ Full32Bit
+};
-#endif // UnlinkedInstructionStream_h
+ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
+ : m_stream(stream)
+ , m_index(0)
+{
+}
+
+ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8()
+{
+ return m_stream.m_data.data()[m_index++];
+}
+
+ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32()
+{
+ const unsigned char* data = &m_stream.m_data.data()[m_index];
+ unsigned char type = data[0] >> 5;
+
+ switch (type) {
+ case Positive5Bit:
+ m_index++;
+ return data[0];
+ case Negative5Bit:
+ m_index++;
+ return 0xffffffe0 | data[0];
+ case Positive13Bit:
+ m_index += 2;
+ return ((data[0] & 0x1F) << 8) | data[1];
+ case Negative13Bit:
+ m_index += 2;
+ return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
+ case ConstantRegister5Bit:
+ m_index++;
+ return 0x40000000 | (data[0] & 0x1F);
+ case ConstantRegister13Bit:
+ m_index += 2;
+ return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
+ default:
+ ASSERT(type == Full32Bit);
+ m_index += 5;
+ return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
+ }
+}
+
+ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
+{
+ m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
+ unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
+ for (unsigned i = 1; i < opLength; ++i)
+ m_unpackedBuffer[i].u.index = read32();
+ return m_unpackedBuffer;
+}
+
+} // namespace JSC
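
To make the packed stream format concrete, here is a self-contained decoder that restates the branch structure of Reader::read32() above: the top 3 bits of the first byte select the format, and the remaining bits (plus any following bytes) carry the value. This is an illustrative re-statement, not the JSC implementation:

    #include <cstddef>
    #include <cstdint>

    uint32_t readPacked(const uint8_t* stream, size_t& index)
    {
        const uint8_t* data = stream + index;
        switch (data[0] >> 5) {
        case 0: // Positive5Bit (1 byte): tag bits are zero, so the byte is the value
            index += 1;
            return data[0];
        case 1: // Negative5Bit (1 byte): sign-extend the low 5 bits
            index += 1;
            return 0xffffffe0 | data[0];
        case 2: // Positive13Bit (2 bytes)
            index += 2;
            return ((data[0] & 0x1f) << 8) | data[1];
        case 3: // Negative13Bit (2 bytes)
            index += 2;
            return 0xffffe000 | ((data[0] & 0x1f) << 8) | data[1];
        case 4: // ConstantRegister5Bit (1 byte), based at 0x40000000
            index += 1;
            return 0x40000000 | (data[0] & 0x1f);
        case 5: // ConstantRegister13Bit (2 bytes)
            index += 2;
            return 0x40000000 | ((data[0] & 0x1f) << 8) | data[1];
        default: // Full32Bit (5 bytes): little-endian payload after the tag byte
            index += 5;
            return uint32_t(data[1]) | (uint32_t(data[2]) << 8)
                | (uint32_t(data[3]) << 16) | (uint32_t(data[4]) << 24);
        }
    }

For example, the single byte 0x03 decodes to 3, while the pair 0x41 0x00 (tag 010, payload 0x0100) decodes to 256.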
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp
new file mode 100644
index 000000000..00f36c0ac
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+
+#include "HeapInlines.h"
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedModuleProgramCodeBlock::s_info = { "UnlinkedModuleProgramCodeBlock", &Base::s_info, nullptr, CREATE_METHOD_TABLE(UnlinkedModuleProgramCodeBlock) };
+
+void UnlinkedModuleProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ UnlinkedModuleProgramCodeBlock* thisObject = jsCast<UnlinkedModuleProgramCodeBlock*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ Base::visitChildren(thisObject, visitor);
+}
+
+void UnlinkedModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<UnlinkedModuleProgramCodeBlock*>(cell)->~UnlinkedModuleProgramCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h
new file mode 100644
index 000000000..8676a2438
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedModuleProgramCodeBlock.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedModuleProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+ typedef UnlinkedGlobalCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ static UnlinkedModuleProgramCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ {
+ UnlinkedModuleProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedModuleProgramCodeBlock>(vm->heap)) UnlinkedModuleProgramCodeBlock(vm, vm->unlinkedModuleProgramCodeBlockStructure.get(), info, debuggerMode);
+ instance->finishCreation(*vm);
+ return instance;
+ }
+
+ static void destroy(JSCell*);
+
+ static void visitChildren(JSCell*, SlotVisitor&);
+
+ // This offset represents the constant register offset to the stored symbol table that describes the layout of the
+ // module environment. This symbol table is created by the bytecode generator, since the module environment includes
+ // the top-most lexical captured variables inside the module code. This means that, once the module environment is
+ // allocated and instantiated from this symbol table, it is tightly coupled with the specific unlinked module program
+ // code block and the stored symbol table. So before executing the module code, we should not clear the unlinked module
+ // program code block in the module executable. This requirement is met because the garbage collector only clears
+ // unlinked code in (1) unmarked executables and (2) function executables.
+ //
+ // Since the function code may be executed repeatedly and the environment of each function execution is different,
+ // the function code needs to allocate and instantiate the environment in its prologue. On the
+ // other hand, the module code is executed only once, so we can instantiate the module environment outside the module
+ // code. At that time, we construct the module environment by using the symbol table held by the module executable.
+ // The symbol table held by the executable is cloned from the one in the unlinked code block. Instantiating the module
+ // environment before executing and linking the module code is required to link the imported bindings between modules.
+ //
+ // The unlinked module program code block only holds the pre-cloned symbol table in its constant register pool; it does
+ // not hold the instantiated module environment. So while a given module environment requires the specific unlinked
+ // module program code block it was instantiated from, that unlinked code block can serve any number of module
+ // environments: there is a 1:N relation between the unlinked module code block and the module environments. So the
+ // unlinked module program code block can be cached.
+ //
+ // On the other hand, the linked code block for the module environment includes the resolved references to the imported
+ // bindings. Each imported binding references another module environment, so the linked code block is tightly coupled
+ // with the specific set of module environments. Thus, the linked code block should not be cached.
+ int moduleEnvironmentSymbolTableConstantRegisterOffset() { return m_moduleEnvironmentSymbolTableConstantRegisterOffset; }
+ void setModuleEnvironmentSymbolTableConstantRegisterOffset(int offset)
+ {
+ m_moduleEnvironmentSymbolTableConstantRegisterOffset = offset;
+ }
+
+private:
+ UnlinkedModuleProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ : Base(vm, structure, ModuleCode, info, debuggerMode)
+ {
+ }
+
+ int m_moduleEnvironmentSymbolTableConstantRegisterOffset { 0 };
+
+public:
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+ {
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedModuleProgramCodeBlockType, StructureFlags), info());
+ }
+
+ DECLARE_INFO;
+};
+
+}
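
The long comment above boils down to an indirection: the unlinked module code block never points at a module environment; it only remembers which constant-pool slot holds the symbol table describing the environment's layout, so one cached unlinked block can back any number of environments. A hedged sketch of that bookkeeping, with invented placeholder types (UnlinkedModuleBlock, SymbolTable) rather than JSC's:

    #include <memory>
    #include <utility>
    #include <vector>

    struct SymbolTable { /* layout of the module environment */ };
    struct Constant { std::shared_ptr<SymbolTable> symbolTable; };

    struct UnlinkedModuleBlock {
        std::vector<Constant> constants; // stand-in for the constant register pool
        int symbolTableOffset { 0 };     // which constant slot holds the symbol table
    };

    // The generator stores the symbol table in the constant pool and records its
    // offset; a linker can later fetch and clone it to build a fresh environment.
    void recordSymbolTable(UnlinkedModuleBlock& block, std::shared_ptr<SymbolTable> table)
    {
        block.constants.push_back({ std::move(table) });
        block.symbolTableOffset = static_cast<int>(block.constants.size()) - 1;
    }

    std::shared_ptr<SymbolTable> symbolTableFor(const UnlinkedModuleBlock& block)
    {
        return block.constants[block.symbolTableOffset].symbolTable;
    }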
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp
new file mode 100644
index 000000000..95df29990
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedProgramCodeBlock.h"
+
+#include "HeapInlines.h"
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) };
+
+void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ Base::visitChildren(thisObject, visitor);
+}
+
+void UnlinkedProgramCodeBlock::destroy(JSCell* cell)
+{
+ static_cast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock();
+}
+
+}
diff --git a/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h
new file mode 100644
index 000000000..290eae47f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/UnlinkedProgramCodeBlock.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+ typedef UnlinkedGlobalCodeBlock Base;
+ static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+ static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ {
+ UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(vm->heap)) UnlinkedProgramCodeBlock(vm, vm->unlinkedProgramCodeBlockStructure.get(), info, debuggerMode);
+ instance->finishCreation(*vm);
+ return instance;
+ }
+
+ static void destroy(JSCell*);
+
+ void setVariableDeclarations(const VariableEnvironment& environment) { m_varDeclarations = environment; }
+ const VariableEnvironment& variableDeclarations() const { return m_varDeclarations; }
+
+ void setLexicalDeclarations(const VariableEnvironment& environment) { m_lexicalDeclarations = environment; }
+ const VariableEnvironment& lexicalDeclarations() const { return m_lexicalDeclarations; }
+
+ static void visitChildren(JSCell*, SlotVisitor&);
+
+private:
+ UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+ : Base(vm, structure, GlobalCode, info, debuggerMode)
+ {
+ }
+
+ VariableEnvironment m_varDeclarations;
+ VariableEnvironment m_lexicalDeclarations;
+
+public:
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+ {
+ return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info());
+ }
+
+ DECLARE_INFO;
+};
+
+}
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
index 0790f79da..8724eb47d 100644
--- a/Source/JavaScriptCore/bytecode/ValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,14 +26,14 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ValueProfile_h
-#define ValueProfile_h
+#pragma once
-#include "ConcurrentJITLock.h"
+#include "ConcurrentJSLock.h"
#include "Heap.h"
#include "JSArray.h"
#include "SpeculatedType.h"
#include "Structure.h"
+#include "TagRegistersMode.h"
#include "WriteBarrier.h"
#include <wtf/PrintStream.h>
#include <wtf/StringPrintStream.h>
@@ -106,7 +106,7 @@ struct ValueProfileBase {
return false;
}
- CString briefDescription(const ConcurrentJITLocker& locker)
+ CString briefDescription(const ConcurrentJSLocker& locker)
{
computeUpdatedPrediction(locker);
@@ -134,7 +134,7 @@ struct ValueProfileBase {
// Updates the prediction and returns the new one. Never call this from any thread
// that isn't executing the code.
- SpeculatedType computeUpdatedPrediction(const ConcurrentJITLocker&)
+ SpeculatedType computeUpdatedPrediction(const ConcurrentJSLocker&)
{
for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
JSValue value = JSValue::decode(m_buckets[i]);
@@ -207,6 +207,3 @@ inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
}
} // namespace JSC
-
-#endif // ValueProfile_h
-
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
index 5032684dd..9c083b04a 100644
--- a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,7 +27,7 @@
#include "ValueRecovery.h"
#include "CodeBlock.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
@@ -86,34 +86,40 @@ void ValueRecovery::dumpInContext(PrintStream& out, DumpContext* context) const
case InFPR:
out.print(fpr());
return;
+ case UnboxedDoubleInFPR:
+ out.print("double(", fpr(), ")");
+ return;
#if USE(JSVALUE32_64)
case InPair:
out.print("pair(", tagGPR(), ", ", payloadGPR(), ")");
return;
#endif
case DisplacedInJSStack:
- out.printf("*%d", virtualRegister().offset());
+ out.print("*", virtualRegister());
return;
case Int32DisplacedInJSStack:
- out.printf("*int32(%d)", virtualRegister().offset());
+ out.print("*int32(", virtualRegister(), ")");
return;
case Int52DisplacedInJSStack:
- out.printf("*int52(%d)", virtualRegister().offset());
+ out.print("*int52(", virtualRegister(), ")");
return;
case StrictInt52DisplacedInJSStack:
- out.printf("*strictInt52(%d)", virtualRegister().offset());
+ out.print("*strictInt52(", virtualRegister(), ")");
return;
case DoubleDisplacedInJSStack:
- out.printf("*double(%d)", virtualRegister().offset());
+ out.print("*double(", virtualRegister(), ")");
return;
case CellDisplacedInJSStack:
- out.printf("*cell(%d)", virtualRegister().offset());
+ out.print("*cell(", virtualRegister(), ")");
return;
case BooleanDisplacedInJSStack:
- out.printf("*bool(%d)", virtualRegister().offset());
+ out.print("*bool(", virtualRegister(), ")");
+ return;
+ case DirectArgumentsThatWereNotCreated:
+ out.print("DirectArguments(", nodeID(), ")");
return;
- case ArgumentsThatWereNotCreated:
- out.printf("arguments");
+ case ClonedArgumentsThatWereNotCreated:
+ out.print("ClonedArguments(", nodeID(), ")");
return;
case Constant:
out.print("[", inContext(constant(), context), "]");
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.h b/Source/JavaScriptCore/bytecode/ValueRecovery.h
index 3af2c3409..c98fd2075 100644
--- a/Source/JavaScriptCore/bytecode/ValueRecovery.h
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,22 +23,23 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ValueRecovery_h
-#define ValueRecovery_h
+#pragma once
+#include "DFGMinifiedID.h"
#include "DataFormat.h"
#if ENABLE(JIT)
#include "GPRInfo.h"
#include "FPRInfo.h"
+#include "Reg.h"
#endif
#include "JSCJSValue.h"
#include "MacroAssembler.h"
#include "VirtualRegister.h"
-#include <wtf/Platform.h>
namespace JSC {
struct DumpContext;
+struct InlineCallFrame;
// Describes how to recover a given bytecode virtual register at a given
// code point.
@@ -54,6 +55,7 @@ enum ValueRecoveryTechnique {
InPair,
#endif
InFPR,
+ UnboxedDoubleInFPR,
// It's in the stack, but at a different location.
DisplacedInJSStack,
// It's in the stack, at a different location, and it's unboxed.
@@ -63,8 +65,9 @@ enum ValueRecoveryTechnique {
DoubleDisplacedInJSStack,
CellDisplacedInJSStack,
BooleanDisplacedInJSStack,
- // It's an Arguments object.
- ArgumentsThatWereNotCreated,
+ // It's an Arguments object. This arises because of the arguments simplification done by the DFG.
+ DirectArgumentsThatWereNotCreated,
+ ClonedArgumentsThatWereNotCreated,
// It's a constant.
Constant,
// Don't know how to recover it.
@@ -80,6 +83,19 @@ public:
bool isSet() const { return m_technique != DontKnow; }
bool operator!() const { return !isSet(); }
+
+#if ENABLE(JIT)
+ static ValueRecovery inRegister(Reg reg, DataFormat dataFormat)
+ {
+ if (reg.isGPR())
+ return inGPR(reg.gpr(), dataFormat);
+
+ ASSERT(reg.isFPR());
+ return inFPR(reg.fpr(), dataFormat);
+ }
+#endif
+
+ explicit operator bool() const { return isSet(); }
static ValueRecovery inGPR(MacroAssembler::RegisterID gpr, DataFormat dataFormat)
{
@@ -115,10 +131,14 @@ public:
}
#endif
- static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr)
+ static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr, DataFormat dataFormat)
{
+ ASSERT(dataFormat == DataFormatDouble || dataFormat & DataFormatJS);
ValueRecovery result;
- result.m_technique = InFPR;
+ if (dataFormat == DataFormatDouble)
+ result.m_technique = UnboxedDoubleInFPR;
+ else
+ result.m_technique = InFPR;
result.m_source.fpr = fpr;
return result;
}
@@ -168,18 +188,27 @@ public:
return result;
}
- static ValueRecovery argumentsThatWereNotCreated()
+ static ValueRecovery directArgumentsThatWereNotCreated(DFG::MinifiedID id)
{
ValueRecovery result;
- result.m_technique = ArgumentsThatWereNotCreated;
+ result.m_technique = DirectArgumentsThatWereNotCreated;
+ result.m_source.nodeID = id.bits();
return result;
}
+ static ValueRecovery clonedArgumentsThatWereNotCreated(DFG::MinifiedID id)
+ {
+ ValueRecovery result;
+ result.m_technique = ClonedArgumentsThatWereNotCreated;
+ result.m_source.nodeID = id.bits();
+ return result;
+ }
+
ValueRecoveryTechnique technique() const { return m_technique; }
bool isConstant() const { return m_technique == Constant; }
-
- bool isInRegisters() const
+
+ bool isInGPR() const
{
switch (m_technique) {
case InGPR:
@@ -188,19 +217,81 @@ public:
case UnboxedCellInGPR:
case UnboxedInt52InGPR:
case UnboxedStrictInt52InGPR:
-#if USE(JSVALUE32_64)
- case InPair:
-#endif
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isInFPR() const
+ {
+ switch (m_technique) {
case InFPR:
+ case UnboxedDoubleInFPR:
return true;
default:
return false;
}
}
+
+ bool isInRegisters() const
+ {
+ return isInJSValueRegs() || isInGPR() || isInFPR();
+ }
+
+ bool isInJSStack() const
+ {
+ switch (m_technique) {
+ case DisplacedInJSStack:
+ case Int32DisplacedInJSStack:
+ case Int52DisplacedInJSStack:
+ case StrictInt52DisplacedInJSStack:
+ case DoubleDisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ DataFormat dataFormat() const
+ {
+ switch (m_technique) {
+ case InGPR:
+ case InFPR:
+ case DisplacedInJSStack:
+ case Constant:
+#if USE(JSVALUE32_64)
+ case InPair:
+#endif
+ return DataFormatJS;
+ case UnboxedInt32InGPR:
+ case Int32DisplacedInJSStack:
+ return DataFormatInt32;
+ case UnboxedInt52InGPR:
+ case Int52DisplacedInJSStack:
+ return DataFormatInt52;
+ case UnboxedStrictInt52InGPR:
+ case StrictInt52DisplacedInJSStack:
+ return DataFormatStrictInt52;
+ case UnboxedBooleanInGPR:
+ case BooleanDisplacedInJSStack:
+ return DataFormatBoolean;
+ case UnboxedCellInGPR:
+ case CellDisplacedInJSStack:
+ return DataFormatCell;
+ case UnboxedDoubleInFPR:
+ case DoubleDisplacedInJSStack:
+ return DataFormatDouble;
+ default:
+ return DataFormatNone;
+ }
+ }
MacroAssembler::RegisterID gpr() const
{
- ASSERT(m_technique == InGPR || m_technique == UnboxedInt32InGPR || m_technique == UnboxedBooleanInGPR || m_technique == UnboxedInt52InGPR || m_technique == UnboxedStrictInt52InGPR || m_technique == UnboxedCellInGPR);
+ ASSERT(isInGPR());
return m_source.gpr;
}
@@ -216,29 +307,101 @@ public:
ASSERT(m_technique == InPair);
return m_source.pair.payloadGPR;
}
-#endif
+
+ bool isInJSValueRegs() const
+ {
+ return m_technique == InPair;
+ }
+
+#if ENABLE(JIT)
+ JSValueRegs jsValueRegs() const
+ {
+ ASSERT(isInJSValueRegs());
+ return JSValueRegs(tagGPR(), payloadGPR());
+ }
+#endif // ENABLE(JIT)
+#else
+ bool isInJSValueRegs() const
+ {
+ return isInGPR();
+ }
+#endif // USE(JSVALUE32_64)
MacroAssembler::FPRegisterID fpr() const
{
- ASSERT(m_technique == InFPR);
+ ASSERT(isInFPR());
return m_source.fpr;
}
VirtualRegister virtualRegister() const
{
- ASSERT(m_technique == DisplacedInJSStack || m_technique == Int32DisplacedInJSStack || m_technique == DoubleDisplacedInJSStack || m_technique == CellDisplacedInJSStack || m_technique == BooleanDisplacedInJSStack || m_technique == Int52DisplacedInJSStack || m_technique == StrictInt52DisplacedInJSStack);
+ ASSERT(isInJSStack());
return VirtualRegister(m_source.virtualReg);
}
+ ValueRecovery withLocalsOffset(int offset) const
+ {
+ switch (m_technique) {
+ case DisplacedInJSStack:
+ case Int32DisplacedInJSStack:
+ case DoubleDisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ case BooleanDisplacedInJSStack:
+ case Int52DisplacedInJSStack:
+ case StrictInt52DisplacedInJSStack: {
+ ValueRecovery result;
+ result.m_technique = m_technique;
+ result.m_source.virtualReg = m_source.virtualReg + offset;
+ return result;
+ }
+
+ default:
+ return *this;
+ }
+ }
+
JSValue constant() const
{
- ASSERT(m_technique == Constant);
+ ASSERT(isConstant());
return JSValue::decode(m_source.constant);
}
+ DFG::MinifiedID nodeID() const
+ {
+ ASSERT(m_technique == DirectArgumentsThatWereNotCreated || m_technique == ClonedArgumentsThatWereNotCreated);
+ return DFG::MinifiedID::fromBits(m_source.nodeID);
+ }
+
JSValue recover(ExecState*) const;
#if ENABLE(JIT)
+ template<typename Func>
+ void forEachReg(const Func& func)
+ {
+ switch (m_technique) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UnboxedBooleanInGPR:
+ case UnboxedCellInGPR:
+ case UnboxedInt52InGPR:
+ case UnboxedStrictInt52InGPR:
+ func(gpr());
+ return;
+ case InFPR:
+ case UnboxedDoubleInFPR:
+ func(fpr());
+ return;
+#if USE(JSVALUE32_64)
+ case InPair:
+ func(jsValueRegs().payloadGPR());
+ func(jsValueRegs().tagGPR());
+ return;
+#endif
+ default:
+ return;
+ }
+ }
+
void dumpInContext(PrintStream& out, DumpContext* context) const;
void dump(PrintStream& out) const;
#endif
@@ -256,9 +419,8 @@ private:
#endif
int virtualReg;
EncodedJSValue constant;
+ uintptr_t nodeID;
} m_source;
};
} // namespace JSC
-
-#endif // ValueRecovery_h
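A minimal sketch (not part of this patch) of the revised inFPR() factory above: the new DataFormat argument selects between the boxed InFPR technique and the new UnboxedDoubleInFPR one, and the new dataFormat() accessor reports the choice. Assumes ENABLE(JIT) and the JSC internal headers; fprRecoveryExample is a hypothetical helper.

    #include "ValueRecovery.h"

    void fprRecoveryExample(JSC::MacroAssembler::FPRegisterID fpr)
    {
        using namespace JSC;
        // DataFormatDouble selects the new UnboxedDoubleInFPR technique.
        ValueRecovery unboxed = ValueRecovery::inFPR(fpr, DataFormatDouble);
        ASSERT(unboxed.isInFPR());
        ASSERT(unboxed.dataFormat() == DataFormatDouble);

        // A boxed JSValue format keeps the plain InFPR technique.
        ValueRecovery boxed = ValueRecovery::inFPR(fpr, DataFormatJS);
        ASSERT(boxed.dataFormat() == DataFormatJS);
    }
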
diff --git a/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h b/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h
deleted file mode 100644
index 4dec40495..000000000
--- a/Source/JavaScriptCore/bytecode/VariableWatchpointSet.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef VariableWatchpointSet_h
-#define VariableWatchpointSet_h
-
-#include "Watchpoint.h"
-#include "WriteBarrier.h"
-
-namespace JSC {
-
-class VariableWatchpointSet : public WatchpointSet {
- friend class LLIntOffsetsExtractor;
-public:
- VariableWatchpointSet()
- : WatchpointSet(ClearWatchpoint)
- {
- }
-
- ~VariableWatchpointSet() { }
-
- // For the purpose of deciding whether or not to watch this variable, you only need
- // to inspect inferredValue(). If this returns something other than the empty
- // value, then it means that at all future safepoints, this watchpoint set will be
- // in one of these states:
- //
- // IsWatched: in this case, the variable's value must still be the
- // inferredValue.
- //
- // IsInvalidated: in this case the variable's value may be anything but you'll
- // either notice that it's invalidated and not install the watchpoint, or
- // you will have been notified that the watchpoint was fired.
- JSValue inferredValue() const { return m_inferredValue; }
-
- void notifyWrite(JSValue value)
- {
- ASSERT(!!value);
- switch (state()) {
- case ClearWatchpoint:
- m_inferredValue = value;
- startWatching();
- return;
-
- case IsWatched:
- ASSERT(!!m_inferredValue);
- if (value == m_inferredValue)
- return;
- invalidate();
- return;
-
- case IsInvalidated:
- ASSERT(!m_inferredValue);
- return;
- }
-
- ASSERT_NOT_REACHED();
- }
-
- void invalidate()
- {
- m_inferredValue = JSValue();
- WatchpointSet::invalidate();
- }
-
- void finalizeUnconditionally()
- {
- ASSERT(!!m_inferredValue == (state() == IsWatched));
- if (!m_inferredValue)
- return;
- if (!m_inferredValue.isCell())
- return;
- JSCell* cell = m_inferredValue.asCell();
- if (Heap::isMarked(cell))
- return;
- invalidate();
- }
-
- JSValue* addressOfInferredValue() { return &m_inferredValue; }
-
-private:
- JSValue m_inferredValue;
-};
-
-} // namespace JSC
-
-#endif // VariableWatchpointSet_h
-
diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp
new file mode 100644
index 000000000..ec6198449
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "VariableWriteFireDetail.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void VariableWriteFireDetail::dump(PrintStream& out) const
+{
+ out.print("Write to ", m_name, " in ", JSValue(m_object));
+}
+
+void VariableWriteFireDetail::touch(VM& vm, WatchpointSet* set, JSObject* object, const PropertyName& name)
+{
+ set->touch(vm, VariableWriteFireDetail(object, name));
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h
new file mode 100644
index 000000000..42ffb1b59
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class JSObject;
+class PropertyName;
+
+class VariableWriteFireDetail : public FireDetail {
+public:
+ VariableWriteFireDetail(JSObject* object, const PropertyName& name)
+ : m_object(object)
+ , m_name(name)
+ {
+ }
+
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const override;
+
+ JS_EXPORT_PRIVATE static void touch(VM&, WatchpointSet*, JSObject*, const PropertyName&);
+
+private:
+ JSObject* m_object;
+ const PropertyName& m_name;
+};
+
+} // namespace JSC
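A minimal sketch (not part of this patch) of the intended call site for the new class: the static touch() helper wraps a variable write in a VariableWriteFireDetail, so a fired watchpoint's detail dumps as "Write to <name> in <object>". notifyVariableWrite is a hypothetical caller.

    #include "VariableWriteFireDetail.h"

    void notifyVariableWrite(JSC::VM& vm, JSC::WatchpointSet* set,
        JSC::JSObject* object, const JSC::PropertyName& name)
    {
        // Starts watching a clear set, or fires a watched one with a
        // descriptive FireDetail (see WatchpointSet::touch further below).
        JSC::VariableWriteFireDetail::touch(vm, set, object, name);
    }
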
diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.cpp b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp
new file mode 100644
index 000000000..57cdb62c9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+void VirtualRegister::dump(PrintStream& out) const
+{
+ if (!isValid()) {
+ out.print("<invalid>");
+ return;
+ }
+
+ if (isHeader()) {
+ out.print("head", m_virtualRegister);
+ return;
+ }
+
+ if (isConstant()) {
+ out.print("const", toConstantIndex());
+ return;
+ }
+
+ if (isArgument()) {
+ if (!toArgument())
+ out.print("this");
+ else
+ out.print("arg", toArgument());
+ return;
+ }
+
+ if (isLocal()) {
+ out.print("loc", toLocal());
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
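A minimal sketch (not part of this patch) of what the new dump() prints for each operand kind; virtualRegisterForLocal() is assumed to be the existing helper alongside virtualRegisterForArgument() in VirtualRegister.h.

    #include "VirtualRegister.h"
    #include <wtf/DataLog.h>

    void dumpVirtualRegisters()
    {
        using namespace JSC;
        WTF::dataLog(virtualRegisterForLocal(5), "\n");    // "loc5"
        WTF::dataLog(virtualRegisterForArgument(0), "\n"); // "this"
        WTF::dataLog(virtualRegisterForArgument(2), "\n"); // "arg2"
        WTF::dataLog(VirtualRegister(), "\n");             // "<invalid>"
    }
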
diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h
index c63aee85f..f32e8d24f 100644
--- a/Source/JavaScriptCore/bytecode/VirtualRegister.h
+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,12 +23,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef VirtualRegister_h
-#define VirtualRegister_h
+#pragma once
+#include "BytecodeConventions.h"
#include "CallFrame.h"
-
-#include <wtf/Platform.h>
#include <wtf/PrintStream.h>
namespace JSC {
@@ -60,18 +58,51 @@ public:
bool isValid() const { return (m_virtualRegister != s_invalidVirtualRegister); }
bool isLocal() const { return operandIsLocal(m_virtualRegister); }
bool isArgument() const { return operandIsArgument(m_virtualRegister); }
+ bool isHeader() const { return m_virtualRegister >= 0 && m_virtualRegister < CallFrameSlot::thisArgument; }
bool isConstant() const { return m_virtualRegister >= s_firstConstantRegisterIndex; }
int toLocal() const { ASSERT(isLocal()); return operandToLocal(m_virtualRegister); }
int toArgument() const { ASSERT(isArgument()); return operandToArgument(m_virtualRegister); }
int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - s_firstConstantRegisterIndex; }
int offset() const { return m_virtualRegister; }
-
- bool operator==(const VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; }
- bool operator!=(const VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; }
+ int offsetInBytes() const { return m_virtualRegister * sizeof(Register); }
+
+ bool operator==(VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; }
+ bool operator!=(VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; }
+ bool operator<(VirtualRegister other) const { return m_virtualRegister < other.m_virtualRegister; }
+ bool operator>(VirtualRegister other) const { return m_virtualRegister > other.m_virtualRegister; }
+ bool operator<=(VirtualRegister other) const { return m_virtualRegister <= other.m_virtualRegister; }
+ bool operator>=(VirtualRegister other) const { return m_virtualRegister >= other.m_virtualRegister; }
+
+ VirtualRegister operator+(int value) const
+ {
+ return VirtualRegister(offset() + value);
+ }
+ VirtualRegister operator-(int value) const
+ {
+ return VirtualRegister(offset() - value);
+ }
+ VirtualRegister operator+(VirtualRegister value) const
+ {
+ return VirtualRegister(offset() + value.offset());
+ }
+ VirtualRegister operator-(VirtualRegister value) const
+ {
+ return VirtualRegister(offset() - value.offset());
+ }
+ VirtualRegister& operator+=(int value)
+ {
+ return *this = *this + value;
+ }
+ VirtualRegister& operator-=(int value)
+ {
+ return *this = *this - value;
+ }
+
+ void dump(PrintStream& out) const;
private:
static const int s_invalidVirtualRegister = 0x3fffffff;
- static const int s_firstConstantRegisterIndex = 0x40000000;
+ static const int s_firstConstantRegisterIndex = FirstConstantRegisterIndex;
static int localToOperand(int local) { return -1 - local; }
static int operandToLocal(int operand) { return -1 - operand; }
@@ -94,14 +125,3 @@ inline VirtualRegister virtualRegisterForArgument(int argument, int offset = 0)
}
} // namespace JSC
-
-namespace WTF {
-
-inline void printInternal(PrintStream& out, JSC::VirtualRegister value)
-{
- out.print(value.offset());
-}
-
-} // namespace WTF
-
-#endif // VirtualRegister_h
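A minimal sketch (not part of this patch) exercising the operators added above: arithmetic and comparisons act on raw operand offsets, and offsetInBytes() scales by sizeof(Register).

    #include "VirtualRegister.h"

    void virtualRegisterArithmetic()
    {
        using namespace JSC;
        VirtualRegister reg(10);
        reg += 2; // offset is now 12
        ASSERT(reg > VirtualRegister(10));
        ASSERT(reg - 4 == VirtualRegister(8));
        ASSERT(reg.offsetInBytes() == 12 * static_cast<int>(sizeof(Register)));
    }
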
diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.cpp b/Source/JavaScriptCore/bytecode/Watchpoint.cpp
index f29c2141c..fbe952d03 100644
--- a/Source/JavaScriptCore/bytecode/Watchpoint.cpp
+++ b/Source/JavaScriptCore/bytecode/Watchpoint.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,16 +26,33 @@
#include "config.h"
#include "Watchpoint.h"
-#include "LinkBuffer.h"
+#include "HeapInlines.h"
+#include "VM.h"
#include <wtf/CompilationThread.h>
-#include <wtf/PassRefPtr.h>
namespace JSC {
+void StringFireDetail::dump(PrintStream& out) const
+{
+ out.print(m_string);
+}
+
Watchpoint::~Watchpoint()
{
- if (isOnList())
+ if (isOnList()) {
+ // This will happen if we get destroyed before the set fires. That's totally a valid
+ // possibility. For example:
+ //
+ // CodeBlock has a Watchpoint on transition from structure S1. The transition never
+ // happens, but the CodeBlock gets destroyed because of GC.
remove();
+ }
+}
+
+void Watchpoint::fire(const FireDetail& detail)
+{
+ RELEASE_ASSERT(!isOnList());
+ fireInternal(detail);
}
WatchpointSet::WatchpointSet(WatchpointState state)
@@ -65,20 +82,55 @@ void WatchpointSet::add(Watchpoint* watchpoint)
m_state = IsWatched;
}
-void WatchpointSet::fireAllSlow()
+void WatchpointSet::fireAllSlow(VM& vm, const FireDetail& detail)
{
ASSERT(state() == IsWatched);
WTF::storeStoreFence();
- fireAllWatchpoints();
- m_state = IsInvalidated;
+ m_state = IsInvalidated; // Do this first. Needed for adaptive watchpoints.
+ fireAllWatchpoints(vm, detail);
WTF::storeStoreFence();
}
-void WatchpointSet::fireAllWatchpoints()
+void WatchpointSet::fireAllSlow(VM& vm, const char* reason)
{
- while (!m_set.isEmpty())
- m_set.begin()->fire();
+ fireAllSlow(vm, StringFireDetail(reason));
+}
+
+void WatchpointSet::fireAllWatchpoints(VM& vm, const FireDetail& detail)
+{
+ // In case there are any adaptive watchpoints, we need to make sure that they see that this
+ // watchpoint has already been invalidated.
+ RELEASE_ASSERT(hasBeenInvalidated());
+
+ // Firing a watchpoint may cause a GC to happen. This GC could destroy various
+ // Watchpoints themselves while they're in the process of firing. It's not safe
+ // for most Watchpoints to be destructed while they're in the middle of firing.
+ // This GC could also destroy us, and we're not in a safe state to be destroyed.
+ // The safest thing to do is to DeferGCForAWhile to prevent this GC from happening.
+ DeferGCForAWhile deferGC(vm.heap);
+
+ while (!m_set.isEmpty()) {
+ Watchpoint* watchpoint = m_set.begin();
+ ASSERT(watchpoint->isOnList());
+
+ // Removing the Watchpoint before firing it makes it possible to implement watchpoints
+ // that add themselves to a different set when they fire. This kind of "adaptive"
+ // watchpoint can be used to track some semantic property that is more fine-grained than
+ // what the set can convey. For example, we might care if a singleton object ever has a
+ // property called "foo". We can watch for this by checking if its Structure has "foo" and
+ // then watching its transitions. But then the watchpoint fires if any property is added.
+ // So, before the watchpoint decides to invalidate any code, it can check if it is
+ // possible to add itself to the transition watchpoint set of the singleton object's new
+ // Structure.
+ watchpoint->remove();
+ ASSERT(m_set.begin() != watchpoint);
+ ASSERT(!watchpoint->isOnList());
+
+ watchpoint->fire(detail);
+ // After we fire the watchpoint, the watchpoint pointer may be a dangling pointer. That's
+ // fine, because we have no use for the pointer anymore.
+ }
}
void InlineWatchpointSet::add(Watchpoint* watchpoint)
@@ -86,6 +138,11 @@ void InlineWatchpointSet::add(Watchpoint* watchpoint)
inflate()->add(watchpoint);
}
+void InlineWatchpointSet::fireAll(VM& vm, const char* reason)
+{
+ fireAll(vm, StringFireDetail(reason));
+}
+
WatchpointSet* InlineWatchpointSet::inflateSlow()
{
ASSERT(isThin());
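A minimal sketch (not part of this patch; the class is hypothetical) of the "adaptive" pattern described in the fireAllWatchpoints() comment above: because a watchpoint is removed from the set before it is fired, fireInternal() may re-register the watchpoint on a different set instead of invalidating anything.

    class AdaptiveExampleWatchpoint : public JSC::Watchpoint {
    public:
        explicit AdaptiveExampleWatchpoint(JSC::WatchpointSet& nextSet)
            : m_nextSet(nextSet)
        {
        }

    protected:
        void fireInternal(const JSC::FireDetail&) override
        {
            // We are already off the firing set's list here, so re-arming
            // on another set is safe.
            if (m_nextSet.isStillValid()) {
                m_nextSet.add(this);
                return;
            }
            // Otherwise, actually invalidate whatever this watchpoint guards.
        }

    private:
        JSC::WatchpointSet& m_nextSet;
    };
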
diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.h b/Source/JavaScriptCore/bytecode/Watchpoint.h
index 8790f4e62..69e393de4 100644
--- a/Source/JavaScriptCore/bytecode/Watchpoint.h
+++ b/Source/JavaScriptCore/bytecode/Watchpoint.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,16 +23,50 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Watchpoint_h
-#define Watchpoint_h
+#pragma once
#include <wtf/Atomics.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
#include <wtf/SentinelLinkedList.h>
#include <wtf/ThreadSafeRefCounted.h>
namespace JSC {
+class FireDetail {
+ void* operator new(size_t) = delete;
+
+public:
+ FireDetail()
+ {
+ }
+
+ virtual ~FireDetail()
+ {
+ }
+
+ virtual void dump(PrintStream&) const = 0;
+};
+
+class StringFireDetail : public FireDetail {
+public:
+ StringFireDetail(const char* string)
+ : m_string(string)
+ {
+ }
+
+ void dump(PrintStream& out) const override;
+
+private:
+ const char* m_string;
+};
+
+class WatchpointSet;
+
class Watchpoint : public BasicRawSentinelNode<Watchpoint> {
+ WTF_MAKE_NONCOPYABLE(Watchpoint);
+ WTF_MAKE_FAST_ALLOCATED;
public:
Watchpoint()
{
@@ -40,10 +74,12 @@ public:
virtual ~Watchpoint();
- void fire() { fireInternal(); }
-
protected:
- virtual void fireInternal() = 0;
+ virtual void fireInternal(const FireDetail&) = 0;
+
+private:
+ friend class WatchpointSet;
+ void fire(const FireDetail&);
};
enum WatchpointState {
@@ -53,12 +89,22 @@ enum WatchpointState {
};
class InlineWatchpointSet;
+class VM;
class WatchpointSet : public ThreadSafeRefCounted<WatchpointSet> {
friend class LLIntOffsetsExtractor;
public:
- WatchpointSet(WatchpointState);
- ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+ JS_EXPORT_PRIVATE WatchpointSet(WatchpointState);
+
+ // FIXME: In many cases, it would be amazing if this *did* fire the watchpoints. I suspect that
+ // this might be hard to get right, but still, it might be awesome.
+ JS_EXPORT_PRIVATE ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+
+ // Fast way of getting the state, which only works from the main thread.
+ WatchpointState stateOnJSThread() const
+ {
+ return static_cast<WatchpointState>(m_state);
+ }
// It is safe to call this from another thread. It may return an old
// state. Guarantees that if *first* read the state() of the thing being
@@ -98,39 +144,67 @@ public:
// set watchpoints that we believe will actually be fired.
void startWatching()
{
- ASSERT(state() != IsInvalidated);
+ ASSERT(m_state != IsInvalidated);
+ if (m_state == IsWatched)
+ return;
+ WTF::storeStoreFence();
m_state = IsWatched;
+ WTF::storeStoreFence();
}
- void fireAll()
+ void fireAll(VM& vm, const FireDetail& detail)
{
- if (state() != IsWatched)
+ if (LIKELY(m_state != IsWatched))
return;
- fireAllSlow();
+ fireAllSlow(vm, detail);
}
- void touch()
+ void fireAll(VM& vm, const char* reason)
+ {
+ if (LIKELY(m_state != IsWatched))
+ return;
+ fireAllSlow(vm, reason);
+ }
+
+ void touch(VM& vm, const FireDetail& detail)
{
if (state() == ClearWatchpoint)
startWatching();
else
- fireAll();
+ fireAll(vm, detail);
}
- void invalidate()
+ void touch(VM& vm, const char* reason)
+ {
+ touch(vm, StringFireDetail(reason));
+ }
+
+ void invalidate(VM& vm, const FireDetail& detail)
{
if (state() == IsWatched)
- fireAll();
+ fireAll(vm, detail);
m_state = IsInvalidated;
}
-
+
+ void invalidate(VM& vm, const char* reason)
+ {
+ invalidate(vm, StringFireDetail(reason));
+ }
+
+ bool isBeingWatched() const
+ {
+ return m_setIsNotEmpty;
+ }
+
int8_t* addressOfState() { return &m_state; }
+ static ptrdiff_t offsetOfState() { return OBJECT_OFFSETOF(WatchpointSet, m_state); }
int8_t* addressOfSetIsNotEmpty() { return &m_setIsNotEmpty; }
- JS_EXPORT_PRIVATE void fireAllSlow(); // Call only if you've checked isWatched.
+ JS_EXPORT_PRIVATE void fireAllSlow(VM&, const FireDetail&); // Call only if you've checked isWatched.
+ JS_EXPORT_PRIVATE void fireAllSlow(VM&, const char* reason); // Ditto.
private:
- void fireAllWatchpoints();
+ void fireAllWatchpoints(VM&, const FireDetail&);
friend class InlineWatchpointSet;
@@ -174,18 +248,34 @@ public:
freeFat();
}
+ // Fast way of getting the state, which only works from the main thread.
+ WatchpointState stateOnJSThread() const
+ {
+ uintptr_t data = m_data;
+ if (isFat(data))
+ return fat(data)->stateOnJSThread();
+ return decodeState(data);
+ }
+
+ // It is safe to call this from another thread. It may return a prior state,
+ // but that should be fine since you should only perform actions based on the
+ // state if you also add a watchpoint.
+ WatchpointState state() const
+ {
+ WTF::loadLoadFence();
+ uintptr_t data = m_data;
+ WTF::loadLoadFence();
+ if (isFat(data))
+ return fat(data)->state();
+ return decodeState(data);
+ }
+
// It is safe to call this from another thread. It may return false
// even if the set actually had been invalidated, but that ought to happen
// only in the case of races, and should be rare.
bool hasBeenInvalidated() const
{
- WTF::loadLoadFence();
- uintptr_t data = m_data;
- if (isFat(data)) {
- WTF::loadLoadFence();
- return fat(data)->hasBeenInvalidated();
- }
- return decodeState(data) == IsInvalidated;
+ return state() == IsInvalidated;
}
// Like hasBeenInvalidated(), may be called from another thread.
@@ -206,10 +296,10 @@ public:
m_data = encodeState(IsWatched);
}
- void fireAll()
+ void fireAll(VM& vm, const FireDetail& detail)
{
if (isFat()) {
- fat()->fireAll();
+ fat()->fireAll(vm, detail);
return;
}
if (decodeState(m_data) == ClearWatchpoint)
@@ -218,19 +308,77 @@ public:
WTF::storeStoreFence();
}
- void touch()
+ void invalidate(VM& vm, const FireDetail& detail)
+ {
+ if (isFat())
+ fat()->invalidate(vm, detail);
+ else
+ m_data = encodeState(IsInvalidated);
+ }
+
+ JS_EXPORT_PRIVATE void fireAll(VM&, const char* reason);
+
+ void touch(VM& vm, const FireDetail& detail)
{
if (isFat()) {
- fat()->touch();
+ fat()->touch(vm, detail);
return;
}
- if (decodeState(m_data) == ClearWatchpoint)
+ uintptr_t data = m_data;
+ if (decodeState(data) == IsInvalidated)
+ return;
+ WTF::storeStoreFence();
+ if (decodeState(data) == ClearWatchpoint)
m_data = encodeState(IsWatched);
else
m_data = encodeState(IsInvalidated);
WTF::storeStoreFence();
}
+ void touch(VM& vm, const char* reason)
+ {
+ touch(vm, StringFireDetail(reason));
+ }
+
+ // Note that for any watchpoint that is visible from the DFG, it would be incorrect to write code like:
+ //
+ // if (w.isBeingWatched())
+ // w.fireAll()
+ //
+ // Concurrently to this, the DFG could do:
+ //
+ // if (w.isStillValid())
+ // perform optimizations;
+ // if (!w.isStillValid())
+ // retry compilation;
+ //
+ // Note that the DFG algorithm is widespread, and sound, because fireAll() and invalidate() will leave
+ // the watchpoint in a !isStillValid() state. Hence, if fireAll() or invalidate() interleaved between
+ // the first isStillValid() check and the second one, then it would simply cause the DFG to retry
+ // compilation later.
+ //
+ // But, if you change some piece of state that the DFG might optimize for, but invalidate the
+ // watchpoint by doing:
+ //
+ // if (w.isBeingWatched())
+ // w.fireAll()
+ //
+ // then the DFG would never know that you invalidated state between the two checks.
+ //
+ // There are two ways to work around this:
+ //
+ // - Call fireAll() without a isBeingWatched() check. Then, the DFG will know that the watchpoint has
+ // been invalidated when it does its second check.
+ //
+ // - Do not expose the watchpoint set to the DFG directly, and have your own way of validating whether
+ // the assumptions that the DFG thread used are still valid when the DFG code is installed.
+ bool isBeingWatched() const
+ {
+ if (isFat())
+ return fat()->isBeingWatched();
+ return false;
+ }
+
private:
static const uintptr_t IsThinFlag = 1;
static const uintptr_t StateMask = 6;
@@ -247,7 +395,7 @@ private:
static uintptr_t encodeState(WatchpointState state)
{
- return (state << StateShift) | IsThinFlag;
+ return (static_cast<uintptr_t>(state) << StateShift) | IsThinFlag;
}
bool isThin() const { return isThin(m_data); }
@@ -284,6 +432,3 @@ private:
};
} // namespace JSC
-
-#endif // Watchpoint_h
-
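A minimal sketch (not part of this patch) of the reworked firing API in this header: the const char* overloads wrap the reason in a StringFireDetail, touch() either starts watching or fires, and fireAll() only does work in the IsWatched state. fireExample is a hypothetical caller.

    #include "Watchpoint.h"

    void fireExample(JSC::VM& vm, JSC::WatchpointSet& set)
    {
        using namespace JSC;
        set.touch(vm, "example: state changed"); // watch, or fire if watched
        set.fireAll(vm, "example: invalidation"); // no-op unless IsWatched
        ASSERT(set.state() == IsInvalidated);
    }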