summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/bytecode/CodeBlock.cpp
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2017-06-27 06:07:23 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2017-06-27 06:07:23 +0000
commit1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/bytecode/CodeBlock.cpp
parent32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
downloadWebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/JavaScriptCore/bytecode/CodeBlock.cpp')
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlock.cpp3587
1 files changed, 2307 insertions, 1280 deletions
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index eec5b7076..44d7f83da 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2010, 2012-2017 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
*
* Redistribution and use in source and binary forms, with or without
@@ -11,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,35 +30,60 @@
#include "config.h"
#include "CodeBlock.h"
+#include "ArithProfile.h"
+#include "BasicBlockLocation.h"
#include "BytecodeGenerator.h"
+#include "BytecodeLivenessAnalysis.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
+#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
-#include "DFGNode.h"
+#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
+#include "EvalCodeBlock.h"
+#include "FunctionCodeBlock.h"
+#include "FunctionExecutableDump.h"
+#include "GetPutInfo.h"
+#include "InlineCallFrame.h"
#include "Interpreter.h"
#include "JIT.h"
-#include "JITStubs.h"
-#include "JSActivation.h"
+#include "JITMathIC.h"
+#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
-#include "JSNameScope.h"
+#include "JSLexicalEnvironment.h"
+#include "JSModuleEnvironment.h"
+#include "LLIntData.h"
#include "LLIntEntrypoint.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
-#include "PolymorphicPutByIdList.h"
+#include "ModuleProgramCodeBlock.h"
+#include "PCToCodeOriginMap.h"
+#include "PolymorphicAccess.h"
+#include "ProfilerDatabase.h"
+#include "ProgramCodeBlock.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
-#include "RepatchBuffer.h"
#include "SlotVisitorInlines.h"
+#include "StackVisitor.h"
+#include "StructureStubInfo.h"
+#include "TypeLocationCache.h"
+#include "TypeProfiler.h"
#include "UnlinkedInstructionStream.h"
+#include "VMInlines.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
+#include <wtf/SimpleStats.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
+#include <wtf/text/UniquedStringImpl.h>
+
+#if ENABLE(JIT)
+#include "RegisterAtOffsetList.h"
+#endif
#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
@@ -70,6 +95,11 @@
namespace JSC {
+const ClassInfo CodeBlock::s_info = {
+ "CodeBlock", 0, 0,
+ CREATE_METHOD_TABLE(CodeBlock)
+};
+
CString CodeBlock::inferredName() const
{
switch (codeType()) {
@@ -79,6 +109,8 @@ CString CodeBlock::inferredName() const
return "<eval>";
case FunctionCode:
return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
+ case ModuleCode:
+ return "<module>";
default:
CRASH();
return CString("", 0);
@@ -99,7 +131,7 @@ CodeBlockHash CodeBlock::hash() const
{
if (!m_hash) {
RELEASE_ASSERT(isSafeToComputeHash());
- m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
+ m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
}
return m_hash;
}
@@ -107,7 +139,7 @@ CodeBlockHash CodeBlock::hash() const
CString CodeBlock::sourceCodeForTools() const
{
if (codeType() != FunctionCode)
- return ownerExecutable()->source().toUTF8();
+ return ownerScriptExecutable()->source().toUTF8();
SourceProvider* provider = source();
FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
@@ -119,7 +151,7 @@ CString CodeBlock::sourceCodeForTools() const
unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
return toCString(
"function ",
- provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
+ provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
}
CString CodeBlock::sourceCodeOnOneLine() const
@@ -127,22 +159,42 @@ CString CodeBlock::sourceCodeOnOneLine() const
return reduceWhitespace(sourceCodeForTools());
}
-void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+CString CodeBlock::hashAsStringIfPossible() const
{
if (hasHash() || isSafeToComputeHash())
- out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
- else
- out.print(inferredName(), "#<no-hash>:[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
+ return toCString(hash());
+ return "<no-hash>";
+}
+
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+{
+ out.print(inferredName(), "#", hashAsStringIfPossible());
+ out.print(":[", RawPointer(this), "->");
+ if (!!m_alternative)
+ out.print(RawPointer(alternative()), "->");
+ out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
if (codeType() == FunctionCode)
out.print(specializationKind());
out.print(", ", instructionCount());
if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
- out.print(" (SABI)");
- if (ownerExecutable()->neverInline())
+ out.print(" (ShouldAlwaysBeInlined)");
+ if (ownerScriptExecutable()->neverInline())
out.print(" (NeverInline)");
- if (ownerExecutable()->isStrictMode())
+ if (ownerScriptExecutable()->neverOptimize())
+ out.print(" (NeverOptimize)");
+ else if (ownerScriptExecutable()->neverFTLOptimize())
+ out.print(" (NeverFTLOptimize)");
+ if (ownerScriptExecutable()->didTryToEnterInLoop())
+ out.print(" (DidTryToEnterInLoop)");
+ if (ownerScriptExecutable()->isStrictMode())
out.print(" (StrictMode)");
+ if (m_didFailJITCompilation)
+ out.print(" (JITFail)");
+ if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+ out.print(" (FTLFail)");
+ if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+ out.print(" (HadFTLReplacement)");
out.print("]");
}
@@ -151,11 +203,6 @@ void CodeBlock::dump(PrintStream& out) const
dumpAssumingJITType(out, jitType());
}
-static CString constantName(int k, JSValue value)
-{
- return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
-}
-
static CString idName(int id0, const Identifier& ident)
{
return toCString(ident.impl(), "(@id", id0, ")");
@@ -163,19 +210,16 @@ static CString idName(int id0, const Identifier& ident)
CString CodeBlock::registerName(int r) const
{
- if (r == missingThisObjectMarker())
- return "<null>";
-
if (isConstantRegisterIndex(r))
- return constantName(r, getConstant(r));
+ return constantName(r);
- if (operandIsArgument(r)) {
- if (!VirtualRegister(r).toArgument())
- return "this";
- return toCString("arg", VirtualRegister(r).toArgument());
- }
+ return toCString(VirtualRegister(r));
+}
- return toCString("loc", VirtualRegister(r).toLocal());
+CString CodeBlock::constantName(int index) const
+{
+ JSValue value = getConstant(index);
+ return toCString(value, "(", VirtualRegister(index), ")");
}
static CString regexpToSourceString(RegExp* regExp)
@@ -188,6 +232,10 @@ static CString regexpToSourceString(RegExp* regExp)
postfix[index++] = 'i';
if (regExp->multiline())
postfix[index] = 'm';
+ if (regExp->sticky())
+ postfix[index++] = 'y';
+ if (regExp->unicode())
+ postfix[index++] = 'u';
return toCString("/", regExp->pattern().impl(), postfix);
}
@@ -197,15 +245,17 @@ static CString regexpName(int re, RegExp* regexp)
return toCString(regexpToSourceString(regexp), "(@re", re, ")");
}
-NEVER_INLINE static const char* debugHookName(int debugHookID)
+NEVER_INLINE static const char* debugHookName(int debugHookType)
{
- switch (static_cast<DebugHookID>(debugHookID)) {
+ switch (static_cast<DebugHookType>(debugHookType)) {
case DidEnterCallFrame:
return "didEnterCallFrame";
case WillLeaveCallFrame:
return "willLeaveCallFrame";
case WillExecuteStatement:
return "willExecuteStatement";
+ case WillExecuteExpression:
+ return "willExecuteExpression";
case WillExecuteProgram:
return "willExecuteProgram";
case DidExecuteProgram:
@@ -251,48 +301,20 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
case op_get_by_id:
op = "get_by_id";
break;
- case op_get_by_id_out_of_line:
- op = "get_by_id_out_of_line";
- break;
- case op_get_by_id_self:
- op = "get_by_id_self";
+ case op_get_by_id_proto_load:
+ op = "get_by_id_proto_load";
break;
- case op_get_by_id_proto:
- op = "get_by_id_proto";
- break;
- case op_get_by_id_chain:
- op = "get_by_id_chain";
- break;
- case op_get_by_id_getter_self:
- op = "get_by_id_getter_self";
- break;
- case op_get_by_id_getter_proto:
- op = "get_by_id_getter_proto";
- break;
- case op_get_by_id_getter_chain:
- op = "get_by_id_getter_chain";
- break;
- case op_get_by_id_custom_self:
- op = "get_by_id_custom_self";
- break;
- case op_get_by_id_custom_proto:
- op = "get_by_id_custom_proto";
- break;
- case op_get_by_id_custom_chain:
- op = "get_by_id_custom_chain";
- break;
- case op_get_by_id_generic:
- op = "get_by_id_generic";
+ case op_get_by_id_unset:
+ op = "get_by_id_unset";
break;
case op_get_array_length:
op = "array_length";
break;
- case op_get_string_length:
- op = "string_length";
- break;
default:
RELEASE_ASSERT_NOT_REACHED();
+#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
op = 0;
+#endif
}
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
@@ -302,22 +324,19 @@ void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location,
it += 4; // Increment up to the value profiler.
}
-#if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
-static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
+static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
{
if (!structure)
return;
out.printf("%s = %p", name, structure);
- PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
+ PropertyOffset offset = structure->getConcurrently(ident.impl());
if (offset != invalidOffset)
out.printf(" (offset = %d)", offset);
}
-#endif
-#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
-static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
+static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
{
out.printf("chain = %p: [", chain);
bool first = true;
@@ -328,11 +347,10 @@ static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain,
first = false;
else
out.printf(", ");
- dumpStructure(out, "struct", exec, currentStructure->get(), ident);
+ dumpStructure(out, "struct", currentStructure->get(), ident);
}
out.printf("]");
}
-#endif
void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
@@ -342,125 +360,131 @@ void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int l
UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
-#if ENABLE(LLINT)
if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
out.printf(" llint(array_length)");
- else if (Structure* structure = instruction[4].u.structure.get()) {
+ else if (StructureID structureID = instruction[4].u.structureID) {
+ Structure* structure = m_vm->heap.structureIDTable().get(structureID);
out.printf(" llint(");
- dumpStructure(out, "struct", exec, structure, ident);
+ dumpStructure(out, "struct", structure, ident);
out.printf(")");
+ if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_by_id_proto_load)
+ out.printf(" proto(%p)", instruction[6].u.pointer);
}
-#endif
#if ENABLE(JIT)
if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
StructureStubInfo& stubInfo = *stubPtr;
- if (stubInfo.seen) {
- out.printf(" jit(");
-
- Structure* baseStructure = 0;
- Structure* prototypeStructure = 0;
- StructureChain* chain = 0;
- PolymorphicAccessStructureList* structureList = 0;
- int listSize = 0;
-
- switch (stubInfo.accessType) {
- case access_get_by_id_self:
- out.printf("self");
- baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
- break;
- case access_get_by_id_proto:
- out.printf("proto");
- baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
- prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
- break;
- case access_get_by_id_chain:
- out.printf("chain");
- baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
- chain = stubInfo.u.getByIdChain.chain.get();
- break;
- case access_get_by_id_self_list:
- out.printf("self_list");
- structureList = stubInfo.u.getByIdSelfList.structureList;
- listSize = stubInfo.u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- out.printf("proto_list");
- structureList = stubInfo.u.getByIdProtoList.structureList;
- listSize = stubInfo.u.getByIdProtoList.listSize;
- break;
- case access_unset:
- out.printf("unset");
- break;
- case access_get_by_id_generic:
- out.printf("generic");
- break;
- case access_get_array_length:
- out.printf("array_length");
- break;
- case access_get_string_length:
- out.printf("string_length");
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-
- if (baseStructure) {
- out.printf(", ");
- dumpStructure(out, "struct", exec, baseStructure, ident);
- }
+ if (stubInfo.resetByGC)
+ out.print(" (Reset By GC)");
+
+ out.printf(" jit(");
- if (prototypeStructure) {
- out.printf(", ");
- dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
- }
+ Structure* baseStructure = nullptr;
+ PolymorphicAccess* stub = nullptr;
- if (chain) {
- out.printf(", ");
- dumpChain(out, exec, chain, ident);
- }
+ switch (stubInfo.cacheType) {
+ case CacheType::GetByIdSelf:
+ out.printf("self");
+ baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
+ break;
+ case CacheType::Stub:
+ out.printf("stub");
+ stub = stubInfo.u.stub;
+ break;
+ case CacheType::Unset:
+ out.printf("unset");
+ break;
+ case CacheType::ArrayLength:
+ out.printf("ArrayLength");
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
- if (structureList) {
- out.printf(", list = %p: [", structureList);
- for (int i = 0; i < listSize; ++i) {
- if (i)
- out.printf(", ");
- out.printf("(");
- dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
- if (structureList->list[i].isChain) {
- if (structureList->list[i].u.chain.get()) {
- out.printf(", ");
- dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
- }
- } else {
- if (structureList->list[i].u.proto.get()) {
- out.printf(", ");
- dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
- }
- }
- out.printf(")");
- }
- out.printf("]");
+ if (baseStructure) {
+ out.printf(", ");
+ dumpStructure(out, "struct", baseStructure, ident);
+ }
+
+ if (stub)
+ out.print(", ", *stub);
+
+ out.printf(")");
+ }
+#else
+ UNUSED_PARAM(map);
+#endif
+}
+
+void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
+{
+ Instruction* instruction = instructions().begin() + location;
+
+ const Identifier& ident = identifier(instruction[2].u.operand);
+
+ UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
+
+ out.print(", ", instruction[8].u.putByIdFlags);
+
+ if (StructureID structureID = instruction[4].u.structureID) {
+ Structure* structure = m_vm->heap.structureIDTable().get(structureID);
+ out.print(" llint(");
+ if (StructureID newStructureID = instruction[6].u.structureID) {
+ Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
+ dumpStructure(out, "prev", structure, ident);
+ out.print(", ");
+ dumpStructure(out, "next", newStructure, ident);
+ if (StructureChain* chain = instruction[7].u.structureChain.get()) {
+ out.print(", ");
+ dumpChain(out, chain, ident);
}
- out.printf(")");
+ } else
+ dumpStructure(out, "struct", structure, ident);
+ out.print(")");
+ }
+
+#if ENABLE(JIT)
+ if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
+ StructureStubInfo& stubInfo = *stubPtr;
+ if (stubInfo.resetByGC)
+ out.print(" (Reset By GC)");
+
+ out.printf(" jit(");
+
+ switch (stubInfo.cacheType) {
+ case CacheType::PutByIdReplace:
+ out.print("replace, ");
+ dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
+ break;
+ case CacheType::Stub: {
+ out.print("stub, ", *stubInfo.u.stub);
+ break;
+ }
+ case CacheType::Unset:
+ out.printf("unset");
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
+ out.printf(")");
}
#else
UNUSED_PARAM(map);
#endif
}
-void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling)
+void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
int dst = (++it)->u.operand;
int func = (++it)->u.operand;
int argCount = (++it)->u.operand;
int registerOffset = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, op);
- out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
+ out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset);
+ out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")");
if (cacheDumpMode == DumpCaches) {
-#if ENABLE(LLINT)
LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
if (callLinkInfo->lastSeenCallee) {
out.printf(
@@ -468,17 +492,21 @@ void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, con
callLinkInfo->lastSeenCallee.get(),
callLinkInfo->lastSeenCallee->executable());
}
-#endif
#if ENABLE(JIT)
- if (numberOfCallLinkInfos()) {
- JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
+ if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
+ JSFunction* target = info->lastSeenCallee();
if (target)
out.printf(" jit(%p, exec %p)", target, target->executable());
}
+
+ if (jitType() != JITCode::FTLJIT)
+ out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
+#else
+ UNUSED_PARAM(map);
#endif
- out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
}
++it;
+ ++it;
dumpArrayProfiling(out, it, hasPrintedProfiling);
dumpValueProfiling(out, it, hasPrintedProfiling);
}
@@ -493,6 +521,31 @@ void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location,
it += 5;
}
+void CodeBlock::dumpSource()
+{
+ dumpSource(WTF::dataFile());
+}
+
+void CodeBlock::dumpSource(PrintStream& out)
+{
+ ScriptExecutable* executable = ownerScriptExecutable();
+ if (executable->isFunctionExecutable()) {
+ FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
+ StringView source = functionExecutable->source().provider()->getRange(
+ functionExecutable->parametersStartOffset(),
+ functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
+
+ out.print("function ", inferredName(), source);
+ return;
+ }
+ out.print(executable->source().view());
+}
+
+void CodeBlock::dumpBytecode()
+{
+ dumpBytecode(WTF::dataFile());
+}
+
void CodeBlock::dumpBytecode(PrintStream& out)
{
// We only use the ExecState* for things that don't actually lead to JS execution,
@@ -509,34 +562,19 @@ void CodeBlock::dumpBytecode(PrintStream& out)
": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
static_cast<unsigned long>(instructions().size()),
static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
- m_numParameters, m_numCalleeRegisters, m_numVars);
- if (symbolTable() && symbolTable()->captureCount()) {
- out.printf(
- "; %d captured var(s) (from r%d to r%d, inclusive)",
- symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
- }
- if (usesArguments()) {
- out.printf(
- "; uses arguments, in r%d, r%d",
- argumentsRegister().offset(),
- unmodifiedArgumentsRegister(argumentsRegister()).offset());
- }
- if (needsFullScopeChain() && codeType() == FunctionCode)
- out.printf("; activation in r%d", activationRegister().offset());
+ m_numParameters, m_numCalleeLocals, m_numVars);
+ out.print("; scope at ", scopeRegister());
out.printf("\n");
StubInfoMap stubInfos;
-#if ENABLE(JIT)
- {
- ConcurrentJITLocker locker(m_lock);
- getStubInfoMap(locker, stubInfos);
- }
-#endif
+ CallLinkInfoMap callLinkInfos;
+ getStubInfoMap(stubInfos);
+ getCallLinkInfoMap(callLinkInfos);
const Instruction* begin = instructions().begin();
const Instruction* end = instructions().end();
for (const Instruction* it = begin; it != end; ++it)
- dumpBytecode(out, exec, begin, it, stubInfos);
+ dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
if (numberOfIdentifiers()) {
out.printf("\nIdentifiers:\n");
@@ -551,7 +589,19 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf("\nConstants:\n");
size_t i = 0;
do {
- out.printf(" k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
+ const char* sourceCodeRepresentationDescription = nullptr;
+ switch (m_constantsSourceCodeRepresentation[i]) {
+ case SourceCodeRepresentation::Double:
+ sourceCodeRepresentationDescription = ": in source as double";
+ break;
+ case SourceCodeRepresentation::Integer:
+ sourceCodeRepresentationDescription = ": in source as integer";
+ break;
+ case SourceCodeRepresentation::Other:
+ sourceCodeRepresentationDescription = "";
+ break;
+ }
+ out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
++i;
} while (i < m_constantRegisters.size());
}
@@ -565,14 +615,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
} while (i < count);
}
- if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
- out.printf("\nException Handlers:\n");
- unsigned i = 0;
- do {
- out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
- ++i;
- } while (i < m_rareData->m_exceptionHandlers.size());
- }
+ dumpExceptionHandlers(out);
if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
out.printf("Switch Jump Tables:\n");
@@ -598,7 +641,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf(" %1d = {\n", i);
StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
- out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
+ out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
out.printf(" }\n");
++i;
} while (i < m_rareData->m_stringSwitchJumpTables.size());
@@ -607,6 +650,20 @@ void CodeBlock::dumpBytecode(PrintStream& out)
out.printf("\n");
}
+void CodeBlock::dumpExceptionHandlers(PrintStream& out)
+{
+ if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
+ out.printf("\nException Handlers:\n");
+ unsigned i = 0;
+ do {
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+ out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
+ i + 1, handler.start, handler.end, handler.target, handler.typeName());
+ ++i;
+ } while (i < m_rareData->m_exceptionHandlers.size());
+ }
+}
+
void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
{
if (hasPrintedProfiling) {
@@ -620,7 +677,7 @@ void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
++it;
CString description = it->u.profile->briefDescription(locker);
@@ -632,7 +689,7 @@ void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, boo
void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
++it;
if (!it->u.arrayProfile)
@@ -653,52 +710,113 @@ void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCase
out.print(name, profile->m_counter);
}
-void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, const StubInfoMap& map)
+void CodeBlock::dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling)
+{
+ if (!profile)
+ return;
+
+ beginDumpProfiling(out, hasPrintedProfiling);
+ out.print("results: ", *profile);
+}
+
+void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
+{
+ out.printf("[%4d] %-17s ", location, op);
+}
+
+void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
+{
+ printLocationAndOp(out, exec, location, it, op);
+ out.printf("%s", registerName(operand).data());
+}
+
+void CodeBlock::dumpBytecode(
+ PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
+ const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
int location = it - begin;
bool hasPrintedProfiling = false;
- switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
+ OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
+ switch (opcode) {
case op_enter: {
printLocationAndOp(out, exec, location, it, "enter");
break;
}
- case op_touch_entry: {
- printLocationAndOp(out, exec, location, it, "touch_entry");
+ case op_get_scope: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
break;
}
- case op_create_activation: {
+ case op_create_direct_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "create_activation", r0);
+ printLocationAndOp(out, exec, location, it, "create_direct_arguments");
+ out.printf("%s", registerName(r0).data());
break;
}
- case op_create_arguments: {
+ case op_create_scoped_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "create_arguments", r0);
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_init_lazy_reg: {
+ case op_create_cloned_arguments: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
+ printLocationAndOp(out, exec, location, it, "create_cloned_arguments");
+ out.printf("%s", registerName(r0).data());
break;
}
- case op_get_callee: {
+ case op_argument_count: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
- ++it;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "argument_count", r0);
+ break;
+ }
+ case op_get_argument: {
+ int r0 = (++it)->u.operand;
+ int index = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "argument", r0);
+ out.printf(", %d", index);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_create_rest: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ unsigned argumentOffset = (++it)->u.unsignedValue;
+ printLocationAndOp(out, exec, location, it, "create_rest");
+ out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data());
+ out.printf("ArgumentsOffset: %u", argumentOffset);
+ break;
+ }
+ case op_get_rest_length: {
+ int r0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_rest_length");
+ out.printf("%s, ", registerName(r0).data());
+ unsigned argumentOffset = (++it)->u.unsignedValue;
+ out.printf("ArgumentsOffset: %u", argumentOffset);
break;
}
case op_create_this: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
unsigned inferredInlineCapacity = (++it)->u.operand;
+ unsigned cachedFunction = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "create_this");
- out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
+ out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
break;
}
case op_to_this: {
int r0 = (++it)->u.operand;
printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
- ++it; // Skip value profile.
+ Structure* structure = (++it)->u.structure.get();
+ if (structure)
+ out.print(", cache(struct = ", RawPointer(structure), ")");
+ out.print(", ", (++it)->u.toThisStatus);
+ break;
+ }
+ case op_check_tdz: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
break;
}
case op_new_object: {
@@ -718,6 +836,30 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
++it; // Skip array allocation profile.
break;
}
+ case op_new_array_with_spread: {
+ int dst = (++it)->u.operand;
+ int argv = (++it)->u.operand;
+ int argc = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_array_with_spread");
+ out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc);
+ unsigned bitVectorIndex = (++it)->u.unsignedValue;
+ const BitVector& bitVector = m_unlinkedCode->bitVector(bitVectorIndex);
+ out.print("BitVector:", bitVectorIndex, ":");
+ for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) {
+ if (bitVector.get(i))
+ out.print("1");
+ else
+ out.print("0");
+ }
+ break;
+ }
+ case op_spread: {
+ int dst = (++it)->u.operand;
+ int arg = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "spread");
+ out.printf("%s, %s", registerName(dst).data(), registerName(arg).data());
+ break;
+ }
case op_new_array_with_size: {
int dst = (++it)->u.operand;
int length = (++it)->u.operand;
@@ -753,12 +895,20 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_captured_mov: {
+ case op_profile_type: {
int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "captured_mov");
- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
++it;
+ ++it;
+ ++it;
+ ++it;
+ printLocationAndOp(out, exec, location, it, "op_profile_type");
+ out.printf("%s", registerName(r0).data());
+ break;
+ }
+ case op_profile_control_flow: {
+ BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
+ printLocationAndOp(out, exec, location, it, "profile_control_flow");
+ out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
break;
}
case op_not: {
@@ -817,10 +967,16 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_to_number: {
printUnaryOp(out, exec, location, it, "to_number");
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_to_string: {
+ printUnaryOp(out, exec, location, it, "to_string");
break;
}
case op_negate: {
printUnaryOp(out, exec, location, it, "negate");
+ ++it; // op_negate has an extra operand for the ArithProfile.
break;
}
case op_add: {
@@ -842,6 +998,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printBinaryOp(out, exec, location, it, "mod");
break;
}
+ case op_pow: {
+ printBinaryOp(out, exec, location, it, "pow");
+ break;
+ }
case op_sub: {
printBinaryOp(out, exec, location, it, "sub");
++it;
@@ -874,13 +1034,12 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
++it;
break;
}
- case op_check_has_instance: {
+ case op_overrides_has_instance: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- int offset = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "check_has_instance");
- out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
+ printLocationAndOp(out, exec, location, it, "overrides_has_instance");
+ out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
break;
}
case op_instanceof: {
@@ -891,6 +1050,15 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
break;
}
+ case op_instanceof_custom: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "instanceof_custom");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+ break;
+ }
case op_unsigned: {
printUnaryOp(out, exec, location, it, "unsigned");
break;
@@ -899,6 +1067,10 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printUnaryOp(out, exec, location, it, "typeof");
break;
}
+ case op_is_empty: {
+ printUnaryOp(out, exec, location, it, "is_empty");
+ break;
+ }
case op_is_undefined: {
printUnaryOp(out, exec, location, it, "is_undefined");
break;
@@ -911,106 +1083,155 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printUnaryOp(out, exec, location, it, "is_number");
break;
}
- case op_is_string: {
- printUnaryOp(out, exec, location, it, "is_string");
+ case op_is_cell_with_type: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int type = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "is_cell_with_type");
+ out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type);
break;
}
case op_is_object: {
printUnaryOp(out, exec, location, it, "is_object");
break;
}
+ case op_is_object_or_null: {
+ printUnaryOp(out, exec, location, it, "is_object_or_null");
+ break;
+ }
case op_is_function: {
printUnaryOp(out, exec, location, it, "is_function");
break;
}
case op_in: {
printBinaryOp(out, exec, location, it, "in");
+ dumpArrayProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_init_global_const_nop: {
- printLocationAndOp(out, exec, location, it, "init_global_const_nop");
- it++;
- it++;
- it++;
- it++;
- break;
- }
- case op_init_global_const: {
- WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
+ case op_try_get_by_id: {
int r0 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "init_global_const");
- out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
- it++;
- it++;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "try_get_by_id");
+ out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
- case op_get_array_length:
- case op_get_string_length: {
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset:
+ case op_get_array_length: {
printGetByIdOp(out, exec, location, it);
- printGetByIdCacheStatus(out, exec, location, map);
+ printGetByIdCacheStatus(out, exec, location, stubInfos);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_get_by_id_with_this: {
+ printLocationAndOp(out, exec, location, it, "get_by_id_with_this");
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data());
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_get_arguments_length: {
- printUnaryOp(out, exec, location, it, "get_arguments_length");
- it++;
+ case op_get_by_val_with_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_by_val_with_this");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_put_by_id: {
printPutByIdOp(out, exec, location, it, "put_by_id");
+ printPutByIdCacheStatus(out, location, stubInfos);
break;
}
- case op_put_by_id_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
+ case op_put_by_id_with_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_by_id_with_this");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data());
break;
}
- case op_put_by_id_replace: {
- printPutByIdOp(out, exec, location, it, "put_by_id_replace");
+ case op_put_by_val_with_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_by_val_with_this");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
break;
}
- case op_put_by_id_transition: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition");
+ case op_put_getter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_by_id");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
break;
}
- case op_put_by_id_transition_direct: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
+ case op_put_setter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_setter_by_id");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
break;
}
- case op_put_by_id_transition_direct_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
+ case op_put_getter_setter_by_id: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id");
+ out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
break;
}
- case op_put_by_id_transition_normal: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
+ case op_put_getter_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_getter_by_val");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
break;
}
- case op_put_by_id_transition_normal_out_of_line: {
- printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
+ case op_put_setter_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int n0 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_setter_by_val");
+ out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
break;
}
- case op_put_by_id_generic: {
- printPutByIdOp(out, exec, location, it, "put_by_id_generic");
+ case op_define_data_property: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "define_data_property");
+ out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
break;
}
- case op_put_getter_setter: {
+ case op_define_accessor_property: {
int r0 = (++it)->u.operand;
- int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "put_getter_setter");
- out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
+ int r3 = (++it)->u.operand;
+ int r4 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "define_accessor_property");
+ out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data());
break;
}
case op_del_by_id: {
@@ -1031,27 +1252,6 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_get_argument_by_val: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int r2 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "get_argument_by_val");
- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
- ++it;
- dumpValueProfiling(out, it, hasPrintedProfiling);
- break;
- }
- case op_get_by_pname: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- int r2 = (++it)->u.operand;
- int r3 = (++it)->u.operand;
- int r4 = (++it)->u.operand;
- int r5 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "get_by_pname");
- out.printf("%s, %s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
- break;
- }
case op_put_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
@@ -1114,6 +1314,7 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
int offset = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "jneq_ptr");
out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
+ ++it;
break;
}
case op_jless: {
@@ -1184,6 +1385,23 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
printLocationAndOp(out, exec, location, it, "loop_hint");
break;
}
+ case op_watchdog: {
+ printLocationAndOp(out, exec, location, it, "watchdog");
+ break;
+ }
+ case op_log_shadow_chicken_prologue: {
+ int r0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "log_shadow_chicken_prologue");
+ out.printf("%s", registerName(r0).data());
+ break;
+ }
+ case op_log_shadow_chicken_tail: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "log_shadow_chicken_tail");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+ break;
+ }
case op_switch_imm: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
@@ -1210,73 +1428,108 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_new_func: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- int shouldCheck = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "new_func");
- out.printf("%s, f%d, %s", registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
break;
}
- case op_new_captured_func: {
+ case op_new_generator_func: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "new_captured_func");
- out.printf("%s, f%d", registerName(r0).data(), f0);
- ++it;
+ printLocationAndOp(out, exec, location, it, "new_generator_func");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_async_func: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_async_func");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
break;
}
case op_new_func_exp: {
int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
int f0 = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "new_func_exp");
- out.printf("%s, f%d", registerName(r0).data(), f0);
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_generator_func_exp: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_generator_func_exp");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_new_async_func_exp: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_async_func_exp");
+ out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+ break;
+ }
+ case op_set_function_name: {
+ int funcReg = (++it)->u.operand;
+ int nameReg = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "set_function_name");
+ out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data());
break;
}
case op_call: {
- printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
+ break;
+ }
+ case op_tail_call: {
+ printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
case op_call_eval: {
- printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
- case op_call_varargs: {
+
+ case op_construct_varargs:
+ case op_call_varargs:
+ case op_tail_call_varargs:
+ case op_tail_call_forward_arguments: {
int result = (++it)->u.operand;
int callee = (++it)->u.operand;
int thisValue = (++it)->u.operand;
int arguments = (++it)->u.operand;
int firstFreeRegister = (++it)->u.operand;
+ int varArgOffset = (++it)->u.operand;
++it;
- printLocationAndOp(out, exec, location, it, "call_varargs");
- out.printf("%s, %s, %s, %s, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
+ const char* opName;
+ if (opcode == op_call_varargs)
+ opName = "call_varargs";
+ else if (opcode == op_construct_varargs)
+ opName = "construct_varargs";
+ else if (opcode == op_tail_call_varargs)
+ opName = "tail_call_varargs";
+ else if (opcode == op_tail_call_forward_arguments)
+ opName = "tail_call_forward_arguments";
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+
+ printLocationAndOp(out, exec, location, it, opName);
+ out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
- case op_tear_off_activation: {
- int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "tear_off_activation", r0);
- break;
- }
- case op_tear_off_arguments: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "tear_off_arguments");
- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
- break;
- }
+
case op_ret: {
int r0 = (++it)->u.operand;
printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
break;
}
- case op_ret_object_or_this: {
- int r0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "constructor_ret");
- out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
- break;
- }
case op_construct: {
- printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling);
+ printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
break;
}
case op_strcat: {
@@ -1294,49 +1547,120 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
- case op_get_pnames: {
- int r0 = it[1].u.operand;
- int r1 = it[2].u.operand;
- int r2 = it[3].u.operand;
- int r3 = it[4].u.operand;
- int offset = it[5].u.operand;
- printLocationAndOp(out, exec, location, it, "get_pnames");
- out.printf("%s, %s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
- it += OPCODE_LENGTH(op_get_pnames) - 1;
+ case op_get_enumerable_length: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
+ out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+ it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
break;
}
- case op_next_pname: {
- int dest = it[1].u.operand;
+ case op_has_indexed_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ ArrayProfile* arrayProfile = it[4].u.arrayProfile;
+ printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
+ out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
+ it += OPCODE_LENGTH(op_has_indexed_property) - 1;
+ break;
+ }
+ case op_has_structure_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ int enumerator = it[4].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_has_structure_property");
+ out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
+ it += OPCODE_LENGTH(op_has_structure_property) - 1;
+ break;
+ }
+ case op_has_generic_property: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_has_generic_property");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
+ it += OPCODE_LENGTH(op_has_generic_property) - 1;
+ break;
+ }
+ case op_get_direct_pname: {
+ int dst = it[1].u.operand;
+ int base = it[2].u.operand;
+ int propertyName = it[3].u.operand;
+ int index = it[4].u.operand;
+ int enumerator = it[5].u.operand;
+ ValueProfile* profile = it[6].u.profile;
+ printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
+ out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
+ it += OPCODE_LENGTH(op_get_direct_pname) - 1;
+ break;
+
+ }
+ case op_get_property_enumerator: {
+ int dst = it[1].u.operand;
int base = it[2].u.operand;
- int i = it[3].u.operand;
- int size = it[4].u.operand;
- int iter = it[5].u.operand;
- int offset = it[6].u.operand;
- printLocationAndOp(out, exec, location, it, "next_pname");
- out.printf("%s, %s, %s, %s, %s, %d(->%d)", registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
- it += OPCODE_LENGTH(op_next_pname) - 1;
+ printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
+ out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+ it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
+ break;
+ }
+ case op_enumerator_structure_pname: {
+ int dst = it[1].u.operand;
+ int enumerator = it[2].u.operand;
+ int index = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
+ break;
+ }
+ case op_enumerator_generic_pname: {
+ int dst = it[1].u.operand;
+ int enumerator = it[2].u.operand;
+ int index = it[3].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
+ break;
+ }
+ case op_to_index_string: {
+ int dst = it[1].u.operand;
+ int index = it[2].u.operand;
+ printLocationAndOp(out, exec, location, it, "op_to_index_string");
+ out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
+ it += OPCODE_LENGTH(op_to_index_string) - 1;
break;
}
case op_push_with_scope: {
- int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "push_with_scope", r0);
+ int dst = (++it)->u.operand;
+ int newScope = (++it)->u.operand;
+ int currentScope = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "push_with_scope");
+ out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
break;
}
- case op_pop_scope: {
- printLocationAndOp(out, exec, location, it, "pop_scope");
+ case op_get_parent_scope: {
+ int dst = (++it)->u.operand;
+ int parentScope = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_parent_scope");
+ out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
break;
}
- case op_push_name_scope: {
- int id0 = (++it)->u.operand;
- int r1 = (++it)->u.operand;
- unsigned attributes = (++it)->u.operand;
- printLocationAndOp(out, exec, location, it, "push_name_scope");
- out.printf("%s, %s, %u", idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
+ case op_create_lexical_environment: {
+ int dst = (++it)->u.operand;
+ int scope = (++it)->u.operand;
+ int symbolTable = (++it)->u.operand;
+ int initialValue = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "create_lexical_environment");
+ out.printf("%s, %s, %s, %s",
+ registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
break;
}
case op_catch: {
int r0 = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "catch");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
break;
}
case op_throw: {
@@ -1346,26 +1670,24 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_throw_static_error: {
int k0 = (++it)->u.operand;
- int k1 = (++it)->u.operand;
+ ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue);
printLocationAndOp(out, exec, location, it, "throw_static_error");
- out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
+ out.printf("%s, ", constantName(k0).data());
+ out.print(k1);
break;
}
case op_debug: {
- int debugHookID = (++it)->u.operand;
+ int debugHookType = (++it)->u.operand;
int hasBreakpointFlag = (++it)->u.operand;
printLocationAndOp(out, exec, location, it, "debug");
- out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
+ out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag);
break;
}
- case op_profile_will_call: {
- int function = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
- break;
- }
- case op_profile_did_call: {
- int function = (++it)->u.operand;
- printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
+ case op_assert: {
+ int condition = (++it)->u.operand;
+ int line = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "assert");
+ out.printf("%s, %d", registerName(condition).data(), line);
break;
}
case op_end: {
@@ -1375,53 +1697,81 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
}
case op_resolve_scope: {
int r0 = (++it)->u.operand;
+ int scope = (++it)->u.operand;
int id0 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
- ++it; // depth
+ ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
+ int depth = (++it)->u.operand;
+ void* pointer = (++it)->u.pointer;
printLocationAndOp(out, exec, location, it, "resolve_scope");
- out.printf("%s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
- ++it;
+ out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
break;
}
case op_get_from_scope: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
+ GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
++it; // Structure
- ++it; // Operand
- ++it; // Skip value profile.
+ int operand = (++it)->u.operand; // Operand
printLocationAndOp(out, exec, location, it, "get_from_scope");
- out.printf("%s, %s, %s, %d", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
+ out.print(registerName(r0), ", ", registerName(r1));
+ if (static_cast<unsigned>(id0) == UINT_MAX)
+ out.print(", anonymous");
+ else
+ out.print(", ", idName(id0, identifier(id0)));
+ out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
break;
}
case op_put_to_scope: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- int resolveModeAndType = (++it)->u.operand;
+ GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
++it; // Structure
- ++it; // Operand
+ int operand = (++it)->u.operand; // Operand
printLocationAndOp(out, exec, location, it, "put_to_scope");
- out.printf("%s, %s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), resolveModeAndType);
+ out.print(registerName(r0));
+ if (static_cast<unsigned>(id0) == UINT_MAX)
+ out.print(", anonymous");
+ else
+ out.print(", ", idName(id0, identifier(id0)));
+ out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand);
+ break;
+ }
+ case op_get_from_arguments: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "get_from_arguments");
+ out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
+ dumpValueProfiling(out, it, hasPrintedProfiling);
+ break;
+ }
+ case op_put_to_arguments: {
+ int r0 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "put_to_arguments");
+ out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
break;
}
-#if ENABLE(LLINT_C_LOOP)
default:
RELEASE_ASSERT_NOT_REACHED();
-#endif
}
dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
- dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
+ {
+ dumpArithProfile(out, arithProfileForBytecodeOffset(location), hasPrintedProfiling);
+ }
#if ENABLE(DFG_JIT)
Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
if (!exitSites.isEmpty()) {
out.print(" !! frequent exits: ");
CommaPrinter comma;
- for (unsigned i = 0; i < exitSites.size(); ++i)
- out.print(comma, exitSites[i].kind());
+ for (auto& exitSite : exitSites)
+ out.print(comma, exitSite.kind(), " ", exitSite.jitType());
}
#else // ENABLE(DFG_JIT)
UNUSED_PARAM(location);
@@ -1429,11 +1779,13 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
out.print("\n");
}
-void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
+void CodeBlock::dumpBytecode(
+ PrintStream& out, unsigned bytecodeOffset,
+ const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
ExecState* exec = m_globalObject->globalExec();
const Instruction* it = instructions().begin() + bytecodeOffset;
- dumpBytecode(out, exec, instructions().begin(), it);
+ dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
}
#define FOR_EACH_MEMBER_VECTOR(macro) \
@@ -1444,63 +1796,84 @@ void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
macro(functionExpressions) \
macro(constantRegisters)
-#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
- macro(regexps) \
- macro(functions) \
- macro(exceptionHandlers) \
- macro(switchJumpTables) \
- macro(stringSwitchJumpTables) \
- macro(evalCodeCache) \
- macro(expressionInfo) \
- macro(lineInfo) \
- macro(callReturnIndexVector)
-
template<typename T>
static size_t sizeInBytes(const Vector<T>& vector)
{
return vector.capacity() * sizeof(T);
}
-CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
- : m_globalObject(other.m_globalObject)
- , m_heap(other.m_heap)
- , m_numCalleeRegisters(other.m_numCalleeRegisters)
+namespace {
+
+class PutToScopeFireDetail : public FireDetail {
+public:
+ PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
+ : m_codeBlock(codeBlock)
+ , m_ident(ident)
+ {
+ }
+
+ void dump(PrintStream& out) const override
+ {
+ out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
+ }
+
+private:
+ CodeBlock* m_codeBlock;
+ const Identifier& m_ident;
+};
+
+} // anonymous namespace
+
+CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
+ : JSCell(*vm, structure)
+ , m_globalObject(other.m_globalObject)
+ , m_numCalleeLocals(other.m_numCalleeLocals)
, m_numVars(other.m_numVars)
- , m_isConstructor(other.m_isConstructor)
, m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+ , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+ , m_didFailJITCompilation(false)
, m_didFailFTLCompilation(false)
- , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
+ , m_hasBeenCompiledWithFTL(false)
+ , m_isConstructor(other.m_isConstructor)
+ , m_isStrictMode(other.m_isStrictMode)
+ , m_codeType(other.m_codeType)
+ , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
+ , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
+ , m_hasDebuggerStatement(false)
, m_steppingMode(SteppingModeDisabled)
, m_numBreakpoints(0)
- , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
+ , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
, m_vm(other.m_vm)
, m_instructions(other.m_instructions)
, m_thisRegister(other.m_thisRegister)
- , m_argumentsRegister(other.m_argumentsRegister)
- , m_activationRegister(other.m_activationRegister)
- , m_isStrictMode(other.m_isStrictMode)
- , m_needsActivation(other.m_needsActivation)
+ , m_scopeRegister(other.m_scopeRegister)
+ , m_hash(other.m_hash)
, m_source(other.m_source)
, m_sourceOffset(other.m_sourceOffset)
, m_firstLineColumnOffset(other.m_firstLineColumnOffset)
- , m_codeType(other.m_codeType)
, m_constantRegisters(other.m_constantRegisters)
+ , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
, m_functionDecls(other.m_functionDecls)
, m_functionExprs(other.m_functionExprs)
, m_osrExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
- , m_hash(other.m_hash)
-#if ENABLE(JIT)
- , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+ , m_creationTime(std::chrono::steady_clock::now())
{
- ASSERT(m_heap->isDeferred());
-
- if (SymbolTable* symbolTable = other.symbolTable())
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
-
+ m_visitWeaklyHasBeenCalled = false;
+
+ ASSERT(heap()->isDeferred());
+ ASSERT(m_scopeRegister.isLocal());
+
setNumParameters(other.numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
+{
+ Base::finishCreation(vm);
+
optimizeAfterWarmUp();
jitAfterWarmUp();
@@ -1513,87 +1886,94 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
}
- m_heap->m_codeBlocks.add(this);
- m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
+ heap()->m_codeBlocks->add(this);
}
-CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
- : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
- , m_heap(&m_globalObject->vm().heap)
- , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
+CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+ JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+ : JSCell(*vm, structure)
+ , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
+ , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
, m_numVars(unlinkedCodeBlock->m_numVars)
- , m_isConstructor(unlinkedCodeBlock->isConstructor())
, m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+ , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+ , m_didFailJITCompilation(false)
, m_didFailFTLCompilation(false)
- , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
+ , m_hasBeenCompiledWithFTL(false)
+ , m_isConstructor(unlinkedCodeBlock->isConstructor())
+ , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
+ , m_codeType(unlinkedCodeBlock->codeType())
+ , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
+ , m_hasDebuggerStatement(false)
, m_steppingMode(SteppingModeDisabled)
, m_numBreakpoints(0)
- , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
+ , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
, m_vm(unlinkedCodeBlock->vm())
, m_thisRegister(unlinkedCodeBlock->thisRegister())
- , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
- , m_activationRegister(unlinkedCodeBlock->activationRegister())
- , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
- , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain() && unlinkedCodeBlock->codeType() == FunctionCode)
- , m_source(sourceProvider)
+ , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
+ , m_source(WTFMove(sourceProvider))
, m_sourceOffset(sourceOffset)
, m_firstLineColumnOffset(firstLineColumnOffset)
- , m_codeType(unlinkedCodeBlock->codeType())
, m_osrExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
-#if ENABLE(JIT)
- , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
-#endif
+ , m_creationTime(std::chrono::steady_clock::now())
{
- ASSERT(m_heap->isDeferred());
+ m_visitWeaklyHasBeenCalled = false;
+
+ ASSERT(heap()->isDeferred());
+ ASSERT(m_scopeRegister.isLocal());
- bool didCloneSymbolTable = false;
-
- if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
- if (codeType() == FunctionCode && symbolTable->captureCount()) {
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->clone(*m_vm));
- didCloneSymbolTable = true;
- } else
- m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
- }
-
ASSERT(m_source);
setNumParameters(unlinkedCodeBlock->numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+ JSScope* scope)
+{
+ Base::finishCreation(vm);
- setConstantRegisters(unlinkedCodeBlock->constantRegisters());
+ if (vm.typeProfiler() || vm.controlFlowProfiler())
+ vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
+
+ setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
if (unlinkedCodeBlock->usesGlobalObject())
- m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().offset()].set(*m_vm, ownerExecutable, m_globalObject.get());
- m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
+ m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
+
+ for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
+ LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
+ if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
+ m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
+ }
+
+ // We already have the cloned symbol table for the module environment since we need to instantiate
+ // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
+ if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
+ SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
+ if (m_vm->typeProfiler()) {
+ ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
+ clonedSymbolTable->prepareForTypeProfiling(locker);
+ }
+ replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
+ }
+
+ bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
+ m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
- unsigned lineCount = unlinkedExecutable->lineCount();
- unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
- bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
- unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
- bool endColumnIsOnStartLine = !lineCount;
- unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
- unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
- unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
- FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
- m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
- }
-
- m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
+ if (shouldUpdateFunctionHasExecutedCache)
+ vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+ }
+
+ m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
- unsigned lineCount = unlinkedExecutable->lineCount();
- unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
- bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
- unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
- bool endColumnIsOnStartLine = !lineCount;
- unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
- unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
- unsigned sourceLength = unlinkedExecutable->sourceLength();
- SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
- FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
- m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
+ if (shouldUpdateFunctionHasExecutedCache)
+ vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+ m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
}
if (unlinkedCodeBlock->hasRareData()) {
@@ -1607,15 +1987,13 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
m_rareData->m_exceptionHandlers.resizeToFit(count);
- size_t nonLocalScopeDepth = scope->depth();
for (size_t i = 0; i < count; i++) {
- const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
- m_rareData->m_exceptionHandlers[i].start = handler.start;
- m_rareData->m_exceptionHandlers[i].end = handler.end;
- m_rareData->m_exceptionHandlers[i].target = handler.target;
- m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
-#if ENABLE(JIT) && ENABLE(LLINT)
- m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
+ const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+#if ENABLE(JIT)
+ handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
+#else
+ handler.initialize(unlinkedHandler);
#endif
}
}
@@ -1627,7 +2005,7 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
for (; ptr != end; ++ptr) {
OffsetLocation offset;
- offset.branchOffset = ptr->value;
+ offset.branchOffset = ptr->value.branchOffset;
m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
}
}
@@ -1645,58 +2023,83 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
// Allocate metadata buffers for the bytecode
-#if ENABLE(LLINT)
if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
- m_llintCallLinkInfos.resizeToFit(size);
-#endif
+ m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
m_arrayProfiles.grow(size);
if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
- m_arrayAllocationProfiles.resizeToFit(size);
+ m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
- m_valueProfiles.resizeToFit(size);
+ m_valueProfiles = RefCountedArray<ValueProfile>(size);
if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
- m_objectAllocationProfiles.resizeToFit(size);
+ m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
+
+#if ENABLE(JIT)
+ setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
+#endif
// Copy and translate the UnlinkedInstructions
unsigned instructionCount = unlinkedCodeBlock->instructions().count();
UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
- Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
+ // Bookkeep the strongly referenced module environments.
+ HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
+
+ RefCountedArray<Instruction> instructions(instructionCount);
+
+ unsigned valueProfileCount = 0;
+ auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
+ unsigned valueProfileIndex = valueProfileCount++;
+ ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
+ ASSERT(profile->m_bytecodeOffset == -1);
+ profile->m_bytecodeOffset = bytecodeOffset;
+ instructions[bytecodeOffset + opLength - 1] = profile;
+ };
+
for (unsigned i = 0; !instructionReader.atEnd(); ) {
const UnlinkedInstruction* pc = instructionReader.next();
unsigned opLength = opcodeLength(pc[0].u.opcode);
- instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
+ instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
for (size_t j = 1; j < opLength; ++j) {
if (sizeof(int32_t) != sizeof(intptr_t))
instructions[i + j].u.pointer = 0;
instructions[i + j].u.operand = pc[j].u.operand;
}
switch (pc[0].u.opcode) {
+ case op_has_indexed_property: {
+ int arrayProfileIndex = pc[opLength - 1].u.operand;
+ m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+ instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+ break;
+ }
case op_call_varargs:
- case op_get_by_val:
- case op_get_argument_by_val: {
+ case op_tail_call_varargs:
+ case op_tail_call_forward_arguments:
+ case op_construct_varargs:
+ case op_get_by_val: {
int arrayProfileIndex = pc[opLength - 2].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
FALLTHROUGH;
}
- case op_get_by_id: {
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
- break;
- }
- case op_put_by_val: {
- int arrayProfileIndex = pc[opLength - 1].u.operand;
- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
- instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+ case op_get_direct_pname:
+ case op_get_by_id:
+ case op_get_by_id_with_this:
+ case op_try_get_by_id:
+ case op_get_by_val_with_this:
+ case op_get_from_arguments:
+ case op_to_number:
+ case op_get_argument: {
+ linkValueProfile(i, opLength);
break;
}
+
+ case op_in:
+ case op_put_by_val:
case op_put_by_val_direct: {
int arrayProfileIndex = pc[opLength - 1].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
@@ -1717,125 +2120,216 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
int inferredInlineCapacity = pc[opLength - 2].u.operand;
instructions[i + opLength - 1] = objectAllocationProfile;
- objectAllocationProfile->initialize(*vm(),
- m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
+ objectAllocationProfile->initialize(vm,
+ m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
break;
}
case op_call:
+ case op_tail_call:
case op_call_eval: {
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
+ linkValueProfile(i, opLength);
int arrayProfileIndex = pc[opLength - 2].u.operand;
m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
-#if ENABLE(LLINT)
instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
break;
}
case op_construct: {
-#if ENABLE(LLINT)
instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
-#endif
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
- break;
- }
- case op_get_by_id_out_of_line:
- case op_get_by_id_self:
- case op_get_by_id_proto:
- case op_get_by_id_chain:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_chain:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_chain:
- case op_get_by_id_generic:
- case op_get_array_length:
- case op_get_string_length:
- CRASH();
-
- case op_init_global_const_nop: {
- ASSERT(codeType() == GlobalCode);
- Identifier ident = identifier(pc[4].u.operand);
- SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
- if (entry.isNull())
- break;
-
- instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
- instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
+ linkValueProfile(i, opLength);
break;
}
+ case op_get_array_length:
+ CRASH();
case op_resolve_scope: {
- const Identifier& ident = identifier(pc[2].u.operand);
- ResolveType type = static_cast<ResolveType>(pc[3].u.operand);
-
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
- instructions[i + 3].u.operand = op.type;
- instructions[i + 4].u.operand = op.depth;
- if (op.activation)
- instructions[i + 5].u.activation.set(*vm(), ownerExecutable, op.activation);
+ const Identifier& ident = identifier(pc[3].u.operand);
+ ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
+ RELEASE_ASSERT(type != LocalClosureVar);
+ int localScopeDepth = pc[5].u.operand;
+
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+ instructions[i + 4].u.operand = op.type;
+ instructions[i + 5].u.operand = op.depth;
+ if (op.lexicalEnvironment) {
+ if (op.type == ModuleVar) {
+ // Keep the linked module environment strongly referenced.
+ if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
+ addConstant(op.lexicalEnvironment);
+ instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
+ } else
+ instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
+ } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
+ instructions[i + 6].u.jsCell.set(vm, this, constantScope);
+ else
+ instructions[i + 6].u.pointer = nullptr;
break;
}
case op_get_from_scope: {
- ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
- ASSERT(profile->m_bytecodeOffset == -1);
- profile->m_bytecodeOffset = i;
- instructions[i + opLength - 1] = profile;
+ linkValueProfile(i, opLength);
+
+ // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
+
+ int localScopeDepth = pc[5].u.operand;
+ instructions[i + 5].u.pointer = nullptr;
+
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ ASSERT(!isInitialization(getPutInfo.initializationMode()));
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+ break;
+ }
- // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
const Identifier& ident = identifier(pc[3].u.operand);
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
- instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+ if (op.type == ModuleVar)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
instructions[i + 5].u.watchpointSet = op.watchpointSet;
else if (op.structure)
- instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 5].u.structure.set(vm, this, op.structure);
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
break;
}
case op_put_to_scope: {
- // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
+ // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ // Only do watching if the property we're putting to is not anonymous.
+ if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
+ int symbolTableIndex = pc[5].u.operand;
+ SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+ const Identifier& ident = identifier(pc[2].u.operand);
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ auto iter = symbolTable->find(locker, ident.impl());
+ ASSERT(iter != symbolTable->end(locker));
+ iter->value.prepareToWatch();
+ instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
+ } else
+ instructions[i + 5].u.watchpointSet = nullptr;
+ break;
+ }
+
const Identifier& ident = identifier(pc[2].u.operand);
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
+ int localScopeDepth = pc[5].u.operand;
+ instructions[i + 5].u.pointer = nullptr;
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
- instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
+ instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+ if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
instructions[i + 5].u.watchpointSet = op.watchpointSet;
else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
if (op.watchpointSet)
- op.watchpointSet->invalidate();
+ op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
} else if (op.structure)
- instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
+ instructions[i + 5].u.structure.set(vm, this, op.structure);
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+
break;
}
-
- case op_captured_mov:
- case op_new_captured_func: {
- if (pc[3].u.index == UINT_MAX) {
- instructions[i + 3].u.watchpointSet = 0;
+
+ case op_profile_type: {
+ RELEASE_ASSERT(vm.typeProfiler());
+ // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
+ size_t instructionOffset = i + opLength - 1;
+ unsigned divotStart, divotEnd;
+ GlobalVariableID globalVariableID = 0;
+ RefPtr<TypeSet> globalTypeSet;
+ bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
+ VirtualRegister profileRegister(pc[1].u.operand);
+ ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
+ SymbolTable* symbolTable = nullptr;
+
+ switch (flag) {
+ case ProfileTypeBytecodeClosureVar: {
+ const Identifier& ident = identifier(pc[4].u.operand);
+ int localScopeDepth = pc[2].u.operand;
+ ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
+ // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
+ // we're abstractly "read"ing from a JSScope.
+ ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+
+ if (op.type == ClosureVar || op.type == ModuleVar)
+ symbolTable = op.lexicalEnvironment->symbolTable();
+ else if (op.type == GlobalVar)
+ symbolTable = m_globalObject.get()->symbolTable();
+
+ UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
+ if (symbolTable) {
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ symbolTable->prepareForTypeProfiling(locker);
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
+ } else
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+
+ break;
+ }
+ case ProfileTypeBytecodeLocallyResolved: {
+ int symbolTableIndex = pc[2].u.operand;
+ SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+ const Identifier& ident = identifier(pc[4].u.operand);
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+ globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
+ globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
+
+ break;
+ }
+ case ProfileTypeBytecodeDoesNotHaveGlobalID:
+ case ProfileTypeBytecodeFunctionArgument: {
+ globalVariableID = TypeProfilerNoGlobalIDExists;
+ break;
+ }
+ case ProfileTypeBytecodeFunctionReturnStatement: {
+ RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
+ globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
+ globalVariableID = TypeProfilerReturnStatement;
+ if (!shouldAnalyze) {
+ // Because a return statement can be added implicitly to return undefined at the end of a function,
+ // and these nodes don't emit expression ranges because they aren't in the actual source text of
+ // the user's program, give the type profiler some range to identify these return statements.
+ // Currently, the text offset that is used as identification is "f" in the function keyword
+ // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
+ divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
+ shouldAnalyze = true;
+ }
break;
}
- StringImpl* uid = identifier(pc[3].u.index).impl();
- RELEASE_ASSERT(didCloneSymbolTable);
- ConcurrentJITLocker locker(m_symbolTable->m_lock);
- SymbolTable::Map::iterator iter = m_symbolTable->find(locker, uid);
- ASSERT(iter != m_symbolTable->end(locker));
- iter->value.prepareToWatch();
- instructions[i + 3].u.watchpointSet = iter->value.watchpointSet();
+ }
+
+ std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
+ ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
+ TypeLocation* location = locationPair.first;
+ bool isNewLocation = locationPair.second;
+
+ if (flag == ProfileTypeBytecodeFunctionReturnStatement)
+ location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
+
+ if (shouldAnalyze && isNewLocation)
+ vm.typeProfiler()->insertNewLocation(location);
+
+ instructions[i + 2].u.location = location;
+ break;
+ }
+
+ case op_debug: {
+ if (pc[1].u.index == DidReachBreakpoint)
+ m_hasDebuggerStatement = true;
+ break;
+ }
+
+ case op_create_rest: {
+ int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
+ ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
+ // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.");
+ m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
break;
}
@@ -1844,7 +2338,11 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
i += opLength;
}
- m_instructions = WTF::RefCountedArray<Instruction>(instructions);
+
+ if (vm.controlFlowProfiler())
+ insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+
+ m_instructions = WTFMove(instructions);
// Set optimization thresholds only after m_instructions is initialized, since these
// rely on the instruction count (and are in theory permitted to also inspect the
@@ -1854,71 +2352,89 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
// If the concurrent thread will want the code block's hash, then compute it here
// synchronously.
- if (Options::showDisassembly()
- || Options::showDFGDisassembly()
- || Options::dumpBytecodeAtDFGTime()
- || Options::dumpGraphAtEachPhase()
- || Options::verboseCompilation()
- || Options::logCompilationChanges()
- || Options::validateGraph()
- || Options::validateGraphAtEachPhase()
- || Options::verboseOSR()
- || Options::verboseCompilationQueue()
- || Options::reportCompileTimes()
- || Options::verboseCFA())
+ if (Options::alwaysComputeHash())
hash();
if (Options::dumpGeneratedBytecodes())
dumpBytecode();
-
- m_heap->m_codeBlocks.add(this);
- m_heap->reportExtraMemoryCost(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
+
+ heap()->m_codeBlocks->add(this);
+ heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
}
CodeBlock::~CodeBlock()
{
if (m_vm->m_perBytecodeProfiler)
m_vm->m_perBytecodeProfiler->notifyDestruction(this);
-
+
+ if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
+ unlinkedCodeBlock()->setDidOptimize(FalseTriState);
+
#if ENABLE(VERBOSE_VALUE_PROFILE)
dumpValueProfiles();
#endif
-#if ENABLE(LLINT)
- while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
- m_incomingLLIntCalls.begin()->remove();
-#endif // ENABLE(LLINT)
-#if ENABLE(JIT)
// We may be destroyed before any CodeBlocks that refer to us are destroyed.
// Consider that two CodeBlocks become unreachable at the same time. There
// is no guarantee about the order in which the CodeBlocks are destroyed.
// So, if we don't remove incoming calls, and get destroyed before the
// CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
// destructor will try to remove nodes from our (no longer valid) linked list.
- while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->remove();
+ unlinkIncomingCalls();
// Note that our outgoing calls will be removed from other CodeBlocks'
// m_incomingCalls linked lists through the execution of the ~CallLinkInfo
// destructors.
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
- (*iter)->deref();
+#if ENABLE(JIT)
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo* stub = *iter;
+ stub->aboutToDie();
+ stub->deref();
+ }
#endif // ENABLE(JIT)
}
-void CodeBlock::setNumParameters(int newValue)
+void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
{
- m_numParameters = newValue;
+ ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
+ size_t count = constants.size();
+ m_constantRegisters.resizeToFit(count);
+ bool hasTypeProfiler = !!m_vm->typeProfiler();
+ for (size_t i = 0; i < count; i++) {
+ JSValue constant = constants[i].get();
+
+ if (!constant.isEmpty()) {
+ if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constant)) {
+ if (hasTypeProfiler) {
+ ConcurrentJSLocker locker(symbolTable->m_lock);
+ symbolTable->prepareForTypeProfiling(locker);
+ }
- m_argumentValueProfiles.resizeToFit(newValue);
+ SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
+ if (wasCompiledWithDebuggingOpcodes())
+ clone->setRareDataCodeBlock(this);
+
+ constant = clone;
+ }
+ }
+
+ m_constantRegisters[i].set(*m_vm, this, constant);
+ }
+
+ m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
}
-void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
+void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
{
- EvalCacheMap::iterator end = m_cacheMap.end();
- for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
- visitor.append(&ptr->value);
+ m_alternative.set(vm, this, alternative);
+}
+
+void CodeBlock::setNumParameters(int newValue)
+{
+ m_numParameters = newValue;
+
+ m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
}
CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
@@ -1927,77 +2443,46 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
if (jitType() != JITCode::DFGJIT)
return 0;
DFG::JITCode* jitCode = m_jitCode->dfg();
- return jitCode->osrEntryBlock.get();
+ return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
return 0;
#endif // ENABLE(FTL_JIT)
}
-void CodeBlock::visitAggregate(SlotVisitor& visitor)
-{
-#if ENABLE(PARALLEL_GC)
- // I may be asked to scan myself more than once, and it may even happen concurrently.
- // To this end, use a CAS loop to check if I've been called already. Only one thread
- // may proceed past this point - whichever one wins the CAS race.
- unsigned oldValue;
- do {
- oldValue = m_visitAggregateHasBeenCalled;
- if (oldValue) {
- // Looks like someone else won! Return immediately to ensure that we don't
- // trace the same CodeBlock concurrently. Doing so is hazardous since we will
- // be mutating the state of ValueProfiles, which contain JSValues, which can
- // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
- // that are nearly impossible to track down.
-
- // Also note that it must be safe to return early as soon as we see the
- // value true (well, (unsigned)1), since once a GC thread is in this method
- // and has won the CAS race (i.e. was responsible for setting the value true)
- // it will definitely complete the rest of this method before declaring
- // termination.
- return;
- }
- } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
-#endif // ENABLE(PARALLEL_GC)
-
- if (!!m_alternative)
- m_alternative->visitAggregate(visitor);
+void CodeBlock::visitWeakly(SlotVisitor& visitor)
+{
+ ConcurrentJSLocker locker(m_lock);
+ if (m_visitWeaklyHasBeenCalled)
+ return;
- if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
- otherBlock->visitAggregate(visitor);
+ m_visitWeaklyHasBeenCalled = true;
- visitor.reportExtraMemoryUsage(ownerExecutable(), sizeof(CodeBlock));
- if (m_jitCode)
- visitor.reportExtraMemoryUsage(ownerExecutable(), m_jitCode->size());
- if (m_instructions.size()) {
- // Divide by refCount() because m_instructions points to something that is shared
- // by multiple CodeBlocks, and we only want to count it towards the heap size once.
- // Having each CodeBlock report only its proportional share of the size is one way
- // of accomplishing this.
- visitor.reportExtraMemoryUsage(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
+ if (Heap::isMarkedConcurrently(this))
+ return;
+
+ if (shouldVisitStrongly(locker)) {
+ visitor.appendUnbarriered(this);
+ return;
}
+
+ // There are two things that may use unconditional finalizers: inline cache clearing
+ // and jettisoning. The probability of us wanting to do at least one of those things
+ // is probably quite close to 1. So we add one no matter what and when it runs, it
+ // figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
- visitor.append(&m_unlinkedCode);
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return;
- // There are three things that may use unconditional finalizers: lazy bytecode freeing,
- // inline cache clearing, and jettisoning. The probability of us wanting to do at
- // least one of those things is probably quite close to 1. So we add one no matter what
- // and when it runs, it figures out whether it has any work to do.
- visitor.addUnconditionalFinalizer(this);
+ // If we jettison ourselves we'll install our alternative, so make sure that it
+ // survives GC even if we don't.
+ visitor.append(m_alternative);
// There are two things that we use weak reference harvesters for: DFG fixpoint for
// jettisoning, and trying to find structures that would be live based on some
// inline cache. So it makes sense to register them regardless.
- visitor.addWeakReferenceHarvester(this);
- m_allTransitionsHaveBeenMarked = false;
-
- if (shouldImmediatelyAssumeLivenessDuringScan()) {
- // This code block is live, so scan all references strongly and return.
- stronglyVisitStrongReferences(visitor);
- stronglyVisitWeakReferences(visitor);
- propagateTransitions(visitor);
- return;
- }
-
+ visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
+
#if ENABLE(DFG_JIT)
// We get here if we're live in the sense that our owner executable is live,
// but we're not yet live for sure in another sense: we may yet decide that this
@@ -2007,17 +2492,149 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
// either us marking additional objects, or by other objects being marked for
// other reasons, that this iteration should run again; it will notify us of this
// decision by calling harvestWeakReferences().
-
+
+ m_allTransitionsHaveBeenMarked = false;
+ propagateTransitions(locker, visitor);
+
m_jitCode->dfgCommon()->livenessHasBeenProved = false;
-
- propagateTransitions(visitor);
- determineLiveness(visitor);
-#else // ENABLE(DFG_JIT)
- RELEASE_ASSERT_NOT_REACHED();
+ determineLiveness(locker, visitor);
#endif // ENABLE(DFG_JIT)
}
-void CodeBlock::propagateTransitions(SlotVisitor& visitor)
+size_t CodeBlock::estimatedSize(JSCell* cell)
+{
+ CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+ size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
+ if (thisObject->m_jitCode)
+ extraMemoryAllocated += thisObject->m_jitCode->size();
+ return Base::estimatedSize(cell) + extraMemoryAllocated;
+}
+
+void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+ CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+ ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+ JSCell::visitChildren(thisObject, visitor);
+ thisObject->visitChildren(visitor);
+}
+
+void CodeBlock::visitChildren(SlotVisitor& visitor)
+{
+ ConcurrentJSLocker locker(m_lock);
+ // There are two things that may use unconditional finalizers: inline cache clearing
+ // and jettisoning. The probability of us wanting to do at least one of those things
+ // is probably quite close to 1. So we add one no matter what and when it runs, it
+ // figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
+
+ if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+ visitor.appendUnbarriered(otherBlock);
+
+ if (m_jitCode)
+ visitor.reportExtraMemoryVisited(m_jitCode->size());
+ if (m_instructions.size()) {
+ unsigned refCount = m_instructions.refCount();
+ if (!refCount) {
+ dataLog("CodeBlock: ", RawPointer(this), "\n");
+ dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
+ dataLog("refCount: ", refCount, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
+ }
+
+ stronglyVisitStrongReferences(locker, visitor);
+ stronglyVisitWeakReferences(locker, visitor);
+
+ m_allTransitionsHaveBeenMarked = false;
+ propagateTransitions(locker, visitor);
+}
+
+bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
+{
+ if (Options::forceCodeBlockLiveness())
+ return true;
+
+ if (shouldJettisonDueToOldAge(locker))
+ return false;
+
+ // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+ // their weak references go stale. So if a basline JIT CodeBlock gets
+ // scanned, we can assume that this means that it's live.
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return true;
+
+ return false;
+}
+
+bool CodeBlock::shouldJettisonDueToWeakReference()
+{
+ if (!JITCode::isOptimizingJIT(jitType()))
+ return false;
+ return !Heap::isMarked(this);
+}
+
+static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
+{
+ if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
+ switch (jitType) {
+ case JITCode::InterpreterThunk:
+ return std::chrono::milliseconds(10);
+ case JITCode::BaselineJIT:
+ return std::chrono::milliseconds(10 + 20);
+ case JITCode::DFGJIT:
+ return std::chrono::milliseconds(40);
+ case JITCode::FTLJIT:
+ return std::chrono::milliseconds(120);
+ default:
+ return std::chrono::milliseconds::max();
+ }
+ }
+
+ switch (jitType) {
+ case JITCode::InterpreterThunk:
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
+ case JITCode::BaselineJIT:
+ // Effectively 10 additional seconds, since BaselineJIT and
+ // InterpreterThunk share a CodeBlock.
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
+ case JITCode::DFGJIT:
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
+ case JITCode::FTLJIT:
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
+ default:
+ return std::chrono::milliseconds::max();
+ }
+}
+
+bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
+{
+ if (Heap::isMarkedConcurrently(this))
+ return false;
+
+ if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
+ return true;
+
+ if (timeSinceCreation() < timeToLive(jitType()))
+ return false;
+
+ return true;
+}
+
+#if ENABLE(DFG_JIT)
+static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+{
+ if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
+ return false;
+
+ if (!Heap::isMarkedConcurrently(transition.m_from.get()))
+ return false;
+
+ return true;
+}
+#endif // ENABLE(DFG_JIT)
+
+void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
@@ -2026,19 +2643,23 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
bool allAreMarkedSoFar = true;
-#if ENABLE(LLINT)
Interpreter* interpreter = m_vm->interpreter;
if (jitType() == JITCode::InterpreterThunk) {
const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line: {
- if (Heap::isMarked(instruction[4].u.structure.get()))
- visitor.append(&instruction[6].u.structure);
+ case op_put_by_id: {
+ StructureID oldStructureID = instruction[4].u.structureID;
+ StructureID newStructureID = instruction[6].u.structureID;
+ if (!oldStructureID || !newStructureID)
+ break;
+ Structure* oldStructure =
+ m_vm->heap.structureIDTable().get(oldStructureID);
+ Structure* newStructure =
+ m_vm->heap.structureIDTable().get(newStructureID);
+ if (Heap::isMarkedConcurrently(oldStructure))
+ visitor.appendUnbarriered(newStructure);
else
allAreMarkedSoFar = false;
break;
@@ -2048,69 +2669,42 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
}
}
}
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
if (JITCode::isJIT(jitType())) {
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo& stubInfo = **iter;
- switch (stubInfo.accessType) {
- case access_put_by_id_transition_normal:
- case access_put_by_id_transition_direct: {
- JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
- if ((!origin || Heap::isMarked(origin))
- && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
- visitor.append(&stubInfo.u.putByIdTransition.structure);
- else
- allAreMarkedSoFar = false;
- break;
- }
-
- case access_put_by_id_list: {
- PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
- JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
- if (origin && !Heap::isMarked(origin)) {
- allAreMarkedSoFar = false;
- break;
- }
- for (unsigned j = list->size(); j--;) {
- PutByIdAccess& access = list->m_list[j];
- if (!access.isTransition())
- continue;
- if (Heap::isMarked(access.oldStructure()))
- visitor.append(&access.m_newStructure);
- else
- allAreMarkedSoFar = false;
- }
- break;
- }
-
- default:
- break;
- }
- }
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
+ allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
}
#endif // ENABLE(JIT)
#if ENABLE(DFG_JIT)
if (JITCode::isOptimizingJIT(jitType())) {
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
- for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- if ((!dfgCommon->transitions[i].m_codeOrigin
- || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
- && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
+ for (auto& weakReference : dfgCommon->weakStructureReferences)
+ allAreMarkedSoFar &= weakReference->markIfCheap(visitor);
+
+ for (auto& transition : dfgCommon->transitions) {
+ if (shouldMarkTransition(transition)) {
// If the following three things are live, then the target of the
// transition is also live:
+ //
// - This code block. We know it's live already because otherwise
// we wouldn't be scanning ourselves.
+ //
// - The code origin of the transition. Transitions may arise from
// code that was inlined. They are not relevant if the user's
// object that is required for the inlinee to run is no longer
// live.
+ //
// - The source of the transition. The transition checks if some
// heap location holds the source, and if so, stores the target.
// Hence the source must be live for the transition to be live.
- visitor.append(&dfgCommon->transitions[i].m_to);
+ //
+ // We also short-circuit the liveness if the structure is harmless
+ // to mark (i.e. its global object and prototype are both already
+ // live).
+
+ visitor.append(transition.m_to);
} else
allAreMarkedSoFar = false;
}
@@ -2121,13 +2715,10 @@ void CodeBlock::propagateTransitions(SlotVisitor& visitor)
m_allTransitionsHaveBeenMarked = true;
}
-void CodeBlock::determineLiveness(SlotVisitor& visitor)
+void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
- if (shouldImmediatelyAssumeLivenessDuringScan())
- return;
-
#if ENABLE(DFG_JIT)
// Check if we have any remaining work to do.
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
@@ -2139,11 +2730,21 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
// GC we still have not proved liveness, then this code block is toast.
bool allAreLiveSoFar = true;
for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
- if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
+ JSCell* reference = dfgCommon->weakReferences[i].get();
+ ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
+ if (!Heap::isMarkedConcurrently(reference)) {
allAreLiveSoFar = false;
break;
}
}
+ if (allAreLiveSoFar) {
+ for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
+ if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
+ allAreLiveSoFar = false;
+ break;
+ }
+ }
+ }
// If some weak references are dead, then this fixpoint iteration was
// unsuccessful.
@@ -2153,261 +2754,346 @@ void CodeBlock::determineLiveness(SlotVisitor& visitor)
// All weak references are live. Record this information so we don't
// come back here again, and scan the strong references.
dfgCommon->livenessHasBeenProved = true;
- stronglyVisitStrongReferences(visitor);
+ visitor.appendUnbarriered(this);
#endif // ENABLE(DFG_JIT)
}
-void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
+void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
{
- propagateTransitions(visitor);
- determineLiveness(visitor);
+ CodeBlock* codeBlock =
+ bitwise_cast<CodeBlock*>(
+ bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
+
+ codeBlock->propagateTransitions(NoLockingNecessary, visitor);
+ codeBlock->determineLiveness(NoLockingNecessary, visitor);
}
-void CodeBlock::finalizeUnconditionally()
+void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
+{
+ instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
+ instruction[4].u.pointer = nullptr;
+ instruction[5].u.pointer = nullptr;
+ instruction[6].u.pointer = nullptr;
+}
+
+void CodeBlock::finalizeLLIntInlineCaches()
{
Interpreter* interpreter = m_vm->interpreter;
- if (JITCode::couldBeInterpreted(jitType())) {
- const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
- for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
- Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
- switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
- case op_get_by_id:
- case op_get_by_id_out_of_line:
- case op_put_by_id:
- case op_put_by_id_out_of_line:
- if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
- curInstruction[4].u.structure.clear();
- curInstruction[5].u.operand = 0;
+ const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+ for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
+ switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+ case op_get_by_id:
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset: {
+ StructureID oldStructureID = curInstruction[4].u.structureID;
+ if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
break;
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line:
- if (Heap::isMarked(curInstruction[4].u.structure.get())
- && Heap::isMarked(curInstruction[6].u.structure.get())
- && Heap::isMarked(curInstruction[7].u.structureChain.get()))
- break;
- if (Options::verboseOSR()) {
- dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
- curInstruction[4].u.structure.get(),
- curInstruction[6].u.structure.get(),
- curInstruction[7].u.structureChain.get());
- }
- curInstruction[4].u.structure.clear();
- curInstruction[6].u.structure.clear();
- curInstruction[7].u.structureChain.clear();
- curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt property access.\n");
+ clearLLIntGetByIdCache(curInstruction);
+ break;
+ }
+ case op_put_by_id: {
+ StructureID oldStructureID = curInstruction[4].u.structureID;
+ StructureID newStructureID = curInstruction[6].u.structureID;
+ StructureChain* chain = curInstruction[7].u.structureChain.get();
+ if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
+ (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
+ (!chain || Heap::isMarked(chain)))
break;
- case op_get_array_length:
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt put transition.\n");
+ curInstruction[4].u.structureID = 0;
+ curInstruction[5].u.operand = 0;
+ curInstruction[6].u.structureID = 0;
+ curInstruction[7].u.structureChain.clear();
+ break;
+ }
+ case op_get_array_length:
+ break;
+ case op_to_this:
+ if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
break;
- case op_to_this:
- if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
- curInstruction[2].u.structure.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
+ curInstruction[2].u.structure.clear();
+ curInstruction[3].u.toThisStatus = merge(
+ curInstruction[3].u.toThisStatus, ToThisClearedByGC);
+ break;
+ case op_create_this: {
+ auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
+ if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
break;
- case op_get_callee:
- if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
- curInstruction[2].u.jsCell.clear();
+ JSCell* cachedFunction = cacheWriteBarrier.get();
+ if (Heap::isMarked(cachedFunction))
break;
- case op_resolve_scope: {
- WriteBarrierBase<JSActivation>& activation = curInstruction[5].u.activation;
- if (!activation || Heap::isMarked(activation.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing dead activation %p.\n", activation.get());
- activation.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
+ cacheWriteBarrier.clear();
+ break;
+ }
+ case op_resolve_scope: {
+ // Right now this isn't strictly necessary. Any symbol tables that this will refer to
+ // are for outer functions, and we refer to those functions strongly, and they refer
+ // to the symbol table strongly. But it's nice to be on the safe side.
+ WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
+ if (!symbolTable || Heap::isMarked(symbolTable.get()))
break;
- }
- case op_get_from_scope:
- case op_put_to_scope: {
- ResolveModeAndType modeAndType =
- ResolveModeAndType(curInstruction[4].u.operand);
- if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks)
- continue;
- WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
- if (!structure || Heap::isMarked(structure.get()))
- break;
- if (Options::verboseOSR())
- dataLogF("Clearing scope access with structure %p.\n", structure.get());
- structure.clear();
+ if (Options::verboseOSR())
+ dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
+ symbolTable.clear();
+ break;
+ }
+ case op_get_from_scope:
+ case op_put_to_scope: {
+ GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
+ if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
+ || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
+ continue;
+ WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
+ if (!structure || Heap::isMarked(structure.get()))
break;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
+ if (Options::verboseOSR())
+ dataLogF("Clearing scope access with structure %p.\n", structure.get());
+ structure.clear();
+ break;
}
-
-#if ENABLE(LLINT)
- for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
- if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
- if (Options::verboseOSR())
- dataLog("Clearing LLInt call from ", *this, "\n");
- m_llintCallLinkInfos[i].unlink();
- }
- if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
- m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ default:
+ OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
+ ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
}
-#endif // ENABLE(LLINT)
}
-#if ENABLE(DFG_JIT)
- // Check if we're not live. If we are, then jettison.
- if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_jitCode->dfgCommon()->livenessHasBeenProved)) {
- if (Options::verboseOSR())
- dataLog(*this, " has dead weak references, jettisoning during GC.\n");
+ // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
+ // then cleared the cache without GCing in between.
+ m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
+ return !Heap::isMarked(pair.key);
+ });
- if (DFG::shouldShowDisassembly()) {
- dataLog(*this, " will be jettisoned because of the following dead references:\n");
- DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
- for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
- JSCell* origin = transition.m_codeOrigin.get();
- JSCell* from = transition.m_from.get();
- JSCell* to = transition.m_to.get();
- if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
- continue;
- dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
- }
- for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
- JSCell* weak = dfgCommon->weakReferences[i].get();
- if (Heap::isMarked(weak))
- continue;
- dataLog(" Weak reference ", RawPointer(weak), ".\n");
- }
+ for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+ if (Options::verboseOSR())
+ dataLog("Clearing LLInt call from ", *this, "\n");
+ m_llintCallLinkInfos[i].unlink();
}
-
- jettison();
+ if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+ m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ }
+}
+
+void CodeBlock::finalizeBaselineJITInlineCaches()
+{
+#if ENABLE(JIT)
+ for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
+ (*iter)->visitWeak(*vm());
+
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo& stubInfo = **iter;
+ stubInfo.visitWeakReferences(this);
+ }
+#endif
+}
+
+void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
+{
+ CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
+ bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
+
+ codeBlock->updateAllPredictions();
+
+ if (!Heap::isMarked(codeBlock)) {
+ if (codeBlock->shouldJettisonDueToWeakReference())
+ codeBlock->jettison(Profiler::JettisonDueToWeakReference);
+ else
+ codeBlock->jettison(Profiler::JettisonDueToOldAge);
return;
}
-#endif // ENABLE(DFG_JIT)
+
+ if (JITCode::couldBeInterpreted(codeBlock->jitType()))
+ codeBlock->finalizeLLIntInlineCaches();
#if ENABLE(JIT)
- // Handle inline caches.
- if (!!jitCode()) {
- RepatchBuffer repatchBuffer(this);
- for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
- if (callLinkInfo(i).isLinked()) {
- if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
- if (!Heap::isMarked(stub->structure())
- || !Heap::isMarked(stub->executable())) {
- if (Options::verboseOSR()) {
- dataLog(
- "Clearing closure call from ", *this, " to ",
- stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
- ", stub routine ", RawPointer(stub), ".\n");
- }
- callLinkInfo(i).unlink(*m_vm, repatchBuffer);
- }
- } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
- if (Options::verboseOSR()) {
- dataLog(
- "Clearing call from ", *this, " to ",
- RawPointer(callLinkInfo(i).callee.get()), " (",
- callLinkInfo(i).callee.get()->executable()->hashFor(
- callLinkInfo(i).specializationKind()),
- ").\n");
- }
- callLinkInfo(i).unlink(*m_vm, repatchBuffer);
- }
- }
- if (!!callLinkInfo(i).lastSeenCallee
- && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
- callLinkInfo(i).lastSeenCallee.clear();
- }
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo& stubInfo = **iter;
-
- if (stubInfo.visitWeakReferences())
- continue;
-
- resetStubDuringGCInternal(repatchBuffer, stubInfo);
- }
+ if (!!codeBlock->jitCode())
+ codeBlock->finalizeBaselineJITInlineCaches();
+#endif
+}
+
+void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
+{
+#if ENABLE(JIT)
+ if (JITCode::isJIT(jitType()))
+ toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getStubInfoMap(StubInfoMap& result)
+{
+ ConcurrentJSLocker locker(m_lock);
+ getStubInfoMap(locker, result);
+}
+
+void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
+{
+#if ENABLE(JIT)
+ if (JITCode::isJIT(jitType()))
+ toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
+#else
+ UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
+{
+ ConcurrentJSLocker locker(m_lock);
+ getCallLinkInfoMap(locker, result);
+}
+
+void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
+{
+#if ENABLE(JIT)
+ if (JITCode::isJIT(jitType())) {
+ for (auto* byValInfo : m_byValInfos)
+ result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
}
+#else
+ UNUSED_PARAM(result);
#endif
}
+void CodeBlock::getByValInfoMap(ByValInfoMap& result)
+{
+ ConcurrentJSLocker locker(m_lock);
+ getByValInfoMap(locker, result);
+}
+
#if ENABLE(JIT)
-StructureStubInfo* CodeBlock::addStubInfo()
+StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
{
- ConcurrentJITLocker locker(m_lock);
- return m_stubInfos.add();
+ ConcurrentJSLocker locker(m_lock);
+ return m_stubInfos.add(accessType);
}
-void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
{
- toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+ return m_addICs.add(arithProfile);
}
-void CodeBlock::resetStub(StructureStubInfo& stubInfo)
+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
{
- if (stubInfo.accessType == access_unset)
- return;
-
- ConcurrentJITLocker locker(m_lock);
-
- RepatchBuffer repatchBuffer(this);
- resetStubInternal(repatchBuffer, stubInfo);
+ return m_mulICs.add(arithProfile);
}
-void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
{
- AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
-
- if (Options::verboseOSR()) {
- // This can be called from GC destructor calls, so we don't try to do a full dump
- // of the CodeBlock.
- dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
+ return m_subICs.add(arithProfile);
+}
+
+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
+{
+ return m_negICs.add(arithProfile);
+}
+
+StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
+{
+ for (StructureStubInfo* stubInfo : m_stubInfos) {
+ if (stubInfo->codeOrigin == codeOrigin)
+ return stubInfo;
}
-
- RELEASE_ASSERT(JITCode::isJIT(jitType()));
-
- if (isGetByIdAccess(accessType))
- resetGetByID(repatchBuffer, stubInfo);
- else if (isPutByIdAccess(accessType))
- resetPutByID(repatchBuffer, stubInfo);
- else {
- RELEASE_ASSERT(isInAccess(accessType));
- resetIn(repatchBuffer, stubInfo);
+ return nullptr;
+}
+
+ByValInfo* CodeBlock::addByValInfo()
+{
+ ConcurrentJSLocker locker(m_lock);
+ return m_byValInfos.add();
+}
+
+CallLinkInfo* CodeBlock::addCallLinkInfo()
+{
+ ConcurrentJSLocker locker(m_lock);
+ return m_callLinkInfos.add();
+}
+
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
+{
+ for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+ if ((*iter)->codeOrigin() == CodeOrigin(index))
+ return *iter;
}
-
- stubInfo.reset();
+ return nullptr;
}
-void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void CodeBlock::resetJITData()
{
- resetStubInternal(repatchBuffer, stubInfo);
- stubInfo.resetByGC = true;
+ RELEASE_ASSERT(!JITCode::isJIT(jitType()));
+ ConcurrentJSLocker locker(m_lock);
+
+ // We can clear these because no other thread will have references to any stub infos, call
+ // link infos, or by val infos if we don't have JIT code. Attempts to query these data
+ // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
+ // don't have JIT code.
+ m_stubInfos.clear();
+ m_callLinkInfos.clear();
+ m_byValInfos.clear();
+
+ // We can clear this because the DFG's queries to these data structures are guarded by whether
+ // there is JIT code.
+ m_rareCaseProfiles.clear();
}
#endif
-void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
+void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
- visitor.append(&m_globalObject);
- visitor.append(&m_ownerExecutable);
- visitor.append(&m_symbolTable);
- visitor.append(&m_unlinkedCode);
+ // We strongly visit OSR exits targets because we don't want to deal with
+ // the complexity of generating an exit target CodeBlock on demand and
+ // guaranteeing that it matches the details of the CodeBlock we compiled
+ // the OSR exit against.
+
+ visitor.append(m_alternative);
+
+#if ENABLE(DFG_JIT)
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ if (dfgCommon->inlineCallFrames) {
+ for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
+ ASSERT(inlineCallFrame->baselineCodeBlock);
+ visitor.append(inlineCallFrame->baselineCodeBlock);
+ }
+ }
+#endif
+}
+
+void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
+{
+ UNUSED_PARAM(locker);
+
+ visitor.append(m_globalObject);
+ visitor.append(m_ownerExecutable);
+ visitor.append(m_unlinkedCode);
if (m_rareData)
- m_rareData->m_evalCodeCache.visitAggregate(visitor);
+ m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
- for (size_t i = 0; i < m_functionExprs.size(); ++i)
- visitor.append(&m_functionExprs[i]);
- for (size_t i = 0; i < m_functionDecls.size(); ++i)
- visitor.append(&m_functionDecls[i]);
- for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
- m_objectAllocationProfiles[i].visitAggregate(visitor);
+ for (auto& functionExpr : m_functionExprs)
+ visitor.append(functionExpr);
+ for (auto& functionDecl : m_functionDecls)
+ visitor.append(functionDecl);
+ for (auto& objectAllocationProfile : m_objectAllocationProfiles)
+ objectAllocationProfile.visitAggregate(visitor);
- updateAllPredictions();
+#if ENABLE(JIT)
+ for (ByValInfo* byValInfo : m_byValInfos)
+ visitor.append(byValInfo->cachedSymbol);
+#endif
+
+#if ENABLE(DFG_JIT)
+ if (JITCode::isOptimizingJIT(jitType()))
+ visitOSRExitTargets(locker, visitor);
+#endif
}
-void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
+void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
@@ -2417,15 +3103,20 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
- for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
- if (!!dfgCommon->transitions[i].m_codeOrigin)
- visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
- visitor.append(&dfgCommon->transitions[i].m_from);
- visitor.append(&dfgCommon->transitions[i].m_to);
+ for (auto& transition : dfgCommon->transitions) {
+ if (!!transition.m_codeOrigin)
+ visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
+ visitor.append(transition.m_from);
+ visitor.append(transition.m_to);
}
-
- for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
- visitor.append(&dfgCommon->weakReferences[i]);
+
+ for (auto& weakReference : dfgCommon->weakReferences)
+ visitor.append(weakReference);
+
+ for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
+ visitor.append(weakStructureReference);
+
+ dfgCommon->livenessHasBeenProved = true;
#endif
}
@@ -2474,87 +3165,56 @@ bool CodeBlock::hasOptimizedReplacement()
}
#endif
-bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
{
- if (operand.isArgument())
- return operand.toArgument() && usesArguments();
-
- if (inlineCallFrame)
- return inlineCallFrame->capturedVars.get(operand.toLocal());
-
- // The activation object isn't in the captured region, but it's "captured"
- // in the sense that stores to its location can be observed indirectly.
- if (needsActivation() && operand == activationRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == argumentsRegister())
- return true;
-
- // Ditto for the arguments object.
- if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
- return true;
-
- // We're in global code so there are no locals to capture
- if (!symbolTable())
- return false;
-
- return symbolTable()->isCaptured(operand.offset());
+ RELEASE_ASSERT(bytecodeOffset < instructions().size());
+ return handlerForIndex(bytecodeOffset, requiredHandler);
}
-int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
+HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
{
- // We'll be adding this to the stack pointer to get a registers pointer that looks
- // like it would have looked in the baseline engine. For example, if bytecode would
- // have put the first captured variable at offset -5 but we put it at offset -1, then
- // we'll have an offset of 4.
- int32_t offset = 0;
-
- // Compute where we put the captured variables. This offset will point the registers
- // pointer directly at the first captured var.
- offset += machineCaptureStart;
-
- // Now compute the offset needed to make the runtime see the captured variables at the
- // same offset that the bytecode would have used.
- offset -= symbolTable()->captureStart();
-
- return offset;
+ if (!m_rareData)
+ return 0;
+ return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
}
-int CodeBlock::framePointerOffsetToGetActivationRegisters()
+CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
{
- if (!JITCode::isOptimizingJIT(jitType()))
- return 0;
#if ENABLE(DFG_JIT)
- return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+ RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
+ ASSERT(!!handlerForIndex(originalCallSite.bits()));
+ CodeOrigin originalOrigin = codeOrigin(originalCallSite);
+ return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
#else
+ // We never create new on-the-fly exception handling
+ // call sites outside the DFG/FTL inline caches.
+ UNUSED_PARAM(originalCallSite);
RELEASE_ASSERT_NOT_REACHED();
- return 0;
+ return CallSiteIndex(0u);
#endif
}
-HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
+void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
{
- RELEASE_ASSERT(bytecodeOffset < instructions().size());
-
- if (!m_rareData)
- return 0;
-
+ RELEASE_ASSERT(m_rareData);
Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ unsigned index = callSiteIndex.bits();
for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
- // Handlers are ordered innermost first, so the first handler we encounter
- // that contains the source address is the correct handler to use.
- if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
- return &exceptionHandlers[i];
+ HandlerInfo& handler = exceptionHandlers[i];
+ if (handler.start <= index && handler.end > index) {
+ exceptionHandlers.remove(i);
+ return;
+ }
}
- return 0;
+ RELEASE_ASSERT_NOT_REACHED();
}
unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
RELEASE_ASSERT(bytecodeOffset < instructions().size());
- return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+ return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}
unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
@@ -2568,12 +3228,12 @@ unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
return column;
}
-void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
{
m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
divot += m_sourceOffset;
column += line ? 1 : firstLineColumnOffset();
- line += m_ownerExecutable->lineNo();
+ line += ownerScriptExecutable()->firstLine();
}
bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
@@ -2599,11 +3259,13 @@ bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
+ ConcurrentJSLocker locker(m_lock);
+
m_rareCaseProfiles.shrinkToFit();
- m_specialFastCaseProfiles.shrinkToFit();
if (shrinkMode == EarlyShrink) {
m_constantRegisters.shrinkToFit();
+ m_constantsSourceCodeRepresentation.shrinkToFit();
if (m_rareData) {
m_rareData->m_switchJumpTables.shrinkToFit();
@@ -2612,175 +3274,154 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
} // else don't shrink these, because we would have already pointed pointers into these tables.
}
-unsigned CodeBlock::addOrFindConstant(JSValue v)
-{
- unsigned result;
- if (findConstant(v, result))
- return result;
- return addConstant(v);
-}
-
-bool CodeBlock::findConstant(JSValue v, unsigned& index)
-{
- unsigned numberOfConstants = numberOfConstantRegisters();
- for (unsigned i = 0; i < numberOfConstants; ++i) {
- if (getConstant(FirstConstantRegisterIndex + i) == v) {
- index = i;
- return true;
- }
- }
- index = numberOfConstants;
- return false;
-}
-
#if ENABLE(JIT)
-void CodeBlock::unlinkCalls()
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
- if (!!m_alternative)
- m_alternative->unlinkCalls();
-#if ENABLE(LLINT)
- for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
- if (m_llintCallLinkInfos[i].isLinked())
- m_llintCallLinkInfos[i].unlink();
- }
-#endif
- if (!m_callLinkInfos.size())
- return;
- if (!m_vm->canUseJIT())
- return;
- RepatchBuffer repatchBuffer(this);
- for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
- if (!m_callLinkInfos[i].isLinked())
- continue;
- m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
- }
+ noticeIncomingCall(callerFrame);
+ m_incomingCalls.push(incoming);
}
-void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
+void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
{
noticeIncomingCall(callerFrame);
- m_incomingCalls.push(incoming);
+ m_incomingPolymorphicCalls.push(incoming);
}
#endif // ENABLE(JIT)
void CodeBlock::unlinkIncomingCalls()
{
-#if ENABLE(LLINT)
while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
m_incomingLLIntCalls.begin()->unlink();
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
- if (m_incomingCalls.isEmpty())
- return;
- RepatchBuffer repatchBuffer(this);
while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
+ m_incomingCalls.begin()->unlink(*vm());
+ while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
+ m_incomingPolymorphicCalls.begin()->unlink(*vm());
#endif // ENABLE(JIT)
}
-#if ENABLE(LLINT)
void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
noticeIncomingCall(callerFrame);
m_incomingLLIntCalls.push(incoming);
}
-#endif // ENABLE(LLINT)
-void CodeBlock::clearEvalCache()
+CodeBlock* CodeBlock::newReplacement()
{
- if (!!m_alternative)
- m_alternative->clearEvalCache();
- if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
- otherBlock->clearEvalCache();
- if (!m_rareData)
- return;
- m_rareData->m_evalCodeCache.clear();
+ return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
}
-void CodeBlock::install()
+#if ENABLE(JIT)
+CodeBlock* CodeBlock::replacement()
{
- ownerExecutable()->installCode(this);
-}
+ const ClassInfo* classInfo = this->classInfo(*vm());
-PassRefPtr<CodeBlock> CodeBlock::newReplacement()
-{
- return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
-}
+ if (classInfo == FunctionCodeBlock::info())
+ return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
-const SlowArgument* CodeBlock::machineSlowArguments()
-{
- if (!JITCode::isOptimizingJIT(jitType()))
- return symbolTable()->slowArguments();
-
-#if ENABLE(DFG_JIT)
- return jitCode()->dfgCommon()->slowArguments.get();
-#else // ENABLE(DFG_JIT)
- return 0;
-#endif // ENABLE(DFG_JIT)
-}
+ if (classInfo == EvalCodeBlock::info())
+ return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
-#if ENABLE(JIT)
-CodeBlock* ProgramCodeBlock::replacement()
-{
- return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
-}
+ if (classInfo == ProgramCodeBlock::info())
+ return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
-CodeBlock* EvalCodeBlock::replacement()
-{
- return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
-}
+ if (classInfo == ModuleProgramCodeBlock::info())
+ return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
-CodeBlock* FunctionCodeBlock::replacement()
-{
- return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
}
-DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
+DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
{
- return DFG::programCapabilityLevel(this);
-}
+ const ClassInfo* classInfo = this->classInfo(*vm());
-DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
-{
- return DFG::evalCapabilityLevel(this);
+ if (classInfo == FunctionCodeBlock::info()) {
+ if (m_isConstructor)
+ return DFG::functionForConstructCapabilityLevel(this);
+ return DFG::functionForCallCapabilityLevel(this);
+ }
+
+ if (classInfo == EvalCodeBlock::info())
+ return DFG::evalCapabilityLevel(this);
+
+ if (classInfo == ProgramCodeBlock::info())
+ return DFG::programCapabilityLevel(this);
+
+ if (classInfo == ModuleProgramCodeBlock::info())
+ return DFG::programCapabilityLevel(this);
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return DFG::CannotCompile;
}
-DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
+#endif // ENABLE(JIT)
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
{
- if (m_isConstructor)
- return DFG::functionForConstructCapabilityLevel(this);
- return DFG::functionForCallCapabilityLevel(this);
-}
+#if !ENABLE(DFG_JIT)
+ UNUSED_PARAM(mode);
+ UNUSED_PARAM(detail);
#endif
+
+ CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
-void CodeBlock::jettison(ReoptimizationMode mode)
-{
+ RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+
#if ENABLE(DFG_JIT)
- if (DFG::shouldShowDisassembly()) {
+ if (DFG::shouldDumpDisassembly()) {
dataLog("Jettisoning ", *this);
if (mode == CountReoptimization)
dataLog(" and counting reoptimization");
+ dataLog(" due to ", reason);
+ if (detail)
+ dataLog(", ", *detail);
dataLog(".\n");
}
- DeferGCForAWhile deferGC(*m_heap);
- RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+ if (reason == Profiler::JettisonDueToWeakReference) {
+ if (DFG::shouldDumpDisassembly()) {
+ dataLog(*this, " will be jettisoned because of the following dead references:\n");
+ DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+ for (auto& transition : dfgCommon->transitions) {
+ JSCell* origin = transition.m_codeOrigin.get();
+ JSCell* from = transition.m_from.get();
+ JSCell* to = transition.m_to.get();
+ if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+ continue;
+ dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
+ }
+ for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+ JSCell* weak = dfgCommon->weakReferences[i].get();
+ if (Heap::isMarked(weak))
+ continue;
+ dataLog(" Weak reference ", RawPointer(weak), ".\n");
+ }
+ }
+ }
+#endif // ENABLE(DFG_JIT)
+
+ DeferGCForAWhile deferGC(*heap());
// We want to accomplish two things here:
// 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
// we should OSR exit at the top of the next bytecode instruction after the return.
// 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
-
- // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
- // whether the invalidation has already happened.
- if (!jitCode()->dfgCommon()->invalidate()) {
- // Nothing to do since we've already been invalidated. That means that we cannot be
- // the optimized replacement.
- RELEASE_ASSERT(this != replacement());
- return;
+
+#if ENABLE(DFG_JIT)
+ if (reason != Profiler::JettisonDueToOldAge) {
+ if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
+ compilation->setJettisonReason(reason, detail);
+
+ // This accomplishes (1), and does its own book-keeping about whether it has already happened.
+ if (!jitCode()->dfgCommon()->invalidate()) {
+ // We've already been invalidated.
+ RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
+ return;
+ }
}
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did invalidate ", *this, "\n");
// Count the reoptimization if that's what the user wanted.
@@ -2788,24 +3429,35 @@ void CodeBlock::jettison(ReoptimizationMode mode)
// FIXME: Maybe this should call alternative().
// https://bugs.webkit.org/show_bug.cgi?id=123677
baselineAlternative()->countReoptimization();
- if (DFG::shouldShowDisassembly())
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did count reoptimization for ", *this, "\n");
}
- // Now take care of the entrypoint.
if (this != replacement()) {
// This means that we were never the entrypoint. This can happen for OSR entry code
// blocks.
return;
}
- alternative()->optimizeAfterWarmUp();
- tallyFrequentExitSites();
- alternative()->install();
- if (DFG::shouldShowDisassembly())
+
+ if (alternative())
+ alternative()->optimizeAfterWarmUp();
+
+ if (reason != Profiler::JettisonDueToOldAge)
+ tallyFrequentExitSites();
+#endif // ENABLE(DFG_JIT)
+
+ // Jettison can happen during GC. We don't want to install code to a dead executable
+ // because that would add a dead object to the remembered set.
+ if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
+ return;
+
+ // This accomplishes (2).
+ ownerScriptExecutable()->installCode(
+ m_globalObject->vm(), alternative(), codeType(), specializationKind());
+
+#if ENABLE(DFG_JIT)
+ if (DFG::shouldDumpDisassembly())
dataLog(" Did install baseline version of ", *this, "\n");
-#else // ENABLE(DFG_JIT)
- UNUSED_PARAM(mode);
- UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}
@@ -2813,28 +3465,82 @@ JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
{
if (!codeOrigin.inlineCallFrame)
return globalObject();
- return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
+ return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
}
+class RecursionCheckFunctor {
+public:
+ RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
+ : m_startCallFrame(startCallFrame)
+ , m_codeBlock(codeBlock)
+ , m_depthToCheck(depthToCheck)
+ , m_foundStartCallFrame(false)
+ , m_didRecurse(false)
+ { }
+
+ StackVisitor::Status operator()(StackVisitor& visitor) const
+ {
+ CallFrame* currentCallFrame = visitor->callFrame();
+
+ if (currentCallFrame == m_startCallFrame)
+ m_foundStartCallFrame = true;
+
+ if (m_foundStartCallFrame) {
+ if (visitor->callFrame()->codeBlock() == m_codeBlock) {
+ m_didRecurse = true;
+ return StackVisitor::Done;
+ }
+
+ if (!m_depthToCheck--)
+ return StackVisitor::Done;
+ }
+
+ return StackVisitor::Continue;
+ }
+
+ bool didRecurse() const { return m_didRecurse; }
+
+private:
+ CallFrame* m_startCallFrame;
+ CodeBlock* m_codeBlock;
+ mutable unsigned m_depthToCheck;
+ mutable bool m_foundStartCallFrame;
+ mutable bool m_didRecurse;
+};
+
void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
{
CodeBlock* callerCodeBlock = callerFrame->codeBlock();
if (Options::verboseCallLink())
- dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
+ dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
+#if ENABLE(DFG_JIT)
if (!m_shouldAlwaysBeInlined)
return;
+
+ if (!callerCodeBlock) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is native.\n");
+ return;
+ }
-#if ENABLE(DFG_JIT)
if (!hasBaselineJITProfiling())
return;
if (!DFG::mightInlineFunction(this))
return;
- if (!canInline(m_capabilityLevelState))
+ if (!canInline(capabilityLevelState()))
+ return;
+
+ if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because caller is too large.\n");
return;
+ }
if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
// If the caller is still in the interpreter, then we can't expect inlining to
@@ -2843,7 +3549,14 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
// any of its callers.
m_shouldAlwaysBeInlined = false;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because caller is in LLInt.\n");
+ dataLog(" Clearing SABI because caller is in LLInt.\n");
+ return;
+ }
+
+ if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
+ m_shouldAlwaysBeInlined = false;
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI bcause caller was already optimized.\n");
return;
}
@@ -2853,40 +3566,72 @@ void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
// delay eval optimization by a *lot*.
m_shouldAlwaysBeInlined = false;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because caller is not a function.\n");
+ dataLog(" Clearing SABI because caller is not a function.\n");
return;
}
-
- ExecState* frame = callerFrame;
- for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
- if (frame->isVMEntrySentinel())
- break;
- if (frame->codeBlock() == this) {
- // Recursive calls won't be inlined.
- if (Options::verboseCallLink())
- dataLog(" Marking SABI because recursion was detected.\n");
- m_shouldAlwaysBeInlined = false;
- return;
- }
+
+ // Recursive calls won't be inlined.
+ RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
+ vm()->topCallFrame->iterate(functor);
+
+ if (functor.didRecurse()) {
+ if (Options::verboseCallLink())
+ dataLog(" Clearing SABI because recursion was detected.\n");
+ m_shouldAlwaysBeInlined = false;
+ return;
}
- RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
+ if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
+ dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
+ CRASH();
+ }
- if (canCompile(callerCodeBlock->m_capabilityLevelState))
+ if (canCompile(callerCodeBlock->capabilityLevelState()))
return;
if (Options::verboseCallLink())
- dataLog(" Marking SABI because the caller is not a DFG candidate.\n");
+ dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
m_shouldAlwaysBeInlined = false;
#endif
}
-#if ENABLE(JIT)
unsigned CodeBlock::reoptimizationRetryCounter() const
{
+#if ENABLE(JIT)
ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
return m_reoptimizationRetryCounter;
+#else
+ return 0;
+#endif // ENABLE(JIT)
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
+{
+ m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
+}
+
+void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
+{
+ m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
+}
+
+static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
+{
+ static const unsigned cpuRegisterSize = sizeof(void*);
+ return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
+
+}
+
+size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
+{
+ return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
+}
+
+size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
+{
+ return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
}
void CodeBlock::countReoptimization()
@@ -2899,6 +3644,11 @@ void CodeBlock::countReoptimization()
unsigned CodeBlock::numberOfDFGCompiles()
{
ASSERT(JITCode::isBaselineCode(jitType()));
+ if (Options::testTheFTL()) {
+ if (m_didFailFTLCompilation)
+ return 1000000;
+ return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
+ }
return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
}
@@ -2979,13 +3729,16 @@ double CodeBlock::optimizationThresholdScalingFactor()
ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+
+ result *= codeTypeThresholdMultiplier();
+
if (Options::verboseOSR()) {
dataLog(
*this, ": instruction count is ", instructionCount,
", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
"\n");
}
- return result * codeTypeThresholdMultiplier();
+ return result;
}
static int32_t clipThreshold(double threshold)
@@ -3010,7 +3763,7 @@ int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
bool CodeBlock::checkIfOptimizationThresholdReached()
{
#if ENABLE(DFG_JIT)
- if (DFG::Worklist* worklist = m_vm->worklist.get()) {
+ if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
== DFG::Worklist::Compiled) {
optimizeNextInvocation();
@@ -3076,8 +3829,22 @@ void CodeBlock::forceOptimizationSlowPathConcurrently()
#if ENABLE(DFG_JIT)
void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
{
- RELEASE_ASSERT(jitType() == JITCode::BaselineJIT);
- RELEASE_ASSERT((result == CompilationSuccessful) == (replacement() != this));
+ JITCode::JITType type = jitType();
+ if (type != JITCode::BaselineJIT) {
+ dataLog(*this, ": expected to have baseline code but have ", type, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ CodeBlock* theReplacement = replacement();
+ if ((result == CompilationSuccessful) != (theReplacement != this)) {
+ dataLog(*this, ": we have result = ", result, " but ");
+ if (theReplacement == this)
+ dataLog("we are our own replacement.\n");
+ else
+ dataLog("our replacement is ", pointerDump(theReplacement), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
switch (result) {
case CompilationSuccessful:
RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
@@ -3100,6 +3867,8 @@ void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResu
optimizeAfterWarmUp();
return;
}
+
+ dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
RELEASE_ASSERT_NOT_REACHED();
}
@@ -3141,26 +3910,74 @@ bool CodeBlock::shouldReoptimizeFromLoopNow()
}
#endif
-ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
{
- for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
- if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
- return &m_arrayProfiles[i];
+ for (auto& m_arrayProfile : m_arrayProfiles) {
+ if (m_arrayProfile.bytecodeOffset() == bytecodeOffset)
+ return &m_arrayProfile;
}
return 0;
}
-ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
{
- ArrayProfile* result = getArrayProfile(bytecodeOffset);
+ ConcurrentJSLocker locker(m_lock);
+ return getArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
+{
+ m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
+ return &m_arrayProfiles.last();
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
+{
+ ConcurrentJSLocker locker(m_lock);
+ return addArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
+{
+ ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
if (result)
return result;
- return addArrayProfile(bytecodeOffset);
+ return addArrayProfile(locker, bytecodeOffset);
}
+ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+{
+ ConcurrentJSLocker locker(m_lock);
+ return getOrAddArrayProfile(locker, bytecodeOffset);
+}
+
#if ENABLE(DFG_JIT)
// Accessor for the code origins recorded in this block's optimized JITCode's
// DFG common data. Only meaningful when m_jitCode carries DFG data.
Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
{
    return m_jitCode->dfgCommon()->codeOrigins;
}

// Number of extra identifiers the DFG added on top of the unlinked code
// block's identifier table; zero when this block is not optimizing-JIT code.
size_t CodeBlock::numberOfDFGIdentifiers() const
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return 0;

    return m_jitCode->dfgCommon()->dfgIdentifiers.size();
}

// Identifier lookup spanning both tables: indices below the unlinked count
// resolve in the unlinked code block; anything higher indexes the DFG's
// dfgIdentifiers vector, rebased by the unlinked count.
const Identifier& CodeBlock::identifier(int index) const
{
    size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
    if (static_cast<unsigned>(index) < unlinkedIdentifiers)
        return m_unlinkedCode->identifier(index);
    // Out-of-range indices are only valid when DFG identifiers exist.
    ASSERT(JITCode::isOptimizingJIT(jitType()));
    return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
}
#endif // ENABLE(DFG_JIT)
+
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
numberOfLiveNonArgumentValueProfiles = 0;
numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
@@ -3192,7 +4009,7 @@ void CodeBlock::updateAllValueProfilePredictions()
void CodeBlock::updateAllArrayPredictions()
{
- ConcurrentJITLocker locker(m_lock);
+ ConcurrentJSLocker locker(m_lock);
for (unsigned i = m_arrayProfiles.size(); i--;)
m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
@@ -3253,12 +4070,8 @@ void CodeBlock::tallyFrequentExitSites()
switch (jitType()) {
case JITCode::DFGJIT: {
DFG::JITCode* jitCode = m_jitCode->dfg();
- for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
- DFG::OSRExit& exit = jitCode->osrExit[i];
-
- if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
- continue;
- }
+ for (auto& exit : jitCode->osrExit)
+ exit.considerAddingAsFrequentExitSite(profiledBlock);
break;
}
@@ -3270,9 +4083,7 @@ void CodeBlock::tallyFrequentExitSites()
FTL::JITCode* jitCode = m_jitCode->ftl();
for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
FTL::OSRExit& exit = jitCode->osrExit[i];
-
- if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
- continue;
+ exit.considerAddingAsFrequentExitSite(profiledBlock);
}
break;
}
@@ -3308,21 +4119,14 @@ void CodeBlock::dumpValueProfiles()
RareCaseProfile* profile = rareCaseProfile(i);
dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
}
- dataLog("SpecialFastCaseProfile for ", *this, ":\n");
- for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
- RareCaseProfile* profile = specialFastCaseProfile(i);
- dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
- }
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)
unsigned CodeBlock::frameRegisterCount()
{
switch (jitType()) {
-#if ENABLE(LLINT)
case JITCode::InterpreterThunk:
return LLInt::frameRegisterCountFor(this);
-#endif // ENABLE(LLINT)
#if ENABLE(JIT)
case JITCode::BaselineJIT:
@@ -3341,6 +4145,11 @@ unsigned CodeBlock::frameRegisterCount()
}
}
+int CodeBlock::stackPointerOffset()
+{
+ return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+}
+
size_t CodeBlock::predictedMachineCodeSize()
{
// This will be called from CodeBlock::CodeBlock before either m_vm or the
@@ -3349,12 +4158,12 @@ size_t CodeBlock::predictedMachineCodeSize()
if (!m_vm)
return 0;
- if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
+ if (!*m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
return 0; // It's as good of a prediction as we'll get.
// Be conservative: return a size that will be an overestimation 84% of the time.
- double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
- m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
+ double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
// Be paranoid: silently reject bogus multipiers. Silently doing the "wrong" thing
// here is OK, since this whole method is just a heuristic.
@@ -3400,72 +4209,35 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID)
// Best-effort, debug-oriented name for a virtual register: scans every
// SymbolTable found in the constant pool for a variable at that offset, then
// falls back to the special "this" and argument names. Returns the empty
// string when nothing matches.
String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
{
    for (auto& constantRegister : m_constantRegisters) {
        if (constantRegister.get().isEmpty())
            continue;
        // Only constants that are actually SymbolTables can name registers.
        if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
            ConcurrentJSLocker locker(symbolTable->m_lock);
            auto end = symbolTable->end(locker);
            for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
                if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
                    // FIXME: This won't work from the compilation thread.
                    // https://bugs.webkit.org/show_bug.cgi?id=115300
                    return ptr->key.get();
                }
            }
        }
    }
    if (virtualRegister == thisRegister())
        return ASCIILiteral("this");
    if (virtualRegister.isArgument())
        return String::format("arguments[%3d]", virtualRegister.toArgument());
    return "";
}
-namespace {
-
-struct VerifyCapturedDef {
- void operator()(CodeBlock* codeBlock, Instruction* instruction, OpcodeID opcodeID, int operand)
- {
- unsigned bytecodeOffset = instruction - codeBlock->instructions().begin();
-
- if (codeBlock->isConstantRegisterIndex(operand)) {
- codeBlock->beginValidationDidFail();
- dataLog(" At bc#", bytecodeOffset, " encountered a definition of a constant.\n");
- codeBlock->endValidationDidFail();
- return;
- }
-
- switch (opcodeID) {
- case op_enter:
- case op_captured_mov:
- case op_init_lazy_reg:
- case op_create_arguments:
- case op_new_captured_func:
- return;
- default:
- break;
- }
-
- VirtualRegister virtualReg(operand);
- if (!virtualReg.isLocal())
- return;
-
- if (codeBlock->captureCount() && codeBlock->symbolTable()->isCaptured(operand)) {
- codeBlock->beginValidationDidFail();
- dataLog(" At bc#", bytecodeOffset, " encountered invalid assignment to captured variable loc", virtualReg.toLocal(), ".\n");
- codeBlock->endValidationDidFail();
- return;
- }
-
- return;
- }
-};
-
-} // anonymous namespace
+ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
+{
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instructions()[bytecodeOffset].u.opcode);
+ unsigned length = opcodeLength(opcodeID);
+ return instructions()[bytecodeOffset + length - 1].u.profile;
+}
void CodeBlock::validate()
{
@@ -3473,7 +4245,7 @@ void CodeBlock::validate()
FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
- if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
+ if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
beginValidationDidFail();
dataLog(" Wrong number of bits in result!\n");
dataLog(" Result: ", liveAtHead, "\n");
@@ -3481,39 +4253,16 @@ void CodeBlock::validate()
endValidationDidFail();
}
- for (unsigned i = m_numCalleeRegisters; i--;) {
- bool isCaptured = false;
+ for (unsigned i = m_numCalleeLocals; i--;) {
VirtualRegister reg = virtualRegisterForLocal(i);
- if (captureCount())
- isCaptured = reg.offset() <= captureStart() && reg.offset() > captureEnd();
-
- if (isCaptured) {
- if (!liveAtHead.get(i)) {
- beginValidationDidFail();
- dataLog(" Variable loc", i, " is expected to be live because it is captured, but it isn't live.\n");
- dataLog(" Result: ", liveAtHead, "\n");
- endValidationDidFail();
- }
- } else {
- if (liveAtHead.get(i)) {
- beginValidationDidFail();
- dataLog(" Variable loc", i, " is expected to be dead.\n");
- dataLog(" Result: ", liveAtHead, "\n");
- endValidationDidFail();
- }
+ if (liveAtHead[i]) {
+ beginValidationDidFail();
+ dataLog(" Variable ", reg, " is expected to be dead.\n");
+ dataLog(" Result: ", liveAtHead, "\n");
+ endValidationDidFail();
}
}
-
- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions().size();) {
- Instruction* currentInstruction = instructions().begin() + bytecodeOffset;
- OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
-
- VerifyCapturedDef verifyCapturedDef;
- computeDefsForBytecodeOffset(this, bytecodeOffset, verifyCapturedDef);
-
- bytecodeOffset += opcodeLength(opcodeID);
- }
}
void CodeBlock::beginValidationDidFail()
// Records that the debugger has placed additional breakpoints in this block.
// Optimized (DFG/FTL) code is jettisoned — presumably because it would not
// honor the new breakpoint; confirm against the debugger integration.
void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
    m_numBreakpoints += numBreakpoints;
    ASSERT(m_numBreakpoints);
    if (JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}
// Records the debugger's stepping mode. Enabling stepping jettisons any
// optimized code for this block.
void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
    m_steppingMode = mode;
    if (mode != SteppingModeEnabled)
        return;
    if (!JITCode::isOptimizingJIT(jitType()))
        return;
    jettison(Profiler::JettisonDueToDebuggerStepping);
}
+
+RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+{
+ m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_rareCaseProfiles.last();
+}
+
// Looks up the RareCaseProfile for the given bytecode offset, or null if none
// was recorded. Uses binary search, which assumes m_rareCaseProfiles is kept
// sorted by bytecode offset (i.e. profiles are appended in ascending order).
RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
{
    return tryBinarySearch<RareCaseProfile, int>(
        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
        getRareCaseProfileBytecodeOffset);
}
+
+unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
+{
+ RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
+ if (profile)
+ return profile->m_counter;
+ return 0;
}
+ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
+{
+ return arithProfileForPC(instructions().begin() + bytecodeOffset);
+}
+
// Returns the ArithProfile embedded in the instruction at pc, or null for
// opcodes that carry no arithmetic profile. The profile is stored inline in a
// fixed operand slot: slot 3 for op_negate, slot 4 for the binary
// arithmetic/bitwise ops below — these indices must match the bytecode
// layout emitted by the bytecode generator.
ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
{
    auto opcodeID = vm()->interpreter->getOpcodeID(pc[0].u.opcode);
    switch (opcodeID) {
    case op_negate:
        return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
    case op_bitor:
    case op_bitand:
    case op_bitxor:
    case op_add:
    case op_mul:
    case op_sub:
    case op_div:
        return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
    default:
        break;
    }

    return nullptr;
}
+
+bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
+{
+ if (!hasBaselineJITProfiling())
+ return false;
+ ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
+ if (!profile)
+ return false;
+ return profile->tookSpecialFastPath();
+}
+
+#if ENABLE(JIT)
+DFG::CapabilityLevel CodeBlock::capabilityLevel()
+{
+ DFG::CapabilityLevel result = computeCapabilityLevel();
+ m_capabilityLevelState = result;
+ return result;
+}
+#endif
+
// Patches every op_profile_control_flow instruction so that its operand
// points at the BasicBlockLocation covering the source-text range of the
// basic block the instruction begins. No-op when the unlinked code block
// recorded no op_profile_control_flow offsets.
void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
{
    if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
        return;
    const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
    for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
        // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
        // the next op_profile_control_flow will give us the text range of a single basic block.
        size_t startIdx = bytecodeOffsets[i];
        RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow);
        int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
        int basicBlockEndOffset;
        if (i + 1 < offsetsLength) {
            size_t endIdx = bytecodeOffsets[i + 1];
            RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow);
            basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
        } else {
            // Last block: extend to the end of this code block's source text.
            basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
        }

        // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
        // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
        // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
        // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
        // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
        // program. The condition:
        // (basicBlockEndOffset < basicBlockStartOffset)
        // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
        // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
        // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
        // internal data structure, so if any of them execute, it will record the same textual basic block in the
        // JavaScript program as executing.
        // At the bytecode level, this situation looks like:
        // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
        // ...
        // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
        // ...
        // m: op_profile_control_flow
        if (basicBlockEndOffset < basicBlockStartOffset) {
            RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
            instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
            continue;
        }

        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);

        // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
        // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
        // This is necessary because in the original source text of a JavaScript program,
        // function literals form new basic blocks boundaries, but they aren't represented
        // inside the CodeBlock's instruction stream.
        auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
            const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
            int functionStart = executable->typeProfilingStartOffset();
            int functionEnd = executable->typeProfilingEndOffset();
            if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
                basicBlockLocation->insertGap(functionStart, functionEnd);
        };

        for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
            insertFunctionGaps(executable);
        for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
            insertFunctionGaps(executable);

        instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
    }
}
+
#if ENABLE(JIT)
// Takes ownership of the machine-PC -> CodeOrigin map produced by the JIT.
void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
{
    m_pcToCodeOriginMap = WTFMove(map);
}
+
// Maps a machine PC inside this block's generated code back to a CodeOrigin.
// Search order: the JIT's PC map (when one was installed), then the
// structure-stub (inline cache) code regions, then the JITCode object itself.
// Returns nullopt when the PC is not recognized by any of them.
std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
{
    if (m_pcToCodeOriginMap) {
        if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
            return codeOrigin;
    }

    // The PC may lie in code emitted for an inline cache stub rather than the
    // main code path.
    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
        StructureStubInfo* stub = *iter;
        if (stub->containsPC(pc))
            return std::optional<CodeOrigin>(stub->codeOrigin);
    }

    if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
        return codeOrigin;

    return std::nullopt;
}
#endif // ENABLE(JIT)
+
// Recovers the bytecode offset encoded in a CallSiteIndex. The encoding is
// tier-dependent: LLInt/Baseline store the offset directly on JSVALUE64
// builds and an Instruction* on JSVALUE32_64 builds, while DFG/FTL store an
// index into the code-origin table. Returns nullopt for any other JIT type.
std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
{
    std::optional<unsigned> bytecodeOffset;
    JITCode::JITType jitType = this->jitType();
    if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
#if USE(JSVALUE64)
        // 64-bit: the bits are the bytecode offset itself.
        bytecodeOffset = callSiteIndex.bits();
#else
        // 32-bit: the bits are a pointer into the instruction stream.
        Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
        bytecodeOffset = instruction - instructions().begin();
#endif
    } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
#if ENABLE(DFG_JIT)
        RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
        CodeOrigin origin = codeOrigin(callSiteIndex);
        bytecodeOffset = origin.bytecodeIndex;
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
    }

    return bytecodeOffset;
}
+
+int32_t CodeBlock::thresholdForJIT(int32_t threshold)
+{
+ switch (unlinkedCodeBlock()->didOptimize()) {
+ case MixedTriState:
+ return threshold;
+ case FalseTriState:
+ return threshold * 4;
+ case TrueTriState:
+ return threshold / 2;
+ }
+ ASSERT_NOT_REACHED();
+ return threshold;
+}
+
+void CodeBlock::jitAfterWarmUp()
+{
+ m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
+}
+
+void CodeBlock::jitSoon()
+{
+ m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
+}
+
// Debug dump of aggregate code-size statistics for every arithmetic math IC
// (add/mul/neg/sub inline caches) across all code blocks in the heap.
// Compiled to a no-op unless MATH_IC_STATS is enabled.
void CodeBlock::dumpMathICStats()
{
#if ENABLE(MATH_IC_STATS)
    // Doubles so the average computations below don't truncate.
    double numAdds = 0.0;
    double totalAddSize = 0.0;
    double numMuls = 0.0;
    double totalMulSize = 0.0;
    double numNegs = 0.0;
    double totalNegSize = 0.0;
    double numSubs = 0.0;
    double totalSubSize = 0.0;

    auto countICs = [&] (CodeBlock* codeBlock) {
        for (JITAddIC* addIC : codeBlock->m_addICs) {
            numAdds++;
            totalAddSize += addIC->codeSize();
        }

        for (JITMulIC* mulIC : codeBlock->m_mulICs) {
            numMuls++;
            totalMulSize += mulIC->codeSize();
        }

        for (JITNegIC* negIC : codeBlock->m_negICs) {
            numNegs++;
            totalNegSize += negIC->codeSize();
        }

        for (JITSubIC* subIC : codeBlock->m_subICs) {
            numSubs++;
            totalSubSize += subIC->codeSize();
        }

        // NOTE(review): false presumably means "keep iterating" for
        // forEachCodeBlock — confirm against Heap's callback contract.
        return false;
    };
    heap()->forEachCodeBlock(countICs);

    // NOTE(review): averages divide by the counts; with zero ICs this prints
    // nan — tolerable for a debug-only dump.
    dataLog("Num Adds: ", numAdds, "\n");
    dataLog("Total Add size in bytes: ", totalAddSize, "\n");
    dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
    dataLog("\n");
    dataLog("Num Muls: ", numMuls, "\n");
    dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
    dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
    dataLog("\n");
    dataLog("Num Negs: ", numNegs, "\n");
    dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
    dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
    dataLog("\n");
    dataLog("Num Subs: ", numSubs, "\n");
    dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
    dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");

    dataLog("-----------------------\n");
#endif
}
+
+BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
+{
+ std::unique_ptr<BytecodeLivenessAnalysis> analysis = std::make_unique<BytecodeLivenessAnalysis>(this);
+ {
+ ConcurrentJSLocker locker(m_lock);
+ if (!m_livenessAnalysis)
+ m_livenessAnalysis = WTFMove(analysis);
+ return *m_livenessAnalysis;
+ }
+}
+
+
} // namespace JSC