author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
download  WebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/JavaScriptCore/bytecode/GetByIdStatus.cpp')
-rw-r--r--  Source/JavaScriptCore/bytecode/GetByIdStatus.cpp  563
1 file changed, 358 insertions(+), 205 deletions(-)
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index fbb3da1a5..1537cd9b1 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,279 +27,432 @@
#include "GetByIdStatus.h"
#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "GetterSetterAccessCase.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JSCInlines.h"
#include "JSScope.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "ModuleNamespaceAccessCase.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+#include <wtf/ListDump.h>
namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
+{
+ // Attempt to merge this variant with an already existing variant.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].attemptToMerge(variant))
+ return true;
+ }
+
+ // Make sure there is no overlap. We should have pruned out opportunities for
+ // overlap but it's possible that an inline cache got into a weird state. We are
+ // defensive and bail if we detect crazy.
+ for (unsigned i = 0; i < m_variants.size(); ++i) {
+ if (m_variants[i].structureSet().overlaps(variant.structureSet()))
+ return false;
+ }
+
+ m_variants.append(variant);
+ return true;
+}
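
// Editorial aside: a minimal sketch of the merge-then-overlap-check discipline
// that appendVariant() above enforces. The types here are stand-ins (plain ints
// for structures, std::set for StructureSet), not the real JSC classes.
#include <algorithm>
#include <set>
#include <vector>

struct Variant {
    std::set<int> structures; // stand-in for StructureSet
    int offset;               // stand-in for PropertyOffset

    // Merging only succeeds when both variants describe the same load.
    bool attemptToMerge(const Variant& other)
    {
        if (offset != other.offset)
            return false;
        structures.insert(other.structures.begin(), other.structures.end());
        return true;
    }

    bool overlaps(const Variant& other) const
    {
        return std::any_of(other.structures.begin(), other.structures.end(),
            [this](int s) { return structures.count(s) != 0; });
    }
};

bool appendVariant(std::vector<Variant>& variants, const Variant& variant)
{
    for (auto& existing : variants) {
        if (existing.attemptToMerge(variant)) // fold into an existing variant
            return true;
    }
    for (auto& existing : variants) {
        if (existing.overlaps(variant)) // inconsistent cache state: bail defensively
            return false;
    }
    variants.push_back(variant);
    return true;
}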
+
+#if ENABLE(DFG_JIT)
+bool GetByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+ return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+ || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
-GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
- Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+ VM& vm = *profiledBlock->vm();
- if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length))
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+ Opcode opcode = instruction[0].u.opcode;
+
+ ASSERT(opcode == LLInt::getOpcode(op_get_array_length) || opcode == LLInt::getOpcode(op_try_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_proto_load) || opcode == LLInt::getOpcode(op_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_unset));
+
+ // FIXME: We should not just bail if we see a try_get_by_id or a get_by_id_proto_load.
+ // https://bugs.webkit.org/show_bug.cgi?id=158039
+ if (opcode != LLInt::getOpcode(op_get_by_id))
return GetByIdStatus(NoInformation, false);
- Structure* structure = instruction[4].u.structure.get();
- if (!structure)
+ StructureID structureID = instruction[4].u.structureID;
+ if (!structureID)
return GetByIdStatus(NoInformation, false);
+ Structure* structure = vm.heap.structureIDTable().get(structureID);
+
if (structure->takesSlowPathInDFGForImpureProperty())
return GetByIdStatus(NoInformation, false);
- unsigned attributesIgnored;
- JSCell* specificValue;
- PropertyOffset offset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
if (!isValidOffset(offset))
return GetByIdStatus(NoInformation, false);
+ if (attributes & CustomAccessor)
+ return GetByIdStatus(NoInformation, false);
- return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue);
-#else
- return GetByIdStatus(NoInformation, false);
-#endif
+ return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
}
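
// Editorial aside: the StructureID read from the instruction stream above is an
// index, not a pointer. A rough stand-in model of the table that
// vm.heap.structureIDTable() provides (simplified, not the real API); ID 0 means
// "nothing cached yet", which is why the code above bails when !structureID.
#include <cstdint>
#include <unordered_map>

struct Structure;
using StructureID = uint32_t;

struct StructureIDTable {
    std::unordered_map<StructureID, Structure*> table;

    Structure* get(StructureID id) const
    {
        auto it = table.find(id);
        return it == table.end() ? nullptr : it->second;
    }
};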
-void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
-#if ENABLE(JIT)
- // Validate the chain. If the chain is invalid, then currently the best thing
- // we can do is to assume that TakesSlow is true. In the future, it might be
- // worth exploring reifying the structure chain from the structure we've got
- // instead of using the one from the cache, since that will do the right things
- // if the structure chain has changed. But that may be harder, because we may
- // then end up having a different type of access altogether. And it currently
- // does not appear to be worth it to do so -- effectively, the heuristic we
- // have now is that if the structure chain has changed between when it was
- // cached on in the baseline JIT and when the DFG tried to inline the access,
- // then we fall back on a polymorphic access.
- if (!result.m_chain->isStillValid())
- return;
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
- if (result.m_chain->head()->takesSlowPathInDFGForImpureProperty())
- return;
- size_t chainSize = result.m_chain->size();
- for (size_t i = 0; i < chainSize; i++) {
- if (result.m_chain->at(i)->takesSlowPathInDFGForImpureProperty())
- return;
- }
+ GetByIdStatus result;
- JSObject* currentObject = result.m_chain->terminalPrototype();
- Structure* currentStructure = result.m_chain->last();
+#if ENABLE(DFG_JIT)
+ result = computeForStubInfoWithoutExitSiteFeedback(
+ locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
- ASSERT_UNUSED(currentObject, currentObject);
-
- unsigned attributesIgnored;
- JSCell* specificValue;
-
- result.m_offset = currentStructure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (currentStructure->isDictionary())
- specificValue = 0;
- if (!isValidOffset(result.m_offset))
- return;
-
- result.m_structureSet.add(result.m_chain->head());
- result.m_specificValue = JSValue(specificValue);
+ if (!result.takesSlowPath()
+ && hasExitSite(locker, profiledBlock, bytecodeIndex))
+ return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
#else
- UNUSED_PARAM(result);
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(uid);
- UNREACHABLE_FOR_PLATFORM();
+ UNUSED_PARAM(map);
#endif
+
+ if (!result)
+ return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+
+ return result;
}
-GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+#if ENABLE(DFG_JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
- ConcurrentJITLocker locker(profiledBlock->m_lock);
-
- UNUSED_PARAM(profiledBlock);
- UNUSED_PARAM(bytecodeIndex);
- UNUSED_PARAM(uid);
+ GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+ locker, profiledBlock, stubInfo, uid,
+ CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
+
+ if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+ return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
+ return result;
+}
+#endif // ENABLE(DFG_JIT)
+
#if ENABLE(JIT)
- StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
- if (!stubInfo || !stubInfo->seen)
- return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
-
- if (stubInfo->resetByGC)
- return GetByIdStatus(TakesSlowPath, true);
-
- PolymorphicAccessStructureList* list;
- int listSize;
- switch (stubInfo->accessType) {
- case access_get_by_id_self_list:
- list = stubInfo->u.getByIdSelfList.structureList;
- listSize = stubInfo->u.getByIdSelfList.listSize;
- break;
- case access_get_by_id_proto_list:
- list = stubInfo->u.getByIdProtoList.structureList;
- listSize = stubInfo->u.getByIdProtoList.listSize;
- break;
- default:
- list = 0;
- listSize = 0;
- break;
- }
- for (int i = 0; i < listSize; ++i) {
- if (!list->list[i].isDirect)
- return GetByIdStatus(MakesCalls, true);
+GetByIdStatus::GetByIdStatus(const ModuleNamespaceAccessCase& accessCase)
+ : m_state(ModuleNamespace)
+ , m_wasSeenInJIT(true)
+ , m_moduleNamespaceObject(accessCase.moduleNamespaceObject())
+ , m_moduleEnvironment(accessCase.moduleEnvironment())
+ , m_scopeOffset(accessCase.scopeOffset())
+{
+}
+
+GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+ const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
+ CallLinkStatus::ExitSiteData callExitSiteData)
+{
+ if (!stubInfo || !stubInfo->everConsidered)
+ return GetByIdStatus(NoInformation);
+
+ PolymorphicAccess* list = 0;
+ State slowPathState = TakesSlowPath;
+ if (stubInfo->cacheType == CacheType::Stub) {
+ list = stubInfo->u.stub;
+ for (unsigned i = 0; i < list->size(); ++i) {
+ const AccessCase& access = list->at(i);
+ if (access.doesCalls())
+ slowPathState = MakesCalls;
+ }
}
- // Next check if it takes slow case, in which case we want to be kind of careful.
- if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
- return GetByIdStatus(TakesSlowPath, true);
+ if (stubInfo->tookSlowPath)
+ return GetByIdStatus(slowPathState);
// Finally figure out if we can derive an access strategy.
GetByIdStatus result;
+ result.m_state = Simple;
result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
- switch (stubInfo->accessType) {
- case access_unset:
- return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+ switch (stubInfo->cacheType) {
+ case CacheType::Unset:
+ return GetByIdStatus(NoInformation);
- case access_get_by_id_self: {
- Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
+ case CacheType::GetByIdSelf: {
+ Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
if (structure->takesSlowPathInDFGForImpureProperty())
- return GetByIdStatus(TakesSlowPath, true);
- unsigned attributesIgnored;
- JSCell* specificValue;
- result.m_offset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ return GetByIdStatus(slowPathState, true);
+ unsigned attributes;
+ GetByIdVariant variant;
+ variant.m_offset = structure->getConcurrently(uid, attributes);
+ if (!isValidOffset(variant.m_offset))
+ return GetByIdStatus(slowPathState, true);
+ if (attributes & CustomAccessor)
+ return GetByIdStatus(slowPathState, true);
- if (isValidOffset(result.m_offset)) {
- result.m_structureSet.add(structure);
- result.m_specificValue = JSValue(specificValue);
- }
-
- if (isValidOffset(result.m_offset))
- ASSERT(result.m_structureSet.size());
- break;
+ variant.m_structureSet.add(structure);
+ bool didAppend = result.appendVariant(variant);
+ ASSERT_UNUSED(didAppend, didAppend);
+ return result;
}
- case access_get_by_id_self_list: {
- for (int i = 0; i < listSize; ++i) {
- ASSERT(list->list[i].isDirect);
-
- Structure* structure = list->list[i].base.get();
- if (structure->takesSlowPathInDFGForImpureProperty())
- return GetByIdStatus(TakesSlowPath, true);
+ case CacheType::Stub: {
+ if (list->size() == 1) {
+ const AccessCase& access = list->at(0);
+ switch (access.type()) {
+ case AccessCase::ModuleNamespaceLoad:
+ return GetByIdStatus(access.as<ModuleNamespaceAccessCase>());
+ default:
+ break;
+ }
+ }
- if (result.m_structureSet.contains(structure))
- continue;
-
- unsigned attributesIgnored;
- JSCell* specificValue;
- PropertyOffset myOffset = structure->getConcurrently(
- *profiledBlock->vm(), uid, attributesIgnored, specificValue);
- if (structure->isDictionary())
- specificValue = 0;
+ for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
+ const AccessCase& access = list->at(listIndex);
+ if (access.viaProxy())
+ return GetByIdStatus(slowPathState, true);
- if (!isValidOffset(myOffset)) {
- result.m_offset = invalidOffset;
- break;
+ Structure* structure = access.structure();
+ if (!structure) {
+ // The null structure cases arise due to array.length and string.length. We have no way
+ // of creating a GetByIdVariant for those, and we don't really have to since the DFG
+ // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
+ // shouldn't have to use value profiling to discover something that the AccessCase
+ // could have told us. But, it works well enough. So, our only concern here is to not
+ // crash on null structure.
+ return GetByIdStatus(slowPathState, true);
}
-
- if (!i) {
- result.m_offset = myOffset;
- result.m_specificValue = JSValue(specificValue);
- } else if (result.m_offset != myOffset) {
- result.m_offset = invalidOffset;
- break;
- } else if (result.m_specificValue != JSValue(specificValue))
- result.m_specificValue = JSValue();
- result.m_structureSet.add(structure);
+ ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+ structure, access.conditionSet(), uid);
+
+ switch (complexGetStatus.kind()) {
+ case ComplexGetStatus::ShouldSkip:
+ continue;
+
+ case ComplexGetStatus::TakesSlowPath:
+ return GetByIdStatus(slowPathState, true);
+
+ case ComplexGetStatus::Inlineable: {
+ std::unique_ptr<CallLinkStatus> callLinkStatus;
+ JSFunction* intrinsicFunction = nullptr;
+ DOMJIT::GetterSetter* domJIT = nullptr;
+
+ switch (access.type()) {
+ case AccessCase::Load:
+ case AccessCase::GetGetter:
+ case AccessCase::Miss: {
+ break;
+ }
+ case AccessCase::IntrinsicGetter: {
+ intrinsicFunction = access.as<IntrinsicGetterAccessCase>().intrinsicFunction();
+ break;
+ }
+ case AccessCase::Getter: {
+ callLinkStatus = std::make_unique<CallLinkStatus>();
+ if (CallLinkInfo* callLinkInfo = access.as<GetterSetterAccessCase>().callLinkInfo()) {
+ *callLinkStatus = CallLinkStatus::computeFor(
+ locker, profiledBlock, *callLinkInfo, callExitSiteData);
+ }
+ break;
+ }
+ case AccessCase::CustomAccessorGetter: {
+ domJIT = access.as<GetterSetterAccessCase>().domJIT();
+ if (!domJIT)
+ return GetByIdStatus(slowPathState, true);
+ result.m_state = Custom;
+ break;
+ }
+ default: {
+ // FIXME: It would be totally sweet to support more of these at some point in the
+ // future. https://bugs.webkit.org/show_bug.cgi?id=133052
+ return GetByIdStatus(slowPathState, true);
+ } }
+
+ ASSERT((AccessCase::Miss == access.type()) == (access.offset() == invalidOffset));
+ GetByIdVariant variant(
+ StructureSet(structure), complexGetStatus.offset(),
+ complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
+ intrinsicFunction,
+ domJIT);
+
+ if (!result.appendVariant(variant))
+ return GetByIdStatus(slowPathState, true);
+
+ if (domJIT) {
+ // Give up when custom accesses are not merged into one.
+ if (result.numVariants() != 1)
+ return GetByIdStatus(slowPathState, true);
+ } else {
+ // Give up when custom access and simple access are mixed.
+ if (result.m_state == Custom)
+ return GetByIdStatus(slowPathState, true);
+ }
+ break;
+ } }
}
-
- if (isValidOffset(result.m_offset))
- ASSERT(result.m_structureSet.size());
- break;
- }
- case access_get_by_id_proto: {
- if (!stubInfo->u.getByIdProto.isDirect)
- return GetByIdStatus(MakesCalls, true);
- result.m_chain = adoptRef(new IntendedStructureChain(
- profiledBlock,
- stubInfo->u.getByIdProto.baseObjectStructure.get(),
- stubInfo->u.getByIdProto.prototypeStructure.get()));
- computeForChain(result, profiledBlock, uid);
- break;
- }
-
- case access_get_by_id_chain: {
- if (!stubInfo->u.getByIdChain.isDirect)
- return GetByIdStatus(MakesCalls, true);
- result.m_chain = adoptRef(new IntendedStructureChain(
- profiledBlock,
- stubInfo->u.getByIdChain.baseObjectStructure.get(),
- stubInfo->u.getByIdChain.chain.get(),
- stubInfo->u.getByIdChain.count));
- computeForChain(result, profiledBlock, uid);
- break;
+ return result;
}
default:
- ASSERT(!isValidOffset(result.m_offset));
- break;
+ return GetByIdStatus(slowPathState, true);
}
- if (!isValidOffset(result.m_offset)) {
- result.m_state = TakesSlowPath;
- result.m_structureSet.clear();
- result.m_chain.clear();
- result.m_specificValue = JSValue();
- } else
- result.m_state = Simple;
-
- return result;
-#else // ENABLE(JIT)
- UNUSED_PARAM(map);
- return GetByIdStatus(NoInformation, false);
+ RELEASE_ASSERT_NOT_REACHED();
+ return GetByIdStatus();
+}
#endif // ENABLE(JIT)
+
+GetByIdStatus GetByIdStatus::computeFor(
+ CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
+ StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+ if (dfgBlock) {
+ CallLinkStatus::ExitSiteData exitSiteData;
+ {
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
+ exitSiteData = CallLinkStatus::computeExitSiteData(
+ locker, profiledBlock, codeOrigin.bytecodeIndex);
+ }
+
+ GetByIdStatus result;
+ {
+ ConcurrentJSLocker locker(dfgBlock->m_lock);
+ result = computeForStubInfoWithoutExitSiteFeedback(
+ locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+ }
+
+ if (result.takesSlowPath())
+ return result;
+
+ {
+ ConcurrentJSLocker locker(profiledBlock->m_lock);
+ if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+ return GetByIdStatus(TakesSlowPath, true);
+ }
+
+ if (result.isSet())
+ return result;
+ }
+#else
+ UNUSED_PARAM(dfgBlock);
+ UNUSED_PARAM(dfgMap);
+#endif
+
+ return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
-GetByIdStatus GetByIdStatus::computeFor(VM& vm, Structure* structure, StringImpl* uid)
+GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
{
// For now we only handle the super simple self access case. We could handle the
// prototype case in the future.
- if (!structure)
- return GetByIdStatus(TakesSlowPath);
+ if (set.isEmpty())
+ return GetByIdStatus();
- if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
- return GetByIdStatus(TakesSlowPath);
-
- if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ if (parseIndex(*uid))
return GetByIdStatus(TakesSlowPath);
- if (!structure->propertyAccessesAreCacheable())
- return GetByIdStatus(TakesSlowPath);
-
GetByIdStatus result;
- result.m_wasSeenInJIT = false; // To my knowledge nobody that uses computeFor(VM&, Structure*, StringImpl*) reads this field, but I might as well be honest: no, it wasn't seen in the JIT, since I computed it statically.
- unsigned attributes;
- JSCell* specificValue;
- result.m_offset = structure->getConcurrently(vm, uid, attributes, specificValue);
- if (!isValidOffset(result.m_offset))
- return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
- if (attributes & Accessor)
- return GetByIdStatus(MakesCalls);
- if (structure->isDictionary())
- specificValue = 0;
- result.m_structureSet.add(structure);
- result.m_specificValue = JSValue(specificValue);
result.m_state = Simple;
+ result.m_wasSeenInJIT = false;
+ for (unsigned i = 0; i < set.size(); ++i) {
+ Structure* structure = set[i];
+ if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+ return GetByIdStatus(TakesSlowPath);
+
+ if (!structure->propertyAccessesAreCacheable())
+ return GetByIdStatus(TakesSlowPath);
+
+ unsigned attributes;
+ PropertyOffset offset = structure->getConcurrently(uid, attributes);
+ if (!isValidOffset(offset))
+ return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+ if (attributes & Accessor)
+ return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+ if (attributes & CustomAccessor)
+ return GetByIdStatus(TakesSlowPath);
+
+ if (!result.appendVariant(GetByIdVariant(structure, offset)))
+ return GetByIdStatus(TakesSlowPath);
+ }
+
return result;
}
+bool GetByIdStatus::makesCalls() const
+{
+ switch (m_state) {
+ case NoInformation:
+ case TakesSlowPath:
+ case Custom:
+ case ModuleNamespace:
+ return false;
+ case Simple:
+ for (unsigned i = m_variants.size(); i--;) {
+ if (m_variants[i].callLinkStatus())
+ return true;
+ }
+ return false;
+ case MakesCalls:
+ return true;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+
+ return false;
+}
+
+void GetByIdStatus::filter(const StructureSet& set)
+{
+ if (m_state != Simple)
+ return;
+
+ // FIXME: We could also filter the variants themselves.
+
+ m_variants.removeAllMatching(
+ [&] (GetByIdVariant& variant) -> bool {
+ return !variant.structureSet().overlaps(set);
+ });
+
+ if (m_variants.isEmpty())
+ m_state = NoInformation;
+}
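
// Editorial aside: a hedged usage note for filter(). It is meant for narrowing
// after the compiler has proven the base's possible structures; checkedSet and
// narrowStatus below are hypothetical.
void narrowStatus(GetByIdStatus& status, const StructureSet& checkedSet)
{
    status.filter(checkedSet);
    // If every variant was ruled out, the state degrades to NoInformation
    // (see above) and the caller should treat the access as unprofiled.
}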
+
+void GetByIdStatus::dump(PrintStream& out) const
+{
+ out.print("(");
+ switch (m_state) {
+ case NoInformation:
+ out.print("NoInformation");
+ break;
+ case Simple:
+ out.print("Simple");
+ break;
+ case Custom:
+ out.print("Custom");
+ break;
+ case ModuleNamespace:
+ out.print("ModuleNamespace");
+ break;
+ case TakesSlowPath:
+ out.print("TakesSlowPath");
+ break;
+ case MakesCalls:
+ out.print("MakesCalls");
+ break;
+ }
+ out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
+}
+
} // namespace JSC