Diffstat (limited to 'Source/JavaScriptCore/bytecode/PutByIdStatus.cpp')
-rw-r--r-- | Source/JavaScriptCore/bytecode/PutByIdStatus.cpp | 461 |
1 file changed, 324 insertions, 137 deletions
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 17cf70897..fdadf7022 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,206 +27,393 @@
 #include "PutByIdStatus.h"
 
 #include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "GetterSetterAccessCase.h"
 #include "LLIntData.h"
 #include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
 #include "Structure.h"
 #include "StructureChain.h"
+#include "StructureStubInfo.h"
+#include <wtf/ListDump.h>
 
 namespace JSC {
 
-PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
+bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
+{
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].attemptToMerge(variant))
+            return true;
+    }
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
+            return false;
+    }
+    m_variants.append(variant);
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool PutByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+
+}
+#endif
+
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
-#if ENABLE(LLINT)
+
+    VM& vm = *profiledBlock->vm();
+
     Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
 
-    Structure* structure = instruction[4].u.structure.get();
-    if (!structure)
-        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+    StructureID structureID = instruction[4].u.structureID;
+    if (!structureID)
+        return PutByIdStatus(NoInformation);
 
-    if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_out_of_line)) {
-        PropertyOffset offset = structure->getConcurrently(*profiledBlock->vm(), uid);
+    Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+    StructureID newStructureID = instruction[6].u.structureID;
+    if (!newStructureID) {
+        PropertyOffset offset = structure->getConcurrently(uid);
         if (!isValidOffset(offset))
-            return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+            return PutByIdStatus(NoInformation);
 
-        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
+        return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
     }
+
+    Structure* newStructure = vm.heap.structureIDTable().get(newStructureID);
 
     ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());
-    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line)
-        || instruction[0].u.opcode == LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line));
-
-    Structure* newStructure = instruction[6].u.structure.get();
-    StructureChain* chain = instruction[7].u.structureChain.get();
-    ASSERT(newStructure);
-    ASSERT(chain);
-
-    PropertyOffset offset = newStructure->getConcurrently(*profiledBlock->vm(), uid);
+    PropertyOffset offset = newStructure->getConcurrently(uid);
     if (!isValidOffset(offset))
-        return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(NoInformation);
 
-    return PutByIdStatus(
-        SimpleTransition, structure, newStructure,
-        chain ? adoptRef(new IntendedStructureChain(profiledBlock, structure, chain)) : 0,
-        offset);
-#else
-    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif
+    ObjectPropertyConditionSet conditionSet;
+    if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) {
+        conditionSet =
+            generateConditionsForPropertySetterMissConcurrently(
+                *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid);
+        if (!conditionSet.isValid())
+            return PutByIdStatus(NoInformation);
+    }
+
+    return PutByIdVariant::transition(
+        structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid));
 }
 
-PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
 {
-    ConcurrentJITLocker locker(profiledBlock->m_lock);
+    ConcurrentJSLocker locker(profiledBlock->m_lock);
 
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
     UNUSED_PARAM(uid);
-#if ENABLE(JIT)
-    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(DFG_JIT)
+    if (hasExitSite(locker, profiledBlock, bytecodeIndex))
+        return PutByIdStatus(TakesSlowPath);
 
     StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
-    if (!stubInfo || !stubInfo->seen)
+    PutByIdStatus result = computeForStubInfo(
+        locker, profiledBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+    if (!result)
         return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
 
-    if (stubInfo->resetByGC)
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+    return result;
+#else // ENABLE(JIT)
+    UNUSED_PARAM(map);
+    return PutByIdStatus(NoInformation);
+#endif // ENABLE(JIT)
+}
 
-    switch (stubInfo->accessType) {
-    case access_unset:
-        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+#if ENABLE(JIT)
+PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* baselineBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+    return computeForStubInfo(
+        locker, baselineBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, baselineBlock, codeOrigin.bytecodeIndex));
+}
+
+PutByIdStatus PutByIdStatus::computeForStubInfo(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
+    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
+{
+    if (!stubInfo || !stubInfo->everConsidered)
+        return PutByIdStatus();
+
+    if (stubInfo->tookSlowPath)
+        return PutByIdStatus(TakesSlowPath);
+
+    switch (stubInfo->cacheType) {
+    case CacheType::Unset:
+        // This means that we attempted to cache but failed for some reason.
+        return PutByIdStatus(TakesSlowPath);
 
-    case access_put_by_id_replace: {
+    case CacheType::PutByIdReplace: {
         PropertyOffset offset =
-            stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(
-                *profiledBlock->vm(), uid);
+            stubInfo->u.byIdSelf.baseObjectStructure->getConcurrently(uid);
         if (isValidOffset(offset)) {
-            return PutByIdStatus(
-                SimpleReplace,
-                stubInfo->u.putByIdReplace.baseObjectStructure.get(),
-                0, 0,
-                offset);
+            return PutByIdVariant::replace(
+                stubInfo->u.byIdSelf.baseObjectStructure.get(), offset, InferredType::Top);
         }
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(TakesSlowPath);
     }
 
-    case access_put_by_id_transition_normal:
-    case access_put_by_id_transition_direct: {
-        ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
-        PropertyOffset offset =
-            stubInfo->u.putByIdTransition.structure->getConcurrently(
-                *profiledBlock->vm(), uid);
-        if (isValidOffset(offset)) {
-            return PutByIdStatus(
-                SimpleTransition,
-                stubInfo->u.putByIdTransition.previousStructure.get(),
-                stubInfo->u.putByIdTransition.structure.get(),
-                stubInfo->u.putByIdTransition.chain ? adoptRef(new IntendedStructureChain(
-                    profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
-                    stubInfo->u.putByIdTransition.chain.get())) : 0,
-                offset);
+    case CacheType::Stub: {
+        PolymorphicAccess* list = stubInfo->u.stub;
+
+        PutByIdStatus result;
+        result.m_state = Simple;
+
+        State slowPathState = TakesSlowPath;
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.doesCalls())
+                slowPathState = MakesCalls;
         }
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.viaProxy())
+                return PutByIdStatus(slowPathState);
+
+            PutByIdVariant variant;
+
+            switch (access.type()) {
+            case AccessCase::Replace: {
+                Structure* structure = access.structure();
+                PropertyOffset offset = structure->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::replace(
+                    structure, offset, structure->inferredTypeDescriptorFor(uid));
+                break;
+            }
+
+            case AccessCase::Transition: {
+                PropertyOffset offset =
+                    access.newStructure()->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                ObjectPropertyConditionSet conditionSet = access.conditionSet();
+                if (!conditionSet.structuresEnsureValidity())
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::transition(
+                    access.structure(), access.newStructure(), conditionSet, offset,
+                    access.newStructure()->inferredTypeDescriptorFor(uid));
+                break;
+            }
+
+            case AccessCase::Setter: {
+                Structure* structure = access.structure();
+
+                ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+                    structure, access.conditionSet(), uid);
+
+                switch (complexGetStatus.kind()) {
+                case ComplexGetStatus::ShouldSkip:
+                    continue;
+
+                case ComplexGetStatus::TakesSlowPath:
+                    return PutByIdStatus(slowPathState);
+
+                case ComplexGetStatus::Inlineable: {
+                    std::unique_ptr<CallLinkStatus> callLinkStatus =
+                        std::make_unique<CallLinkStatus>();
+                    if (CallLinkInfo* callLinkInfo = access.as<GetterSetterAccessCase>().callLinkInfo()) {
+                        *callLinkStatus = CallLinkStatus::computeFor(
+                            locker, profiledBlock, *callLinkInfo, callExitSiteData);
+                    }
+
+                    variant = PutByIdVariant::setter(
+                        structure, complexGetStatus.offset(), complexGetStatus.conditionSet(),
+                        WTFMove(callLinkStatus));
+                } }
+                break;
+            }
+
+            case AccessCase::CustomValueSetter:
+            case AccessCase::CustomAccessorSetter:
+                return PutByIdStatus(MakesCalls);
+
+            default:
+                return PutByIdStatus(slowPathState);
+            }
+
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(slowPathState);
+        }
+
+        return result;
     }
 
     default:
-        // FIXME: We should handle polymorphic PutById. We probably have some interesting things
-        // we could do about it.
-        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
+        return PutByIdStatus(TakesSlowPath);
     }
-#else // ENABLE(JIT)
-    UNUSED_PARAM(map);
-    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
-#endif // ENABLE(JIT)
 }
+#endif
 
-PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, StringImpl* uid, bool isDirect)
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
 {
-    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
-        return PutByIdStatus(TakesSlowPath);
+#if ENABLE(DFG_JIT)
+    if (dfgBlock) {
+        CallLinkStatus::ExitSiteData exitSiteData;
+        {
+            ConcurrentJSLocker locker(baselineBlock->m_lock);
+            if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
+                return PutByIdStatus(TakesSlowPath);
+            exitSiteData = CallLinkStatus::computeExitSiteData(
+                locker, baselineBlock, codeOrigin.bytecodeIndex);
+        }
+
+        PutByIdStatus result;
+        {
+            ConcurrentJSLocker locker(dfgBlock->m_lock);
+            result = computeForStubInfo(
+                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+        }
+
+        // We use TakesSlowPath in some cases where the stub was unset. That's weird and
+        // it would be better not to do that. But it means that we have to defend
+        // ourselves here.
+        if (result.isSimple())
+            return result;
+    }
+#else
+    UNUSED_PARAM(dfgBlock);
+    UNUSED_PARAM(dfgMap);
+#endif
+
+    return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
 
-    if (!structure)
+PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
+{
+    if (parseIndex(*uid))
         return PutByIdStatus(TakesSlowPath);
+
+    if (set.isEmpty())
+        return PutByIdStatus();
 
-    if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
-        return PutByIdStatus(TakesSlowPath);
+    PutByIdStatus result;
+    result.m_state = Simple;
+    for (unsigned i = 0; i < set.size(); ++i) {
+        Structure* structure = set[i];
+
+        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+            return PutByIdStatus(TakesSlowPath);
 
-    if (!structure->propertyAccessesAreCacheable())
-        return PutByIdStatus(TakesSlowPath);
+        if (!structure->propertyAccessesAreCacheable())
+            return PutByIdStatus(TakesSlowPath);
+
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (isValidOffset(offset)) {
+            if (attributes & CustomAccessor)
+                return PutByIdStatus(MakesCalls);
+
+            if (attributes & (Accessor | ReadOnly))
+                return PutByIdStatus(TakesSlowPath);
+
+            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
+            if (!replaceSet || replaceSet->isStillValid()) {
+                // When this executes, it'll create, and fire, this replacement watchpoint set.
+                // That means that this has probably never executed or that something fishy is
+                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
+                // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
+                // So, better leave this alone and take slow path.
+                return PutByIdStatus(TakesSlowPath);
+            }
+
+            PutByIdVariant variant =
+                PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(TakesSlowPath);
+            continue;
+        }
+
+        // Our hypothesis is that we're doing a transition. Before we prove that this is really
+        // true, we want to do some sanity checks.
 
-    unsigned attributes;
-    JSCell* specificValue;
-    PropertyOffset offset = structure->getConcurrently(vm, uid, attributes, specificValue);
-    if (isValidOffset(offset)) {
-        if (attributes & (Accessor | ReadOnly))
+        // Don't cache put transitions on dictionaries.
+        if (structure->isDictionary())
             return PutByIdStatus(TakesSlowPath);
-        if (specificValue) {
-            // We need the PutById slow path to verify that we're storing the right value into
-            // the specialized slot.
+
+        // If the structure corresponds to something that isn't an object, then give up, since
+        // we don't want to be adding properties to strings.
+        if (!structure->typeInfo().isObject())
             return PutByIdStatus(TakesSlowPath);
+
+        ObjectPropertyConditionSet conditionSet;
+        if (!isDirect) {
+            conditionSet = generateConditionsForPropertySetterMissConcurrently(
+                globalObject->vm(), globalObject, structure, uid);
+            if (!conditionSet.isValid())
+                return PutByIdStatus(TakesSlowPath);
         }
-        return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
-    }
 
-    // Our hypothesis is that we're doing a transition. Before we prove that this is really
-    // true, we want to do some sanity checks.
+        // We only optimize if there is already a structure that the transition is cached to.
+        Structure* transition =
+            Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
+        if (!transition)
+            return PutByIdStatus(TakesSlowPath);
+        ASSERT(isValidOffset(offset));
 
-    // Don't cache put transitions on dictionaries.
-    if (structure->isDictionary())
-        return PutByIdStatus(TakesSlowPath);
+        bool didAppend = result.appendVariant(
+            PutByIdVariant::transition(
+                structure, transition, conditionSet, offset,
+                transition->inferredTypeDescriptorFor(uid)));
+        if (!didAppend)
+            return PutByIdStatus(TakesSlowPath);
+    }
+
+    return result;
+}
 
-    // If the structure corresponds to something that isn't an object, then give up, since
-    // we don't want to be adding properties to strings.
-    if (structure->typeInfo().type() == StringType)
-        return PutByIdStatus(TakesSlowPath);
+bool PutByIdStatus::makesCalls() const
+{
+    if (m_state == MakesCalls)
+        return true;
+
+    if (m_state != Simple)
+        return false;
+
+    for (unsigned i = m_variants.size(); i--;) {
+        if (m_variants[i].makesCalls())
+            return true;
+    }
 
-    RefPtr<IntendedStructureChain> chain;
-    if (!isDirect) {
-        chain = adoptRef(new IntendedStructureChain(globalObject, structure));
+    return false;
+}
+
+void PutByIdStatus::dump(PrintStream& out) const
+{
+    switch (m_state) {
+    case NoInformation:
+        out.print("(NoInformation)");
+        return;
 
-        // If the prototype chain has setters or read-only properties, then give up.
-        if (chain->mayInterceptStoreTo(vm, uid))
-            return PutByIdStatus(TakesSlowPath);
+    case Simple:
+        out.print("(", listDump(m_variants), ")");
+        return;
 
-        // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
-        // then give up. The dictionary case would only happen if this structure has not been
-        // used in an optimized put_by_id transition. And really the only reason why we would
-        // bail here is that I don't really feel like having the optimizing JIT go and flatten
-        // dictionaries if we have evidence to suggest that those objects were never used as
-        // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
-        // the other checks below will fail.
-        if (!chain->isNormalized())
-            return PutByIdStatus(TakesSlowPath);
+    case TakesSlowPath:
+        out.print("(TakesSlowPath)");
+        return;
+    case MakesCalls:
+        out.print("(MakesCalls)");
+        return;
     }
 
-    // We only optimize if there is already a structure that the transition is cached to.
-    // Among other things, this allows us to guard against a transition with a specific
-    // value.
-    //
-    // - If we're storing a value that could be specific: this would only be a problem if
-    //   the existing transition did have a specific value already, since if it didn't,
-    //   then we would behave "as if" we were not storing a specific value. If it did
-    //   have a specific value, then we'll know - the fact that we pass 0 for
-    //   specificValue will tell us.
-    //
-    // - If we're not storing a value that could be specific: again, this would only be a
-    //   problem if the existing transition did have a specific value, which we check for
-    //   by passing 0 for the specificValue.
-    Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, 0, offset);
-    if (!transition)
-        return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above.
-    ASSERT(!transition->transitionDidInvolveSpecificValue());
-    ASSERT(isValidOffset(offset));
-
-    return PutByIdStatus(SimpleTransition, structure, transition, chain.release(), offset);
+    RELEASE_ASSERT_NOT_REACHED();
 }
 
 } // namespace JSC
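
The core policy change in this patch is that a put-by-id status is now a list of PutByIdVariants rather than a single Simple case, with PutByIdStatus::appendVariant() deciding whether a new variant may join the list: merge it into an existing variant if possible; otherwise reject it if its structure set overlaps any variant already recorded; otherwise append it. Merging is tried before the overlap check so that repeated compatible cases collapse into one variant, while the overlap check keeps two different variants from ambiguously claiming the same structure. Below is a minimal standalone sketch of that policy; StructureSet, Variant, and Status here are simplified stand-ins, not JSC's real classes, and the merge rule is deliberately cruder than PutByIdVariant::attemptToMerge(), which also checks offsets and condition sets.

// Standalone sketch of the appendVariant() policy: merge if possible,
// reject on overlap, otherwise append. Simplified stand-in types.
#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-in for JSC's StructureSet: a small set of opaque structure IDs.
struct StructureSet {
    std::vector<int> ids;

    bool contains(int id) const
    {
        return std::find(ids.begin(), ids.end(), id) != ids.end();
    }

    bool overlaps(const StructureSet& other) const
    {
        for (int id : ids) {
            if (other.contains(id))
                return true;
        }
        return false;
    }
};

// Stand-in for PutByIdVariant: a kind tag plus the structures the variant
// covers. Real variants also carry offsets and condition sets, so their
// merge rule is correspondingly stricter.
struct Variant {
    enum Kind { Replace, Transition };

    Kind kind;
    StructureSet oldStructure;

    // Crude merge rule: same-kind variants merge by unioning structure sets.
    bool attemptToMerge(const Variant& other)
    {
        if (kind != other.kind)
            return false;
        for (int id : other.oldStructure.ids) {
            if (!oldStructure.contains(id))
                oldStructure.ids.push_back(id);
        }
        return true;
    }
};

// Mirrors the shape of PutByIdStatus::appendVariant() above.
struct Status {
    std::vector<Variant> variants;

    bool appendVariant(const Variant& variant)
    {
        // First, see if an existing variant can absorb the new one.
        for (Variant& existing : variants) {
            if (existing.attemptToMerge(variant))
                return true;
        }
        // Otherwise the new variant must not overlap any recorded variant;
        // two variants claiming the same structure would be ambiguous.
        for (const Variant& existing : variants) {
            if (existing.oldStructure.overlaps(variant.oldStructure))
                return false;
        }
        variants.push_back(variant);
        return true;
    }
};

int main()
{
    Status status;
    std::printf("%d\n", status.appendVariant({ Variant::Replace, { { 1 } } }));    // 1: appended
    std::printf("%d\n", status.appendVariant({ Variant::Replace, { { 2 } } }));    // 1: merged, now covers {1, 2}
    std::printf("%d\n", status.appendVariant({ Variant::Transition, { { 2 } } })); // 0: overlaps structure 2
    return 0;
}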