| author | Matheus Marchini <mmarchini@netflix.com> | 2020-03-05 10:49:19 -0800 |
|---|---|---|
| committer | Matheus Marchini <mmarchini@netflix.com> | 2020-03-18 16:23:22 -0700 |
| commit | 2883c855e0105b51e5c8020d21458af109ffe3d4 (patch) | |
| tree | 26777aad0a398e9f7755c8b65ac76827fe352a81 /deps/v8/src/compiler/backend/instruction-scheduler.cc | |
| parent | 5f0af2af2a67216e00fe07ccda11e889d14abfcd (diff) | |
| download | node-new-2883c855e0105b51e5c8020d21458af109ffe3d4.tar.gz | |
deps: update V8 to 8.1.307.20
PR-URL: https://github.com/nodejs/node/pull/32116
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Ruben Bridgewater <ruben@bridgewater.de>
Diffstat (limited to 'deps/v8/src/compiler/backend/instruction-scheduler.cc')
-rw-r--r-- | deps/v8/src/compiler/backend/instruction-scheduler.cc | 68

1 file changed, 48 insertions, 20 deletions
```diff
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index d4920cd575..dd4bca4d81 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -5,8 +5,8 @@
 #include "src/compiler/backend/instruction-scheduler.h"
 
 #include "src/base/iterator.h"
+#include "src/base/optional.h"
 #include "src/base/utils/random-number-generator.h"
-#include "src/execution/isolate.h"
 
 namespace v8 {
 namespace internal {
@@ -50,7 +50,7 @@ InstructionScheduler::StressSchedulerQueue::PopBestCandidate(int cycle) {
   DCHECK(!IsEmpty());
   // Choose a random element from the ready list.
   auto candidate = nodes_.begin();
-  std::advance(candidate, isolate()->random_number_generator()->NextInt(
+  std::advance(candidate, random_number_generator()->NextInt(
                               static_cast<int>(nodes_.size())));
   ScheduleGraphNode* result = *candidate;
   nodes_.erase(candidate);
@@ -81,7 +81,12 @@ InstructionScheduler::InstructionScheduler(Zone* zone,
       pending_loads_(zone),
       last_live_in_reg_marker_(nullptr),
       last_deopt_or_trap_(nullptr),
-      operands_map_(zone) {}
+      operands_map_(zone) {
+  if (FLAG_turbo_stress_instruction_scheduling) {
+    random_number_generator_ =
+        base::Optional<base::RandomNumberGenerator>(FLAG_random_seed);
+  }
+}
 
 void InstructionScheduler::StartBlock(RpoNumber rpo) {
   DCHECK(graph_.empty());
@@ -95,17 +100,11 @@ void InstructionScheduler::StartBlock(RpoNumber rpo) {
 
 void InstructionScheduler::EndBlock(RpoNumber rpo) {
   if (FLAG_turbo_stress_instruction_scheduling) {
-    ScheduleBlock<StressSchedulerQueue>();
+    Schedule<StressSchedulerQueue>();
   } else {
-    ScheduleBlock<CriticalPathFirstQueue>();
+    Schedule<CriticalPathFirstQueue>();
   }
   sequence()->EndBlock(rpo);
-  graph_.clear();
-  last_side_effect_instr_ = nullptr;
-  pending_loads_.clear();
-  last_live_in_reg_marker_ = nullptr;
-  last_deopt_or_trap_ = nullptr;
-  operands_map_.clear();
 }
 
 void InstructionScheduler::AddTerminator(Instruction* instr) {
@@ -119,6 +118,16 @@ void InstructionScheduler::AddTerminator(Instruction* instr) {
 }
 
 void InstructionScheduler::AddInstruction(Instruction* instr) {
+  if (IsBarrier(instr)) {
+    if (FLAG_turbo_stress_instruction_scheduling) {
+      Schedule<StressSchedulerQueue>();
+    } else {
+      Schedule<CriticalPathFirstQueue>();
+    }
+    sequence()->AddInstruction(instr);
+    return;
+  }
+
   ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
 
   // We should not have branches in the middle of a block.
@@ -197,7 +206,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
 }
 
 template <typename QueueType>
-void InstructionScheduler::ScheduleBlock() {
+void InstructionScheduler::Schedule() {
   QueueType ready_list(this);
 
   // Compute total latencies so that we can schedule the critical path first.
@@ -231,11 +240,20 @@ void InstructionScheduler::ScheduleBlock() {
 
     cycle++;
   }
+
+  // Reset own state.
+  graph_.clear();
+  operands_map_.clear();
+  pending_loads_.clear();
+  last_deopt_or_trap_ = nullptr;
+  last_live_in_reg_marker_ = nullptr;
+  last_side_effect_instr_ = nullptr;
 }
 
 int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
   switch (instr->arch_opcode()) {
     case kArchNop:
+    case kArchStackCheckOffset:
     case kArchFramePointer:
     case kArchParentFramePointer:
     case kArchStackSlot:  // Despite its name this opcode will produce a
@@ -287,22 +305,32 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
       return kHasSideEffect;
 
     case kArchPrepareCallCFunction:
-    case kArchSaveCallerRegisters:
-    case kArchRestoreCallerRegisters:
     case kArchPrepareTailCall:
-    case kArchCallCFunction:
-    case kArchCallCodeObject:
-    case kArchCallJSFunction:
-    case kArchCallWasmFunction:
-    case kArchCallBuiltinPointer:
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject:
     case kArchTailCallAddress:
     case kArchTailCallWasm:
     case kArchAbortCSAAssert:
-    case kArchDebugBreak:
       return kHasSideEffect;
 
+    case kArchDebugBreak:
+      return kIsBarrier;
+
+    case kArchSaveCallerRegisters:
+    case kArchRestoreCallerRegisters:
+      return kIsBarrier;
+
+    case kArchCallCFunction:
+    case kArchCallCodeObject:
+    case kArchCallJSFunction:
+    case kArchCallWasmFunction:
+    case kArchCallBuiltinPointer:
+      // Calls can cause GC and GC may relocate objects. If a pure instruction
+      // operates on a tagged pointer that was cast to a word then it may be
+      // incorrect to move the instruction across the call. Hence we mark all
+      // (non-tail-)calls as barriers.
+      return kIsBarrier;
+
     case kArchStoreWithWriteBarrier:
       return kHasSideEffect;
```
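
The central change in this diff is the new kIsBarrier flag: when AddInstruction sees a barrier (a non-tail call, kArchDebugBreak, or a caller-register save/restore), it first schedules everything buffered so far and then emits the barrier in place, so no instruction is ever moved across it. The following is a minimal standalone sketch of that control flow only; MiniScheduler, Instr, and is_barrier are invented names for illustration, and the sketch omits V8's dependency graph, latency tracking, and side-effect analysis.

```cpp
// Minimal sketch of the "scheduling barrier" idea from the diff above.
// All names here are hypothetical, not V8's actual API.
#include <iostream>
#include <string>
#include <vector>

struct Instr {
  std::string name;
  bool is_barrier;  // e.g. a call: nothing may be reordered across it
};

class MiniScheduler {
 public:
  void AddInstruction(const Instr& instr) {
    if (instr.is_barrier) {
      // Mirror the diff: flush everything buffered so far, then emit the
      // barrier itself unscheduled. No instruction crosses the barrier.
      Schedule();
      Emit(instr);
      return;
    }
    pending_.push_back(instr);
  }

  void EndBlock() { Schedule(); }

 private:
  void Schedule() {
    // A real scheduler would reorder pending_ here (critical path first,
    // or randomly in stress mode); this sketch just flushes in order.
    for (const Instr& i : pending_) Emit(i);
    pending_.clear();  // reset own state, as the diff moves into Schedule()
  }

  void Emit(const Instr& i) { std::cout << i.name << "\n"; }

  std::vector<Instr> pending_;
};

int main() {
  MiniScheduler s;
  s.AddInstruction({"load r1", false});
  s.AddInstruction({"add r2, r1", false});
  s.AddInstruction({"call f", true});  // barrier: flushes "load", "add" first
  s.AddInstruction({"store r2", false});
  s.EndBlock();
}
```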
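The other notable change is where stress mode gets its randomness: instead of reaching through the Isolate, the scheduler now owns a base::Optional<base::RandomNumberGenerator> seeded from FLAG_random_seed in its constructor, which makes a --turbo-stress-instruction-scheduling run reproducible from the seed. Below is a rough standalone sketch of the same pattern, substituting std::optional and std::mt19937 for V8's internal types; StressPicker and PopRandom are invented names, and the uniform distribution stands in for the diff's NextInt call.

```cpp
// Sketch of deterministic "stress" scheduling: seed the generator once so a
// randomized schedule can be replayed. Types here are std substitutes for
// V8's base::Optional<base::RandomNumberGenerator>.
#include <iostream>
#include <iterator>
#include <optional>
#include <random>
#include <vector>

class StressPicker {
 public:
  explicit StressPicker(int seed) : rng_(std::in_place, seed) {}

  // Pop a uniformly random element from the ready list, as
  // StressSchedulerQueue::PopBestCandidate does in the diff.
  int PopRandom(std::vector<int>& ready) {
    std::uniform_int_distribution<size_t> dist(0, ready.size() - 1);
    auto it = ready.begin();
    std::advance(it, dist(*rng_));
    int result = *it;
    ready.erase(it);
    return result;
  }

 private:
  std::optional<std::mt19937> rng_;  // only engaged when stress mode is on
};

int main() {
  std::vector<int> ready = {10, 20, 30, 40};
  StressPicker picker(/*seed=*/42);  // same seed => same "random" schedule
  while (!ready.empty()) std::cout << picker.PopRandom(ready) << "\n";
}
```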