Diffstat (limited to 'deps/v8/src/deoptimizer.cc')
-rw-r--r--  deps/v8/src/deoptimizer.cc  170
1 file changed, 111 insertions(+), 59 deletions(-)
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 29575d8c0..4d5e60573 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -342,7 +342,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Unlink this function and evict from optimized code map.
SharedFunctionInfo* shared = function->shared();
function->set_code(shared->code());
- shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
@@ -358,9 +357,41 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
SelectedCodeUnlinker unlinker;
VisitAllOptimizedFunctionsForContext(context, &unlinker);
+ Isolate* isolate = context->GetHeap()->isolate();
+#ifdef DEBUG
+ Code* topmost_optimized_code = NULL;
+ bool safe_to_deopt_topmost_optimized_code = false;
+ // Make sure all activations of optimized code can deopt at their current PC.
+ // The topmost optimized code has special handling because it cannot be
+ // deoptimized due to weak object dependency.
+ for (StackFrameIterator it(isolate, isolate->thread_local_top());
+ !it.done(); it.Advance()) {
+ StackFrame::Type type = it.frame()->type();
+ if (type == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (FLAG_trace_deopt) {
+ JSFunction* function =
+ static_cast<OptimizedFrame*>(it.frame())->function();
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimizer found activation of function: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(),
+ " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+ SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ int deopt_index = safepoint.deoptimization_index();
+ bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
+ CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+ if (topmost_optimized_code == NULL) {
+ topmost_optimized_code = code;
+ safe_to_deopt_topmost_optimized_code = safe_to_deopt;
+ }
+ }
+ }
+#endif
+
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
- Isolate* isolate = context->GetHeap()->isolate();
Zone zone(isolate);
ZoneList<Code*> codes(10, &zone);
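
For context (not part of the patch): the DEBUG block above encodes a simple invariant that a standalone sketch can make explicit. The types below (Activation, CheckActivations) are invented for illustration and are not V8 API; the point is that every optimized activation below the topmost one must be at a PC with a deoptimization index, because execution will return into it after patching, while the topmost activation is only remembered so the patch loop can assert later that it was safe to deopt if it is actually patched.

#include <cassert>
#include <vector>

struct Activation {
  bool optimized;      // an OPTIMIZED stack frame?
  bool safe_to_deopt;  // PC maps to a deoptimization index?
};

// Walks activations from the top of the stack, mirroring the DEBUG check:
// every optimized activation *below* the topmost one must be at a lazy-deopt
// safepoint. The topmost one is only recorded; whether it was safe is
// consulted again when its code object is actually patched.
void CheckActivations(const std::vector<Activation>& frames) {
  bool seen_topmost = false;
  bool topmost_safe = false;
  for (const Activation& frame : frames) {
    if (!frame.optimized) continue;
    assert(!seen_topmost || frame.safe_to_deopt);
    if (!seen_topmost) {
      seen_topmost = true;
      topmost_safe = frame.safe_to_deopt;
    }
  }
  (void)topmost_safe;  // would be asserted later, at patch time
}

int main() {
  // Topmost optimized frame at an unsafe PC, deeper optimized caller parked
  // at a safepoint: allowed by the invariant.
  CheckActivations({{true, false}, {false, false}, {true, true}});
  return 0;
}
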
@@ -393,35 +424,17 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
element = next;
}
-#ifdef DEBUG
- // Make sure all activations of optimized code can deopt at their current PC.
- for (StackFrameIterator it(isolate, isolate->thread_local_top());
- !it.done(); it.Advance()) {
- StackFrame::Type type = it.frame()->type();
- if (type == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- if (FLAG_trace_deopt) {
- JSFunction* function =
- static_cast<OptimizedFrame*>(it.frame())->function();
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[deoptimizer patches for lazy deopt: ");
- function->PrintName(scope.file());
- PrintF(scope.file(),
- " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
- SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
- int deopt_index = safepoint.deoptimization_index();
- CHECK(deopt_index != Safepoint::kNoDeoptimizationIndex);
- }
- }
-#endif
-
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
// Now patch all the codes for deoptimization.
for (int i = 0; i < codes.length(); i++) {
+#ifdef DEBUG
+ if (codes[i] == topmost_optimized_code) {
+ ASSERT(safe_to_deopt_topmost_optimized_code);
+ }
+#endif
// It is finally time to die, code object.
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
@@ -755,6 +768,12 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
+
+ // Determine basic deoptimization information. The optimized frame is
+ // described by the input data.
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+
if (trace_scope_ != NULL) {
timer.Start();
PrintF(trace_scope_->file(),
@@ -763,7 +782,8 @@ void Deoptimizer::DoComputeOutputFrames() {
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
PrintF(trace_scope_->file(),
- " @%d, FP to SP delta: %d]\n",
+ " (opt #%d) @%d, FP to SP delta: %d]\n",
+ input_data->OptimizationId()->value(),
bailout_id_,
fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
@@ -771,10 +791,6 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -990,24 +1006,19 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (FLAG_enable_ool_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
- // from the input frame. For subsequent output frames, it can be gotten from
- // the function's code.
- Register constant_pool_reg =
- JavaScriptFrame::constant_pool_pointer_register();
+ // from the input frame. For subsequent output frames, it can be read from
+ // the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
- value = reinterpret_cast<intptr_t>(
- function->shared()->code()->constant_pool());
+ value = output_[frame_index - 1]->GetConstantPool();
}
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetConstantPool(value);
- if (is_topmost) output_frame->SetRegister(constant_pool_reg.code(), value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; constant_pool\n",
+ V8PRIxPTR "; caller's constant_pool\n",
top_address + output_offset, output_offset, value);
}
}
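
A simplified sketch of the new constant pool bookkeeping, using toy types (OutputFrame, BuildFrames) rather than the V8 classes: with out-of-line constant pools enabled, the slot written into each reconstructed frame now holds the caller's pool, taken from the previous output frame (or from the input frame for the bottommost one), while the frame's own pool is set from the code object that will actually execute there and is only loaded into the constant pool register for the topmost frame.

#include <cstdint>
#include <vector>

struct OutputFrame {
  intptr_t own_constant_pool = 0;     // pool of the code this frame will run
  intptr_t caller_constant_pool = 0;  // value stored in the frame slot
  bool in_register = false;           // pool register set (topmost only)
};

// input_caller_pool: caller pool read from the optimized input frame.
// code_pools[i]: constant pool of the unoptimized code for output frame i,
// ordered from bottommost (0) to topmost.
void BuildFrames(std::vector<OutputFrame>* output, intptr_t input_caller_pool,
                 const std::vector<intptr_t>& code_pools) {
  for (size_t i = 0; i < output->size(); ++i) {
    OutputFrame& frame = (*output)[i];
    // Analogue of SetCallerConstantPool: bottommost copies from the input
    // frame, everything else from the previous output frame's own pool.
    frame.caller_constant_pool =
        (i == 0) ? input_caller_pool : (*output)[i - 1].own_constant_pool;
    // Analogue of SetConstantPool: taken from the code that will execute.
    frame.own_constant_pool = code_pools[i];
    // Only the topmost frame needs its pool in the register; the others
    // restore theirs when control returns into them.
    frame.in_register = (i + 1 == output->size());
  }
}

int main() {
  std::vector<OutputFrame> output(3);
  BuildFrames(&output, /*input_caller_pool=*/0x1000, {0x2000, 0x3000, 0x4000});
  return 0;
}
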
@@ -1065,6 +1076,18 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
+ // Update constant pool.
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ JavaScriptFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
+ }
+
FullCodeGenerator::State state =
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
@@ -1148,15 +1171,14 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // A marker value is used in place of the constant pool.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
- intptr_t constant_pool = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, constant_pool);
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant_pool (adaptor sentinel)\n",
- top_address + output_offset, output_offset, constant_pool);
+ V8PRIxPTR "; caller's constant_pool\n",
+ top_address + output_offset, output_offset, value);
}
}
@@ -1203,6 +1225,11 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1278,13 +1305,13 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the previous frame.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant pool\n",
+ V8PRIxPTR " ; caller's constant pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1365,6 +1392,11 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(construct_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1436,13 +1468,13 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the previous frame.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant pool\n",
+ V8PRIxPTR " ; caller's constant pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1504,6 +1536,11 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1607,17 +1644,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the input frame.
- Register constant_pool_pointer_register =
- StubFailureTrampolineFrame::constant_pool_pointer_register();
+ // Read the caller's constant pool from the input frame.
input_frame_offset -= kPointerSize;
value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(constant_pool_pointer_register.code(), value);
output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
+ output_frame->SetCallerConstantPool(output_frame_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant_pool_pointer\n",
+ V8PRIxPTR " ; caller's constant_pool\n",
top_address + output_frame_offset, output_frame_offset, value);
}
}
@@ -1751,6 +1785,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
ASSERT(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
+ if (FLAG_enable_ool_constant_pool) {
+ Register constant_pool_reg =
+ StubFailureTrampolineFrame::constant_pool_pointer_register();
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
Code* notify_failure = NotifyStubFailureBuiltin();
output_frame->SetContinuation(
@@ -2716,6 +2758,9 @@ FrameDescription::FrameDescription(uint32_t frame_size,
constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
+ // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+ // isn't used before the next safepoint, the GC will try to scan it as a
+ // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
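
The TODO above rests on V8's pointer tagging scheme: a word with its low bit set is scanned by the GC as a HeapObject pointer, while a word with the low bit clear is read as a Smi. A standalone illustration follows; LooksLikeHeapObject is an invented helper and the constants are examples, not the actual kZapUint32 value.

#include <cassert>
#include <cstdint>

bool LooksLikeHeapObject(uint32_t word) {
  const uint32_t kHeapObjectTagMask = 1;  // low bit distinguishes Smi/pointer
  return (word & kHeapObjectTagMask) == 1;
}

int main() {
  // Any odd-valued zap constant would be scanned as a live pointer if the GC
  // visits the register array; an even one would be read as a harmless Smi.
  assert(LooksLikeHeapObject(0xbadbeef1u));
  assert(!LooksLikeHeapObject(0xbadbeef0u));
  return 0;
}
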
@@ -3306,6 +3351,13 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
// tagged and skip materializing the HeapNumber explicitly.
Handle<Object> object = GetNext(isolate, lvl + 1);
materialized_objects_.Add(object);
+ // On 32-bit architectures, there is an extra slot there because
+ // the escape analysis calculates the number of slots as
+ // object-size/pointer-size. To account for this, we read out
+ // any extra slots.
+ for (int i = 0; i < length - 2; i++) {
+ GetNext(isolate, lvl + 1);
+ }
return object;
}
case JS_OBJECT_TYPE: {
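
The skip count in the new loop above comes from simple size arithmetic. A hedged sketch, assuming the layout this patch targets (one tagged map word followed by an unboxed 8-byte double for a heap number): escape analysis records object-size/pointer-size slots, so on 32-bit targets the double spills into a third slot that carries no extra information and must be read and discarded.

#include <cstdio>

int SlotsToSkip(int pointer_size) {
  const int kMapSize = pointer_size;  // one tagged word for the map
  const int kDoubleSize = 8;          // the IEEE-754 payload
  int length = (kMapSize + kDoubleSize) / pointer_size;  // slots recorded
  return length - 2;                  // slots beyond map + value
}

int main() {
  std::printf("32-bit: skip %d slot(s)\n", SlotsToSkip(4));  // skip 1
  std::printf("64-bit: skip %d slot(s)\n", SlotsToSkip(8));  // skip 0
  return 0;
}
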
@@ -3360,7 +3412,7 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
void SlotRefValueBuilder::Finish(Isolate* isolate) {
- // We should have processed all slot
+ // We should have processed all the slots
ASSERT(slot_refs_.length() == current_slot_);
if (materialized_objects_.length() > prev_materialized_count_) {