author     Ryan Dahl <ry@tinyclouds.org>  2011-04-11 16:07:54 -0700
committer  Ryan Dahl <ry@tinyclouds.org>  2011-04-11 16:07:54 -0700
commit     0b1920b202098f80d1425eee186a9e4f4dab8c82 (patch)
tree       3671098756a54741b3e5e85edafd3902db57b643 /deps/v8/src
parent     6631983dd1a7e82d14a27e9be856b65e58f19393 (diff)
download   node-0b1920b202098f80d1425eee186a9e4f4dab8c82.tar.gz
Upgrade v8 to 3.1.8.10
Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/accessors.cc                  |  10
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc    | 141
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h     |  65
-rw-r--r--  deps/v8/src/deoptimizer.cc                |  84
-rw-r--r--  deps/v8/src/deoptimizer.h                 |  48
-rw-r--r--  deps/v8/src/frames-inl.h                  |  19
-rw-r--r--  deps/v8/src/frames.cc                     |  63
-rw-r--r--  deps/v8/src/frames.h                      |  19
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc  | 197
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h   |  70
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h   |   4
-rw-r--r--  deps/v8/src/runtime.cc                    |  25
-rw-r--r--  deps/v8/src/scopes.cc                     |  37
-rw-r--r--  deps/v8/src/scopes.h                      |  28
-rw-r--r--  deps/v8/src/version.cc                    |   2
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc    | 106
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h     |  47
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h     |   4
18 files changed, 549 insertions, 420 deletions
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 18264254b..8cbdc09ed 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -566,8 +566,8 @@ static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
const int offset = JavaScriptFrameConstants::kLocal0Offset;
return frame->fp() + offset - (slot_index * kPointerSize);
} else {
- const int offset = JavaScriptFrameConstants::kReceiverOffset;
- return frame->caller_sp() + offset + (slot_index * kPointerSize);
+ const int offset = JavaScriptFrameConstants::kSavedRegistersOffset;
+ return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
}
}
@@ -791,14 +791,16 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
- const int length = frame->GetProvidedParametersCount();
+ const int length = frame->ComputeParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
// Copy the parameters to the arguments object.
ASSERT(array->length() == length);
- for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+ for (int i = 0; i < length; i++) {
+ array->set(i, frame->GetParameter(i));
+ }
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index c0f5800bf..1ec2b9842 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -465,11 +465,19 @@ void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ Call(code, mode);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, safepoint_mode);
}
@@ -482,11 +490,21 @@ void LCodeGen::CallRuntime(Runtime::Function* function,
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode) {
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
@@ -498,8 +516,16 @@ void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
}
RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(),
+ deoptimization_environment->deoptimization_index());
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(),
+ 0,
+ deoptimization_environment->deoptimization_index());
+ }
}
@@ -631,6 +657,8 @@ void LCodeGen::RecordSafepoint(
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
@@ -951,7 +979,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
- __ PushSafepointRegistersAndDoubles();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
// Move left to r1 and right to r0 for the stub call.
if (left.is(r1)) {
__ Move(r0, right);
@@ -973,7 +1001,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Safepoint::kNoDeoptimizationIndex);
// Overwrite the stored value of r0 with the result of the stub.
__ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
- __ PopSafepointRegistersAndDoubles();
}
@@ -1369,11 +1396,8 @@ void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ PopSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
@@ -1972,7 +1996,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(flags);
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// Get the temp register reserved by the instruction. This needs to be r4 as
// its slot of the pushing of safepoint registers is used to communicate the
@@ -1987,12 +2011,13 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
// Put the result value into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(result, result);
-
- __ PopSafepointRegisters();
}
@@ -2456,7 +2481,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ Call(ip);
// Setup deoptimization.
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
// Restore context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2494,44 +2519,43 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Input is negative. Reverse its sign.
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(r1) ? r0 : r1;
- Register tmp2 = input.is(r2) ? r0 : r2;
- Register tmp3 = input.is(r3) ? r0 : r3;
- Register tmp4 = input.is(r4) ? r0 : r4;
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(r1) ? r0 : r1;
+ Register tmp2 = input.is(r2) ? r0 : r2;
+ Register tmp3 = input.is(r3) ? r0 : r3;
+ Register tmp4 = input.is(r4) ? r0 : r4;
- // exponent: floating point exponent value.
+ // exponent: floating point exponent value.
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ b(&allocated);
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ b(&allocated);
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- // Set the pointer to the new heap number in tmp.
- if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
- __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+ __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
+ __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
- __ StoreToSafepointRegisterSlot(tmp1, input);
- __ PopSafepointRegisters();
+ __ StoreToSafepointRegisterSlot(tmp1, input);
+ }
__ bind(&done);
}
@@ -2993,7 +3017,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// contained in the register pointer map.
__ mov(result, Operand(0));
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
@@ -3006,15 +3030,12 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
if (FLAG_debug_code) {
__ AbortIfNotSmi(r0);
}
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
- __ PopSafepointRegisters();
}
@@ -3070,7 +3091,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
SwVfpRegister flt_scratch = s0;
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -3095,9 +3116,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// integer value.
__ mov(ip, Operand(0));
__ StoreToSafepointRegisterSlot(ip, reg);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
if (!reg.is(r0)) __ mov(reg, r0);
// Done. Put the value in dbl_scratch into the value of the allocated heap
@@ -3106,7 +3125,6 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
__ sub(ip, reg, Operand(kHeapObjectTag));
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
__ StoreToSafepointRegisterSlot(reg, reg);
- __ PopSafepointRegisters();
}
@@ -3146,12 +3164,9 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
__ mov(reg, Operand(0));
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ StoreToSafepointRegisterSlot(r0, reg);
- __ PopSafepointRegisters();
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index a26f6311e..393b6423e 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -57,7 +57,8 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
- resolver_(this) {
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -167,12 +168,24 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateDeferredCode();
bool GenerateSafepointTable();
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
void CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr);
+
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
@@ -180,6 +193,10 @@ class LCodeGen BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr);
+
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
@@ -188,7 +205,9 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr);
+ void RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
@@ -275,6 +294,48 @@ class LCodeGen BASE_EMBEDDED {
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PushSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PushSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters:
+ codegen_->masm_->PopSafepointRegisters();
+ break;
+ case Safepoint::kWithRegistersAndDoubles:
+ codegen_->masm_->PopSafepointRegistersAndDoubles();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
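
Note: the PushSafepointRegistersScope added above is an RAII guard: the safepoint registers are pushed in the constructor and popped in the destructor, while expected_safepoint_kind_ asserts that scopes do not nest. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than the real V8 codegen/masm API:

#include <cassert>
#include <cstdio>

enum Kind { kSimple, kWithRegisters, kWithRegistersAndDoubles };

// Stand-in for the code generator; PushRegisters/PopRegisters model the
// masm_->PushSafepointRegisters()/PopSafepointRegisters() pair.
struct Codegen {
  Kind expected_kind = kSimple;
  void PushRegisters() { std::printf("push safepoint registers\n"); }
  void PopRegisters()  { std::printf("pop safepoint registers\n"); }
};

// RAII scope: construction saves the registers, destruction restores them,
// so a deferred-code path cannot forget the matching pop.
class PushRegistersScope {
 public:
  PushRegistersScope(Codegen* cg, Kind kind) : cg_(cg) {
    assert(cg_->expected_kind == kSimple);  // scopes must not nest
    cg_->expected_kind = kind;
    cg_->PushRegisters();
  }
  ~PushRegistersScope() {
    cg_->PopRegisters();
    cg_->expected_kind = kSimple;
  }
 private:
  Codegen* cg_;
};

int main() {
  Codegen cg;
  {
    PushRegistersScope scope(&cg, kWithRegisters);
    // ... emit the deferred call here; registers stay saved for its duration.
  }  // leaving the block restores the registers exactly once
  return 0;
}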
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index af2f42e46..9db812b3e 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -196,8 +196,7 @@ Deoptimizer::Deoptimizer(JSFunction* function,
fp_to_sp_delta_(fp_to_sp_delta),
output_count_(0),
output_(NULL),
- integer32_values_(NULL),
- double_values_(NULL) {
+ deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
PrintF("**** DEOPT: ");
function->PrintName();
@@ -236,8 +235,6 @@ Deoptimizer::Deoptimizer(JSFunction* function,
Deoptimizer::~Deoptimizer() {
ASSERT(input_ == NULL && output_ == NULL);
- delete[] integer32_values_;
- delete[] double_values_;
}
@@ -382,13 +379,8 @@ void Deoptimizer::DoComputeOutputFrames() {
int count = iterator.Next();
ASSERT(output_ == NULL);
output_ = new FrameDescription*[count];
- // Per-frame lists of untagged and unboxed int32 and double values.
- integer32_values_ = new List<ValueDescriptionInteger32>[count];
- double_values_ = new List<ValueDescriptionDouble>[count];
for (int i = 0; i < count; ++i) {
output_[i] = NULL;
- integer32_values_[i].Initialize(0);
- double_values_[i].Initialize(0);
}
output_count_ = count;
@@ -416,40 +408,22 @@ void Deoptimizer::DoComputeOutputFrames() {
}
-void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
- // We need to adjust the stack index by one for the top-most frame.
- int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
- List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
- for (int i = 0; i < ints->length(); i++) {
- ValueDescriptionInteger32 value = ints->at(i);
- double val = static_cast<double>(value.int32_value());
- InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
- }
+void Deoptimizer::MaterializeHeapNumbers() {
+ for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
+ HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+ Handle<Object> num = Factory::NewNumber(d.value());
+ if (FLAG_trace_deopt) {
+ PrintF("Materializing a new heap number %p [%e] in slot %p\n",
+ reinterpret_cast<void*>(*num),
+ d.value(),
+ d.slot_address());
+ }
- // Iterate over double values and convert them to a heap number.
- List<ValueDescriptionDouble>* doubles = &double_values_[index];
- for (int i = 0; i < doubles->length(); ++i) {
- ValueDescriptionDouble value = doubles->at(i);
- InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
- extra_slot_count);
+ Memory::Object_at(d.slot_address()) = *num;
}
}
-void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
- int stack_index,
- double val,
- int extra_slot_count) {
- // Add one to the TOS index to take the 'state' pushed before jumping
- // to the stub that calls Runtime::NotifyDeoptimized into account.
- int tos_index = stack_index + extra_slot_count;
- int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
- if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
- Handle<Object> num = Factory::NewNumber(val);
- frame->SetExpression(index, *num);
-}
-
-
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset) {
@@ -492,7 +466,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
bool is_smi = Smi::IsValid(value);
- unsigned output_index = output_offset / kPointerSize;
if (FLAG_trace_deopt) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
@@ -509,9 +482,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- AddInteger32Value(frame_index,
- output_index,
- static_cast<int32_t>(value));
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+ static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
}
return;
@@ -520,7 +492,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
- unsigned output_index = output_offset / kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
output_[frame_index]->GetTop() + output_offset,
@@ -530,7 +501,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- AddDoubleValue(frame_index, output_index, value);
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
return;
}
@@ -558,7 +529,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
input_->GetOffsetFromSlotIndex(this, input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = Smi::IsValid(value);
- unsigned output_index = output_offset / kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
@@ -575,9 +545,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- AddInteger32Value(frame_index,
- output_index,
- static_cast<int32_t>(value));
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+ static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
}
return;
@@ -588,7 +557,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned input_offset =
input_->GetOffsetFromSlotIndex(this, input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
- unsigned output_index = output_offset / kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
@@ -598,7 +566,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- AddDoubleValue(frame_index, output_index, value);
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
return;
}
@@ -901,19 +869,11 @@ Object* Deoptimizer::ComputeLiteral(int index) const {
}
-void Deoptimizer::AddInteger32Value(int frame_index,
- int slot_index,
- int32_t value) {
- ValueDescriptionInteger32 value_desc(slot_index, value);
- integer32_values_[frame_index].Add(value_desc);
-}
-
-
-void Deoptimizer::AddDoubleValue(int frame_index,
- int slot_index,
+void Deoptimizer::AddDoubleValue(intptr_t slot_address,
double value) {
- ValueDescriptionDouble value_desc(slot_index, value);
- double_values_[frame_index].Add(value_desc);
+ HeapNumberMaterializationDescriptor value_desc(
+ reinterpret_cast<Address>(slot_address), value);
+ deferred_heap_numbers_.Add(value_desc);
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 90495c976..f4cd409be 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -42,38 +42,17 @@ class TranslationIterator;
class DeoptimizingCodeListNode;
-class ValueDescription BASE_EMBEDDED {
+class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
- explicit ValueDescription(int index) : stack_index_(index) { }
- int stack_index() const { return stack_index_; }
-
- private:
- // Offset relative to the top of the stack.
- int stack_index_;
-};
-
-
-class ValueDescriptionInteger32: public ValueDescription {
- public:
- ValueDescriptionInteger32(int index, int32_t value)
- : ValueDescription(index), int32_value_(value) { }
- int32_t int32_value() const { return int32_value_; }
-
- private:
- // Raw value.
- int32_t int32_value_;
-};
+ HeapNumberMaterializationDescriptor(Address slot_address, double val)
+ : slot_address_(slot_address), val_(val) { }
-
-class ValueDescriptionDouble: public ValueDescription {
- public:
- ValueDescriptionDouble(int index, double value)
- : ValueDescription(index), double_value_(value) { }
- double double_value() const { return double_value_; }
+ Address slot_address() const { return slot_address_; }
+ double value() const { return val_; }
private:
- // Raw value.
- double double_value_;
+ Address slot_address_;
+ double val_;
};
@@ -164,7 +143,7 @@ class Deoptimizer : public Malloced {
~Deoptimizer();
- void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
+ void MaterializeHeapNumbers();
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -253,13 +232,7 @@ class Deoptimizer : public Malloced {
Object* ComputeLiteral(int index) const;
- void InsertHeapNumberValue(JavaScriptFrame* frame,
- int stack_index,
- double val,
- int extra_slot_count);
-
- void AddInteger32Value(int frame_index, int slot_index, int32_t value);
- void AddDoubleValue(int frame_index, int slot_index, double value);
+ void AddDoubleValue(intptr_t slot_address, double value);
static LargeObjectChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
@@ -295,8 +268,7 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
- List<ValueDescriptionInteger32>* integer32_values_;
- List<ValueDescriptionDouble>* double_values_;
+ List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static int table_entry_size_;
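
Note: the deoptimizer change above replaces the per-frame integer32/double value lists with a single flat list of HeapNumberMaterializationDescriptor entries, each pairing a frame-slot address with the raw double that should eventually be boxed there. A rough standalone sketch of that deferred-materialization idea (hypothetical types; plain new double stands in for Factory::NewNumber):

#include <cstdint>
#include <cstdio>
#include <vector>

// During translation no GC allocation is allowed, so each unboxed double is
// recorded together with the slot that must hold the boxed number, and the
// slot itself gets a GC-safe placeholder. A later pass patches the slots.
struct HeapNumberMaterialization {
  std::uintptr_t* slot;  // where the boxed number must be written
  double value;          // the raw double captured during translation
};

int main() {
  std::uintptr_t frame_slot = 0;  // GC-safe placeholder written at translation time
  std::vector<HeapNumberMaterialization> deferred;
  deferred.push_back({&frame_slot, 2.5});  // translation pass: remember the value

  for (const auto& d : deferred) {         // materialization pass: allocation is safe again
    double* boxed = new double(d.value);   // stands in for Factory::NewNumber
    *d.slot = reinterpret_cast<std::uintptr_t>(boxed);
  }

  std::printf("%f\n", *reinterpret_cast<double*>(frame_slot));
  delete reinterpret_cast<double*>(frame_slot);
  return 0;
}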
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 78bb646c7..9430cad0d 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -136,15 +136,26 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
}
+Address JavaScriptFrame::GetParameterSlot(int index) const {
+ int param_count = ComputeParametersCount();
+ ASSERT(-1 <= index && index < param_count);
+ int parameter_offset = (param_count - index - 1) * kPointerSize;
+ return caller_sp() + parameter_offset;
+}
+
+
+Object* JavaScriptFrame::GetParameter(int index) const {
+ return Memory::Object_at(GetParameterSlot(index));
+}
+
+
inline Object* JavaScriptFrame::receiver() const {
- const int offset = JavaScriptFrameConstants::kReceiverOffset;
- return Memory::Object_at(caller_sp() + offset);
+ return GetParameter(-1);
}
inline void JavaScriptFrame::set_receiver(Object* value) {
- const int offset = JavaScriptFrameConstants::kReceiverOffset;
- Memory::Object_at(caller_sp() + offset) = value;
+ Memory::Object_at(GetParameterSlot(-1)) = value;
}
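
Note: GetParameterSlot above addresses parameters relative to the caller's stack pointer, with the receiver reachable as index -1 one slot above parameter 0. A small sketch of just that address arithmetic, with a made-up caller_sp value for illustration:

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr int kPointerSize = sizeof(void*);

// Parameter i of a frame with param_count parameters lives at
//   caller_sp + (param_count - i - 1) * kPointerSize
// so the last parameter sits at caller_sp and index -1 yields the receiver.
std::uintptr_t GetParameterSlot(std::uintptr_t caller_sp, int param_count, int index) {
  assert(-1 <= index && index < param_count);
  return caller_sp + (param_count - index - 1) * kPointerSize;
}

int main() {
  const std::uintptr_t caller_sp = 0x1000;  // made-up stack pointer
  const int param_count = 3;
  std::printf("receiver slot: %#zx\n", (size_t)GetParameterSlot(caller_sp, param_count, -1));
  std::printf("param 0 slot:  %#zx\n", (size_t)GetParameterSlot(caller_sp, param_count, 0));
  std::printf("param 2 slot:  %#zx\n", (size_t)GetParameterSlot(caller_sp, param_count, 2));
  return 0;
}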
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 24ea8dde2..e94fdd84e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -540,9 +540,7 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
pc(), &safepoint_entry, &stack_slots);
unsigned slot_space = stack_slots * kPointerSize;
- // Visit the outgoing parameters. This is usually dealt with by the
- // callee, but while GC'ing we artificially lower the number of
- // arguments to zero and let the caller deal with it.
+ // Visit the outgoing parameters.
Object** parameters_base = &Memory::Object_at(sp());
Object** parameters_limit = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
@@ -596,21 +594,6 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
// Visit the return address in the callee and incoming arguments.
IteratePc(v, pc_address(), code);
- IterateArguments(v);
-}
-
-
-Object* JavaScriptFrame::GetParameter(int index) const {
- ASSERT(index >= 0 && index < ComputeParametersCount());
- const int offset = JavaScriptFrameConstants::kParam0Offset;
- return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
-}
-
-
-int JavaScriptFrame::ComputeParametersCount() const {
- Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
- Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
- return static_cast<int>((base - limit) / kPointerSize);
}
@@ -630,32 +613,17 @@ Code* JavaScriptFrame::unchecked_code() const {
}
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
+int JavaScriptFrame::GetNumberOfIncomingArguments() const {
+ ASSERT(!SafeStackFrameIterator::is_active() &&
+ Heap::gc_state() == Heap::NOT_IN_GC);
+
+ JSFunction* function = JSFunction::cast(this->function());
+ return function->shared()->formal_parameter_count();
}
Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC ||
- SafeStackFrameIterator::is_active()) {
- // If the we are currently iterating the safe stack the
- // arguments for frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when objects may have been marked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
+ return fp() + StandardFrameConstants::kCallerSPOffset;
}
@@ -833,9 +801,7 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
+ return fp() + StandardFrameConstants::kCallerSPOffset;
}
@@ -1074,17 +1040,6 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
IteratePc(v, pc_address(), code());
- IterateArguments(v);
-}
-
-
-void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
- // Traverse callee-saved registers, receiver, and parameters.
- const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
- const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
- Object** base = &Memory::Object_at(fp() + kBaseOffset);
- Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
- v->VisitPointers(base, limit);
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 537870906..03e5e671b 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -449,14 +449,11 @@ class JavaScriptFrame: public StandardFrame {
inline void set_receiver(Object* value);
// Access the parameters.
- Object* GetParameter(int index) const;
- int ComputeParametersCount() const;
-
- // Temporary way of getting access to the number of parameters
- // passed on the stack by the caller. Once argument adaptor frames
- // has been introduced on ARM, this number will always match the
- // computed parameters count.
- int GetProvidedParametersCount() const;
+ inline Address GetParameterSlot(int index) const;
+ inline Object* GetParameter(int index) const;
+ inline int ComputeParametersCount() const {
+ return GetNumberOfIncomingArguments();
+ }
// Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const;
@@ -494,6 +491,8 @@ class JavaScriptFrame: public StandardFrame {
virtual Address GetCallerStackPointer() const;
+ virtual int GetNumberOfIncomingArguments() const;
+
// Garbage collection support. Iterates over incoming arguments,
// receiver, and any callee-saved registers.
void IterateArguments(ObjectVisitor* v) const;
@@ -554,6 +553,10 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
+ virtual int GetNumberOfIncomingArguments() const {
+ return Smi::cast(GetExpression(0))->value();
+ }
+
virtual Address GetCallerStackPointer() const;
private:
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 2b7712c43..a1be11d83 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -418,20 +418,21 @@ void LCodeGen::AddToTranslation(Translation* translation,
}
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- bool adjusted) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode,
+ SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (!adjusted) {
+ if (context_mode == RESTORE_CONTEXT) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ call(code, mode);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -442,25 +443,44 @@ void LCodeGen::CallCode(Handle<Code> code,
}
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode) {
+ CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
void LCodeGen::CallRuntime(Runtime::Function* fun,
int argc,
LInstruction* instr,
- bool adjusted) {
+ ContextMode context_mode) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (!adjusted) {
+ if (context_mode == RESTORE_CONTEXT) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ CallRuntime(fun, argc);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode) {
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
@@ -472,8 +492,16 @@ void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
}
RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(),
+ deoptimization_environment->deoptimization_index());
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(),
+ 0,
+ deoptimization_environment->deoptimization_index());
+ }
}
@@ -622,6 +650,7 @@ void LCodeGen::RecordSafepoint(
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
+ ASSERT(kind == expected_safepoint_kind_);
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
@@ -707,48 +736,48 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::StringCharAt: {
StringCharAtStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::MathPow: {
MathPowStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
break;
}
default:
@@ -1098,7 +1127,7 @@ void LCodeGen::DoBitNotI(LBitNotI* instr) {
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->InputAt(0)));
- CallRuntime(Runtime::kThrow, 1, instr, false);
+ CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
if (FLAG_debug_code) {
Comment("Unreachable code.");
@@ -1170,7 +1199,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -1282,12 +1311,8 @@ void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ pushad();
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ popad();
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
void LCodeGen::DoGoto(LGoto* instr) {
@@ -1776,7 +1801,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
NearLabel true_value, done;
__ test(eax, Operand(eax));
@@ -1795,7 +1820,7 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
__ test(eax, Operand(eax));
EmitBranch(true_block, false_block, zero);
}
@@ -1867,7 +1892,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
@@ -1878,11 +1903,12 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(flags);
- // Get the temp register reserved by the instruction. This needs to be edi as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
+ // Get the temp register reserved by the instruction. This needs to be a
+ // register which is pushed last by PushSafepointRegisters as top of the
+ // stack is used to pass the offset to the location of the map check to
+ // the stub.
Register temp = ToRegister(instr->TempAt(0));
- ASSERT(temp.is(edi));
+ ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
__ mov(InstanceofStub::right(), Immediate(instr->function()));
static const int kAdditionalDelta = 16;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
@@ -1890,10 +1916,13 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RESTORE_CONTEXT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
// Put the result value into the eax slot and restore all registers.
__ StoreToSafepointRegisterSlot(eax, eax);
- __ PopSafepointRegisters();
}
@@ -1921,7 +1950,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
@@ -1944,7 +1973,7 @@ void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
// The compare stub expects compare condition and the input operands
// reversed for GT and LTE.
@@ -2039,7 +2068,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
__ mov(ecx, instr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2163,7 +2192,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(eax));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2351,7 +2380,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Setup deoptimization.
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
@@ -2373,7 +2402,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2394,10 +2423,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -2413,7 +2440,6 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
- __ PopSafepointRegisters();
}
@@ -2601,7 +2627,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -2609,7 +2635,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -2617,7 +2643,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
@@ -2661,7 +2687,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2672,7 +2698,7 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2682,7 +2708,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
__ Drop(1);
}
@@ -2694,7 +2720,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
}
@@ -2712,12 +2738,12 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Set(eax, Immediate(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr, false);
+ CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
}
@@ -2760,7 +2786,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
Handle<Code> ic(Builtins::builtin(
info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2830,7 +2856,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
Handle<Code> ic(Builtins::builtin(
info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
}
@@ -2948,7 +2974,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// contained in the register pointer map.
__ Set(result, Immediate(0));
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
@@ -2961,16 +2987,12 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
if (FLAG_debug_code) {
__ AbortIfNotSmi(eax);
}
__ SmiUntag(eax);
__ StoreToSafepointRegisterSlot(result, eax);
- __ PopSafepointRegisters();
}
@@ -3017,7 +3039,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Register tmp = reg.is(eax) ? ecx : eax;
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -3039,10 +3061,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// integer value.
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
if (!reg.is(eax)) __ mov(reg, eax);
// Done. Put the value in xmm0 into the value of the allocated heap
@@ -3050,7 +3069,6 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
__ bind(&done);
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
__ StoreToSafepointRegisterSlot(reg, reg);
- __ PopSafepointRegisters();
}
@@ -3086,13 +3104,9 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
__ Set(reg, Immediate(0));
- __ PushSafepointRegisters();
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ StoreToSafepointRegisterSlot(reg, eax);
- __ PopSafepointRegisters();
}
@@ -3503,16 +3517,16 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
} else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
} else {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
}
}
@@ -3528,9 +3542,12 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
// Pick the right runtime function to call.
if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
} else {
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ CallRuntime(Runtime::kCreateObjectLiteralShallow,
+ 4,
+ instr,
+ CONTEXT_ADJUSTED);
}
}
@@ -3556,7 +3573,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -3568,7 +3585,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
__ pop(ebx);
__ bind(&allocated);
@@ -3595,14 +3612,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
} else {
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? Factory::true_value()
: Factory::false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr, false);
+ CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
}
}
@@ -3614,7 +3631,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
} else {
__ push(ToOperand(input));
}
- CallRuntime(Runtime::kTypeof, 1, instr, false);
+ CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
}
@@ -3825,7 +3842,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ j(above_equal, &done);
StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
__ bind(&done);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 5ba4bc435..681ea77e5 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -61,7 +61,8 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
- resolver_(this) {
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -159,16 +160,44 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateRelocPadding();
bool GenerateSafepointTable();
- void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
- bool adjusted = true);
- void CallRuntime(Runtime::Function* fun, int argc, LInstruction* instr,
- bool adjusted = true);
- void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
- bool adjusted = true) {
+ enum ContextMode {
+ RESTORE_CONTEXT,
+ CONTEXT_ADJUSTED
+ };
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ ContextMode context_mode,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(Runtime::Function* fun,
+ int argc,
+ LInstruction* instr,
+ ContextMode context_mode);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ ContextMode context_mode) {
Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr, adjusted);
+ CallRuntime(function, argc, instr, context_mode);
}
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr);
+
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
@@ -177,7 +206,9 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr);
+ void RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
@@ -272,6 +303,27 @@ class LCodeGen BASE_EMBEDDED {
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ }
+
+ ~PushSafepointRegistersScope() {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ codegen_->masm_->PopSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
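
PushSafepointRegistersScope is an RAII guard around the manual PushSafepointRegisters/PopSafepointRegisters pairing, and it keeps expected_safepoint_kind_ in sync so RecordSafepoint can assert the expected kind. A usage sketch matching the deferred-code pattern in the x64 hunks later in this diff:

void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
  PushSafepointRegistersScope scope(this);  // pushes registers, kind -> kWithRegisters
  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}                                           // destructor pops registers, kind -> kSimple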
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 62bb0f363..580aad1a5 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -635,6 +635,10 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* on_not_flat_ascii_strings);
+ static int SafepointRegisterStackIndex(Register reg) {
+ return SafepointRegisterStackIndex(reg.code());
+ }
+
private:
bool generating_stub_;
bool allow_stub_calls_;
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 0c15f60f3..4fe0e82b3 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -4332,7 +4332,7 @@ static MaybeObject* Runtime_GetArgumentsProperty(Arguments args) {
JavaScriptFrame* frame = it.frame();
// Get the actual number of provided arguments.
- const uint32_t n = frame->GetProvidedParametersCount();
+ const uint32_t n = frame->ComputeParametersCount();
// Try to convert the key to an index. If successful and within
// index return the argument from the frame.
@@ -6887,7 +6887,7 @@ static MaybeObject* Runtime_NewObjectFromBound(Arguments args) {
ASSERT(!frame->is_optimized());
it.AdvanceToArgumentsFrame();
frame = it.frame();
- int argc = frame->GetProvidedParametersCount();
+ int argc = frame->ComputeParametersCount();
// Prepend bound arguments to caller's arguments.
int total_argc = bound_argc + argc;
@@ -7092,14 +7092,13 @@ static MaybeObject* Runtime_NotifyDeoptimized(Arguments args) {
ASSERT(Heap::IsAllocationAllowed());
int frames = deoptimizer->output_count();
+ deoptimizer->MaterializeHeapNumbers();
+ delete deoptimizer;
+
JavaScriptFrameIterator it;
JavaScriptFrame* frame = NULL;
- for (int i = 0; i < frames; i++) {
- if (i != 0) it.Advance();
- frame = it.frame();
- deoptimizer->InsertHeapNumberValues(frames - i - 1, frame);
- }
- delete deoptimizer;
+ for (int i = 0; i < frames - 1; i++) it.Advance();
+ frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(frame->function()));
@@ -7720,7 +7719,7 @@ static void PrintTransition(Object* result) {
// supplied parameters, not all parameters required)
PrintF("(this=");
PrintObject(frame->receiver());
- const int length = frame->GetProvidedParametersCount();
+ const int length = frame->ComputeParametersCount();
for (int i = 0; i < length; i++) {
PrintF(", ");
PrintObject(frame->GetParameter(i));
@@ -9223,8 +9222,8 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
// Find the number of arguments to fill. At least fill the number of
// parameters for the function and fill more if more parameters are provided.
int argument_count = info.number_of_parameters();
- if (argument_count < it.frame()->GetProvidedParametersCount()) {
- argument_count = it.frame()->GetProvidedParametersCount();
+ if (argument_count < it.frame()->ComputeParametersCount()) {
+ argument_count = it.frame()->ComputeParametersCount();
}
// Calculate the size of the result.
@@ -9281,7 +9280,7 @@ static MaybeObject* Runtime_GetFrameDetails(Arguments args) {
// TODO(3141533): We should be able to get the actual parameter
// value for optimized frames.
if (!is_optimized_frame &&
- (i < it.frame()->GetProvidedParametersCount())) {
+ (i < it.frame()->ComputeParametersCount())) {
details->set(details_index++, it.frame()->GetParameter(i));
} else {
details->set(details_index++, Heap::undefined_value());
@@ -10161,7 +10160,7 @@ static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
}
}
- const int length = frame->GetProvidedParametersCount();
+ const int length = frame->ComputeParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
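
All of the runtime.cc call sites above converge on the same pattern: the frame reports how many arguments were actually passed via ComputeParametersCount() (formerly GetProvidedParametersCount), and callers size their arrays from that. Roughly:

const int length = frame->ComputeParametersCount();
Handle<FixedArray> array = Factory::NewFixedArray(length);
for (int i = 0; i < length; i++) {
  array->set(i, frame->GetParameter(i));  // copy the actually provided arguments
}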
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index a89bf9638..6a720f5f0 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -118,7 +118,7 @@ Scope::Scope(Type type)
params_(0),
unresolved_(0),
decls_(0) {
- SetDefaults(type, NULL, NULL);
+ SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
ASSERT(!resolved());
}
@@ -130,7 +130,7 @@ Scope::Scope(Scope* outer_scope, Type type)
params_(4),
unresolved_(16),
decls_(4) {
- SetDefaults(type, outer_scope, NULL);
+ SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
// In that case, the ASSERT below needs to be adjusted.
@@ -140,14 +140,14 @@ Scope::Scope(Scope* outer_scope, Type type)
}
-Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
+Scope::Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info)
: inner_scopes_(4),
variables_(),
temps_(4),
params_(4),
unresolved_(16),
decls_(4) {
- ASSERT(scope_info != NULL);
+ ASSERT(!scope_info.is_null());
SetDefaults(FUNCTION_SCOPE, NULL, scope_info);
ASSERT(resolved());
if (scope_info->HasHeapAllocatedLocals()) {
@@ -176,6 +176,31 @@ Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
}
+void Scope::SetDefaults(Type type,
+ Scope* outer_scope,
+ Handle<SerializedScopeInfo> scope_info) {
+ outer_scope_ = outer_scope;
+ type_ = type;
+ scope_name_ = Factory::empty_symbol();
+ dynamics_ = NULL;
+ receiver_ = NULL;
+ function_ = NULL;
+ arguments_ = NULL;
+ arguments_shadow_ = NULL;
+ illegal_redecl_ = NULL;
+ scope_inside_with_ = false;
+ scope_contains_with_ = false;
+ scope_calls_eval_ = false;
+ outer_scope_calls_eval_ = false;
+ inner_scope_calls_eval_ = false;
+ outer_scope_is_eval_scope_ = false;
+ force_eager_compilation_ = false;
+ num_stack_slots_ = 0;
+ num_heap_slots_ = 0;
+ scope_info_ = scope_info;
+}
+
+
Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
Scope* global_scope) {
ASSERT(!info->closure().is_null());
@@ -188,8 +213,8 @@ Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
JSFunction* current = *info->closure();
do {
current = current->context()->closure();
- SerializedScopeInfo* scope_info = current->shared()->scope_info();
- if (scope_info != SerializedScopeInfo::Empty()) {
+ Handle<SerializedScopeInfo> scope_info(current->shared()->scope_info());
+ if (*scope_info != SerializedScopeInfo::Empty()) {
scope = new Scope(scope, scope_info);
if (innermost_scope == NULL) innermost_scope = scope;
} else {
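
Hedged aside on the handle conversion above: an empty Handle now plays the role the NULL raw pointer used to, which keeps the SerializedScopeInfo reference GC-safe while the scope chain is walked. A toy illustration (raw_scope_info is a hypothetical SerializedScopeInfo*):

Handle<SerializedScopeInfo> none = Handle<SerializedScopeInfo>::null();
ASSERT(none.is_null());                            // was: scope_info == NULL
Handle<SerializedScopeInfo> info(raw_scope_info);  // handle keeps the object reachable
ASSERT(!info.is_null());                           // was: scope_info != NULL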
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 140ff1994..4a48a4c45 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -370,8 +370,8 @@ class Scope: public ZoneObject {
int num_heap_slots_;
// Serialized scopes support.
- SerializedScopeInfo* scope_info_;
- bool resolved() { return scope_info_ != NULL; }
+ Handle<SerializedScopeInfo> scope_info_;
+ bool resolved() { return !scope_info_.is_null(); }
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
@@ -406,7 +406,7 @@ class Scope: public ZoneObject {
void AllocateVariablesRecursively();
private:
- Scope(Scope* inner_scope, SerializedScopeInfo* scope_info);
+ Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info);
void AddInnerScope(Scope* inner_scope) {
if (inner_scope != NULL) {
@@ -417,27 +417,7 @@ class Scope: public ZoneObject {
void SetDefaults(Type type,
Scope* outer_scope,
- SerializedScopeInfo* scope_info) {
- outer_scope_ = outer_scope;
- type_ = type;
- scope_name_ = Factory::empty_symbol();
- dynamics_ = NULL;
- receiver_ = NULL;
- function_ = NULL;
- arguments_ = NULL;
- arguments_shadow_ = NULL;
- illegal_redecl_ = NULL;
- scope_inside_with_ = false;
- scope_contains_with_ = false;
- scope_calls_eval_ = false;
- outer_scope_calls_eval_ = false;
- inner_scope_calls_eval_ = false;
- outer_scope_is_eval_scope_ = false;
- force_eager_compilation_ = false;
- num_stack_slots_ = 0;
- num_heap_slots_ = 0;
- scope_info_ = scope_info;
- }
+ Handle<SerializedScopeInfo> scope_info);
};
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 7647f4e2f..39328dcb7 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 1
#define BUILD_NUMBER 8
-#define PATCH_LEVEL 8
+#define PATCH_LEVEL 10
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 5bc027590..48844a5bf 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -429,14 +429,16 @@ void LCodeGen::AddToTranslation(Translation* translation,
}
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode,
+ int argc) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ call(code, mode);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, safepoint_mode, argc);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -447,6 +449,13 @@ void LCodeGen::CallCode(Handle<Code> code,
}
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
+}
+
+
void LCodeGen::CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
@@ -456,11 +465,23 @@ void LCodeGen::CallRuntime(Runtime::Function* function,
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr) {
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
}
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode,
+ int argc) {
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
@@ -472,8 +493,17 @@ void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
}
RegisterEnvironmentForDeoptimization(deoptimization_environment);
- RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ ASSERT(argc == 0);
+ RecordSafepoint(instr->pointer_map(),
+ deoptimization_environment->deoptimization_index());
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(),
+ argc,
+ deoptimization_environment->deoptimization_index());
+ }
}
@@ -598,6 +628,8 @@ void LCodeGen::RecordSafepoint(
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
+ ASSERT(kind == expected_safepoint_kind_);
+
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
@@ -1260,11 +1292,8 @@ void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- __ Pushad();
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ Popad();
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
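
Hedged summary of the DoDeferredStackCheck rewrite above: the hand-written save/call/record/restore sequence is folded into the two new helpers, roughly:

{
  PushSafepointRegistersScope scope(this);  // replaces the explicit __ Pushad()
  // CallRuntimeFromDeferred reloads rsi from the frame, calls the runtime
  // saving double registers, and records a register safepoint with
  // Safepoint::kNoDeoptimizationIndex.
  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}                                           // scope destructor replaces __ Popad()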
@@ -1827,16 +1856,21 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- __ PushSafepointRegisters();
+ {
+ PushSafepointRegistersScope scope(this);
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ Push(instr->function());
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ movq(kScratchRegister, rax);
- __ PopSafepointRegisters();
+ __ push(ToRegister(instr->InputAt(0)));
+ __ Push(instr->function());
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ CallCodeGeneric(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS,
+ 2);
+ __ movq(kScratchRegister, rax);
+ }
__ testq(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
@@ -2292,7 +2326,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Setup deoptimization.
- RegisterLazyDeoptimization(instr);
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
// Restore context.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2317,7 +2351,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
// Preserve the value of all registers.
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
Label negative;
__ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2338,9 +2372,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
// Set the pointer to the new heap number in tmp.
if (!tmp.is(rax)) {
__ movq(tmp, rax);
@@ -2357,7 +2389,6 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
- __ PopSafepointRegisters();
}
@@ -2884,7 +2915,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// contained in the register pointer map.
__ Set(result, 0);
- __ PushSafepointRegisters();
+ PushSafepointRegistersScope scope(this);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
@@ -2897,16 +2928,12 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Integer32ToSmi(index, index);
__ push(index);
}
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
if (FLAG_debug_code) {
__ AbortIfNotSmi(rax);
}
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
- __ PopSafepointRegisters();
}
@@ -2971,13 +2998,12 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
__ Move(reg, Smi::FromInt(0));
- __ PushSafepointRegisters();
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- // Ensure that value in rax survives popping registers.
- __ movq(kScratchRegister, rax);
- __ PopSafepointRegisters();
+ {
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // Ensure that value in rax survives popping registers.
+ __ movq(kScratchRegister, rax);
+ }
__ movq(reg, kScratchRegister);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 420556a58..88832faf7 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -60,7 +60,8 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
- resolver_(this) {
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -151,12 +152,26 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateJumpTable();
bool GenerateSafepointTable();
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS
+ };
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode,
+ int argc);
+
+
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
+
void CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr);
+
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
@@ -164,6 +179,11 @@ class LCodeGen BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr);
+
+
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
@@ -172,7 +192,9 @@ class LCodeGen BASE_EMBEDDED {
void LoadHeapObject(Register result, Handle<HeapObject> object);
- void RegisterLazyDeoptimization(LInstruction* instr);
+ void RegisterLazyDeoptimization(LInstruction* instr,
+ SafepointMode safepoint_mode,
+ int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
@@ -268,6 +290,27 @@ class LCodeGen BASE_EMBEDDED {
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ }
+
+ ~PushSafepointRegistersScope() {
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ codegen_->masm_->PopSafepointRegisters();
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
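
Unlike the ia32 version, the x64 helpers thread an explicit argc through CallCodeGeneric and RegisterLazyDeoptimization: when a deferred call pushes stack arguments while safepoint registers are saved, the register safepoint needs the argument count to locate the saved registers. The instance-of hunk above is the motivating case, with argc == 2 matching the two pushes:

__ push(ToRegister(instr->InputAt(0)));  // argument 1: the object under test
__ Push(instr->function());              // argument 2: the known global function
CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
                RECORD_SAFEPOINT_WITH_REGISTERS, 2);  // argc == number of pushes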
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 4cf59c4e8..9557940a9 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -976,6 +976,10 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ static int SafepointRegisterStackIndex(Register reg) {
+ return SafepointRegisterStackIndex(reg.code());
+ }
+
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.