path: root/deps/v8/src/ia32
author    Ryan Dahl <ry@tinyclouds.org>  2009-10-28 19:25:22 +0100
committer Ryan Dahl <ry@tinyclouds.org>  2009-10-28 19:25:22 +0100
commit    50f45d14b475a42d304e7d9872f8d91ff3a013c2 (patch)
tree      2e799be8cbddd016ef8432b4ed755247a466c0ba /deps/v8/src/ia32
parent    35589528992e8bf5ca70271beaef05a6d82f9dcf (diff)
Upgrade v8 to 1.3.17
Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc               |  18
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h                |  10
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc                |  86
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc                 | 303
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h                  |  27
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc                  | 272
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.cc            | 584
-rw-r--r--  deps/v8/src/ia32/frames-ia32.cc                  |  13
-rw-r--r--  deps/v8/src/ia32/frames-ia32.h                   |   2
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc                      | 367
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc         | 130
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h          |  25
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc  |  20
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc              |  39
14 files changed, 1509 insertions(+), 387 deletions(-)
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index bc28710f9..698377a0c 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -1850,6 +1850,22 @@ void Assembler::fucompp() {
}
+void Assembler::fucomi(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDB);
+ EMIT(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDF);
+ EMIT(0xE9);
+}
+
+
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2109,7 +2125,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// so kMaximalBufferSize must not be set too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > Heap::OldGenerationSize())) {
+ (desc.buffer_size > Heap::MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
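The two new emitters map directly onto the IA-32 encodings: FUCOMI st, st(i) is DB E8+i, and FUCOMIP st, st(i) is DF E8+i, so the hard-coded 0xE9 in fucomip() selects st(1). A minimal standalone check of that byte arithmetic (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // fucomi(i) emits 0xDB, 0xE8 + i  -> FUCOMI st, st(i).
  // fucomip() emits 0xDF, 0xE9      -> FUCOMIP st, st(1), i fixed at 1.
  const uint8_t fucomip_bytes[] = {0xDF, 0xE9};
  assert(fucomip_bytes[0] == 0xDF);
  assert(fucomip_bytes[1] == 0xE8 + 1);
  return 0;
}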
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 4719f2dcf..a431d04c6 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -439,6 +439,14 @@ class Assembler : public Malloced {
inline static Address target_address_at(Address pc);
inline static void set_target_address_at(Address pc, Address target);
+ // This sets the branch destination (which is in the instruction on x86).
+ inline static void set_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ static const int kCallTargetSize = kPointerSize;
+
// Distance between the address of the code target in the call instruction
// and the return address
static const int kCallTargetAddressOffset = kPointerSize;
@@ -702,6 +710,8 @@ class Assembler : public Malloced {
void ftst();
void fucomp(int i);
void fucompp();
+ void fucomi(int i);
+ void fucomip();
void fcompp();
void fnstsw_ax();
void fwait();
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index ad44026ca..963b0e3ac 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -462,6 +462,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalIndex));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -520,48 +522,48 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(Operand(ebp, 2 * kPointerSize)); // push arguments
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
- if (FLAG_check_stack) {
- // We need to catch preemptions right here, otherwise an unlucky preemption
- // could show up as a failed apply.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- Label retry_preemption;
- Label no_preemption;
- __ bind(&retry_preemption);
- __ mov(edi, Operand::StaticVariable(stack_guard_limit));
- __ cmp(esp, Operand(edi));
- __ j(above, &no_preemption, taken);
-
- // Preemption!
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(0)));
+ // Check the stack for overflow or a break request.
+ // We need to catch preemptions right here, otherwise an unlucky preemption
+ // could show up as a failed apply.
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ Label retry_preemption;
+ Label no_preemption;
+ __ bind(&retry_preemption);
+ __ mov(edi, Operand::StaticVariable(stack_guard_limit));
+ __ cmp(esp, Operand(edi));
+ __ j(above, &no_preemption, taken);
+
+ // Preemption!
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack.
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(0)));
- // Do call to runtime routine.
- __ CallRuntime(Runtime::kStackGuard, 1);
- __ pop(eax);
- __ jmp(&retry_preemption);
-
- __ bind(&no_preemption);
-
- Label okay;
- // Make ecx the space we have left.
- __ mov(ecx, Operand(esp));
- __ sub(ecx, Operand(edi));
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, Operand(eax));
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- __ cmp(ecx, Operand(edx));
- __ j(greater, &okay, taken);
-
- // Too bad: Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- }
+ // Do call to runtime routine.
+ __ CallRuntime(Runtime::kStackGuard, 1);
+ __ pop(eax);
+ __ jmp(&retry_preemption);
+
+ __ bind(&no_preemption);
+
+ Label okay;
+ // Make ecx the space we have left.
+ __ mov(ecx, Operand(esp));
+ __ sub(ecx, Operand(edi));
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, Operand(eax));
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ __ cmp(ecx, Operand(edx));
+ __ j(greater, &okay, taken);
+
+ // Too bad: Out of stack space.
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
// Push current index and limit.
const int kLimitOffset =
@@ -606,6 +608,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
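Both receiver-patching paths in this file gain the same pair of loads: the receiver must be the global receiver of the global context, reached through the current context's global object. A hedged sketch of the pointer chase, with hypothetical struct layouts standing in for the field offsets used above:

struct GlobalObject;

struct Context {
  GlobalObject* global;            // slot at Context::GLOBAL_INDEX
};

struct GlobalObject {
  Context* global_context;         // GlobalObject::kGlobalContextOffset
  void* global_receiver;           // GlobalObject::kGlobalReceiverOffset
};

void* PatchedReceiver(Context* esi) {
  GlobalObject* ebx = esi->global;        // mov ebx, [esi + kGlobalIndex]
  ebx = ebx->global_context->global;      // the two newly added loads
  return ebx->global_receiver;            // load kGlobalReceiverOffset
}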
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 938c8e6fc..8e8ff2e0b 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -697,18 +697,6 @@ void CodeGenerator::UnloadReference(Reference* ref) {
}
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
@@ -773,13 +761,6 @@ class FloatingPointHelper : public AllStatic {
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
- // Allocate a heap number in new space with undefined value.
- // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
- static void AllocateHeapNumber(MacroAssembler* masm,
- Label* need_gc,
- Register scratch1,
- Register scratch2,
- Register result);
};
@@ -2222,14 +2203,12 @@ void DeferredStackCheck::Generate() {
void CodeGenerator::CheckStack() {
- if (FLAG_check_stack) {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
- deferred->Branch(below);
- deferred->BindExit();
- }
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+ deferred->Branch(below);
+ deferred->BindExit();
}
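With FLAG_check_stack gone, CheckStack() now always emits the compare against the stack guard limit; only the slow path lives in deferred code. A hedged model in plain C++ (the names are stand-ins, not V8 API):

#include <cstdint>

void StackGuardSlowPath() { /* stands in for Runtime::kStackGuard */ }

inline void CheckStack(uintptr_t esp, uintptr_t stack_guard_limit) {
  if (esp < stack_guard_limit) {  // cmp esp, [limit]; deferred->Branch(below)
    StackGuardSlowPath();         // deferred code calls the runtime
  }                               // deferred->BindExit(): fall through
}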
@@ -2282,8 +2261,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// allow us to push the arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi); // The context is the first argument.
frame_->EmitPush(Immediate(pairs));
- frame_->EmitPush(esi); // The context is the second argument.
frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
@@ -3583,11 +3562,9 @@ void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
ASSERT(boilerplate->IsBoilerplate());
frame_->SyncRange(0, frame_->element_count() - 1);
- // Push the boilerplate on the stack.
- frame_->EmitPush(Immediate(boilerplate));
-
// Create a new closure.
frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(boilerplate));
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result);
}
@@ -5175,11 +5152,10 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
Result scratch1 = allocator()->Allocate();
Result scratch2 = allocator()->Allocate();
Result heap_number = allocator()->Allocate();
- FloatingPointHelper::AllocateHeapNumber(masm_,
- call_runtime.entry_label(),
- scratch1.reg(),
- scratch2.reg(),
- heap_number.reg());
+ __ AllocateHeapNumber(heap_number.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ call_runtime.entry_label());
scratch1.Unuse();
scratch2.Unuse();
@@ -6508,11 +6484,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &true_result);
__ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fucompp();
- __ push(eax);
- __ fnstsw_ax();
- __ sahf();
- __ pop(eax);
+ __ FCmp();
__ j(zero, &false_result);
// Fall through to |true_result|.
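FCmp() is a new MacroAssembler helper; its definition lands in macro-assembler-ia32.cc, which the diffstat lists but this excerpt does not show. A plausible shape, assuming it builds on the fucomip added above and on ffree()/fincstp() emitters (the disassembler below learns to decode both):

void MacroAssembler::FCmp() {
  fucomip();   // compare st(0) with st(1) straight into EFLAGS, pop once
  ffree(0);    // mark the remaining stack register empty...
  fincstp();   // ...and pop it without clobbering EFLAGS
}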
@@ -6531,47 +6503,52 @@ void GenericBinaryOpStub::GenerateCall(
Register left,
Register right) {
if (!ArgsInRegistersSupported()) {
- // Only pass arguments in registers if there is no smi code in the stub.
+ // Pass arguments on the stack.
__ push(left);
__ push(right);
} else {
// The calling convention with registers is left in edx and right in eax.
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- if (!(left.is(edx) && right.is(eax))) {
- if (left.is(eax) && right.is(edx)) {
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
if (IsOperationCommutative()) {
SetArgsReversed();
} else {
__ xchg(left, right);
}
- } else if (left.is(edx)) {
- __ mov(eax, right);
- } else if (left.is(eax)) {
+ } else if (left.is(left_arg)) {
+ __ mov(right_arg, right);
+ } else if (left.is(right_arg)) {
if (IsOperationCommutative()) {
- __ mov(edx, right);
+ __ mov(left_arg, right);
SetArgsReversed();
} else {
- __ mov(edx, left);
- __ mov(eax, right);
+ // Order of moves is important to avoid destroying the left argument.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
}
- } else if (right.is(edx)) {
+ } else if (right.is(left_arg)) {
if (IsOperationCommutative()) {
- __ mov(eax, left);
+ __ mov(right_arg, left);
SetArgsReversed();
} else {
- __ mov(eax, right);
- __ mov(edx, left);
+ // Order of moves is important to avoid destroying the right argument.
+ __ mov(right_arg, right);
+ __ mov(left_arg, left);
}
- } else if (right.is(eax)) {
- __ mov(edx, left);
+ } else if (right.is(right_arg)) {
+ __ mov(left_arg, left);
} else {
- __ mov(edx, left);
- __ mov(eax, right);
+ // Order of moves is not important.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
}
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
@@ -6584,23 +6561,26 @@ void GenericBinaryOpStub::GenerateCall(
Register left,
Smi* right) {
if (!ArgsInRegistersSupported()) {
- // Only pass arguments in registers if there is no smi code in the stub.
+ // Pass arguments on the stack.
__ push(left);
__ push(Immediate(right));
} else {
- // Adapt arguments to the calling convention left in edx and right in eax.
- if (left.is(edx)) {
- __ mov(eax, Immediate(right));
- } else if (left.is(eax) && IsOperationCommutative()) {
- __ mov(edx, Immediate(right));
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (left.is(left_arg)) {
+ __ mov(right_arg, Immediate(right));
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ mov(left_arg, Immediate(right));
SetArgsReversed();
} else {
- __ mov(edx, left);
- __ mov(eax, Immediate(right));
+ __ mov(left_arg, left);
+ __ mov(right_arg, Immediate(right));
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
@@ -6612,23 +6592,26 @@ void GenericBinaryOpStub::GenerateCall(
MacroAssembler* masm,
Smi* left,
Register right) {
- if (flags_ != NO_SMI_CODE_IN_STUB) {
- // Only pass arguments in registers if there is no smi code in the stub.
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
__ push(Immediate(left));
__ push(right);
} else {
- // Adapt arguments to the calling convention left in edx and right in eax.
- bool is_commutative = (op_ == (Token::ADD) || (op_ == Token::MUL));
- if (right.is(eax)) {
- __ mov(edx, Immediate(left));
- } else if (right.is(edx) && is_commutative) {
- __ mov(eax, Immediate(left));
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (right.is(right_arg)) {
+ __ mov(left_arg, Immediate(left));
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ mov(right_arg, Immediate(left));
+ SetArgsReversed();
} else {
- __ mov(edx, Immediate(left));
- __ mov(eax, right);
+ __ mov(left_arg, Immediate(left));
+ __ mov(right_arg, right);
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
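All three GenerateCall() overloads now normalize to the same convention, left in edx and right in eax, and prefer SetArgsReversed() over an xchg when the operation is commutative. A standalone sketch of the register/register case (strings stand in for registers; the real code emits mov/xchg through the macro assembler):

#include <cstdio>
#include <string>

void ShuffleArgs(const std::string& left, const std::string& right,
                 bool commutative) {
  const std::string left_arg = "edx", right_arg = "eax";
  if (left == left_arg && right == right_arg) return;  // already in place
  if (left == right_arg && right == left_arg) {
    if (commutative) std::puts("SetArgsReversed");     // swap avoided
    else std::puts("xchg edx, eax");
  } else if (left == left_arg) {
    std::printf("mov eax, %s\n", right.c_str());
  } else if (left == right_arg) {
    if (commutative) {
      std::printf("mov edx, %s\n", right.c_str());
      std::puts("SetArgsReversed");
    } else {                                  // move left out of eax first
      std::printf("mov edx, %s\n", left.c_str());
      std::printf("mov eax, %s\n", right.c_str());
    }
  } else if (right == left_arg) {
    if (commutative) {
      std::printf("mov eax, %s\n", left.c_str());
      std::puts("SetArgsReversed");
    } else {                                  // move right out of edx first
      std::printf("mov eax, %s\n", right.c_str());
      std::printf("mov edx, %s\n", left.c_str());
    }
  } else if (right == right_arg) {
    std::printf("mov edx, %s\n", left.c_str());
  } else {                                    // order does not matter
    std::printf("mov edx, %s\n", left.c_str());
    std::printf("mov eax, %s\n", right.c_str());
  }
}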
@@ -6836,11 +6819,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case NO_OVERWRITE: {
// Allocate a heap number for the result. Keep eax and edx intact
// for the possible runtime call.
- FloatingPointHelper::AllocateHeapNumber(masm,
- &call_runtime,
- ecx,
- no_reg,
- ebx);
+ __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
// Now eax can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
__ mov(eax, ebx);
@@ -6868,11 +6847,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep eax and edx intact
// for the possible runtime call.
- FloatingPointHelper::AllocateHeapNumber(masm,
- &call_runtime,
- ecx,
- no_reg,
- ebx);
+ __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
// Now eax can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
__ mov(eax, ebx);
@@ -6924,18 +6899,14 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Check if right operand is int32.
__ fist_s(Operand(esp, 0 * kPointerSize));
__ fild_s(Operand(esp, 0 * kPointerSize));
- __ fucompp();
- __ fnstsw_ax();
- __ sahf();
+ __ FCmp();
__ j(not_zero, &operand_conversion_failure);
__ j(parity_even, &operand_conversion_failure);
// Check if left operand is int32.
__ fist_s(Operand(esp, 1 * kPointerSize));
__ fild_s(Operand(esp, 1 * kPointerSize));
- __ fucompp();
- __ fnstsw_ax();
- __ sahf();
+ __ FCmp();
__ j(not_zero, &operand_conversion_failure);
__ j(parity_even, &operand_conversion_failure);
}
@@ -6964,7 +6935,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Tag smi result and return.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(eax, eax, times_1, kSmiTag));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
if (op_ != Token::SHR) {
@@ -6982,8 +6953,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &skip_allocation, not_taken);
// Fall through!
case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
- ecx, edx, eax);
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
@@ -6992,7 +6962,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
__ fild_s(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
// Clear the FPU exception flag and reset the stack before calling
@@ -7024,7 +6994,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// If all else fails, use the runtime system to get the correct
// result. If the arguments were passed in registers, place them on the
- // stack in the correct order.
+ // stack in the correct order below the return address.
__ bind(&call_runtime);
if (HasArgumentsInRegisters()) {
__ pop(ecx);
@@ -7133,25 +7103,6 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
}
-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
- Label* need_gc,
- Register scratch1,
- Register scratch2,
- Register result) {
- // Allocate heap number in new space.
- __ AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
-
- // Set the map.
- __ mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
-}
-
-
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -7308,7 +7259,7 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
} else {
__ mov(edx, Operand(eax));
// edx: operand
- FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
// eax: allocated 'empty' number
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
@@ -7458,20 +7409,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
// not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
// Read top bits of double representation (second word of value).
- __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ not_(eax);
- __ test(eax, Immediate(0x7ff00000));
- __ j(not_zero, &return_equal);
- __ not_(eax);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ shl(eax, 12);
- // Or with all low-bits of mantissa.
- __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
- // Return zero equal if all bits in mantissa is zero (it's an Infinity)
- // and non-zero if not (it's a NaN).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ __ setcc(above_equal, eax);
__ ret(0);
__ bind(&not_identical);
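The new identical-operand NaN test works on the high word of the double alone: doubling the word shifts out the sign bit, and an unsigned compare against kQuietNaNHighBitsMask << 1 succeeds exactly when exponent bits 52..62 and quiet bit 51 are all set. A hedged restatement in plain C++, assuming kQuietNaNHighBitsMask == 0xfff << (51 - 32) as in V8's globals:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

static bool IsQuietNaN(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));         // IA-32 double layout
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // exponent word
  const uint32_t kQuietNaNHighBitsMask = 0xfffu << (51 - 32);
  // `add edx, edx` drops the sign bit; the unsigned compare then checks
  // that bits 51..62 are all set.
  return (hi << 1) >= (kQuietNaNHighBitsMask << 1);
}

int main() {
  assert(IsQuietNaN(std::numeric_limits<double>::quiet_NaN()));
  assert(!IsQuietNaN(std::numeric_limits<double>::infinity()));  // equal
  assert(!IsQuietNaN(1.0));
  return 0;
}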
@@ -7757,11 +7707,84 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
}
+// If true, a Handle<T> passed by value is passed and returned using
+// the location_ field directly. If false, it is passed and returned
+// as a pointer to a handle.
+#ifdef USING_MAC_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label get_result;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
+ ASSERT_EQ(kArgc, 4);
+ if (kPassHandlesDirectly) {
+ // When handles are passed directly we don't have to allocate extra
+ // space for, and pass, an out parameter.
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
+ } else {
+ // The function expects three arguments to be passed but we allocate
+ // four to get space for the output cell. The argument slots are filled
+ // as follows:
+ //
+ // 3: output cell
+ // 2: arguments pointer
+ // 1: name
+ // 0: pointer to the output cell
+ //
+ // Note that this is one more "argument" than the function expects
+ // so the out cell will have to be popped explicitly after returning
+ // from the function.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
+ __ mov(ebx, esp);
+ __ add(Operand(ebx), Immediate(3 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
+ }
+ // Call the api function!
+ __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(Factory::the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception, not_taken);
+ if (!kPassHandlesDirectly) {
+ // The returned value is a pointer to the handle holding the result.
+ // Dereference this to get to the location.
+ __ mov(eax, Operand(eax, 0));
+ }
+ // Check if the result handle holds 0
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &get_result, taken);
+ // It was zero; the result is undefined.
+ __ mov(eax, Factory::undefined_value());
+ __ jmp(&prologue);
+ // It was non-zero. Dereference to get the result value.
+ __ bind(&get_result);
+ __ mov(eax, Operand(eax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
+ 0,
+ 1);
+}
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
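When handles are not passed directly, the stub builds a four-slot argument area whose first slot points at the last. A hedged illustration of that layout with ordinary memory (descriptive names only; the real code works on esp):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t name = 0x1, arguments_ptr = 0x2;   // stand-ins for ebx and eax
  uintptr_t slots[4];
  slots[1] = name;                             // 1: name
  slots[2] = arguments_ptr;                    // 2: arguments pointer
  slots[3] = 0;                                // 3: output cell
  slots[0] = reinterpret_cast<uintptr_t>(&slots[3]);  // 0: out-cell pointer
  // The callee sees three arguments; slot 3 is popped explicitly later.
  assert(*reinterpret_cast<uintptr_t*>(slots[0]) == 0);
  return 0;
}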
@@ -7811,7 +7834,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned, not_taken);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(mode);
__ ret(0);
// Handling of failure.
@@ -7910,12 +7933,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
- StackFrame::Type frame_type = is_debug_break ?
- StackFrame::EXIT_DEBUG :
- StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break
+ ? ExitFrame::MODE_DEBUG
+ : ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(mode);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@@ -7933,7 +7956,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -7942,7 +7965,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -7953,7 +7976,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index ec4a8be16..3669e9d10 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -396,7 +396,7 @@ class CodeGenerator: public AstVisitor {
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
- Operand ContextOperand(Register context, int index) const {
+ static Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
@@ -407,7 +407,7 @@ class CodeGenerator: public AstVisitor {
JumpTarget* slow);
// Expressions
- Operand GlobalObject() const {
+ static Operand GlobalObject() {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
@@ -511,10 +511,11 @@ class CodeGenerator: public AstVisitor {
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
+ static Handle<Code> ComputeLazyCompile(int argc);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
@@ -616,6 +617,8 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class Result;
+ friend class FastCodeGenerator;
+ friend class CodeGenSelector;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -623,7 +626,19 @@ class CodeGenerator: public AstVisitor {
};
-// Flag that indicates whether how to generate code for the stub.
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
@@ -632,10 +647,10 @@ enum GenericBinaryFlags {
class GenericBinaryOpStub: public CodeStub {
public:
- GenericBinaryOpStub(Token::Value operation,
+ GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
- : op_(operation),
+ : op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index adedf3489..3e3ca73e6 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -204,7 +204,7 @@ void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
InstructionDesc* id = &instructions_[bm[i].b];
id->mnem = bm[i].mnem;
id->op_order_ = bm[i].op_order_;
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->type = type;
}
}
@@ -216,7 +216,7 @@ void InstructionTable::SetTableRange(InstructionType type,
const char* mnem) {
for (byte b = start; b <= end; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->mnem = mnem;
id->type = type;
}
@@ -226,7 +226,7 @@ void InstructionTable::SetTableRange(InstructionType type,
void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->mnem = jump_conditional_mnem[b & 0x0F];
id->type = JUMP_CONDITIONAL_SHORT_INSTR;
}
@@ -321,6 +321,8 @@ class DisassemblerIA32 {
int SetCC(byte* data);
int CMov(byte* data);
int FPUInstruction(byte* data);
+ int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+ int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
void AppendToBuffer(const char* format, ...);
@@ -493,7 +495,7 @@ int DisassemblerIA32::PrintImmediateOp(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::F7Instruction(byte* data) {
- assert(*data == 0xF7);
+ ASSERT_EQ(0xF7, *data);
byte modrm = *(data+1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
@@ -526,7 +528,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
byte op = *data;
- assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+ ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
byte modrm = *(data+1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
@@ -560,7 +562,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
default: UnimplementedInstruction();
}
}
- assert(mnem != NULL);
+ ASSERT_NE(NULL, mnem);
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
if (imm8 > 0) {
AppendToBuffer("%d", imm8);
@@ -576,7 +578,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpShort(byte* data) {
- assert(*data == 0xEB);
+ ASSERT_EQ(0xEB, *data);
byte b = *(data+1);
byte* dest = data + static_cast<int8_t>(b) + 2;
AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -586,7 +588,7 @@ int DisassemblerIA32::JumpShort(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data+1) & 0x0F;
byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
const char* mnem = jump_conditional_mnem[cond];
@@ -614,18 +616,18 @@ int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::SetCC(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data+1) & 0x0F;
const char* mnem = set_conditional_mnem[cond];
AppendToBuffer("%s ", mnem);
PrintRightByteOperand(data+2);
- return 3; // includes 0x0F
+ return 3; // Includes 0x0F.
}
// Returns number of bytes used, including *data.
int DisassemblerIA32::CMov(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
const char* mnem = conditional_move_mnem[cond];
int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
@@ -635,107 +637,165 @@ int DisassemblerIA32::CMov(byte* data) {
// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
- byte b1 = *data;
- byte b2 = *(data + 1);
- if (b1 == 0xD9) {
- const char* mnem = NULL;
- switch (b2) {
- case 0xE8: mnem = "fld1"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE0: mnem = "fchs"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xE4: mnem = "ftst"; break;
- }
- if (mnem != NULL) {
- AppendToBuffer("%s", mnem);
- return 2;
- } else if ((b2 & 0xF8) == 0xC8) {
- AppendToBuffer("fxch st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fld_s"; break;
- case ebx: mnem = "fstp_s"; break;
+ byte escape_opcode = *data;
+ ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+ byte modrm_byte = *(data+1);
+
+ if (modrm_byte >= 0xC0) {
+ return RegisterFPUInstruction(escape_opcode, modrm_byte);
+ } else {
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+ }
+}
+
+int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
+ int modrm_byte,
+ byte* modrm_start) {
+ const char* mnem = "?";
+ int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
+ switch (escape_opcode) {
+ case 0xD9: switch (regop) {
+ case 0: mnem = "fld_s"; break;
+ case 3: mnem = "fstp_s"; break;
+ case 7: mnem = "fstcw"; break;
default: UnimplementedInstruction();
}
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDD) {
- if ((b2 & 0xF8) == 0xC0) {
- AppendToBuffer("ffree st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fld_d"; break;
- case ebx: mnem = "fstp_d"; break;
+ break;
+
+ case 0xDB: switch (regop) {
+ case 0: mnem = "fild_s"; break;
+ case 1: mnem = "fisttp_s"; break;
+ case 2: mnem = "fist_s"; break;
+ case 3: mnem = "fistp_s"; break;
default: UnimplementedInstruction();
}
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDB) {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fild_s"; break;
- case edx: mnem = "fist_s"; break;
- case ebx: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDF) {
- if (b2 == 0xE0) {
- AppendToBuffer("fnstsw_ax");
- return 2;
- }
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case ebp: mnem = "fild_d"; break;
- case edi: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDC || b1 == 0xDE) {
- bool is_pop = (b1 == 0xDE);
- if (is_pop && b2 == 0xD9) {
- AppendToBuffer("fcompp");
- return 2;
- }
- const char* mnem = "FP0xDC";
- switch (b2 & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
- return 2;
- } else if (b1 == 0xDA && b2 == 0xE9) {
- const char* mnem = "fucompp";
+ break;
+
+ case 0xDD: switch (regop) {
+ case 0: mnem = "fld_d"; break;
+ case 3: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDF: switch (regop) {
+ case 5: mnem = "fild_d"; break;
+ case 7: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(modrm_start);
+ return count + 1;
+}
+
+int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
+ byte modrm_byte) {
+ bool has_register = false; // Is the FPU register encoded in modrm_byte?
+ const char* mnem = "?";
+
+ switch (escape_opcode) {
+ case 0xD8:
+ UnimplementedInstruction();
+ break;
+
+ case 0xD9:
+ switch (modrm_byte & 0xF8) {
+ case 0xC8:
+ mnem = "fxch";
+ has_register = true;
+ break;
+ default:
+ switch (modrm_byte) {
+ case 0xE0: mnem = "fchs"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE4: mnem = "ftst"; break;
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xFE: mnem = "fsin"; break;
+ case 0xFF: mnem = "fcos"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDA:
+ if (modrm_byte == 0xE9) {
+ mnem = "fucompp";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB:
+ if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomi";
+ has_register = true;
+ } else if (modrm_byte == 0xE2) {
+ mnem = "fclex";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDC:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "ffree"; break;
+ case 0xD8: mnem = "fstp"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDE:
+ if (modrm_byte == 0xD9) {
+ mnem = "fcompp";
+ } else {
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "faddp"; break;
+ case 0xE8: mnem = "fsubp"; break;
+ case 0xC8: mnem = "fmulp"; break;
+ case 0xF8: mnem = "fdivp"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDF:
+ if (modrm_byte == 0xE0) {
+ mnem = "fnstsw_ax";
+ } else if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomip";
+ has_register = true;
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+
+ if (has_register) {
+ AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+ } else {
AppendToBuffer("%s", mnem);
- return 2;
}
- AppendToBuffer("Unknown FP instruction");
return 2;
}
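The refactoring keys off two ModR/M facts: a second byte of 0xC0 or above has mod == 3, meaning the operand is an FPU stack register encoded in the low three bits, while memory forms select the operation via the reg/op field in bits 3..5. A quick standalone check (plain C++):

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t modrm = 0xE9;           // e.g. the second byte of fucompp
  const int mod = modrm >> 6;           // 3 selects the register form
  const int regop = (modrm >> 3) & 0x7; // operation within the escape group
  const int rm = modrm & 0x7;           // st(i) in the register form
  assert(modrm >= 0xC0 && mod == 3 && regop == 5 && rm == 1);
  return 0;
}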
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
index ee1b92d09..0d661c3b6 100644
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ b/deps/v8/src/ia32/fast-codegen-ia32.cc
@@ -29,6 +29,7 @@
#include "codegen-inl.h"
#include "fast-codegen.h"
+#include "parser.h"
namespace v8 {
namespace internal {
@@ -75,6 +76,14 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
__ bind(&ok);
}
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
{ Comment cmnt(masm_, "[ Body");
VisitStatements(fun->body());
}
@@ -84,6 +93,11 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
// body.
__ mov(eax, Factory::undefined_value());
SetReturnPosition(fun);
+
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
@@ -94,19 +108,79 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
}
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
+void FastCodeGenerator::Move(Location destination, Slot* source) {
+ switch (destination.type()) {
+ case Location::NOWHERE:
+ break;
+ case Location::TEMP:
+ __ push(Operand(ebp, SlotOffset(source)));
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Location destination, Literal* expr) {
+ switch (destination.type()) {
+ case Location::NOWHERE:
+ break;
+ case Location::TEMP:
+ __ push(Immediate(expr->handle()));
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* destination, Location source) {
+ switch (source.type()) {
+ case Location::NOWHERE:
+ UNREACHABLE();
+ case Location::TEMP:
+ __ pop(Operand(ebp, SlotOffset(destination)));
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DropAndMove(Location destination, Register source) {
+ switch (destination.type()) {
+ case Location::NOWHERE:
+ __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ case Location::TEMP:
+ __ mov(Operand(esp, 0), source);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ push(esi); // The context is the first argument.
+ __ push(Immediate(pairs));
+ __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
}
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
- Visit(stmt->expression());
- __ pop(eax);
+ Expression* expr = stmt->expression();
+ // Complete the statement based on the type of the subexpression.
+ if (expr->AsLiteral() != NULL) {
+ __ mov(eax, expr->AsLiteral()->handle());
+ } else {
+ Visit(expr);
+ Move(eax, expr->location());
+ }
+
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
__ RecordJSReturn();
+
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ mov(esp, ebp);
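The new Move/DropAndMove helpers implement a two-valued location protocol: an expression result either goes NOWHERE (discarded) or to TEMP (top of stack), so each move compiles to a push, a pop, a stack-top overwrite, or nothing at all. A hedged model over a simulated stack (stand-in types, not V8's Location):

#include <cstdint>
#include <vector>

enum class Loc { NOWHERE, TEMP };

void Move(Loc destination, intptr_t value, std::vector<intptr_t>* stack) {
  switch (destination) {
    case Loc::NOWHERE: break;                        // value dropped
    case Loc::TEMP: stack->push_back(value); break;  // __ push(...)
  }
}

int main() {
  std::vector<intptr_t> stack;
  Move(Loc::TEMP, 42, &stack);    // result needed: lands on the stack
  Move(Loc::NOWHERE, 7, &stack);  // result unused: nothing emitted
  return stack.size() == 1 ? 0 : 1;
}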
@@ -115,29 +189,240 @@ void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
}
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ if (HasStackOverflow()) return;
+
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Create a new closure.
+ __ push(esi);
+ __ push(Immediate(boilerplate));
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ Move(expr->location(), eax);
+}
+
+
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
- ASSERT(rewrite != NULL);
+ if (rewrite == NULL) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in ecx and the global
+ // object on the stack.
+ __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, expr->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ // By emitting a nop we make sure that we do not have a test eax
+ // instruction after the call, as it is treated specially by the
+ // LoadIC code. Remember that the assembler may choose to do peephole
+ // optimization (e.g., push/pop elimination).
+ __ nop();
- Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- { Comment cmnt(masm_, "[ Slot");
- if (expr->location().is_temporary()) {
- __ push(Operand(ebp, SlotOffset(slot)));
- } else {
- ASSERT(expr->location().is_nowhere());
+ DropAndMove(expr->location(), eax);
+ } else {
+ Comment cmnt(masm_, "Stack slot");
+ Move(expr->location(), rewrite->AsSlot());
+ }
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label exists;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ebx = literals array.
+ // eax = boilerplate
+
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(eax, FieldOperand(ebx, literal_offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &exists);
+ // Create boilerplate if it does not exist.
+ // Literal array (0).
+ __ push(ebx);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ // Constant properties (2).
+ __ push(Immediate(expr->constant_properties()));
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&exists);
+ // eax contains boilerplate.
+ // Clone boilerplate.
+ __ push(eax);
+ if (expr->depth() == 1) {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ } else {
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ }
+
+ // If result_saved == true: the result is saved on top of the stack.
+ // If result_saved == false: the result is not on the stack; it is in eax.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ Move(eax, value->location());
+ __ mov(ecx, Immediate(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ break;
+ }
+ // fall through
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(eax);
+ Visit(key);
+ ASSERT(key->location().is_temporary());
+ Visit(value);
+ ASSERT(value->location().is_temporary());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::GETTER:
+ __ push(eax);
+ Visit(key);
+ ASSERT(key->location().is_temporary());
+ __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ Visit(value);
+ ASSERT(value->location().is_temporary());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ default: UNREACHABLE();
}
}
+ switch (expr->location().type()) {
+ case Location::NOWHERE:
+ if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ case Location::TEMP:
+ if (!result_saved) __ push(eax);
+ break;
+ }
}
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
- Comment cmnt(masm_, "[ Literal");
- if (expr->location().is_temporary()) {
- __ push(Immediate(expr->handle()));
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExp Literal");
+ Label done;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ebx = literals array.
+ // eax = regexp literal.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(eax, FieldOperand(ebx, literal_offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &done);
+ // Create regexp literal using runtime function
+ // Result will be in eax.
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->pattern()));
+ __ push(Immediate(expr->flags()));
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ // Label done:
+ __ bind(&done);
+ Move(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ Label make_clone;
+
+ // Fetch the function's literals array.
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ // Check if the literal's boilerplate has been instantiated.
+ int offset =
+ FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+ __ mov(eax, FieldOperand(ebx, offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &make_clone);
+
+ // Instantiate the boilerplate.
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->literals()));
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+ __ bind(&make_clone);
+ // Clone the boilerplate.
+ __ push(eax);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(eax);
+ result_saved = true;
+ }
+ Visit(subexpr);
+ ASSERT(subexpr->location().is_temporary());
+
+ // Store the subexpression value in the array's elements.
+ __ pop(eax); // Subexpression value.
+ __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ mov(FieldOperand(ebx, offset), eax);
+
+ // Update the write barrier for the array store.
+ __ RecordWrite(ebx, offset, eax, ecx);
+ }
+
+ switch (expr->location().type()) {
+ case Location::NOWHERE:
+ if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ case Location::TEMP:
+ if (!result_saved) __ push(eax);
+ break;
}
}
@@ -145,18 +430,265 @@ void FastCodeGenerator::VisitLiteral(Literal* expr) {
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Visit(expr->value());
+ // Left-hand side can only be a global or a (parameter or local) slot.
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && var->slot() != NULL);
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->slot() != NULL);
+
+ Expression* rhs = expr->value();
+ if (var->is_global()) {
+ // Assignment to a global variable, use inline caching. Right-hand-side
+ // value is passed in eax, variable name in ecx, and the global object
+ // on the stack.
+
+ // Code for the right-hand-side expression depends on its type.
+ if (rhs->AsLiteral() != NULL) {
+ __ mov(eax, rhs->AsLiteral()->handle());
+ } else {
+ ASSERT(rhs->location().is_temporary());
+ Visit(rhs);
+ __ pop(eax);
+ }
+ __ mov(ecx, var->name());
+ __ push(CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Overwrite the global object on the stack with the result if needed.
+ DropAndMove(expr->location(), eax);
+ } else {
+ // Local or parameter assignment.
+
+ // Code for the right-hand side expression depends on its type.
+ if (rhs->AsLiteral() != NULL) {
+ // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+ // discarded result. Always perform the assignment.
+ __ mov(eax, rhs->AsLiteral()->handle());
+ __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+ Move(expr->location(), eax);
+ } else {
+ ASSERT(rhs->location().is_temporary());
+ Visit(rhs);
+ switch (expr->location().type()) {
+ case Location::NOWHERE:
+ // Case 'var = temp'. Discard right-hand-side temporary.
+ Move(var->slot(), rhs->location());
+ break;
+ case Location::TEMP:
+ // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
+ // temporary on the stack.
+ __ mov(eax, Operand(esp, 0));
+ __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+ break;
+ }
+ }
+ }
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in ecx and the receiver on the stack.
+ __ mov(ecx, Immediate(key->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a test eax
+ // instruction after the call, as it is treated specially by the LoadIC code.
+ __ nop();
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call, as it is treated specially by the LoadIC code.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+ switch (expr->location().type()) {
+ case Location::TEMP:
+ __ mov(Operand(esp, 0), eax);
+ break;
+ case Location::NOWHERE:
+ __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ Expression* fun = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+ ASSERT(var != NULL && !var->is_this() && var->is_global());
+ ASSERT(!var->is_possibly_eval());
+
+ __ push(Immediate(var->name()));
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT(args->at(i)->location().is_temporary());
+ }
+ // Record source position for debugger
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->location(), eax);
+}
- if (expr->location().is_temporary()) {
- __ mov(eax, Operand(esp, 0));
- __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+
+void FastCodeGenerator::VisitCallNew(CallNew* node) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(node->expression());
+ ASSERT(node->expression()->location().is_temporary());
+
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = node->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT(args->at(i)->location().is_temporary());
+ // If location is temporary, it is already on the stack,
+ // so nothing to do here.
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(node->position());
+
+ // Load function, arg_count into edi and eax.
+ __ Set(eax, Immediate(arg_count));
+ // Function is in esp[arg_count + 1].
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in eax, or pop it.
+ DropAndMove(node->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+ Runtime::Function* function = expr->function();
+
+ ASSERT(function != NULL);
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT(args->at(i)->location().is_temporary());
+ }
+
+ __ CallRuntime(function, arg_count);
+ Move(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ // Compile a short-circuited boolean or operation in a non-test
+ // context.
+ ASSERT(expr->op() == Token::OR);
+ // Compile (e0 || e1) as if it were
+ // (let (temp = e0) temp ? temp : e1).
+
+ Label eval_right, done;
+ Location destination = expr->location();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+
+ // Use the shared ToBoolean stub to find the boolean value of the
+ // left-hand subexpression. Load the value into eax to perform some
+ // inlined checks assumed by the stub.
+
+ // Compile the left-hand value into eax. Put it on the stack if we may
+ // need it as the value of the whole expression.
+ if (left->AsLiteral() != NULL) {
+ __ mov(eax, left->AsLiteral()->handle());
+ if (destination.is_temporary()) __ push(eax);
+ } else {
+ Visit(left);
+ ASSERT(left->location().is_temporary());
+ switch (destination.type()) {
+ case Location::NOWHERE:
+ // Pop the left-hand value into eax because we will not need it as the
+ // final result.
+ __ pop(eax);
+ break;
+ case Location::TEMP:
+ // Copy the left-hand value into eax because we may need it as the
+ // final result.
+ __ mov(eax, Operand(esp, 0));
+ break;
+ }
+ }
+ // The left-hand value is in eax. It is also on the stack iff the
+ // destination location is temporary.
+
+ // Perform fast checks assumed by the stub.
+ __ cmp(eax, Factory::undefined_value()); // The undefined value is false.
+ __ j(equal, &eval_right);
+ __ cmp(eax, Factory::true_value()); // True is true.
+ __ j(equal, &done);
+ __ cmp(eax, Factory::false_value()); // False is false.
+ __ j(equal, &eval_right);
+ ASSERT(kSmiTag == 0);
+ __ test(eax, Operand(eax)); // The smi zero is false.
+ __ j(zero, &eval_right);
+ __ test(eax, Immediate(kSmiTagMask)); // All other smis are true.
+ __ j(zero, &done);
+
+ // Call the stub for all other cases.
+ __ push(eax);
+ ToBooleanStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax)); // The stub returns nonzero for true.
+ __ j(not_zero, &done);
+
+ __ bind(&eval_right);
+ // Discard the left-hand value if present on the stack.
+ if (destination.is_temporary()) {
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+ // Save or discard the right-hand value as needed.
+ if (right->AsLiteral() != NULL) {
+ Move(destination, right->AsLiteral());
} else {
- ASSERT(expr->location().is_nowhere());
- __ pop(Operand(ebp, SlotOffset(var->slot())));
+ Visit(right);
+ Move(destination, right->location());
}
+
+ __ bind(&done);
}
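As a reading aid, the inlined checks above implement the following truthiness fast path; anything that falls through every test is handed to ToBooleanStub. A hedged sketch in plain C++ (the Value struct is a hypothetical stand-in for a tagged V8 value):

    struct Value {  // hypothetical stand-in for a tagged value
      enum Kind { kUndefined, kTrue, kFalse, kSmi, kOther } kind;
      int smi;
    };
    // Sets *known to false only when the stub must decide; the asm
    // jumps to &done (true) or &eval_right (false) in the fast cases.
    static bool FastTruthiness(const Value& v, bool* known) {
      *known = true;
      switch (v.kind) {
        case Value::kUndefined: return false;       // undefined is false
        case Value::kTrue:      return true;        // true is true
        case Value::kFalse:     return false;       // false is false
        case Value::kSmi:       return v.smi != 0;  // only the zero smi is false
        default: *known = false; return false;      // ToBooleanStub decides
      }
    }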
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index dea439f24..5c900bedd 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -56,19 +56,14 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- // Determine frame type.
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- return EXIT_DEBUG;
- } else {
- return EXIT;
- }
+ return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Exit frames on IA-32 do not contain any pointers. The arguments
- // are traversed as part of the expression stack of the calling
- // frame.
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 3a7c86bf7..c3fe6c748 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -76,7 +76,7 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kDebugMarkOffset = -2 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index af0568033..3aa3c3467 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -301,7 +301,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
- KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
__ bind(&check_string);
// The key is not a smi.
@@ -342,6 +342,166 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label slow, failed_allocation;
+
+ // Load name and receiver.
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Get the map of the receiver.
+ __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
+ __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow, not_taken);
+
+ // Get the instance type from the map of the receiver.
+ __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ // Check that the object is a JS object.
+ __ cmp(edx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow, not_taken);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // eax: index (as a smi)
+ // ecx: JSObject
+ __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(map));
+ __ j(not_equal, &slow, not_taken);
+
+ // Check that the index is in range.
+ __ sar(eax, kSmiTagSize); // Untag the index.
+ __ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // eax: untagged index
+ // ecx: elements array
+ __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ // ecx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ __ movsx_b(eax, Operand(ecx, eax, times_1, 0));
+ break;
+ case kExternalUnsignedByteArray:
+ __ mov_b(eax, Operand(ecx, eax, times_1, 0));
+ break;
+ case kExternalShortArray:
+ __ movsx_w(eax, Operand(ecx, eax, times_2, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ mov_w(eax, Operand(ecx, eax, times_2, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(eax, Operand(ecx, eax, times_4, 0));
+ break;
+ case kExternalFloatArray:
+ __ fld_s(Operand(ecx, eax, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // eax: value
+ // For floating-point array type:
+ // FP(0): value
+
+ if (array_type == kExternalIntArray ||
+ array_type == kExternalUnsignedIntArray) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
+ if (array_type == kExternalIntArray) {
+ // See Smi::IsValid for why this works.
+ __ mov(ebx, eax);
+ __ add(Operand(ebx), Immediate(0x40000000));
+ __ cmp(ebx, 0x80000000);
+ __ j(above_equal, &box_int);
+ } else {
+ ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+ // The test is different for unsigned int values. Since we need
+ // the Smi-encoded result to be treated as unsigned, we can't
+ // handle either of the top two bits being set in the value.
+ __ test(eax, Immediate(0xC0000000));
+ __ j(not_zero, &box_int);
+ }
+
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+
+ __ bind(&box_int);
+
+ // Allocate a HeapNumber for the int and perform int-to-double
+ // conversion.
+ if (array_type == kExternalIntArray) {
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+ } else {
+ ASSERT(array_type == kExternalUnsignedIntArray);
+ // Need to zero-extend the value.
+ // There's no fild variant for unsigned values, so zero-extend
+ // to a 64-bit int manually.
+ __ push(Immediate(0));
+ __ push(eax);
+ __ fild_d(Operand(esp, 0));
+ __ pop(eax);
+ __ pop(eax);
+ }
+ // FP(0): value
+ __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+ // Set the value.
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+ // Set the value.
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else {
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+ }
+
+ // If we fail allocation of the HeapNumber, we still have a value on
+ // top of the FPU stack. Remove it.
+ __ bind(&failed_allocation);
+ __ ffree();
+ __ fincstp();
+ // Fall through to slow case.
+
+ // Slow case: Load name and receiver from stack and jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+}
+
+
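The smi range checks above compress to one add and one unsigned compare. A hedged sketch of the same predicates in C++ (illustrative only; assumes 31-bit smis with kSmiTag == 0):

    #include <stdint.h>
    // Signed case: adding 0x40000000 maps the smi range [-2^30, 2^30 - 1]
    // onto [0, 0x7FFFFFFF], so one unsigned compare catches everything else.
    static bool FitsSignedSmi(int32_t v) {
      return static_cast<uint32_t>(v) + 0x40000000u < 0x80000000u;
    }
    // Unsigned case: the value must survive smi encoding as a non-negative
    // number, so neither of the two top bits may be set.
    static bool FitsUnsignedSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;
    }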
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -395,15 +555,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: index (as a smi)
__ j(below, &fast, taken);
- // Slow case: Push extra copies of the arguments (3).
+ // Slow case: call runtime.
__ bind(&slow);
- __ pop(ecx);
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(eax);
- __ push(ecx);
- // Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+ Generate(masm, ExternalReference(Runtime::kSetProperty));
// Check whether the elements is a pixel array.
// eax: value
@@ -485,6 +639,201 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label slow, check_heap_number;
+
+ // Get the receiver from the stack.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ // Check that the object isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ // Get the map from the receiver.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+ __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+ // Get the key from the stack.
+ __ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ // Check that the key is a smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ // Get the instance type from the map of the receiver.
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ // Check that the object is a JS object.
+ __ cmp(ecx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // eax: value
+ // edx: JSObject
+ // ebx: index (as a smi)
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(map));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ sar(ebx, kSmiTagSize); // Untag the index.
+ __ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // eax: value
+ // ecx: elements array
+ // ebx: untagged index
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_equal, &check_heap_number);
+ // smi case
+ __ mov(edx, eax); // Save the value.
+ __ sar(eax, kSmiTagSize); // Untag the value.
+ __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ // ecx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(Operand(ecx, ebx, times_4, 0), eax);
+ break;
+ case kExternalFloatArray:
+ // Need to perform int-to-float conversion.
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+ __ fstp_s(Operand(ecx, ebx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+
+ __ bind(&check_heap_number);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &slow);
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays effectively undefined. For
+ // reproducible behavior, convert these values to zero.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ mov(edx, eax); // Save the value.
+ __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ // ebx: untagged index
+ // ecx: base pointer of external storage
+ // top of FPU stack: value
+ if (array_type == kExternalFloatArray) {
+ __ fstp_s(Operand(ecx, ebx, times_4, 0));
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+ } else {
+ // Need to perform float-to-int conversion.
+ // Test the top of the FP stack for NaN.
+ Label is_nan;
+ __ fucomi(0);
+ __ j(parity_even, &is_nan);
+
+ if (array_type != kExternalUnsignedIntArray) {
+ __ push(eax); // Make room on stack
+ __ fistp_s(Operand(esp, 0));
+ __ pop(eax);
+ } else {
+ // fistp stores values as signed integers.
+ // To represent the entire range, we need to store as a 64-bit
+ // int and discard the high 32 bits.
+ __ push(eax); // Make room on stack
+ __ push(eax); // Make room on stack
+ __ fistp_d(Operand(esp, 0));
+ __ pop(eax);
+ __ mov(Operand(esp, 0), eax);
+ __ pop(eax);
+ }
+ // eax: untagged integer value
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray: {
+ // We also need to explicitly check for +/-Infinity. These are
+ // converted to MIN_INT, but we must be careful not to confuse
+ // them with legal uses of MIN_INT.
+ Label not_infinity;
+ // This exponent test detects both NaN and Infinity, but NaN has
+ // already been handled by the FPU parity check above.
+ __ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
+ __ and_(edi, 0x7FF0);
+ __ cmp(edi, 0x7FF0);
+ __ j(not_equal, &not_infinity);
+ __ mov(eax, 0);
+ __ bind(&not_infinity);
+ __ mov(Operand(ecx, ebx, times_4, 0), eax);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+
+ __ bind(&is_nan);
+ __ ffree();
+ __ fincstp();
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(ecx, ebx, times_1, 0), 0);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov(eax, 0);
+ __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ Generate(masm, ExternalReference(Runtime::kSetProperty));
+}
+
+
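The +/-Infinity filter above relies on the IEEE-754 encoding: a double is NaN or an infinity exactly when all eleven exponent bits are set. A hedged sketch of the same test on the raw bits (illustrative helper, not part of the patch):

    #include <stdint.h>
    // The asm reads the top 16 bits of the HeapNumber payload and masks
    // with 0x7FF0; that mask covers the full exponent field (bits 52..62).
    static bool IsNaNOrInfinity(uint64_t double_bits) {
      return ((double_bits >> 48) & 0x7FF0) == 0x7FF0;
    }

NaN itself never reaches this test in the store path, because the fucomi/parity check branches to is_nan first.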
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a3b214972..34d4fd5f6 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -319,11 +319,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::FCmp() {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
+ if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
+ fucomip();
+ ffree(0);
+ fincstp();
+ } else {
+ fucompp();
+ push(eax);
+ fnstsw_ax();
+ sahf();
+ pop(eax);
+ }
}
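Both branches of FCmp leave the comparison result in EFLAGS; the pre-CMOV path just routes it through the FPU status word. A hedged decoding sketch (hypothetical helper) of the fnstsw/sahf path:

    #include <stdint.h>
    struct SketchFlags { bool cf, pf, zf; };
    // fnstsw ax puts status-word bits 8..15 in AH; sahf then loads AH
    // into EFLAGS, so C0 lands in CF, C2 in PF (set when unordered),
    // and C3 in ZF -- the same flags fucomip sets directly on
    // CMOV-capable CPUs.
    static SketchFlags FlagsAfterSahf(uint16_t fpu_status_word) {
      SketchFlags f;
      f.cf = (fpu_status_word >> 8) & 1;   // C0
      f.pf = (fpu_status_word >> 10) & 1;  // C2
      f.zf = (fpu_status_word >> 14) & 1;  // C3
      return f;
    }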
@@ -349,10 +355,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
-
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -363,23 +366,24 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
- push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ push(Immediate(0));
+ } else {
+ push(Immediate(CodeObject()));
+ }
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
ExternalReference context_address(Top::k_context_address);
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
+}
- // Setup argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, Operand(eax));
- lea(esi, Operand(ebp, eax, times_4, offset));
-
+void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// TODO(1243899): This should be symmetric to
// CopyRegistersFromStackToMemory() but it isn't! esp is assumed
// correct here, but computed for the other call. Very error
@@ -390,8 +394,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
#endif
- // Reserve space for two arguments: argc and argv.
- sub(Operand(esp), Immediate(2 * kPointerSize));
+ // Reserve space for arguments.
+ sub(Operand(esp), Immediate(argc * kPointerSize));
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -405,15 +409,39 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+ EnterExitFramePrologue(mode);
+
+ // Setup argc and argv in callee-saved registers.
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ mov(edi, Operand(eax));
+ lea(esi, Operand(ebp, eax, times_4, offset));
+
+ EnterExitFrameEpilogue(mode, 2);
+}
+
+
+void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
+ int stack_space,
+ int argc) {
+ EnterExitFramePrologue(mode);
+
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
+
+ EnterExitFrameEpilogue(mode, argc);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// It's okay to clobber register ebx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
lea(ebx, Operand(ebp, kOffset));
CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
}
@@ -767,6 +795,24 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
}
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate heap number in new space.
+ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+}
+
+
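Typical use, as in the external-array load stub earlier in this patch: allocate, then pop the FPU top-of-stack into the fresh HeapNumber.

    // From KeyedLoadIC::GenerateExternalArray above:
    //   __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
    //   __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
    //   __ ret(0);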
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
@@ -907,6 +953,48 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
}
+void MacroAssembler::PushHandleScope(Register scratch) {
+ // Push the number of extensions, smi-tagged so the GC will ignore it.
+ ExternalReference extensions_address =
+ ExternalReference::handle_scope_extensions_address();
+ mov(scratch, Operand::StaticVariable(extensions_address));
+ ASSERT_EQ(0, kSmiTag);
+ shl(scratch, kSmiTagSize);
+ push(scratch);
+ mov(Operand::StaticVariable(extensions_address), Immediate(0));
+ // Push next and limit pointers which will be wordsize aligned and
+ // hence automatically smi tagged.
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ push(Operand::StaticVariable(next_address));
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address();
+ push(Operand::StaticVariable(limit_address));
+}
+
+
+void MacroAssembler::PopHandleScope(Register scratch) {
+ ExternalReference extensions_address =
+ ExternalReference::handle_scope_extensions_address();
+ Label write_back;
+ mov(scratch, Operand::StaticVariable(extensions_address));
+ cmp(Operand(scratch), Immediate(0));
+ j(equal, &write_back);
+ CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
+
+ bind(&write_back);
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address();
+ pop(Operand::StaticVariable(limit_address));
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ pop(Operand::StaticVariable(next_address));
+ pop(scratch);
+ shr(scratch, kSmiTagSize);
+ mov(Operand::StaticVariable(extensions_address), scratch);
+}
+
+
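PushHandleScope/PopHandleScope mirror the C++ HandleScope protocol: save the extension count (smi-tagged so the GC skips it) plus the next and limit pointers, and delete any extensions before restoring. A hedged sketch with a hypothetical HandleScopeData:

    struct HandleScopeData {  // hypothetical mirror of the three words
      int extensions;
      void* next;
      void* limit;
    };
    void DeleteExtensionsSketch(HandleScopeData* d);  // runtime call in the asm

    void PushScopeSketch(HandleScopeData* d, HandleScopeData* saved) {
      *saved = *d;        // the asm pushes extensions (smi-tagged), next, limit
      d->extensions = 0;  // a fresh scope starts without extensions
    }

    void PopScopeSketch(HandleScopeData* d, const HandleScopeData& saved) {
      if (d->extensions != 0) DeleteExtensionsSketch(d);
      d->limit = saved.limit;   // popped in reverse order of the pushes
      d->next = saved.next;
      d->extensions = saved.extensions;
    }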
void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index ed72c96b9..18d221c40 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -77,16 +77,18 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register eax and
+ // Enter a specific kind of exit frame, in either normal or debug mode.
+ // Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
- void EnterExitFrame(StackFrame::Type type);
+ void EnterExitFrame(ExitFrame::Mode mode);
+
+ void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
- void LeaveExitFrame(StackFrame::Type type);
+ void LeaveExitFrame(ExitFrame::Mode mode);
// ---------------------------------------------------------------------------
@@ -206,6 +208,15 @@ class MacroAssembler: public Assembler {
// un-done.
void UndoAllocationInNewSpace(Register object);
+ // Allocate a heap number in new space with undefined value. The
+ // register scratch2 can be passed as no_reg; the others must be
+ // valid registers. Returns tagged pointer in result register, or
+ // jumps to gc_required if new space is full.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -260,6 +271,9 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
+ void PushHandleScope(Register scratch);
+ void PopHandleScope(Register scratch);
+
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext);
@@ -337,6 +351,9 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+ void EnterExitFramePrologue(ExitFrame::Mode mode);
+ void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register result_end,
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 7af4e89e0..76d36a939 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1093,17 +1093,15 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
void RegExpMacroAssemblerIA32::CheckStackLimit() {
- if (FLAG_check_stack) {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
- __ j(above, &no_stack_overflow);
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+ __ j(above, &no_stack_overflow);
- SafeCall(&stack_overflow_label_);
+ SafeCall(&stack_overflow_label_);
- __ bind(&no_stack_overflow);
- }
+ __ bind(&no_stack_overflow);
}
@@ -1163,10 +1161,6 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- __ int3(); // Unused on ia32.
-}
-
#undef __
#endif // V8_NATIVE_REGEXP
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index ca4e14210..3e5fc0479 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -776,20 +776,39 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
- // Push the arguments on the JS stack of the caller.
- __ pop(scratch2); // remove return address
+ Handle<AccessorInfo> callback_handle(callback);
+
+ Register other = reg.is(scratch1) ? scratch2 : scratch1;
+ __ EnterInternalFrame();
+ __ PushHandleScope(other);
+ // Push the stack address where the list of arguments ends.
+ __ mov(other, esp);
+ __ sub(Operand(other), Immediate(2 * kPointerSize));
+ __ push(other);
__ push(receiver); // receiver
__ push(reg); // holder
- __ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data
- __ push(reg);
- __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+ __ mov(other, Immediate(callback_handle));
+ __ push(other);
+ __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
__ push(name_reg); // name
- __ push(scratch2); // restore return address
+ // Save a pointer to where we pushed the arguments pointer.
+ // This will be passed as the const Arguments& to the C++ callback.
+ __ mov(eax, esp);
+ __ add(Operand(eax), Immediate(5 * kPointerSize));
+ __ mov(ebx, esp);
+
+ // Do the call through the API.
+ ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ApiGetterEntryStub stub(callback_handle, &fun);
+ __ CallStub(&stub);
- // Do tail-call to the runtime system.
- ExternalReference load_callback_property =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5, 1);
+ Register tmp = other.is(eax) ? reg : other;
+ __ PopHandleScope(tmp);
+ __ LeaveInternalFrame();
+
+ __ ret(0);
}
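For readers tracing the new callback path, the pushes above produce the following layout, reconstructed from the code (byte offsets on ia32; six slots, matching the ASSERT_EQ on ApiGetterEntryStub::kStackSpace):

    // Stack handed to ApiGetterEntryStub:
    //   esp[0]  : name
    //   esp[4]  : AccessorInfo::data
    //   esp[8]  : AccessorInfo handle (callback)
    //   esp[12] : holder
    //   esp[16] : receiver
    //   esp[20] : end-of-arguments address (esp minus two words at push
    //             time, i.e. the address of the receiver slot)
    // eax points at esp[20]; ebx points at esp[0] (name).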