author    Ryan Dahl <ry@tinyclouds.org>  2010-05-06 09:59:35 -0700
committer Ryan Dahl <ry@tinyclouds.org>  2010-05-06 09:59:35 -0700
commit    6aab6ebe6168464de8e7b0c7254fad4771f281c1
tree      b0c6b91086cbf16147ba247ac6283652bfece086 /deps/v8/src
parent    4ce1e1fca59bc6ad6e2d19643db2ed3159ccb7dc
Upgrade V8 to 2.2.8
Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/api.cc                              |    3
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                  | 1181
-rw-r--r--  deps/v8/src/arm/codegen-arm.h                   |  164
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                    |    3
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc             |   13
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                       |  103
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc          |  131
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h           |   27
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc               |  465
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc            |   79
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h             |   13
-rw-r--r--  deps/v8/src/array.js                            |    9
-rw-r--r--  deps/v8/src/bootstrapper.cc                     |   87
-rw-r--r--  deps/v8/src/builtins.h                          |    3
-rw-r--r--  deps/v8/src/codegen.cc                          |    7
-rw-r--r--  deps/v8/src/codegen.h                           |    1
-rwxr-xr-x  deps/v8/src/compiler.cc                         |    6
-rw-r--r--  deps/v8/src/contexts.h                          |    2
-rw-r--r--  deps/v8/src/conversions.cc                      |    6
-rw-r--r--  deps/v8/src/d8.cc                               |    5
-rw-r--r--  deps/v8/src/date.js                             |    8
-rw-r--r--  deps/v8/src/dateparser-inl.h                    |   13
-rw-r--r--  deps/v8/src/dateparser.cc                       |   16
-rw-r--r--  deps/v8/src/dateparser.h                        |   12
-rw-r--r--  deps/v8/src/debug-debugger.js                   |   65
-rw-r--r--  deps/v8/src/factory.cc                          |   28
-rw-r--r--  deps/v8/src/factory.h                           |    8
-rw-r--r--  deps/v8/src/fast-dtoa.cc                        |   12
-rw-r--r--  deps/v8/src/fast-dtoa.h                         |    6
-rw-r--r--  deps/v8/src/full-codegen.cc                     |    5
-rw-r--r--  deps/v8/src/handles.cc                          |    1
-rw-r--r--  deps/v8/src/handles.h                           |    2
-rw-r--r--  deps/v8/src/heap.cc                             |   83
-rw-r--r--  deps/v8/src/heap.h                              |   15
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc                |  360
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h                 |   19
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc                     |   18
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc        |   19
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h         |    4
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc |   11
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc             |  115
-rw-r--r--  deps/v8/src/ic.cc                               |    6
-rw-r--r--  deps/v8/src/ic.h                                |    2
-rw-r--r--  deps/v8/src/liveedit-debugger.js                |  640
-rw-r--r--  deps/v8/src/liveedit.cc                         |  194
-rw-r--r--  deps/v8/src/liveedit.h                          |   31
-rw-r--r--  deps/v8/src/objects-debug.cc                    |   26
-rw-r--r--  deps/v8/src/objects-inl.h                       |   57
-rw-r--r--  deps/v8/src/objects.cc                          |    9
-rw-r--r--  deps/v8/src/objects.h                           |   27
-rw-r--r--  deps/v8/src/runtime.cc                          |  150
-rw-r--r--  deps/v8/src/runtime.h                           |    5
-rw-r--r--  deps/v8/src/runtime.js                          |   11
-rw-r--r--  deps/v8/src/stub-cache.cc                       |   67
-rw-r--r--  deps/v8/src/stub-cache.h                        |   49
-rw-r--r--  deps/v8/src/v8natives.js                        |    1
-rw-r--r--  deps/v8/src/version.cc                          |    2
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h             |   11
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc                |   78
-rw-r--r--  deps/v8/src/x64/assembler-x64.h                 |   16
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                  | 1358
-rw-r--r--  deps/v8/src/x64/codegen-x64.h                   |  168
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                   |   53
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                       |   79
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc          |  273
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h           |   39
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc               |  712
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc            |   25
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h             |    4
69 files changed, 5082 insertions, 2139 deletions
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index f738a3783..4709a156b 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -3646,6 +3646,8 @@ void V8::ResumeProfilerEx(int flags, int tag) {
// those modules which haven't been started prior to making a
// snapshot.
+ // Make a GC prior to taking a snapshot.
+ i::Heap::CollectAllGarbage(false);
// Reset snapshot flag and CPU module flags.
flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
const int current_flags = i::Logger::GetActiveProfilerModules();
@@ -4020,6 +4022,7 @@ void Debug::ProcessDebugMessages() {
}
Local<Context> Debug::GetDebugContext() {
+ EnsureInitialized("v8::Debug::GetDebugContext()");
ENTER_V8;
return Utils::ToLocal(i::Debugger::GetDebugContext());
}
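
Both api.cc changes are small robustness fixes: a full garbage collection now precedes a heap snapshot so the snapshot reflects only live objects, and Debug::GetDebugContext() initializes V8 on demand instead of assuming an earlier API call already has. A minimal sketch of such an on-demand guard, with hypothetical names (V8 2.2's actual EnsureInitialized is implemented differently):

#include <mutex>

// Hypothetical sketch only: run one-time engine bootstrap the first time
// any API entry point that needs it is hit.
static std::once_flag g_init_once;

void EnsureInitialized(const char* location) {
  std::call_once(g_init_once, [location] {
    // ... bootstrap the engine; on failure, report `location`, the name
    // of the API entry point that triggered initialization ...
  });
}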
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 291a763fb..30860a1f9 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -565,7 +565,7 @@ void CodeGenerator::Load(Expression* expr) {
}
ASSERT(has_valid_frame());
ASSERT(!has_cc());
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -1008,10 +1008,10 @@ static int BitPosition(unsigned x) {
}
-void CodeGenerator::VirtualFrameSmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode) {
+void CodeGenerator::SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode mode) {
int int_value = Smi::cast(*value)->value();
bool something_to_inline;
@@ -1232,189 +1232,6 @@ void CodeGenerator::VirtualFrameSmiOperation(Token::Value op,
}
-void CodeGenerator::SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // NOTE: This is an attempt to inline (a bit) more of the code for
- // some possible smi operations (like + and -) when (at least) one
- // of the operands is a literal smi. With this optimization, the
- // performance of the system is increased by ~15%, and the generated
- // code size is increased by ~1% (measured on a combination of
- // different benchmarks).
-
- // sp[0] : operand
-
- int int_value = Smi::cast(*value)->value();
-
- JumpTarget exit;
- frame_->EmitPop(r0);
-
- bool something_to_inline = true;
- switch (op) {
- case Token::ADD: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
-
- __ add(r0, r0, Operand(value), SetCC);
- deferred->Branch(vs);
- __ tst(r0, Operand(kSmiTagMask));
- deferred->Branch(ne);
- deferred->BindExit();
- break;
- }
-
- case Token::SUB: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
-
- if (reversed) {
- __ rsb(r0, r0, Operand(value), SetCC);
- } else {
- __ sub(r0, r0, Operand(value), SetCC);
- }
- deferred->Branch(vs);
- __ tst(r0, Operand(kSmiTagMask));
- deferred->Branch(ne);
- deferred->BindExit();
- break;
- }
-
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
- __ tst(r0, Operand(kSmiTagMask));
- deferred->Branch(ne);
- switch (op) {
- case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
- case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
- case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
- default: UNREACHABLE();
- }
- deferred->BindExit();
- break;
- }
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- if (reversed) {
- something_to_inline = false;
- break;
- }
- int shift_value = int_value & 0x1f; // least significant 5 bits
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, shift_value, false, mode, r0);
- __ tst(r0, Operand(kSmiTagMask));
- deferred->Branch(ne);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
- switch (op) {
- case Token::SHL: {
- if (shift_value != 0) {
- __ mov(r2, Operand(r2, LSL, shift_value));
- }
- // check that the *unsigned* result fits in a smi
- __ add(r3, r2, Operand(0x40000000), SetCC);
- deferred->Branch(mi);
- break;
- }
- case Token::SHR: {
- // LSR by immediate 0 means shifting 32 bits.
- if (shift_value != 0) {
- __ mov(r2, Operand(r2, LSR, shift_value));
- }
- // check that the *unsigned* result fits in a smi
- // neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when
- // smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi
- __ and_(r3, r2, Operand(0xc0000000), SetCC);
- deferred->Branch(ne);
- break;
- }
- case Token::SAR: {
- if (shift_value != 0) {
- // ASR by immediate 0 means shifting 32 bits.
- __ mov(r2, Operand(r2, ASR, shift_value));
- }
- break;
- }
- default: UNREACHABLE();
- }
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
- deferred->BindExit();
- break;
- }
-
- case Token::MOD: {
- if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
- something_to_inline = false;
- break;
- }
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
- unsigned mask = (0x80000000u | kSmiTagMask);
- __ tst(r0, Operand(mask));
- deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
- mask = (int_value << kSmiTagSize) - 1;
- __ and_(r0, r0, Operand(mask));
- deferred->BindExit();
- break;
- }
-
- case Token::MUL: {
- if (!IsEasyToMultiplyBy(int_value)) {
- something_to_inline = false;
- break;
- }
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0);
- unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
- max_smi_that_wont_overflow <<= kSmiTagSize;
- unsigned mask = 0x80000000u;
- while ((mask & max_smi_that_wont_overflow) == 0) {
- mask |= mask >> 1;
- }
- mask |= kSmiTagMask;
- // This does a single mask that checks for a too high value in a
- // conservative way and for a non-Smi. It also filters out negative
- // numbers, unfortunately, but since this code is inline we prefer
- // brevity to comprehensiveness.
- __ tst(r0, Operand(mask));
- deferred->Branch(ne);
- MultiplyByKnownInt(masm_, r0, r0, int_value);
- deferred->BindExit();
- break;
- }
-
- default:
- something_to_inline = false;
- break;
- }
-
- if (!something_to_inline) {
- if (!reversed) {
- frame_->EmitPush(r0);
- __ mov(r0, Operand(value));
- frame_->EmitPush(r0);
- GenericBinaryOperation(op, mode, int_value);
- } else {
- __ mov(ip, Operand(value));
- frame_->EmitPush(ip);
- frame_->EmitPush(r0);
- GenericBinaryOperation(op, mode, kUnknownIntValue);
- }
- }
-
- exit.Bind();
-}
-
-
void CodeGenerator::Comparison(Condition cc,
Expression* left,
Expression* right,
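
The smi fast paths above (the renamed SmiOperation and the spilled duplicate it replaces) lean on V8's smi representation: with kSmiTag == 0 and kSmiTagSize == 1, a smi n is stored as n << 1, so two tagged smis can be added or subtracted directly, with the overflow flag (vs) and a tag test (tst ... kSmiTagMask) catching every case that must fall into deferred code. A minimal C++ sketch of the invariant, assuming the 32-bit layout:

#include <cassert>
#include <cstdint>

// Assumed 32-bit smi layout: kSmiTag == 0, kSmiTagSize == 1, kSmiTagMask == 1.
int32_t SmiTag(int32_t n)    { return n << 1; }
int32_t SmiUntag(int32_t t)  { return t >> 1; }
bool    HasSmiTag(int32_t t) { return (t & 1) == 0; }

int main() {
  // (a << 1) + (b << 1) == (a + b) << 1, so tagged addition needs no untag;
  // the generated code additionally branches to deferred code on overflow.
  int32_t a = SmiTag(20), b = SmiTag(22);
  int32_t sum = a + b;
  assert(HasSmiTag(sum) && SmiUntag(sum) == 42);
  return 0;
}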
@@ -1526,9 +1343,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// give us a megamorphic load site. Not super, but it works.
LoadAndSpill(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
- __ mov(r2, Operand(name));
- __ ldr(r0, MemOperand(sp, 0));
- frame_->CallLoadIC(RelocInfo::CODE_TARGET);
+ frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
frame_->EmitPush(r0);
// Load the receiver and the existing arguments object onto the
@@ -1746,12 +1561,11 @@ void CodeGenerator::VisitBlock(Block* node) {
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->EmitPush(cp);
- __ mov(r0, Operand(pairs));
- frame_->EmitPush(r0);
- __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(pairs));
+ frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+
+ VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// The result is discarded.
}
@@ -1761,7 +1575,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
@@ -1776,28 +1589,27 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
ASSERT(var->is_dynamic());
// For now, just do a runtime call.
frame_->EmitPush(cp);
- __ mov(r0, Operand(var->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(var->name()));
// Declaration nodes are always declared in only two modes.
ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- __ mov(r0, Operand(Smi::FromInt(attr)));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
} else if (node->fun() != NULL) {
- LoadAndSpill(node->fun());
+ Load(node->fun());
} else {
- __ mov(r0, Operand(0)); // no initial value!
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(0));
}
+
+ VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
+
ASSERT(frame_->height() == original_height);
return;
}
@@ -1813,12 +1625,11 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
}
if (val != NULL) {
- {
- // Set initial value.
- Reference target(this, node->proxy());
- LoadAndSpill(val);
- target.SetValue(NOT_CONST_INIT);
- }
+ // Set initial value.
+ Reference target(this, node->proxy());
+ Load(val);
+ target.SetValue(NOT_CONST_INIT);
+
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
}
@@ -2904,7 +2715,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
return;
}
InstantiateFunction(function_info);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -2916,7 +2727,7 @@ void CodeGenerator::VisitSharedFunctionInfoLiteral(
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
InstantiateFunction(node->shared_function_info());
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -2943,7 +2754,7 @@ void CodeGenerator::VisitConditional(Conditional* node) {
LoadAndSpill(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3199,11 +3010,10 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
// Load the global object.
LoadGlobal();
// Setup the name register and call load IC.
- frame_->SpillAllButCopyTOSToR0();
- __ mov(r2, Operand(slot->var()->name()));
- frame_->CallLoadIC(typeof_state == INSIDE_TYPEOF
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT);
+ frame_->CallLoadIC(slot->var()->name(),
+ typeof_state == INSIDE_TYPEOF
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT);
// Drop the global object. The result is in r0.
frame_->Drop();
}
@@ -3215,7 +3025,7 @@ void CodeGenerator::VisitSlot(Slot* node) {
#endif
Comment cmnt(masm_, "[ Slot");
LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3234,7 +3044,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
Reference ref(this, node);
ref.GetValue();
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3246,7 +3056,7 @@ void CodeGenerator::VisitLiteral(Literal* node) {
Register reg = frame_->GetTOSRegister();
__ mov(reg, Operand(node->handle()));
frame_->EmitPush(reg);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3290,7 +3100,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
done.Bind();
// Push the literal.
frame_->EmitPush(r2);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3371,7 +3181,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
}
}
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3430,7 +3240,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
__ mov(r3, Operand(offset));
__ RecordWrite(r1, r3, r2);
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3446,70 +3256,318 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
LoadAndSpill(node->value());
frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
frame_->EmitPush(r0);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
-void CodeGenerator::VisitAssignment(Assignment* node) {
- VirtualFrame::RegisterAllocationScope scope(this);
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- Comment cmnt(masm_, "[ Assignment");
+ Comment cmnt(masm(), "[ Variable Assignment");
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ ASSERT(var != NULL);
+ Slot* slot = var->slot();
+ ASSERT(slot != NULL);
- { Reference target(this, node->target(), node->is_compound());
- if (target.is_illegal()) {
- // Fool the virtual frame into thinking that we left the assignment's
- // value on the frame.
- Register tos = frame_->GetTOSRegister();
- __ mov(tos, Operand(Smi::FromInt(0)));
- frame_->EmitPush(tos);
- ASSERT(frame_->height() == original_height + 1);
- return;
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
+ LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+
+ // Perform the binary operation.
+ Literal* literal = node->value()->AsLiteral();
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ if (literal != NULL && literal->handle()->IsSmi()) {
+ SmiOperation(node->binary_op(),
+ literal->handle(),
+ false,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
+ Load(node->value());
+ VirtualFrameBinaryOperation(
+ node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ }
+ } else {
+ Load(node->value());
+ }
+
+ // Perform the assignment.
+ if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
+ CodeForSourcePosition(node->position());
+ StoreToSlot(slot,
+ node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
+ }
+ ASSERT_EQ(original_height + 1, frame_->height());
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ Comment cmnt(masm(), "[ Named Property Assignment");
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ Property* prop = node->target()->AsProperty();
+ ASSERT(var == NULL || (prop == NULL && var->is_global()));
+
+ // Initialize name and evaluate the receiver sub-expression if necessary. If
+ // the receiver is trivial it is not placed on the stack at this point, but
+ // loaded whenever actually needed.
+ Handle<String> name;
+ bool is_trivial_receiver = false;
+ if (var != NULL) {
+ name = var->name();
+ } else {
+ Literal* lit = prop->key()->AsLiteral();
+ ASSERT_NOT_NULL(lit);
+ name = Handle<String>::cast(lit->handle());
+ // Do not materialize the receiver on the frame if it is trivial.
+ is_trivial_receiver = prop->obj()->IsTrivial();
+ if (!is_trivial_receiver) Load(prop->obj());
+ }
+
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
+ if (node->starts_initialization_block()) {
+ // Initialization block consists of assignments of the form expr.x = ..., so
+ // this will never be an assignment to a variable, so there must be a
+ // receiver object.
+ ASSERT_EQ(NULL, var);
+ if (is_trivial_receiver) {
+ Load(prop->obj());
+ } else {
+ frame_->Dup();
+ }
+ frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ // Change to fast case at the end of an initialization block. To prepare for
+ // that add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ if (node->ends_initialization_block() && !is_trivial_receiver) {
+ frame_->Dup();
+ }
+
+ // Stack layout:
+ // [tos] : receiver (only materialized if non-trivial)
+ // [tos+1] : receiver if at the end of an initialization block
+
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
+ if (is_trivial_receiver) {
+ Load(prop->obj());
+ } else if (var != NULL) {
+ LoadGlobal();
+ } else {
+ frame_->Dup();
}
+ EmitNamedLoad(name, var != NULL);
+ frame_->Drop(); // Receiver is left on the stack.
+ frame_->EmitPush(r0);
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
+ // Perform the binary operation.
+ Literal* literal = node->value()->AsLiteral();
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ if (literal != NULL && literal->handle()->IsSmi()) {
+ SmiOperation(node->binary_op(),
+ literal->handle(),
+ false,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
Load(node->value());
+ VirtualFrameBinaryOperation(
+ node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ }
+ } else {
+ // For non-compound assignment just load the right-hand side.
+ Load(node->value());
+ }
+
+ // Stack layout:
+ // [tos] : value
+ // [tos+1] : receiver (only materialized if non-trivial)
+ // [tos+2] : receiver if at the end of an initialization block
+
+ // Perform the assignment. It is safe to ignore constants here.
+ ASSERT(var == NULL || var->mode() != Variable::CONST);
+ ASSERT_NE(Token::INIT_CONST, node->op());
+ if (is_trivial_receiver) {
+ // Load the receiver and swap with the value.
+ Load(prop->obj());
+ Register t0 = frame_->PopToRegister();
+ Register t1 = frame_->PopToRegister(t0);
+ frame_->EmitPush(t0);
+ frame_->EmitPush(t1);
+ }
+ CodeForSourcePosition(node->position());
+ bool is_contextual = (var != NULL);
+ EmitNamedStore(name, is_contextual);
+ frame_->EmitPush(r0);
- } else { // Assignment is a compound assignment.
- // Get the old value of the lhs.
- target.GetValue();
- Literal* literal = node->value()->AsLiteral();
- bool overwrite =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- if (literal != NULL && literal->handle()->IsSmi()) {
- VirtualFrameSmiOperation(node->binary_op(),
- literal->handle(),
- false,
- overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- Load(node->value());
- VirtualFrameBinaryOperation(node->binary_op(),
- overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
- }
+ // Change to fast case at the end of an initialization block.
+ if (node->ends_initialization_block()) {
+ ASSERT_EQ(NULL, var);
+ // The argument to the runtime call is the receiver.
+ if (is_trivial_receiver) {
+ Load(prop->obj());
+ } else {
+ // A copy of the receiver is below the value of the assignment. Swap
+ // the receiver and the value of the assignment expression.
+ Register t0 = frame_->PopToRegister();
+ Register t1 = frame_->PopToRegister(t0);
+ frame_->EmitPush(t0);
+ frame_->EmitPush(t1);
}
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- (var->mode() == Variable::CONST) &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- UnloadReference(&target);
+ frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ // Stack layout:
+ // [tos] : result
+
+ ASSERT_EQ(original_height + 1, frame_->height());
+}
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ Comment cmnt(masm_, "[ Keyed Property Assignment");
+ Property* prop = node->target()->AsProperty();
+ ASSERT_NOT_NULL(prop);
+
+ // Evaluate the receiver subexpression.
+ Load(prop->obj());
+
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
+ if (node->starts_initialization_block()) {
+ frame_->Dup();
+ frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ // Change to fast case at the end of an initialization block. To prepare for
+ // that add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ if (node->ends_initialization_block()) {
+ frame_->Dup();
+ }
+
+ // Evaluate the key subexpression.
+ Load(prop->key());
+
+ // Stack layout:
+ // [tos] : key
+ // [tos+1] : receiver
+ // [tos+2] : receiver if at the end of an initialization block
+
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
+ // Load of the current value leaves receiver and key on the stack.
+ EmitKeyedLoad();
+ frame_->EmitPush(r0);
+
+ // Perform the binary operation.
+ Literal* literal = node->value()->AsLiteral();
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ if (literal != NULL && literal->handle()->IsSmi()) {
+ SmiOperation(node->binary_op(),
+ literal->handle(),
+ false,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
- } else {
- target.SetValue(NOT_CONST_INIT);
- }
+ Load(node->value());
+ VirtualFrameBinaryOperation(
+ node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
}
+ } else {
+ // For non-compound assignment just load the right-hand side.
+ Load(node->value());
}
- ASSERT(frame_->height() == original_height + 1);
+
+ // Stack layout:
+ // [tos] : value
+ // [tos+1] : key
+ // [tos+2] : receiver
+ // [tos+3] : receiver if at the end of an initialization block
+
+ // Perform the assignment. It is safe to ignore constants here.
+ ASSERT(node->op() != Token::INIT_CONST);
+ CodeForSourcePosition(node->position());
+ frame_->PopToR0();
+ EmitKeyedStore(prop->key()->type());
+ frame_->Drop(2); // Key and receiver are left on the stack.
+ frame_->EmitPush(r0);
+
+ // Stack layout:
+ // [tos] : result
+ // [tos+1] : receiver if at the end of an initialization block
+
+ // Change to fast case at the end of an initialization block.
+ if (node->ends_initialization_block()) {
+ // The argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment. Swap the receiver and
+ // the value of the assignment expression.
+ Register t0 = frame_->PopToRegister();
+ Register t1 = frame_->PopToRegister(t0);
+ frame_->EmitPush(t1);
+ frame_->EmitPush(t0);
+ frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ // Stack layout:
+ // [tos] : result
+
+ ASSERT_EQ(original_height + 1, frame_->height());
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+ VirtualFrame::RegisterAllocationScope scope(this);
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ Comment cmnt(masm_, "[ Assignment");
+
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ Property* prop = node->target()->AsProperty();
+
+ if (var != NULL && !var->is_global()) {
+ EmitSlotAssignment(node);
+
+ } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
+ (var != NULL && var->is_global())) {
+ // Properties whose keys are property names and global variables are
+ // treated as named property references. We do not need to consider
+ // global 'this' because it is not a valid left-hand side.
+ EmitNamedPropertyAssignment(node);
+
+ } else if (prop != NULL) {
+ // Other properties (including rewritten parameters for a function that
+ // uses arguments) are keyed property assignments.
+ EmitKeyedPropertyAssignment(node);
+
+ } else {
+ // Invalid left-hand side.
+ Load(node->target());
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ // The runtime call doesn't actually return but the code generator will
+ // still generate code and expects a certain frame height.
+ frame_->EmitPush(r0);
+ }
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3524,7 +3582,7 @@ void CodeGenerator::VisitThrow(Throw* node) {
CodeForSourcePosition(node->position());
frame_->CallRuntime(Runtime::kThrow, 1);
frame_->EmitPush(r0);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3537,7 +3595,7 @@ void CodeGenerator::VisitProperty(Property* node) {
{ Reference property(this, node);
property.GetValue();
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3744,7 +3802,7 @@ void CodeGenerator::VisitCall(Call* node) {
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3787,7 +3845,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3950,9 +4008,12 @@ void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
}
-// This should generate code that performs a charCodeAt() call or returns
+// This generates code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It is not yet implemented on ARM, so it always goes to the slow case.
+// It can handle flat, 8 and 16 bit characters and cons strings where the
+// answer is found in the left hand branch of the cons. The slow case will
+// flatten the string, which will ensure that the answer is in the left hand
+// side the next time around.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
@@ -3960,75 +4021,28 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
LoadAndSpill(args->at(0));
LoadAndSpill(args->at(1));
- frame_->EmitPop(r0); // Index.
- frame_->EmitPop(r1); // String.
-
- Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
+ frame_->EmitPop(r1); // Index.
+ frame_->EmitPop(r2); // String.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &slow); // The 'string' was a Smi.
-
- ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
- __ b(ne, &slow); // The index was negative or not a Smi.
-
- __ bind(&try_again_with_new_string);
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &slow);
-
- // Now r2 has the string type.
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- // Now r3 has the length of the string. Compare with the index.
- __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
- __ b(le, &slow);
-
- // Here we know the index is in range. Check that string is sequential.
- ASSERT_EQ(0, kSeqStringTag);
- __ tst(r2, Operand(kStringRepresentationMask));
- __ b(ne, &not_a_flat_string);
-
- // Check whether it is an ASCII string.
- ASSERT_EQ(0, kTwoByteStringTag);
- __ tst(r2, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string. We can add without shifting since the Smi tag size is the
- // log2 of the number of bytes in a two-byte character.
- ASSERT_EQ(1, kSmiTagSize);
- ASSERT_EQ(0, kSmiShiftSize);
- __ add(r1, r1, Operand(r0));
- __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ jmp(&end);
-
- __ bind(&ascii_string);
- __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
- __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ jmp(&end);
-
- __ bind(&not_a_flat_string);
- __ and_(r2, r2, Operand(kStringRepresentationMask));
- __ cmp(r2, Operand(kConsStringTag));
- __ b(ne, &slow);
-
- // ConsString.
- // Check that the right hand side is the empty string (ie if this is really a
- // flat string in a cons string). If that is not the case we would rather go
- // to the runtime system now, to flatten the string.
- __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
- __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
- __ cmp(r2, Operand(r3));
- __ b(ne, &slow);
-
- // Get the first of the two strings.
- __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
- __ jmp(&try_again_with_new_string);
+ Label slow_case;
+ Label exit;
+ StringHelper::GenerateFastCharCodeAt(masm_,
+ r2,
+ r1,
+ r3,
+ r0,
+ &slow_case,
+ &slow_case,
+ &slow_case,
+ &slow_case);
+ __ jmp(&exit);
- __ bind(&slow);
+ __ bind(&slow_case);
+ // Move the undefined value into the result register, which will
+ // trigger the slow case.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&end);
+ __ bind(&exit);
frame_->EmitPush(r0);
}
@@ -4037,37 +4051,19 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateCharFromCode");
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r0);
-
- JumpTarget slow_case;
- JumpTarget exit;
-
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(r0, Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- slow_case.Branch(nz);
-
- ASSERT(kSmiTag == 0);
- __ mov(r1, Operand(Factory::single_character_string_cache()));
- __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
- slow_case.Branch(eq);
-
- frame_->EmitPush(r1);
- exit.Jump();
+ Register code = r1;
+ Register scratch = ip;
+ Register result = r0;
- slow_case.Bind();
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kCharFromCode, 1);
- frame_->EmitPush(r0);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(code);
- exit.Bind();
+ StringHelper::GenerateCharFromCode(masm_,
+ code,
+ scratch,
+ result,
+ CALL_FUNCTION);
+ frame_->EmitPush(result);
}
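
GenerateCharFromCode now delegates to StringHelper::GenerateCharFromCode (added further down in this diff), whose fast path indexes a preallocated cache of single-character strings and falls back to Runtime::kCharFromCode on a miss. A rough C++ model of that lookup, with the cache array and undefined sentinel as stand-ins for V8 internals:

#include <cstdint>

// Stand-ins: `cache` models Factory::single_character_string_cache(),
// `undefined` the root value that marks an empty cache slot.
const void* CharFromCodeFastPath(uint32_t tagged_code,
                                 const void* const cache[],
                                 const void* undefined) {
  // One test rejects non-smis and codes above String::kMaxAsciiCharCode
  // (127): mask = kSmiTagMask | (~127u << kSmiTagSize).
  const uint32_t mask = 1u | (~127u << 1);
  if (tagged_code & mask) return nullptr;         // slow case: runtime call
  const void* result = cache[tagged_code >> 1];   // untag to index the cache
  return result == undefined ? nullptr : result;  // cache miss: runtime call
}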
@@ -4508,6 +4504,110 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
}
+class DeferredSwapElements: public DeferredCode {
+ public:
+ DeferredSwapElements(Register object, Register index1, Register index2)
+ : object_(object), index1_(index1), index2_(index2) {
+ set_comment("[ DeferredSwapElements");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+ __ push(object_);
+ __ push(index1_);
+ __ push(index2_);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateSwapElements");
+
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Register index2 = r2;
+ Register index1 = r1;
+ Register object = r0;
+ Register tmp1 = r3;
+ Register tmp2 = r4;
+
+ frame_->EmitPop(index2);
+ frame_->EmitPop(index1);
+ frame_->EmitPop(object);
+
+ DeferredSwapElements* deferred =
+ new DeferredSwapElements(object, index1, index2);
+
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
+ deferred->Branch(lt);
+ __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
+ __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ deferred->Branch(nz);
+
+ // Check the object's elements are in fast case.
+ __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(tmp2, ip);
+ deferred->Branch(ne);
+
+ // Smi-tagging is equivalent to multiplying by 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ // Check that both indices are smis.
+ __ mov(tmp2, index1);
+ __ orr(tmp2, tmp2, index2);
+ __ tst(tmp2, Operand(kSmiTagMask));
+ deferred->Branch(nz);
+
+ // Bring the offsets into the fixed array in tmp1 into index1 and
+ // index2.
+ __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Swap elements.
+ Register tmp3 = object;
+ object = no_reg;
+ __ ldr(tmp3, MemOperand(tmp1, index1));
+ __ ldr(tmp2, MemOperand(tmp1, index2));
+ __ str(tmp3, MemOperand(tmp1, index2));
+ __ str(tmp2, MemOperand(tmp1, index1));
+
+ Label done;
+ __ InNewSpace(tmp1, tmp2, eq, &done);
+ // Possible optimization: do a check that both values are Smis
+ // (or them and test against Smi mask.)
+
+ __ mov(tmp2, tmp1);
+ RecordWriteStub recordWrite1(tmp1, index1, tmp3);
+ __ CallStub(&recordWrite1);
+
+ RecordWriteStub recordWrite2(tmp2, index2, tmp3);
+ __ CallStub(&recordWrite2);
+
+ __ bind(&done);
+
+ deferred->BindExit();
+ __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
+ frame_->EmitPush(tmp1);
+}
+
+
void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
Comment cmnt(masm_, "[ GenerateCallFunction");
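
The new GenerateSwapElements fast path above defers to the runtime unless the object is a fast-mode JSObject backed by a plain FixedArray and both indices are smis; each tagged index is then turned into a byte offset with a single shift. A sketch of that offset arithmetic under the 32-bit constants the code relies on (the header size here is an assumed illustrative value):

#include <cstdint>

// Assumed 32-bit constants: kSmiTagSize == 1, kPointerSizeLog2 == 2.
// kHeaderSize stands in for FixedArray::kHeaderSize - kHeapObjectTag.
constexpr int kSmiTagSize = 1;
constexpr int kPointerSizeLog2 = 2;
constexpr int32_t kHeaderSize = 8;  // illustrative value only

// A tagged smi index is n << 1; shifting left one more bit yields n * 4,
// the byte offset of element n in an array of 4-byte pointers.
int32_t ElementByteOffset(int32_t smi_index) {
  return kHeaderSize + (smi_index << (kPointerSizeLog2 - kSmiTagSize));
}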
@@ -4598,7 +4698,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
frame_->CallRuntime(function, arg_count);
frame_->EmitPush(r0);
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -4762,7 +4862,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
}
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
return;
}
target.GetValue();
@@ -4830,7 +4930,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Postfix: Discard the new value and use the old.
if (is_postfix) frame_->EmitPop(r0);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -4968,18 +5068,17 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->left());
- VirtualFrameSmiOperation(
- node->op(),
- rliteral->handle(),
- false,
- overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ SmiOperation(node->op(),
+ rliteral->handle(),
+ false,
+ overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->right());
- VirtualFrameSmiOperation(node->op(),
- lliteral->handle(),
- true,
- overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
+ SmiOperation(node->op(),
+ lliteral->handle(),
+ true,
+ overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else {
VirtualFrame::RegisterAllocationScope scope(this);
OverwriteMode overwrite_mode = NO_OVERWRITE;
@@ -5006,7 +5105,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
- ASSERT(frame_->height() == original_height + 1);
+ ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -5289,7 +5388,8 @@ void DeferredReferenceGetKeyedValue::Generate() {
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Call keyed load IC. It has all arguments on the stack.
+ // Call keyed load IC. It has all arguments on the stack and the key in r0.
+ __ ldr(r0, MemOperand(sp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
@@ -5343,11 +5443,10 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
// Setup the name register and call load IC.
- frame_->SpillAllButCopyTOSToR0();
- __ mov(r2, Operand(name));
- frame_->CallLoadIC(is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET);
+ frame_->CallLoadIC(name,
+ is_contextual
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET);
} else {
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
@@ -5400,9 +5499,18 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
}
+void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+ int expected_height = frame_->height() - (is_contextual ? 1 : 2);
+#endif
+ frame_->CallStoreIC(name, is_contextual);
+
+ ASSERT_EQ(expected_height, frame_->height());
+}
+
+
void CodeGenerator::EmitKeyedLoad() {
if (loop_nesting() == 0) {
- VirtualFrame::SpilledScope spilled(frame_);
Comment cmnt(masm_, "[ Load from keyed property");
frame_->CallKeyedLoadIC();
} else {
@@ -5414,7 +5522,7 @@ void CodeGenerator::EmitKeyedLoad() {
__ IncrementCounter(&Counters::keyed_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
- // Load the receiver and key from the stack.
+ // Load the receiver and key from the stack.
frame_->SpillAllButCopyTOSToR1R0();
Register receiver = r0;
Register key = r1;
@@ -5489,7 +5597,7 @@ void CodeGenerator::EmitKeyedLoad() {
void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
- frame_->AssertIsSpilled();
+ VirtualFrame::SpilledScope scope(frame_);
// Generate inlined version of the keyed store if the code is in a loop
// and the key is likely to be a smi.
if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
@@ -5657,21 +5765,13 @@ void Reference::SetValue(InitState init_state) {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
cgen_->StoreToSlot(slot, init_state);
- cgen_->UnloadReference(this);
+ set_unloaded();
break;
}
case NAMED: {
- VirtualFrame::SpilledScope scope(frame);
Comment cmnt(masm, "[ Store to named Property");
- // Call the appropriate IC code.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- Handle<String> name(GetName());
-
- frame->EmitPop(r0);
- frame->EmitPop(r1);
- __ mov(r2, Operand(name));
- frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+ cgen_->EmitNamedStore(GetName(), false);
frame->EmitPush(r0);
set_unloaded();
break;
@@ -6489,6 +6589,12 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ __ RecordWriteHelper(object_, offset_, scratch_);
+ __ Ret();
+}
+
+
// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
@@ -8388,14 +8494,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
// r2: Number of capture registers
- // r3: Length of subject string
+ // r3: Length of subject string as a smi
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ cmp(r3, Operand(r0, ASR, kSmiTagSize + kSmiShiftSize));
- __ b(ls, &runtime);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ __ cmp(r3, Operand(r0));
+ __ b(le, &runtime);
// r2: Number of capture registers
// subject: Subject string
@@ -8521,6 +8629,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
__ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
__ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
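
This hunk reflects a representation change visible throughout the patch: String::kLengthOffset now holds a smi rather than a raw integer, so the length can be compared against a smi index while both stay tagged; only the raw character count needed for the argument setup above gets an explicit untag (ASR kSmiTagSize). Tagging preserves order, which is what makes the tagged comparison sound:

#include <cassert>
#include <cstdint>

// With kSmiTag == 0 and kSmiTagSize == 1, tagging is monotonic, so
// comparing two tagged smis orders them exactly like their untagged values.
int main() {
  auto tag = [](int32_t n) { return n << 1; };
  for (int32_t len = 0; len < 64; ++len)
    for (int32_t idx = 0; idx < 64; ++idx)
      assert((tag(len) <= tag(idx)) == (len <= idx));
  return 0;
}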
@@ -8750,12 +8859,151 @@ int CompareStub::MinorKey() {
}
-void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
+void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
+ Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_smi,
+ Label* index_out_of_range,
+ Label* slow_case) {
+ Label not_a_flat_string;
+ Label try_again_with_new_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ __ BranchOnSmi(object, receiver_not_string);
+
+ // Fetch the instance type of the receiver into result register.
+ __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ tst(result, Operand(kIsNotStringMask));
+ __ b(ne, receiver_not_string);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ BranchOnNotSmi(index, index_not_smi);
+
+ // Check for index out of range.
+ __ ldr(scratch, FieldMemOperand(object, String::kLengthOffset));
+ // Now scratch has the length of the string. Compare with the index.
+ __ cmp(scratch, Operand(index));
+ __ b(ls, index_out_of_range);
+
+ __ bind(&try_again_with_new_string);
+ // ----------- S t a t e -------------
+ // -- object : string to access
+ // -- result : instance type of the string
+ // -- scratch : non-negative index < length
+ // -----------------------------------
+
+ // We need special handling for non-flat strings.
+ ASSERT_EQ(0, kSeqStringTag);
+ __ tst(result, Operand(kStringRepresentationMask));
+ __ b(ne, &not_a_flat_string);
+
+ // Check for 1-byte or 2-byte string.
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ tst(result, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string. We can add without shifting since the Smi tag size is the
+ // log2 of the number of bytes in a two-byte character.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiShiftSize);
+ __ add(scratch, object, Operand(index));
+ __ ldrh(result, FieldMemOperand(scratch, SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // Handle non-flat strings.
+ __ bind(&not_a_flat_string);
+ __ and_(result, result, Operand(kStringRepresentationMask));
+ __ cmp(result, Operand(kConsStringTag));
+ __ b(ne, slow_case);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ ldr(result, FieldMemOperand(object, ConsString::kSecondOffset));
+ __ LoadRoot(scratch, Heap::kEmptyStringRootIndex);
+ __ cmp(result, Operand(scratch));
+ __ b(ne, slow_case);
+
+ // Get the first of the two strings and load its instance type.
+ __ ldr(object, FieldMemOperand(object, ConsString::kFirstOffset));
+ __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&try_again_with_new_string);
+
+ // ASCII string.
+ __ bind(&ascii_string);
+ __ add(scratch, object, Operand(index, LSR, kSmiTagSize));
+ __ ldrb(result, FieldMemOperand(scratch, SeqAsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
+ __ mov(result, Operand(result, LSL, kSmiTagSize));
+}
+
+
+void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
+ Register code,
+ Register scratch,
+ Register result,
+ InvokeFlag flag) {
+ ASSERT(!code.is(result));
+
+ Label slow_case;
+ Label exit;
+
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ ASSERT(kSmiTag == 0);
+ ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ tst(code, Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ b(nz, &slow_case);
+
+ ASSERT(kSmiTag == 0);
+ __ mov(result, Operand(Factory::single_character_string_cache()));
+ __ add(result, result, Operand(code, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result, MemOperand(result, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ cmp(result, scratch);
+ __ b(eq, &slow_case);
+ __ b(&exit);
+
+ __ bind(&slow_case);
+ if (flag == CALL_FUNCTION) {
+ __ push(code);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result.is(r0)) {
+ __ mov(result, r0);
+ }
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ ASSERT(result.is(r0));
+ __ push(code);
+ __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
+ }
+
+ __ bind(&exit);
+ if (flag == JUMP_FUNCTION) {
+ ASSERT(result.is(r0));
+ __ Ret();
+ }
+}
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
Label loop;
Label done;
// This loop just copies one character at a time, as it is only used for very
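
The relocated StringHelper::GenerateFastCharCodeAt above reads flat one-byte and two-byte strings directly and handles exactly one non-flat shape: a cons string whose right-hand side is the empty string, which it unwraps to its left child and retries. Every other shape hits a slow-case label, where the runtime flattens the string so the next attempt takes the fast path. A simplified C++ model of that walk (the types are stand-ins for V8's string hierarchy):

#include <string>

// Stand-in for V8's strings: `first`/`second` model a ConsString's children,
// `flat` the payload of a sequential string.
struct Str {
  const Str* first = nullptr;   // non-null marks a cons string
  const Str* second = nullptr;
  std::u16string flat;
  bool IsCons() const { return first != nullptr; }
};

// Returns the char code, or -1 for every case the stub hands to a slow-case
// label (non-empty right side, unsupported shape, index out of range).
int FastCharCodeAt(const Str* s, size_t index) {
  while (s->IsCons()) {
    if (s->second->IsCons() || !s->second->flat.empty())
      return -1;                // right side not empty: runtime must flatten
    s = s->first;               // retry on the left branch
  }
  if (index >= s->flat.size()) return -1;
  return s->flat[index];
}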
@@ -8786,16 +9034,16 @@ enum CopyCharactersFlags {
};
-void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags) {
bool ascii = (flags & COPY_ASCII) != 0;
bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
@@ -8929,15 +9177,15 @@ void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found) {
// Register scratch3 is the general scratch register in this function.
Register scratch = scratch3;
@@ -8959,9 +9207,9 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ bind(&not_array_index);
// Calculate the two character string hash.
Register hash = scratch1;
- GenerateHashInit(masm, hash, c1);
- GenerateHashAddCharacter(masm, hash, c2);
- GenerateHashGetHash(masm, hash);
+ StringHelper::GenerateHashInit(masm, hash, c1);
+ StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+ StringHelper::GenerateHashGetHash(masm, hash);
// Collect the two characters in a register.
Register chars = c1;
@@ -9028,7 +9276,7 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// If length is not 2 the string is not a candidate.
__ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(2));
+ __ cmp(scratch, Operand(Smi::FromInt(2)));
__ b(ne, &next_probe[i]);
// Check that the candidate is a non-external ascii string.
@@ -9055,9 +9303,9 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
}
-void StringStubBase::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
// hash = character + (character << 10);
__ add(hash, character, Operand(character, LSL, 10));
// hash ^= hash >> 6;
@@ -9065,9 +9313,9 @@ void StringStubBase::GenerateHashInit(MacroAssembler* masm,
}
-void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
// hash += character;
__ add(hash, hash, Operand(character));
// hash += hash << 10;
@@ -9077,8 +9325,8 @@ void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
}
-void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
// hash += hash << 3;
__ add(hash, hash, Operand(hash, LSL, 3));
// hash ^= hash >> 11;
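
The three hash helpers above emit, in assembler, a Jenkins-style one-at-a-time string hash; the inline comments give the arithmetic directly. A C++ mirror of the steps visible in these hunks (V8's finalization continues past the hunk boundary and is not reproduced):

#include <cstdint>

// Mirrors the operations shown in the hunks above.
uint32_t HashInit(uint32_t character) {
  uint32_t hash = character + (character << 10);
  return hash ^ (hash >> 6);
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  return hash ^ (hash >> 6);  // same xor-shift step as HashInit
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  return hash;  // remaining finalization steps fall outside the hunk
}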
@@ -9179,7 +9427,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r6: from (smi)
// r7: to (smi)
__ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
- __ cmp(r4, Operand(r7, ASR, 1));
+ __ cmp(r4, Operand(r7));
__ b(lt, &runtime); // Fail if to > length.
// r1: instance type.
@@ -9205,8 +9453,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Try to lookup two character string in symbol table.
Label make_two_character_string;
- GenerateTwoCharacterSymbolTableProbe(masm, r3, r4, r1, r5, r6, r7, r9,
- &make_two_character_string);
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -9240,8 +9488,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: result string length.
// r5: first character of sub string to copy.
ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
- GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -9271,8 +9519,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: result length.
// r5: first character of string to copy.
ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
- GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ DEST_ALWAYS_ALIGNED);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -9298,9 +9546,13 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register length_delta = scratch3;
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
+ ASSERT(kSmiTag == 0);
__ tst(min_length, Operand(min_length));
__ b(eq, &compare_lengths);
+ // Untag smi.
+ __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
+
// Setup registers so that we only need to increment one register
// in the loop.
__ add(scratch2, min_length,
@@ -9410,9 +9662,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Check if either of the strings are empty. In that case return the other.
__ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
__ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- __ cmp(r2, Operand(0)); // Test if first string is empty.
+ ASSERT(kSmiTag == 0);
+ __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
__ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- __ cmp(r3, Operand(0), ne); // Else test if second string is empty.
+ ASSERT(kSmiTag == 0);
+ // Else test if second string is empty.
+ __ cmp(r3, Operand(Smi::FromInt(0)), ne);
__ b(ne, &strings_not_empty); // If either string was empty, return r0.
__ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
@@ -9422,6 +9677,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&strings_not_empty);
}
+ __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+ __ mov(r3, Operand(r3, ASR, kSmiTagSize));
// Both strings are non-empty.
// r0: first string
// r1: second string
@@ -9456,8 +9713,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
Label make_two_character_string;
- GenerateTwoCharacterSymbolTableProbe(masm, r2, r3, r6, r7, r4, r5, r9,
- &make_two_character_string);
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -9566,7 +9823,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r3: length of second string.
// r6: first character of result.
// r7: result string.
- GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
// Load second argument and locate first character.
__ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@@ -9574,7 +9831,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r3: length of second string.
// r6: next character of result.
// r7: result string.
- GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
__ mov(r0, Operand(r7));
__ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
@@ -9605,7 +9862,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r3: length of second string.
// r6: first character of result.
// r7: result string.
- GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
// Locate first character of second argument.
__ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -9614,7 +9871,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r3: length of second string.
// r6: next character of result (after copy of first string).
// r7: result string.
- GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
__ mov(r0, Operand(r7));
__ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 80df65448..bb76b633b 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -312,10 +312,20 @@ class CodeGenerator: public AstVisitor {
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
- // Load a named property, leaving it in r0. The receiver is passed on the
+ // Support for compiling assignment expressions.
+ void EmitSlotAssignment(Assignment* node);
+ void EmitNamedPropertyAssignment(Assignment* node);
+ void EmitKeyedPropertyAssignment(Assignment* node);
+
+ // Load a named property, returning it in r0. The receiver is passed on the
// stack, and remains there.
void EmitNamedLoad(Handle<String> name, bool is_contextual);
+ // Store to a named property. If the store is contextual, value is passed on
+ // the frame and consumed. Otherwise, receiver and value are passed on the
+ // frame and consumed. The result is returned in r0.
+ void EmitNamedStore(Handle<String> name, bool is_contextual);
+
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad();
@@ -357,11 +367,6 @@ class CodeGenerator: public AstVisitor {
bool reversed,
OverwriteMode mode);
- void VirtualFrameSmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode);
-
void CallWithArguments(ZoneList<Expression*>* arguments,
CallFunctionFlags flags,
int position);
@@ -457,6 +462,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast swapping of elements.
+ void GenerateSwapElements(ZoneList<Expression*>* args);
+
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
@@ -667,34 +675,66 @@ class GenericBinaryOpStub : public CodeStub {
};
-class StringStubBase: public CodeStub {
+class StringHelper : public AllStatic {
public:
+ // Generates fast code for getting a char code out of a string
+ // object at the given index. May bail out for four reasons (in the
+ // listed order):
+ // * Receiver is not a string (receiver_not_string label).
+ // * Index is not a smi (index_not_smi label).
+ // * Index is out of range (index_out_of_range).
+ // * Some other reason (slow_case label). In this case it's
+ // guaranteed that the above conditions are not violated,
+ // i.e. it's safe to assume the receiver is a string and the
+ // index is a non-negative smi < length.
+ // When successful, object, index, and scratch are clobbered.
+ // Otherwise, scratch and result are clobbered.
+ static void GenerateFastCharCodeAt(MacroAssembler* masm,
+ Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_smi,
+ Label* index_out_of_range,
+ Label* slow_case);
+
+ // Generates code for creating a one-char string from the given char
+ // code. May do a runtime call, so any register can be clobbered
+ // and, if the given invoke flag specifies a call, an internal frame
+ // is required. In tail call mode the result must be in the r0 register.
+ static void GenerateCharFromCode(MacroAssembler* masm,
+ Register code,
+ Register scratch,
+ Register result,
+ InvokeFlag flag);
+
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersLong adds too much
// overhead. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
- void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
// Probe the symbol table for a two character string. If the string is
@@ -704,27 +744,30 @@ class StringStubBase: public CodeStub {
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
- void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
// Generate string hash.
- void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
- void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
- void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
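
The move from StringStubBase to StringHelper is the classic all-static utility class: no state, no instances, just shared code-generation routines. A compilable sketch of the shape, with DISALLOW_IMPLICIT_CONSTRUCTORS approximated by an undefined private constructor; the hash step shown is the one-at-a-time add V8's string hash is based on, included only as a plausible stand-in for what GenerateHashAddCharacter emits:

#include <cstdio>

// All members static; a private, never-defined constructor forbids
// instantiation, which is what DISALLOW_IMPLICIT_CONSTRUCTORS boils
// down to for a class like this.
class StringHelperSketch {
 public:
  static unsigned HashAddCharacter(unsigned hash, unsigned character) {
    hash += character;
    hash += hash << 10;
    hash ^= hash >> 6;
    return hash;
  }

 private:
  StringHelperSketch();  // not defined
};

int main() {
  printf("%u\n", StringHelperSketch::HashAddCharacter(0u, 'v'));
  return 0;
}
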
@@ -735,7 +778,7 @@ enum StringAddFlags {
};
-class StringAddStub: public StringStubBase {
+class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -752,7 +795,7 @@ class StringAddStub: public StringStubBase {
};
-class SubStringStub: public StringStubBase {
+class SubStringStub: public CodeStub {
public:
SubStringStub() {}
@@ -861,6 +904,43 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register offset, Register scratch)
+ : object_(object), offset_(offset), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register offset_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
+ " (scratch reg %d)\n",
+ object_.code(), offset_.code(), scratch_.code());
+ }
+#endif
+
+ // Minor key encoding in 12 bits. 4 bits for each of the three
+ // registers (object, offset and scratch) OOOOAAAASSSS.
+ class ScratchBits: public BitField<uint32_t, 0, 4> {};
+ class OffsetBits: public BitField<uint32_t, 4, 4> {};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ OffsetBits::encode(offset_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
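
RecordWriteStub's MinorKey packs three 4-bit ARM register codes into twelve bits with BitField. A self-contained sketch of the encode/decode arithmetic behind that template (a simplified stand-in, not V8's actual BitField):

#include <cassert>

// Simplified stand-in for V8's BitField<uint32_t, shift, size>.
template <int shift, int size>
struct BitFieldSketch {
  static unsigned encode(unsigned value) { return value << shift; }
  static unsigned decode(unsigned key) {
    return (key >> shift) & ((1u << size) - 1u);
  }
};

typedef BitFieldSketch<0, 4> ScratchBits;  // SSSS (low nibble)
typedef BitFieldSketch<4, 4> OffsetBits;   // AAAA (middle nibble)
typedef BitFieldSketch<8, 4> ObjectBits;   // OOOO (high nibble)

int main() {
  // object r5, offset r6, scratch r7 -> key 0x567
  unsigned key = ObjectBits::encode(5) | OffsetBits::encode(6) |
                 ScratchBits::encode(7);
  assert(key == 0x567u);
  assert(OffsetBits::decode(key) == 6u);
  return 0;
}
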
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index d8149f092..d02ba764f 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -161,9 +161,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
- Generate_DebugBreakCallHelper(masm, 0);
+ Generate_DebugBreakCallHelper(masm, r0.bit());
}
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index bed93640f..e9bdfe55f 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -728,7 +728,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
ASSERT_NOT_NULL(object_slot);
// Load the object.
- Move(r2, object_slot);
+ Move(r1, object_slot);
// Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
@@ -736,12 +736,12 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- __ mov(r1, Operand(key_literal->handle()));
+ __ mov(r0, Operand(key_literal->handle()));
// Push both as arguments to ic.
- __ Push(r2, r1);
+ __ Push(r1, r0);
- // Do a keyed property load.
+ // Call keyed load IC. It has all arguments on the stack and the key in r0.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1005,6 +1005,8 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
+ // Call keyed load IC. It has all arguments on the stack and the key in r0.
+ __ ldr(r0, MemOperand(sp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}
@@ -1247,6 +1249,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForValue(prop->key(), kStack);
// Record source code position for IC call.
SetSourcePosition(prop->position());
+ // Call keyed load IC. It has all arguments on the stack and the key in
+ // r0.
+ __ ldr(r0, MemOperand(sp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Load receiver object into r1.
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index eec1f2138..5b1915f63 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -682,12 +682,13 @@ Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
- __ ldm(ia, sp, r2.bit() | r3.bit());
- __ Push(r3, r2);
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ Push(r1, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
__ TailCallExternalReference(ref, 2, 1);
@@ -697,12 +698,13 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
- __ ldm(ia, sp, r2.bit() | r3.bit());
- __ Push(r3, r2);
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ Push(r1, r0);
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
@@ -711,13 +713,14 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label slow, fast, check_pixel_array, check_number_dictionary;
- // Get the key and receiver object from the stack.
- __ ldm(ia, sp, r0.bit() | r1.bit());
+ // Get the object from the stack.
+ __ ldr(r1, MemOperand(sp, kPointerSize));
// Check that the object isn't a smi.
__ BranchOnSmi(r1, &slow);
@@ -790,6 +793,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: Push extra copies of the arguments (2).
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
+ __ ldr(r0, MemOperand(sp, 0));
GenerateRuntimeGetProperty(masm);
}
@@ -797,31 +801,71 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
+ Label miss;
+ Label index_not_smi;
+ Label index_out_of_range;
+ Label slow_char_code;
+ Label got_char_code;
- Label miss, index_ok;
-
- // Get the key and receiver object from the stack.
- __ ldm(ia, sp, r0.bit() | r1.bit());
-
- // Check that the receiver isn't a smi.
- __ BranchOnSmi(r1, &miss);
+ // Get the object from the stack.
+ __ ldr(r1, MemOperand(sp, kPointerSize));
- // Check that the receiver is a string.
- Condition is_string = masm->IsObjectStringType(r1, r2);
- __ b(NegateCondition(is_string), &miss);
+ Register object = r1;
+ Register index = r0;
+ Register code = r2;
+ Register scratch = r3;
- // Check if key is a smi or a heap number.
- __ BranchOnSmi(r0, &index_ok);
- __ CheckMap(r0, r2, Factory::heap_number_map(), &miss, false);
+ StringHelper::GenerateFastCharCodeAt(masm,
+ object,
+ index,
+ scratch,
+ code,
+ &miss, // When not a string.
+ &index_not_smi,
+ &index_out_of_range,
+ &slow_char_code);
+
+ // If we didn't bail out, code register contains smi tagged char
+ // code.
+ __ bind(&got_char_code);
+ StringHelper::GenerateCharFromCode(masm, code, scratch, r0, JUMP_FUNCTION);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from char from code tail call");
+#endif
+
+ // Check if key is a heap number.
+ __ bind(&index_not_smi);
+ __ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true);
+
+ // Push receiver and key on the stack (now that we know they are a
+ // string and a number), and call runtime.
+ __ bind(&slow_char_code);
+ __ EnterInternalFrame();
+ __ Push(object, index);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ ASSERT(!code.is(r0));
+ __ mov(code, r0);
+ __ LeaveInternalFrame();
- __ bind(&index_ok);
- // Duplicate receiver and key since they are expected on the stack after
- // the KeyedLoadIC call.
- __ Push(r1, r0);
- __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_JS);
+ // Check if the runtime call returned NaN char code. If yes, return
+ // undefined. Otherwise, we can continue.
+ if (FLAG_debug_code) {
+ __ BranchOnSmi(code, &got_char_code);
+ __ ldr(scratch, FieldMemOperand(code, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, ip);
+ __ Assert(eq, "StringCharCodeAt must return smi or heap number");
+ }
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ cmp(code, scratch);
+ __ b(ne, &got_char_code);
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
__ bind(&miss);
GenerateGeneric(masm);
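
The rewritten string stub is a ladder of guards: bail to miss if the receiver is not a string, to a heap-number path if the key is not a smi, to an undefined result if the index is out of range, and to the runtime otherwise. A rough C++ rendering of that control flow (types and names hypothetical, not V8 API):

#include <string>

enum CharAtOutcome { GOT_CHAR_CODE, MISS, INDEX_NOT_SMI, INDEX_OUT_OF_RANGE };

// Mirrors the bailout labels handed to GenerateFastCharCodeAt. The real
// stub has a fourth bailout (slow_case) for strings it cannot index
// directly; omitted here.
CharAtOutcome FastCharCodeAtSketch(bool receiver_is_string,
                                   bool index_is_smi,
                                   long index,
                                   const std::string& chars,
                                   int* char_code_out) {
  if (!receiver_is_string) return MISS;     // receiver_not_string
  if (!index_is_smi) return INDEX_NOT_SMI;  // heap-number key path
  if (index < 0 || index >= static_cast<long>(chars.size()))
    return INDEX_OUT_OF_RANGE;              // -> load undefined, Ret
  *char_code_out = static_cast<unsigned char>(chars[index]);
  return GOT_CHAR_CODE;                     // then char-from-code
}

int main() {
  int code = 0;
  return FastCharCodeAtSketch(true, true, 1, "v8", &code) == GOT_CHAR_CODE
             ? 0 : 1;
}
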
@@ -868,13 +912,14 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label slow, failed_allocation;
- // Get the key and receiver object from the stack.
- __ ldm(ia, sp, r0.bit() | r1.bit());
+ // Get the object from the stack.
+ __ ldr(r1, MemOperand(sp, kPointerSize));
// r0: key
// r1: receiver object
@@ -1104,6 +1149,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1);
+ __ ldr(r0, MemOperand(sp, 0));
GenerateRuntimeGetProperty(masm);
}
@@ -1111,13 +1157,14 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label slow;
- // Get the key and receiver object from the stack.
- __ ldm(ia, sp, r0.bit() | r1.bit());
+ // Get the object from the stack.
+ __ ldr(r1, MemOperand(sp, kPointerSize));
// Check that the receiver isn't a smi.
__ BranchOnSmi(r1, &slow);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index ccabe2c67..d97f04b71 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -232,30 +232,23 @@ void MacroAssembler::LoadRoot(Register destination,
}
-// Will clobber 4 registers: object, offset, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register offset,
- Register scratch) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register offset,
+ Register scratch) {
+ if (FLAG_debug_code) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, ne, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
const int kRSetWordShift = 3;
- Label fast, done;
-
- // First, test that the object is not in the new space. We cannot set
- // remembered set bits in the new space.
- // object: heap object pointer (with tag)
- // offset: offset to store location from the object
- and_(scratch, object, Operand(ExternalReference::new_space_mask()));
- cmp(scratch, Operand(ExternalReference::new_space_start()));
- b(eq, &done);
+ Label fast;
// Compute the bit offset in the remembered set.
// object: heap object pointer (with tag)
@@ -307,6 +300,38 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, offset));
str(scratch, MemOperand(object));
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch) {
+ ASSERT(cc == eq || cc == ne);
+ and_(scratch, object, Operand(ExternalReference::new_space_mask()));
+ cmp(scratch, Operand(ExternalReference::new_space_start()));
+ b(cc, branch);
+}
+
+
+// Will clobber 4 registers: object, offset, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Register offset,
+ Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // remembered set bits in the new space.
+ InNewSpace(object, scratch, eq, &done);
+
+ // Record the actual write.
+ RecordWriteHelper(object, offset, scratch);
bind(&done);
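
The extracted InNewSpace relies on the new space living in a reservation aligned on a power-of-two boundary, so membership is a mask-and-compare: and_ with new_space_mask(), cmp against new_space_start(), then a conditional branch. A tiny simulation with made-up base and size:

#include <cassert>

int main() {
  // Hypothetical 4 MB new space aligned on its own size.
  const unsigned kNewSpaceStart = 0x40000000u;
  const unsigned kNewSpaceSize  = 4u * 1024u * 1024u;
  const unsigned kNewSpaceMask  = ~(kNewSpaceSize - 1u);  // new_space_mask()

  unsigned inside  = kNewSpaceStart + 0x1234u;
  unsigned outside = kNewSpaceStart + kNewSpaceSize + 0x10u;

  // and_(scratch, object, mask); cmp(scratch, start); b(cc, branch)
  assert((inside & kNewSpaceMask) == kNewSpaceStart);
  assert((outside & kNewSpaceMask) != kNewSpaceStart);
  return 0;
}
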
@@ -399,6 +424,20 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
}
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ mov(scratch1, Operand(length, LSL, kSmiTagSize));
+ LoadRoot(scratch2, map_index);
+ str(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ mov(scratch1, Operand(String::kEmptyHashField));
+ str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+ str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
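
InitializeNewString folds together the three header stores every string allocation was repeating: a smi-tagged length, a map from the root list, and the empty hash field. The old AllocateAsciiString path (removed in a later hunk of this file) even loaded the map twice, a LoadRoot immediately overwritten by a mov, which the helper eliminates. A layout sketch under assumed offsets and an assumed empty-hash sentinel:

#include <cassert>

// Hypothetical flat header mirroring the three stores in
// InitializeNewString; real offsets come from String/HeapObject.
struct StringHeaderSketch {
  void* map;            // HeapObject::kMapOffset
  unsigned length;      // String::kLengthOffset, stored smi-tagged
  unsigned hash_field;  // String::kHashFieldOffset
};

int main() {
  const int kSmiTagSize = 1;            // assumed
  const unsigned kEmptyHashField = 3u;  // assumed sentinel, not the real value
  static int fake_map;                  // stand-in for a root-list map

  StringHeaderSketch s;
  s.length = 7u << kSmiTagSize;  // mov(scratch1, Operand(length, LSL, kSmiTagSize))
  s.map = &fake_map;             // LoadRoot(scratch2, map_index); str(...)
  s.hash_field = kEmptyHashField;

  assert((s.length >> kSmiTagSize) == 7u);
  return 0;
}
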
int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_ARM)
// Running on the real platform. Use the alignment as mandated by the local
@@ -722,6 +761,7 @@ void MacroAssembler::PopTryHandler() {
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
+ int save_at_depth,
Label* miss) {
// Make sure there's no overlap between scratch and the other
// registers.
@@ -729,7 +769,11 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
// Keep track of the current object in register reg.
Register reg = object_reg;
- int depth = 1;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ str(reg, MemOperand(sp));
+ }
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
@@ -769,6 +813,10 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
mov(reg, Operand(Handle<JSObject>(prototype)));
}
+ if (save_at_depth == depth) {
+ str(reg, MemOperand(sp));
+ }
+
// Go to the next object in the prototype chain.
object = prototype;
}
@@ -779,7 +827,7 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
b(ne, miss);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth));
+ LOG(IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
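
CheckMaps now counts the prototype chain from depth 0 and, when save_at_depth matches, spills the object reached at that depth to [sp]; that spilled slot is what the fast API call paths later in this diff rely on. A hypothetical C++ walk showing the shape of the loop (not the real types):

#include <cstddef>
#include <vector>

// Hypothetical heap object: a map word plus a prototype link.
struct ObjSketch {
  const void* map;
  const ObjSketch* prototype;
};

// Walk object -> holder checking each map against the expected one;
// record the object seen at save_at_depth (the str(reg, MemOperand(sp))).
bool CheckMapsSketch(const ObjSketch* object,
                     const ObjSketch* holder,
                     const std::vector<const void*>& expected_maps,
                     int save_at_depth,
                     const ObjSketch** saved_out) {
  int depth = 0;
  const ObjSketch* reg = object;
  while (true) {
    if (depth == save_at_depth) *saved_out = reg;
    if (static_cast<size_t>(depth) >= expected_maps.size() ||
        reg->map != expected_maps[depth]) {
      return false;  // branch to miss
    }
    if (reg == holder) return true;  // depth + 1 objects were checked
    reg = reg->prototype;
    ++depth;
  }
}

int main() {
  static const int m = 0;
  ObjSketch holder = { &m, 0 };
  ObjSketch object = { &m, &holder };
  std::vector<const void*> maps(2, static_cast<const void*>(&m));
  const ObjSketch* saved = 0;
  return CheckMapsSketch(&object, &holder, maps, 0, &saved) ? 0 : 1;
}
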
@@ -1020,11 +1068,11 @@ void MacroAssembler::AllocateTwoByteString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- LoadRoot(scratch1, Heap::kStringMapRootIndex);
- str(length, FieldMemOperand(result, String::kLengthOffset));
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- mov(scratch2, Operand(String::kEmptyHashField));
- str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
}
@@ -1054,12 +1102,11 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT);
// Set the map, length and hash field.
- LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex);
- mov(scratch1, Operand(Factory::ascii_string_map()));
- str(length, FieldMemOperand(result, String::kLengthOffset));
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- mov(scratch2, Operand(String::kEmptyHashField));
- str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
}
@@ -1074,11 +1121,12 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
scratch2,
gc_required,
TAG_OBJECT);
- LoadRoot(scratch1, Heap::kConsStringMapRootIndex);
- mov(scratch2, Operand(String::kEmptyHashField));
- str(length, FieldMemOperand(result, String::kLengthOffset));
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
}
@@ -1093,11 +1141,12 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
scratch2,
gc_required,
TAG_OBJECT);
- LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex);
- mov(scratch2, Operand(String::kEmptyHashField));
- str(length, FieldMemOperand(result, String::kLengthOffset));
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
- str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 062c5c678..2ec7a39ea 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -86,6 +86,20 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
+
+ // Check if an object is in new space.
+ // scratch can be the object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc, // eq for new space, ne otherwise
+ Label* branch);
+
+
+ // Set the remembered set bit for an offset into an
+ // object. RecordWriteHelper only works if the object is not in new
+ // space.
+ void RecordWriteHelper(Register object, Register offset, Register scratch);
+
// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
// of an allocated page. The 'scratch' register is used in the
@@ -243,9 +257,14 @@ class MacroAssembler: public Assembler {
// clobbered if it the same as the holder register. The function
// returns a register containing the holder - either object_reg or
// holder_reg.
+ // The function can optionally (when save_at_depth !=
+ // kInvalidProtoDepth) save the object at the given depth by moving
+ // it to [sp].
Register CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
- Register scratch, Label* miss);
+ Register scratch,
+ int save_at_depth,
+ Label* miss);
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
@@ -553,6 +572,12 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index a770d160f..095631d64 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -229,7 +229,6 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
// Check if the object is a JSValue wrapper.
@@ -241,7 +240,6 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
__ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
__ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
}
@@ -597,6 +595,258 @@ static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
}
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
+ Register scratch) {
+ __ mov(scratch, Operand(Smi::FromInt(0)));
+ __ push(scratch);
+ __ push(scratch);
+ __ push(scratch);
+ __ push(scratch);
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
+ __ Drop(4);
+}
+
+
+// Generates call to FastHandleApiCall builtin.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
+ // Get the function and setup the context.
+ JSFunction* function = optimization.constant_function();
+ __ mov(r7, Operand(Handle<JSFunction>(function)));
+ __ ldr(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
+
+ // Pass the additional arguments FastHandleApiCall expects.
+ bool info_loaded = false;
+ Object* callback = optimization.api_call_info()->callback();
+ if (Heap::InNewSpace(callback)) {
+ info_loaded = true;
+ __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
+ __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset));
+ } else {
+ __ Move(r6, Handle<Object>(callback));
+ }
+ Object* call_data = optimization.api_call_info()->data();
+ if (Heap::InNewSpace(call_data)) {
+ if (!info_loaded) {
+ __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
+ }
+ __ ldr(r5, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
+ } else {
+ __ Move(r5, Handle<Object>(call_data));
+ }
+
+ __ add(sp, sp, Operand(1 * kPointerSize));
+ __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit());
+ __ sub(sp, sp, Operand(1 * kPointerSize));
+
+ // Set the number of arguments.
+ __ mov(r0, Operand(argc + 4));
+
+ // Jump to the fast api call builtin (tail call).
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::FastHandleApiCall));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
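
ReserveSpaceForFastApiCall pushes four placeholder smis that the sequence above later fills: the stm writes call data (r5), callback (r6), and function (r7) one word above sp, and the builtin is entered with argc + 4 arguments. A small simulation of that slot bookkeeping (the slot meanings are inferred from the code above, so treat them as assumptions):

#include <cassert>

int main() {
  // Downward-growing stack, like ARM sp.
  unsigned stack[16];
  int sp = 16;

  // ReserveSpaceForFastApiCall: four zero-smi placeholders.
  for (int i = 0; i < 4; ++i) stack[--sp] = 0;

  // GenerateFastApiCall: add sp, #4; stm r5,r6,r7; sub sp, #4 -- i.e.
  // write three of the reserved slots starting one word above sp.
  stack[sp + 1] = 5;  // r5: call data (stand-in values)
  stack[sp + 2] = 6;  // r6: callback
  stack[sp + 3] = 7;  // r7: function

  const int argc = 2;
  assert(argc + 4 == 6);   // mov(r0, Operand(argc + 4))
  assert(stack[sp] == 0);  // the remaining reserved slot stays a zero smi
  return 0;
}
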
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(StubCompiler* stub_compiler,
+ const ParameterCount& arguments,
+ Register name)
+ : stub_compiler_(stub_compiler),
+ arguments_(arguments),
+ name_(name) {}
+
+ void Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
+
+ CallOptimization optimization(lookup);
+
+ if (optimization.is_constant_call()) {
+ CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss);
+ } else {
+ CompileRegular(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ name,
+ holder,
+ miss);
+ }
+ }
+
+ private:
+ void CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
+ ASSERT(optimization.is_constant_call());
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ int depth1 = kInvalidProtoDepth;
+ int depth2 = kInvalidProtoDepth;
+ bool can_do_fast_api_call = false;
+ if (optimization.is_simple_api_call() &&
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
+ lookup->holder());
+ }
+ can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+ (depth2 != kInvalidProtoDepth);
+ }
+
+ __ IncrementCounter(&Counters::call_const_interceptor, 1,
+ scratch1, scratch2);
+
+ if (can_do_fast_api_call) {
+ __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1,
+ scratch1, scratch2);
+ ReserveSpaceForFastApiCall(masm, scratch1);
+ }
+
+ Label miss_cleanup;
+ Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1,
+ scratch2, name, depth1, miss);
+
+ Label regular_invoke;
+ LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2,
+ &regular_invoke);
+
+ // Generate code for the failed interceptor case.
+
+ // Check the lookup is still valid.
+ stub_compiler_->CheckPrototypes(holder_obj, receiver,
+ lookup->holder(), scratch1,
+ scratch2, name, depth2, miss);
+
+ if (can_do_fast_api_call) {
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ } else {
+ __ InvokeFunction(optimization.constant_function(), arguments_,
+ JUMP_FUNCTION);
+ }
+
+ if (can_do_fast_api_call) {
+ __ bind(&miss_cleanup);
+ FreeSpaceForFastApiCall(masm);
+ __ b(miss_label);
+ }
+
+ __ bind(&regular_invoke);
+ if (can_do_fast_api_call) {
+ FreeSpaceForFastApiCall(masm);
+ }
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ String* name,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+ scratch1, scratch2, name,
+ miss_label);
+
+ // Call a runtime function to load the interceptor property.
+ __ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ CallExternalReference(
+ ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+ 5);
+
+ // Restore the name_ register.
+ __ pop(name_);
+ __ LeaveInternalFrame();
+ }
+
+ void LoadWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ JSObject* holder_obj,
+ Register scratch,
+ Label* interceptor_succeeded) {
+ __ EnterInternalFrame();
+ __ Push(holder, name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ // If interceptor returns no-result sentinel, call the constant function.
+ __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch);
+ __ b(ne, interceptor_succeeded);
+ }
+
+ StubCompiler* stub_compiler_;
+ const ParameterCount& arguments_;
+ Register name_;
+};
+
+
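
CompileCacheable's fast path calls the interceptor first and inspects the result: the no-interceptor-result sentinel means the property was absent and the cached constant function may be invoked; anything else is the real lookup result. A loose sketch of that dispatch (the stub's exact responsibilities are split across the compiler and its caller; function-pointer types here are stand-ins):

#include <cassert>

namespace {
const char kSentinelStorage = 0;
// Stand-in for the Heap::kNoInterceptorResultSentinelRootIndex object.
const void* const kNoInterceptorResultSentinel = &kSentinelStorage;
}

typedef const void* (*InterceptorSketch)();
typedef const void* (*ConstantFnSketch)();

const void* CallViaInterceptor(InterceptorSketch interceptor,
                               ConstantFnSketch constant_fn) {
  const void* result = interceptor();
  if (result != kNoInterceptorResultSentinel) {
    return result;       // interceptor_succeeded: use its result
  }
  return constant_fn();  // lookup still valid; call the constant function
}

const void* Absent() { return kNoInterceptorResultSentinel; }
const void* Fn() { static const char v = 1; return &v; }

int main() {
  assert(CallViaInterceptor(Absent, Fn) == Fn());
  return 0;
}
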
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
@@ -631,12 +881,10 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
String* name,
int save_at_depth,
Label* miss) {
- // TODO(602): support object saving.
- ASSERT(save_at_depth == kInvalidProtoDepth);
-
// Check that the maps haven't changed.
Register result =
- masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+ masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
+ save_at_depth, miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
@@ -837,6 +1085,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// -- lr : return address
// -----------------------------------
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray()) {
+ return Heap::undefined_value();
+ }
+
// TODO(639): faster implementation.
ASSERT(check == RECEIVER_MAP_CHECK);
@@ -886,6 +1139,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// -- lr : return address
// -----------------------------------
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray()) {
+ return Heap::undefined_value();
+ }
+
// TODO(642): faster implementation.
ASSERT(check == RECEIVER_MAP_CHECK);
@@ -938,10 +1196,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
- return generator(this, object, holder, function, name, check);
+ Object* result = generator(this, object, holder, function, name, check);
+ // undefined means bail out to regular compiler.
+ if (!result->IsUndefined()) {
+ return result;
+ }
}
- Label miss;
+ Label miss_in_smi_check;
// Get the receiver from the stack
const int argc = arguments().immediate();
@@ -950,21 +1212,39 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
__ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ b(eq, &miss_in_smi_check);
}
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+ CallOptimization optimization(function);
+ int depth = kInvalidProtoDepth;
+ Label miss;
+
switch (check) {
case RECEIVER_MAP_CHECK:
+ __ IncrementCounter(&Counters::call_const, 1, r0, r3);
+
+ if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
+ depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ }
+
+ if (depth != kInvalidProtoDepth) {
+ __ IncrementCounter(&Counters::call_const_fast_api, 1, r0, r3);
+ ReserveSpaceForFastApiCall(masm(), r0);
+ }
+
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+ CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, name,
+ depth, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
+ ASSERT(depth == kInvalidProtoDepth);
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
@@ -1037,10 +1317,19 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ if (depth != kInvalidProtoDepth) {
+ GenerateFastApiCall(masm(), optimization, argc);
+ } else {
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ }
// Handle call cache miss.
__ bind(&miss);
+ if (depth != kInvalidProtoDepth) {
+ FreeSpaceForFastApiCall(masm());
+ }
+
+ __ bind(&miss_in_smi_check);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1060,14 +1349,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// -- r2 : name
// -- lr : return address
// -----------------------------------
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
- Label miss;
- const Register receiver = r0;
- const Register holder_reg = r1;
- const Register name_reg = r2;
- const Register scratch = r3;
+ Label miss;
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -1075,80 +1358,24 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
- // Get the receiver from the stack into r0.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, holder_reg,
- scratch, name, &miss);
- if (!reg.is(holder_reg)) {
- __ mov(holder_reg, reg);
- }
-
- // If we call a constant function when the interceptor returns
- // the no-result sentinel, generate code that optimizes this case.
- if (lookup.IsProperty() &&
- lookup.IsCacheable() &&
- lookup.type() == CONSTANT_FUNCTION &&
- lookup.GetConstantFunction()->is_compiled() &&
- !holder->IsJSArray()) {
- // Constant functions cannot sit on global object.
- ASSERT(!lookup.holder()->IsGlobalObject());
-
- // Call the interceptor.
- __ EnterInternalFrame();
- __ Push(holder_reg, name_reg);
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- holder);
- __ pop(name_reg);
- __ pop(holder_reg);
- __ LeaveInternalFrame();
- // r0 no longer contains the receiver.
-
- // If interceptor returns no-result sentinal, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch);
- Label invoke;
- __ b(ne, &invoke);
- // Check the prototypes between the interceptor's holder and the
- // constant function's holder.
- CheckPrototypes(holder, holder_reg,
- lookup.holder(), r0,
- scratch,
- name,
- &miss);
-
- __ InvokeFunction(lookup.GetConstantFunction(),
- arguments(),
- JUMP_FUNCTION);
-
- __ bind(&invoke);
-
- } else {
- // Call a runtime function to load the interceptor property.
- __ EnterInternalFrame();
- __ push(name_reg);
-
- PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
- 5);
+ // Get the receiver from the stack.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- __ pop(name_reg);
- __ LeaveInternalFrame();
- }
+ CallInterceptorCompiler compiler(this, arguments(), r2);
+ compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ r1,
+ r3,
+ r4,
+ &miss);
// Move returned value, the function to call, to r1.
__ mov(r1, r0);
// Restore receiver.
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
GenerateCallFunction(masm(), object, arguments(), &miss);
@@ -1597,18 +1824,18 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
int index) {
// ----------- S t a t e -------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label miss;
- __ ldr(r2, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
-
- __ cmp(r2, Operand(Handle<String>(name)));
+ // Check the key is the cached one.
+ __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadField(receiver, holder, r0, r3, r1, index, name, &miss);
+ __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
+ GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1622,19 +1849,19 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label miss;
- __ ldr(r2, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
-
- __ cmp(r2, Operand(Handle<String>(name)));
+ // Check the key is the cached one.
+ __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
+ __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
+ bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1651,19 +1878,18 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
Object* value) {
// ----------- S t a t e -------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label miss;
- // Check the key is the cached one
- __ ldr(r2, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
-
- __ cmp(r2, Operand(Handle<String>(name)));
+ // Check the key is the cached one.
+ __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadConstant(receiver, holder, r0, r3, r1, value, name, &miss);
+ __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
+ GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1677,27 +1903,26 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
String* name) {
// ----------- S t a t e -------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label miss;
- // Check the key is the cached one
- __ ldr(r2, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
-
- __ cmp(r2, Operand(Handle<String>(name)));
+ // Check the key is the cached one.
+ __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
+ __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
GenerateLoadInterceptor(receiver,
holder,
&lookup,
+ r1,
r0,
r2,
r3,
- r1,
name,
&miss);
__ bind(&miss);
@@ -1710,19 +1935,18 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label miss;
- // Check the key is the cached one
- __ ldr(r2, MemOperand(sp, 0));
- __ ldr(r0, MemOperand(sp, kPointerSize));
-
- __ cmp(r2, Operand(Handle<String>(name)));
+ // Check the key is the cached one.
+ __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadArrayLength(masm(), r0, r3, &miss);
+ __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
+ GenerateLoadArrayLength(masm(), r1, r2, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1733,19 +1957,19 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
- __ ldr(r2, MemOperand(sp));
- __ ldr(r0, MemOperand(sp, kPointerSize)); // receiver
-
- __ cmp(r2, Operand(Handle<String>(name)));
+ // Check the key is the cached one.
+ __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadStringLength(masm(), r0, r1, r3, &miss);
+ __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
+ GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
@@ -1759,6 +1983,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
+ // -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index ed26c41d1..bf5cff299 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -298,19 +298,38 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
}
-void VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
+void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ SpillAllButCopyTOSToR0();
+ __ mov(r2, Operand(name));
CallCodeObject(ic, mode, 0);
}
+void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ PopToR0();
+ if (is_contextual) {
+ SpillAll();
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ } else {
+ EmitPop(r1);
+ SpillAll();
+ }
+ __ mov(r2, Operand(name));
+ CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+}
+
+
void VirtualFrame::CallKeyedLoadIC() {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ SpillAllButCopyTOSToR0();
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallKeyedStoreIC() {
+ ASSERT(SpilledScope::is_spilled());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
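
CallStoreIC sets up the StoreIC register contract: value in r0, receiver in r1, name in r2. For a contextual store the receiver is the global object loaded from the context rather than popped from the frame. A sketch of the two setups (register roles as in the code above):

#include <cassert>

// StoreIC contract on ARM, per the code above:
//   r0 = value, r1 = receiver, r2 = name.
struct StoreIcArgsSketch {
  int value;     // r0
  int receiver;  // r1
  int name;      // r2
};

StoreIcArgsSketch PrepareStore(bool is_contextual, int tos_value,
                               int frame_receiver, int global_object,
                               int name) {
  StoreIcArgsSketch args;
  args.value = tos_value;                        // PopToR0()
  args.receiver = is_contextual ? global_object  // ldr r1, [cp, GLOBAL_INDEX]
                                : frame_receiver;  // EmitPop(r1)
  args.name = name;                              // mov r2, Operand(name)
  return args;
}

int main() {
  StoreIcArgsSketch a = PrepareStore(true, 7, 0, 42, 1);
  assert(a.receiver == 42);
  return 0;
}
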
@@ -477,6 +496,38 @@ Register VirtualFrame::Peek() {
}
+void VirtualFrame::Dup() {
+ AssertIsNotSpilled();
+ switch (top_of_stack_state_) {
+ case NO_TOS_REGISTERS:
+ __ ldr(r0, MemOperand(sp, 0));
+ top_of_stack_state_ = R0_TOS;
+ break;
+ case R0_TOS:
+ __ mov(r1, r0);
+ top_of_stack_state_ = R0_R1_TOS;
+ break;
+ case R1_TOS:
+ __ mov(r0, r1);
+ top_of_stack_state_ = R0_R1_TOS;
+ break;
+ case R0_R1_TOS:
+ __ push(r1);
+ __ mov(r1, r0);
+ // No need to change state as r0 and r1 now contain the same value.
+ break;
+ case R1_R0_TOS:
+ __ push(r0);
+ __ mov(r0, r1);
+ // No need to change state as r0 and r1 now contain the same value.
+ break;
+ default:
+ UNREACHABLE();
+ }
+ element_count_++;
+}
+
+
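
Dup exploits the frame's lazy top-of-stack cache: top_of_stack_state_ records which of r0/r1 hold the top elements, so duplicating usually costs a register move, with a memory push only when both cache registers are already occupied. The state transitions, restated as a compilable table:

#include <cassert>

// Mirrors VirtualFrame's TOS cache states: which of r0/r1 hold the
// top elements, and in what order.
enum TosState { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS };

// Cache state after Dup(); the emitted instructions differ per case
// but the resulting cache shape is what matters here.
TosState DupTransition(TosState s) {
  switch (s) {
    case NO_TOS_REGISTERS: return R0_TOS;     // ldr r0, [sp]
    case R0_TOS:           return R0_R1_TOS;  // mov r1, r0
    case R1_TOS:           return R0_R1_TOS;  // mov r0, r1
    case R0_R1_TOS:        return R0_R1_TOS;  // push r1; mov r1, r0
    case R1_R0_TOS:        return R1_R0_TOS;  // push r0; mov r0, r1
  }
  return NO_TOS_REGISTERS;  // unreachable
}

int main() {
  assert(DupTransition(R1_TOS) == R0_R1_TOS);
  return 0;
}
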
Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
@@ -541,6 +592,19 @@ Register VirtualFrame::GetTOSRegister() {
}
+void VirtualFrame::EmitPush(Operand operand) {
+ element_count_++;
+ if (SpilledScope::is_spilled()) {
+ __ mov(r0, operand);
+ __ push(r0);
+ return;
+ }
+ EnsureOneFreeTOSRegister();
+ top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+ __ mov(kTopRegister[top_of_stack_state_], operand);
+}
+
+
void VirtualFrame::EmitPush(MemOperand operand) {
element_count_++;
if (SpilledScope::is_spilled()) {
@@ -554,6 +618,19 @@ void VirtualFrame::EmitPush(MemOperand operand) {
}
+void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
+ element_count_++;
+ if (SpilledScope::is_spilled()) {
+ __ LoadRoot(r0, index);
+ __ push(r0);
+ return;
+ }
+ EnsureOneFreeTOSRegister();
+ top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+ __ LoadRoot(kTopRegister[top_of_stack_state_], index);
+}
+
+
void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
ASSERT(SpilledScope::is_spilled());
Adjust(count);
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index 7b56bc244..77bc70ec3 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -308,9 +308,13 @@ class VirtualFrame : public ZoneObject {
InvokeJSFlags flag,
int arg_count);
- // Call load IC. Receiver is on the stack and the property name is in r2.
+ // Call load IC. Receiver is on the stack. Result is returned in r0.
+ void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
+
+ // Call store IC. If the store is contextual, value is found on top of the
+ // frame. If not, value and receiver are on the frame. Both are consumed.
// Result is returned in r0.
- void CallLoadIC(RelocInfo::Mode mode);
+ void CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed load IC. Key and receiver are on the stack. Result is returned
// in r0.
@@ -348,6 +352,9 @@ class VirtualFrame : public ZoneObject {
// must be copied to a scratch register before modification.
Register Peek();
+ // Duplicate the top of stack.
+ void Dup();
+
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0();
@@ -372,7 +379,9 @@ class VirtualFrame : public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
+ void EmitPush(Operand operand);
void EmitPush(MemOperand operand);
+ void EmitPushRoot(Heap::RootListIndex index);
// Get a register which is free and which must be immediately used to
// push on the top of the stack.
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 00010de91..216c03b63 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -684,8 +684,7 @@ function ArraySort(comparefn) {
var pivot = a[pivot_index];
// Issue 95: Keep the pivot element out of the comparisons to avoid
// infinite recursion if comparefn(pivot, pivot) != 0.
- a[pivot_index] = a[from];
- a[from] = pivot;
+ %_SwapElements(a, from, pivot_index);
var low_end = from; // Upper bound of the elements lower than pivot.
var high_start = to; // Lower bound of the elements greater than pivot.
// From low_end to i are elements equal to pivot.
@@ -694,14 +693,12 @@ function ArraySort(comparefn) {
var element = a[i];
var order = %_CallFunction(global_receiver, element, pivot, comparefn);
if (order < 0) {
- a[i] = a[low_end];
- a[low_end] = element;
+ %_SwapElements(a, i, low_end);
i++;
low_end++;
} else if (order > 0) {
high_start--;
- a[i] = a[high_start];
- a[high_start] = element;
+ %_SwapElements(a, i, high_start);
} else { // order == 0
i++;
}
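
For reference, the surrounding loop is a three-way (Dutch-flag) partition: [from, low_end) holds elements below the pivot, [high_start, to) those above, and the middle collects equals. A C++ rendering with std::swap standing in for the %_SwapElements intrinsic:

#include <algorithm>
#include <cassert>
#include <vector>

// Three-way partition around a pivot value, matching the low_end /
// high_start bookkeeping in ArraySort.
void ThreeWayPartition(std::vector<int>& a, int from, int to, int pivot) {
  int low_end = from;   // elements in [from, low_end) are < pivot
  int high_start = to;  // elements in [high_start, to) are > pivot
  for (int i = low_end; i < high_start; ) {
    if (a[i] < pivot) {
      std::swap(a[i], a[low_end]);  // %_SwapElements(a, i, low_end)
      ++i; ++low_end;
    } else if (a[i] > pivot) {
      --high_start;
      std::swap(a[i], a[high_start]);  // %_SwapElements(a, i, high_start)
    } else {
      ++i;  // equal to pivot: leave in the middle region
    }
  }
}

int main() {
  int vals[] = {3, 1, 3, 5, 2, 3};
  std::vector<int> v(vals, vals + 6);
  ThreeWayPartition(v, 0, static_cast<int>(v.size()), 3);
  assert(v[0] < 3 && v[1] < 3 && v[5] > 3);
  return 0;
}
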
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index ac9663d21..657d0dc3d 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -248,9 +248,13 @@ class Genesis BASE_EMBEDDED {
void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
+ enum PrototypePropertyMode {
+ DONT_ADD_PROTOTYPE,
+ ADD_READONLY_PROTOTYPE,
+ ADD_WRITEABLE_PROTOTYPE
+ };
Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
- bool make_prototype_read_only,
- bool make_prototype_enumerable = false);
+ PrototypePropertyMode prototypeMode);
void MakeFunctionInstancePrototypeWritable();
static bool CompileBuiltin(int index);
@@ -330,7 +334,8 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
bool is_ecma_native) {
Handle<String> symbol = Factory::LookupAsciiSymbol(name);
Handle<Code> call_code = Handle<Code>(Builtins::builtin(call));
- Handle<JSFunction> function =
+ Handle<JSFunction> function = prototype.is_null() ?
+ Factory::NewFunctionWithoutPrototype(symbol, call_code) :
Factory::NewFunctionWithPrototype(symbol,
type,
instance_size,
@@ -346,23 +351,23 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
- bool make_prototype_read_only,
- bool make_prototype_enumerable) {
+ PrototypePropertyMode prototypeMode) {
Handle<DescriptorArray> result = Factory::empty_descriptor_array();
- // Add prototype.
- PropertyAttributes attributes = static_cast<PropertyAttributes>(
- (make_prototype_enumerable ? 0 : DONT_ENUM)
- | DONT_DELETE
- | (make_prototype_read_only ? READ_ONLY : 0));
- result =
- Factory::CopyAppendProxyDescriptor(
- result,
- Factory::prototype_symbol(),
- Factory::NewProxy(&Accessors::FunctionPrototype),
- attributes);
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(
+ DONT_ENUM |
+ DONT_DELETE |
+ (prototypeMode == ADD_READONLY_PROTOTYPE ? READ_ONLY : 0));
+ result =
+ Factory::CopyAppendProxyDescriptor(
+ result,
+ Factory::prototype_symbol(),
+ Factory::NewProxy(&Accessors::FunctionPrototype),
+ attributes);
+ }
- attributes =
+ PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
// Add length.
result =
@@ -407,14 +412,29 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
// Please note that the prototype property for function instances must be
// writable.
Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(false, false);
+ ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE);
fm->set_instance_descriptors(*function_map_descriptors);
+ fm->set_function_with_prototype(true);
+
+ // Functions with this map will not have a 'prototype' property, and
+ // cannot be used as constructors.
+ Handle<Map> function_without_prototype_map =
+ Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ global_context()->set_function_without_prototype_map(
+ *function_without_prototype_map);
+ Handle<DescriptorArray> function_without_prototype_map_descriptors =
+ ComputeFunctionInstanceDescriptor(DONT_ADD_PROTOTYPE);
+ function_without_prototype_map->set_instance_descriptors(
+ *function_without_prototype_map_descriptors);
+ function_without_prototype_map->set_function_with_prototype(false);
// Allocate the function map first and then patch the prototype later
fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
global_context()->set_function_map(*fm);
- function_map_descriptors = ComputeFunctionInstanceDescriptor(true);
+ function_map_descriptors =
+ ComputeFunctionInstanceDescriptor(ADD_READONLY_PROTOTYPE);
fm->set_instance_descriptors(*function_map_descriptors);
+ fm->set_function_with_prototype(true);
Handle<String> object_name = Handle<String>(Heap::Object_symbol());
@@ -442,7 +462,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
// 262 15.3.4.
Handle<String> symbol = Factory::LookupAsciiSymbol("Empty");
Handle<JSFunction> empty_function =
- Factory::NewFunction(symbol, Factory::null_value());
+ Factory::NewFunctionWithoutPrototype(symbol);
// --- E m p t y ---
Handle<Code> code =
@@ -457,10 +477,14 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
empty_function->shared()->DontAdaptArguments();
global_context()->function_map()->set_prototype(*empty_function);
global_context()->function_instance_map()->set_prototype(*empty_function);
+ global_context()->function_without_prototype_map()->
+ set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
- Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
- empty_fm->set_instance_descriptors(*function_map_descriptors);
+ Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(
+ function_without_prototype_map);
+ empty_fm->set_instance_descriptors(
+ *function_without_prototype_map_descriptors);
empty_fm->set_prototype(global_context()->object_function()->prototype());
empty_function->set_map(*empty_fm);
return empty_function;
@@ -1215,12 +1239,12 @@ bool Genesis::InstallNatives() {
// Install the call and the apply functions.
Handle<JSFunction> call =
InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Factory::NewJSObject(Top::object_function(), TENURED),
+ Handle<JSObject>::null(),
Builtins::FunctionCall,
false);
Handle<JSFunction> apply =
InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- Factory::NewJSObject(Top::object_function(), TENURED),
+ Handle<JSObject>::null(),
Builtins::FunctionApply,
false);
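
A hedged sketch of the JS-visible effect of passing a null prototype handle
here (inferred from this hunk, not stated in the patch): these builtins are
created without a 'prototype' property at all.

    typeof Function.prototype.call.prototype;   // 'undefined'
    'prototype' in Function.prototype.apply;    // false
    new Function.prototype.call();              // TypeError expected: functions
                                                // with this map are not constructors
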
@@ -1311,14 +1335,12 @@ bool Genesis::InstallNatives() {
static FixedArray* CreateCache(int size, JSFunction* factory) {
// Caches are supposed to live for a long time, allocate in old space.
int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
- Handle<FixedArray> cache =
- Factory::NewFixedArrayWithHoles(array_size, TENURED);
+ // Cannot use cast as object is not fully initialized yet.
+ JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
+ *Factory::NewFixedArrayWithHoles(array_size, TENURED));
cache->set(JSFunctionResultCache::kFactoryIndex, factory);
- cache->set(JSFunctionResultCache::kFingerIndex,
- Smi::FromInt(JSFunctionResultCache::kEntriesIndex));
- cache->set(JSFunctionResultCache::kCacheSizeIndex,
- Smi::FromInt(JSFunctionResultCache::kEntriesIndex));
- return *cache;
+ cache->MakeZeroSize();
+ return cache;
}
@@ -1655,9 +1677,10 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
HandleScope scope;
Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(false);
+ ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE);
Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
fm->set_instance_descriptors(*function_map_descriptors);
+ fm->set_function_with_prototype(true);
Top::context()->global_context()->set_function_map(*fm);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index ccb6c0c53..dd2e3cbfc 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -164,8 +164,7 @@ enum BuiltinExtraArguments {
V(STRING_ADD_LEFT, 1) \
V(STRING_ADD_RIGHT, 1) \
V(APPLY_PREPARE, 1) \
- V(APPLY_OVERFLOW, 1) \
- V(STRING_CHAR_AT, 1)
+ V(APPLY_OVERFLOW, 1)
class ObjectVisitor;
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 5bbf050cc..f89399a97 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -31,7 +31,6 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "liveedit.h"
#include "oprofile-agent.h"
#include "prettyprinter.h"
#include "register-allocator-inl.h"
@@ -204,7 +203,6 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
// all the pieces into a Code object. This function is only to be called by
// the compiler.cc code.
Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
- LiveEditFunctionTracker live_edit_tracker(info->function());
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
@@ -216,7 +214,6 @@ Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
MacroAssembler masm(NULL, kInitialBufferSize);
CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
- live_edit_tracker.RecordFunctionScope(info->function()->scope());
cgen.Generate(info);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
@@ -225,9 +222,7 @@ Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
- Handle<Code> result = MakeCodeEpilogue(cgen.masm(), flags, info);
- live_edit_tracker.RecordFunctionCode(result);
- return result;
+ return MakeCodeEpilogue(cgen.masm(), flags, info);
}
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index a42eb4a8b..a5bb31f14 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -126,6 +126,7 @@ namespace internal {
F(RegExpConstructResult, 3, 1) \
F(GetFromCache, 2, 1) \
F(NumberToString, 1, 1) \
+ F(SwapElements, 3, 1) \
F(MathPow, 2, 1) \
F(MathSin, 1, 1) \
F(MathCos, 1, 1) \
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index c342dc227..901f2186a 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -192,6 +192,8 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
FunctionLiteral* lit =
MakeAST(is_global, script, extension, pre_data, is_json);
+ LiveEditFunctionTracker live_edit_tracker(lit);
+
// Check for parse errors.
if (lit == NULL) {
ASSERT(Top::has_pending_exception());
@@ -253,6 +255,8 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
+ live_edit_tracker.RecordFunctionInfo(result, lit);
+
return result;
}
@@ -448,6 +452,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Script> script,
AstVisitor* caller) {
+ LiveEditFunctionTracker live_edit_tracker(literal);
#ifdef DEBUG
// We should not try to compile the same function literal more than
// once.
@@ -552,6 +557,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// the resulting function.
SetExpectedNofPropertiesFromEstimate(result,
literal->expected_property_count());
+ live_edit_tracker.RecordFunctionInfo(result, literal);
return result;
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index ae9bd76d2..01bb21b5f 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -74,6 +74,7 @@ enum ContextLookupFlags {
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
+ V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
@@ -179,6 +180,7 @@ class Context: public FixedArray {
JS_ARRAY_MAP_INDEX,
REGEXP_RESULT_MAP_INDEX,
FUNCTION_MAP_INDEX,
+ FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
FUNCTION_INSTANCE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 2929191a2..66faae862 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -769,9 +769,11 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
char* decimal_rep;
bool used_gay_dtoa = false;
- char fast_dtoa_buffer[kFastDtoaMaximalLength + 1];
+ const int kFastDtoaBufferCapacity = kFastDtoaMaximalLength + 1;
+ char fast_dtoa_buffer[kFastDtoaBufferCapacity];
int length;
- if (FastDtoa(v, fast_dtoa_buffer, &sign, &length, &decimal_point)) {
+ if (FastDtoa(v, Vector<char>(fast_dtoa_buffer, kFastDtoaBufferCapacity),
+ &sign, &length, &decimal_point)) {
decimal_rep = fast_dtoa_buffer;
} else {
decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 531064f99..a69320a2c 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -447,9 +447,10 @@ void Shell::Initialize() {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Install the debugger object in the utility scope
i::Debug::Load();
- i::JSObject* debug = i::Debug::debug_context()->global();
+ i::Handle<i::JSObject> debug
+ = i::Handle<i::JSObject>(i::Debug::debug_context()->global());
utility_context_->Global()->Set(String::New("$debug"),
- Utils::ToLocal(&debug));
+ Utils::ToLocal(debug));
#endif
// Run the d8 shell utility script in the utility context
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 216d5df99..b9e19d68e 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -620,7 +620,7 @@ function DatePrintString(time) {
// -------------------------------------------------------------------
// Reused output buffer. Used when parsing date strings.
-var parse_buffer = $Array(7);
+var parse_buffer = $Array(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
@@ -628,13 +628,13 @@ function DateParse(string) {
if (IS_NULL(arr)) return $NaN;
var day = MakeDay(arr[0], arr[1], arr[2]);
- var time = MakeTime(arr[3], arr[4], arr[5], 0);
+ var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
var date = MakeDate(day, time);
- if (IS_NULL(arr[6])) {
+ if (IS_NULL(arr[7])) {
return TimeClip(UTC(date));
} else {
- return TimeClip(date - arr[6] * 1000);
+ return TimeClip(date - arr[7] * 1000);
}
}
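
With the parse buffer widened to eight slots, MakeTime now receives the
milliseconds from arr[6] and the UTC offset moves to arr[7]. A hedged example
of what this enables (accepted input forms are assumptions based on the
parser changes below):

    // Fractional seconds are no longer dropped:
    new Date('May 6 2010 09:59:35.123').getMilliseconds();   // 123 (assumed)
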
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index d5921d568..be353a373 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -54,16 +54,25 @@ bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
} else {
// n + ":"
if (!time.Add(n)) return false;
+ in.Skip('.');
}
+ } else if (in.Skip('.') && time.IsExpecting(n)) {
+ time.Add(n);
+ if (!in.IsAsciiDigit()) return false;
+ int n = in.ReadUnsignedNumber();
+ time.AddFinal(n);
} else if (tz.IsExpecting(n)) {
tz.SetAbsoluteMinute(n);
} else if (time.IsExpecting(n)) {
time.AddFinal(n);
- // Require end or white space immediately after finalizing time.
- if (!in.IsEnd() && !in.SkipWhiteSpace()) return false;
+ // Require end, white space or Z immediately after finalizing time.
+ if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z')) return false;
} else {
if (!day.Add(n)) return false;
in.Skip('-'); // Ignore suffix '-' for year, month, or day.
+ // Skip trailing 'T' for ECMAScript 5 date string format but make
+ // sure that it is followed by a digit (for the time).
+ if (in.Skip('T') && !in.IsAsciiDigit()) return false;
}
} else if (in.IsAsciiAlphaOrAbove()) {
// Parse a "word" (sequence of chars. >= 'A').
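
A hedged summary of the new scanner rules as JavaScript probes (results
inferred from the branches above, not taken from the patch):

    Date.parse('2010-05-06T09:59:35');    // 'T' date/time separator skipped
    Date.parse('2010-05-06T');            // NaN: 'T' must precede a digit
    Date.parse('2010-05-06T09:59:35Z');   // 'Z' allowed right after the time
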
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 51a63e1a0..e68532f68 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -33,6 +33,16 @@ namespace v8 {
namespace internal {
bool DateParser::DayComposer::Write(FixedArray* output) {
+  // Default the year if none was given.
+ if (index_ < 1) {
+ comp_[index_++] = 1;
+ }
+
+  // Day and month default to 1.
+ while (index_ < kSize) {
+ comp_[index_++] = 1;
+ }
+
int year = 0; // Default year is 0 (=> 2000) for KJS compatibility.
int month = kNone;
int day = kNone;
@@ -88,6 +98,7 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
int& hour = comp_[0];
int& minute = comp_[1];
int& second = comp_[2];
+ int& millisecond = comp_[3];
if (hour_offset_ != kNone) {
if (!IsHour12(hour)) return false;
@@ -95,11 +106,13 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
hour += hour_offset_;
}
- if (!IsHour(hour) || !IsMinute(minute) || !IsSecond(second)) return false;
+ if (!IsHour(hour) || !IsMinute(minute) ||
+ !IsSecond(second) || !IsMillisecond(millisecond)) return false;
output->set(HOUR, Smi::FromInt(hour));
output->set(MINUTE, Smi::FromInt(minute));
output->set(SECOND, Smi::FromInt(second));
+ output->set(MILLISECOND, Smi::FromInt(millisecond));
return true;
}
@@ -134,6 +147,7 @@ const int8_t DateParser::KeywordTable::
{'p', 'm', '\0', DateParser::AM_PM, 12},
{'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
{'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
+ {'z', '\0', '\0', DateParser::TIME_ZONE_NAME, 0},
{'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
{'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
{'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
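
A hedged illustration of the DayComposer defaults and the new 'z' keyword
(outcomes assumed, not part of the patch):

    new Date('2010').getMonth();   // 0: month defaults to 1 (January)
    new Date('2010').getDate();    // 1: day defaults to 1
    Date.parse('May 6 2010 10:00 z') ===
        Date.parse('May 6 2010 10:00 UTC');   // true: 'z' now means UTC
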
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index d339a4fb7..d999d9ca7 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -44,13 +44,14 @@ class DateParser : public AllStatic {
// [3]: hour
// [4]: minute
// [5]: second
- // [6]: UTC offset in seconds, or null value if no timezone specified
+ // [6]: millisecond
+ // [7]: UTC offset in seconds, or null value if no timezone specified
// If parsing fails, return false (content of output array is not defined).
template <typename Char>
static bool Parse(Vector<Char> str, FixedArray* output);
enum {
- YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, UTC_OFFSET, OUTPUT_SIZE
+ YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
};
private:
@@ -189,7 +190,9 @@ class DateParser : public AllStatic {
TimeComposer() : index_(0), hour_offset_(kNone) {}
bool IsEmpty() const { return index_ == 0; }
bool IsExpecting(int n) const {
- return (index_ == 1 && IsMinute(n)) || (index_ == 2 && IsSecond(n));
+ return (index_ == 1 && IsMinute(n)) ||
+ (index_ == 2 && IsSecond(n)) ||
+ (index_ == 3 && IsMillisecond(n));
}
bool Add(int n) {
return index_ < kSize ? (comp_[index_++] = n, true) : false;
@@ -207,8 +210,9 @@ class DateParser : public AllStatic {
static bool IsHour(int x) { return Between(x, 0, 23); }
static bool IsHour12(int x) { return Between(x, 0, 12); }
static bool IsSecond(int x) { return Between(x, 0, 59); }
+ static bool IsMillisecond(int x) { return Between(x, 0, 999); }
- static const int kSize = 3;
+ static const int kSize = 4;
int comp_[kSize];
int index_;
int hour_offset_;
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index a0f3bdc5a..e94cee41d 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -124,12 +124,6 @@ BreakPoint.prototype.source_position = function() {
};
-BreakPoint.prototype.updateSourcePosition = function(new_position, script) {
- this.source_position_ = new_position;
- // TODO(635): also update line and column.
-};
-
-
BreakPoint.prototype.hit_count = function() {
return this.hit_count_;
};
@@ -245,6 +239,21 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
}
+// Creates a clone of a script breakpoint that is linked to another script.
+ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
+ var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
+ other_script.id, this.line_, this.column_, this.groupId_);
+ copy.number_ = next_break_point_number++;
+ script_break_points.push(copy);
+
+ copy.hit_count_ = this.hit_count_;
+ copy.active_ = this.active_;
+ copy.condition_ = this.condition_;
+ copy.ignoreCount_ = this.ignoreCount_;
+ return copy;
+};
+
+
ScriptBreakPoint.prototype.number = function() {
return this.number_;
};
@@ -280,6 +289,13 @@ ScriptBreakPoint.prototype.column = function() {
};
+ScriptBreakPoint.prototype.update_positions = function(line, column) {
+ this.line_ = line;
+ this.column_ = column;
+};
+
+
+
ScriptBreakPoint.prototype.hit_count = function() {
return this.hit_count_;
};
@@ -406,6 +422,17 @@ function UpdateScriptBreakPoints(script) {
}
+function GetScriptBreakPoints(script) {
+ var result = [];
+ for (var i = 0; i < script_break_points.length; i++) {
+ if (script_break_points[i].matchesScript(script)) {
+ result.push(script_break_points[i]);
+ }
+ }
+ return result;
+}
+
+
Debug.setListener = function(listener, opt_data) {
if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
throw new Error('Parameters have wrong types.');
@@ -1991,32 +2018,16 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response)
return;
}
- // A function that calls a proper signature of LiveEdit API.
- var invocation;
-
var change_log = new Array();
- if (IS_STRING(request.arguments.new_source)) {
- var new_source = request.arguments.new_source;
- invocation = function() {
- return Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
- }
- } else {
- var change_pos = parseInt(request.arguments.change_pos);
- var change_len = parseInt(request.arguments.change_len);
- var new_string = request.arguments.new_string;
- if (!IS_STRING(new_string)) {
- response.failed('Argument "new_string" is not a string value');
- return;
- }
- invocation = function() {
- return Debug.LiveEdit.ApplyPatch(the_script, change_pos, change_len,
- new_string, change_log);
- }
+ if (!IS_STRING(request.arguments.new_source)) {
+ throw "new_source argument expected";
}
+ var new_source = request.arguments.new_source;
+
try {
- invocation();
+ Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure) {
// Let's treat it as a "success" so that body with change_log will be
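
A hedged sketch of a changelive request under the simplified handler; the
protocol envelope fields are assumptions, only new_source is taken from the
code above:

    var request = {
      seq: 1,
      type: 'request',
      command: 'changelive',
      arguments: {script_id: 42,
                  new_source: 'function f() { return 42; }'}
    };
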
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 20f82612d..35d3c54e6 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -513,6 +513,16 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
}
+Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
+ Handle<Code> code) {
+ Handle<JSFunction> function = NewFunctionWithoutPrototype(name);
+ function->set_code(*code);
+ ASSERT(!function->has_initial_map());
+ ASSERT(!function->has_prototype());
+ return function;
+}
+
+
Handle<Code> Factory::NewCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
Code::Flags flags,
@@ -705,6 +715,24 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
}
+Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
+ Handle<String> name) {
+ Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
+ CALL_HEAP_FUNCTION(Heap::AllocateFunction(
+ *Top::function_without_prototype_map(),
+ *function_share,
+ *the_hole_value()),
+ JSFunction);
+}
+
+
+Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name) {
+ Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name);
+ fun->set_context(Top::context()->global_context());
+ return fun;
+}
+
+
Handle<Object> Factory::ToObject(Handle<Object> object) {
CALL_HEAP_FUNCTION(object->ToObject(), Object);
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 0f2ae86b5..8a190fa09 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -218,6 +218,8 @@ class Factory : public AllStatic {
static Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
+ static Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name);
+
static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
static Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
@@ -291,6 +293,9 @@ class Factory : public AllStatic {
Handle<Code> code,
bool force_initial_map);
+ static Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
+ Handle<Code> code);
+
static Handle<DescriptorArray> CopyAppendProxyDescriptor(
Handle<DescriptorArray> array,
Handle<String> key,
@@ -376,6 +381,9 @@ class Factory : public AllStatic {
static Handle<JSFunction> NewFunctionHelper(Handle<String> name,
Handle<Object> prototype);
+ static Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
+ Handle<String> name);
+
static Handle<DescriptorArray> CopyAppendCallbackDescriptors(
Handle<DescriptorArray> array,
Handle<Object> descriptors);
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index 029954416..4c0d15d68 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -61,7 +61,7 @@ static const int maximal_target_exponent = -32;
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
-bool RoundWeed(char* buffer,
+bool RoundWeed(Vector<char> buffer,
int length,
uint64_t distance_too_high_w,
uint64_t unsafe_interval,
@@ -324,7 +324,7 @@ static void BiggestPowerTen(uint32_t number,
bool DigitGen(DiyFp low,
DiyFp w,
DiyFp high,
- char* buffer,
+ Vector<char> buffer,
int* length,
int* kappa) {
ASSERT(low.e() == w.e() && w.e() == high.e());
@@ -437,7 +437,7 @@ bool DigitGen(DiyFp low,
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
-bool grisu3(double v, char* buffer, int* length, int* decimal_exponent) {
+bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// closest floating-point neighbors. Any number strictly between
@@ -488,7 +488,11 @@ bool grisu3(double v, char* buffer, int* length, int* decimal_exponent) {
}
-bool FastDtoa(double v, char* buffer, int* sign, int* length, int* point) {
+bool FastDtoa(double v,
+ Vector<char> buffer,
+ int* sign,
+ int* length,
+ int* point) {
ASSERT(v != 0);
ASSERT(!Double(v).IsSpecial());
diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/fast-dtoa.h
index 91f6ce76b..9f1f76aeb 100644
--- a/deps/v8/src/fast-dtoa.h
+++ b/deps/v8/src/fast-dtoa.h
@@ -48,7 +48,11 @@ static const int kFastDtoaMaximalLength = 17;
// one closest to v.
// The variable 'sign' will be '0' if the given number is positive, and '1'
// otherwise.
-bool FastDtoa(double d, char* buffer, int* sign, int* length, int* point);
+bool FastDtoa(double d,
+ Vector<char> buffer,
+ int* sign,
+ int* length,
+ int* point);
} } // namespace v8::internal
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 2d6deb324..699a1e97d 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -450,7 +450,6 @@ Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
- LiveEditFunctionTracker live_edit_tracker(info->function());
FullCodeGenerator cgen(&masm);
cgen.Generate(info, PRIMARY);
@@ -459,9 +458,7 @@ Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
return Handle<Code>::null();
}
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
- Handle<Code> result = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
- live_edit_tracker.RecordFunctionCode(result);
- return result;
+ return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
}
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 84ee20bba..1d4465f3b 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -203,6 +203,7 @@ void FlattenString(Handle<String> string) {
Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
+ ASSERT(function->should_have_prototype());
CALL_HEAP_FUNCTION(Accessors::FunctionSetPrototype(*function,
*prototype,
NULL),
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 5baceee33..ea13deffa 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -42,7 +42,7 @@ namespace internal {
template<class T>
class Handle {
public:
- INLINE(Handle(T** location)) { location_ = location; }
+ INLINE(explicit Handle(T** location)) { location_ = location; }
INLINE(explicit Handle(T* obj));
INLINE(Handle()) : location_(NULL) {}
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 0cd17917a..193f082f3 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -306,6 +306,7 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
TranscendentalCache::Clear();
+ ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
#ifdef DEBUG
@@ -541,6 +542,28 @@ void Heap::EnsureFromSpaceIsCommitted() {
}
+class ClearThreadJSFunctionResultCachesVisitor: public ThreadVisitor {
+ virtual void VisitThread(ThreadLocalTop* top) {
+ Context* context = top->context_;
+ if (context == NULL) return;
+
+ FixedArray* caches =
+ context->global()->global_context()->jsfunction_result_caches();
+ int length = caches->length();
+ for (int i = 0; i < length; i++) {
+ JSFunctionResultCache::cast(caches->get(i))->Clear();
+ }
+ }
+};
+
+
+void Heap::ClearJSFunctionResultCaches() {
+ if (Bootstrapper::IsActive()) return;
+ ClearThreadJSFunctionResultCachesVisitor visitor;
+ ThreadManager::IterateThreads(&visitor);
+}
+
+
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer) {
@@ -1761,41 +1784,6 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
}
-Object* Heap::SmiOrNumberFromDouble(double value,
- bool new_object,
- PretenureFlag pretenure) {
- // We need to distinguish the minus zero value and this cannot be
- // done after conversion to int. Doing this by comparing bit
- // patterns is faster than using fpclassify() et al.
- static const DoubleRepresentation plus_zero(0.0);
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation nan(OS::nan_value());
- ASSERT(minus_zero_value() != NULL);
- ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
-
- DoubleRepresentation rep(value);
- if (rep.bits == plus_zero.bits) return Smi::FromInt(0); // not uncommon
- if (rep.bits == minus_zero.bits) {
- return new_object ? AllocateHeapNumber(-0.0, pretenure)
- : minus_zero_value();
- }
- if (rep.bits == nan.bits) {
- return new_object
- ? AllocateHeapNumber(OS::nan_value(), pretenure)
- : nan_value();
- }
-
- // Try to represent the value as a tagged small integer.
- int int_value = FastD2I(value);
- if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return Smi::FromInt(int_value);
- }
-
- // Materialize the value in the heap.
- return AllocateHeapNumber(value, pretenure);
-}
-
-
Object* Heap::NumberToString(Object* number, bool check_number_string_cache) {
Counters::number_to_string_runtime.Increment();
if (check_number_string_cache) {
@@ -1853,17 +1841,24 @@ Heap::RootListIndex Heap::RootIndexForExternalArrayType(
}
-Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
- return SmiOrNumberFromDouble(value,
- true /* number object must be new */,
- pretenure);
-}
+Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
+ // We need to distinguish the minus zero value and this cannot be
+ // done after conversion to int. Doing this by comparing bit
+ // patterns is faster than using fpclassify() et al.
+ static const DoubleRepresentation minus_zero(-0.0);
+ DoubleRepresentation rep(value);
+ if (rep.bits == minus_zero.bits) {
+ return AllocateHeapNumber(-0.0, pretenure);
+ }
-Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
- return SmiOrNumberFromDouble(value,
- false /* use preallocated NaN, -0.0 */,
- pretenure);
+ int int_value = FastD2I(value);
+ if (value == int_value && Smi::IsValid(int_value)) {
+ return Smi::FromInt(int_value);
+ }
+
+ // Materialize the value in the heap.
+ return AllocateHeapNumber(value, pretenure);
}
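
A hedged reminder of why minus zero still forces a HeapNumber while other
integral values in range become smis: the sign of zero is observable from
JavaScript.

    1 / -0;   // -Infinity: returning Smi::FromInt(0) would lose the sign
    1 / 0;    //  Infinity
    // Any other integral double in smi range, e.g. 42.0, can be
    // returned as Smi::FromInt(42).
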
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 6661b5db6..902fc77ee 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -527,13 +527,6 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateArgumentsObject(Object* callee, int length);
- // Converts a double into either a Smi or a HeapNumber object.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- static Object* NewNumberFromDouble(double value,
- PretenureFlag pretenure = NOT_TENURED);
-
// Same as NewNumberFromDouble, but may return a preallocated/immutable
// number object (e.g., minus_zero_value_, nan_value_)
static Object* NumberFromDouble(double value,
@@ -1131,12 +1124,6 @@ class Heap : public AllStatic {
GarbageCollector collector,
GCTracer* tracer);
- // Returns either a Smi or a Number object from 'value'. If 'new_object'
- // is false, it may return a preallocated immutable object.
- static Object* SmiOrNumberFromDouble(double value,
- bool new_object,
- PretenureFlag pretenure = NOT_TENURED);
-
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
@@ -1191,6 +1178,8 @@ class Heap : public AllStatic {
HeapObject* target,
int size);
+ static void ClearJSFunctionResultCaches();
+
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record the copy of an object in the NewSpace's statistics.
static void RecordCopiedObject(HeapObject* obj);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index e9db5ffb3..83060c1c2 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -1258,8 +1258,6 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
Result answer;
if (left_is_string) {
if (right_is_string) {
- // TODO(lrn): if both are constant strings
- // -- do a compile time cons, if allocation during codegen is allowed.
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
answer = frame_->CallStub(&stub, 2);
} else {
@@ -2043,8 +2041,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(operand->reg());
}
deferred->BindExit();
answer = *operand;
@@ -2082,8 +2080,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(operand->reg());
}
deferred->BindExit();
operand->Unuse();
@@ -2117,7 +2115,9 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
}
deferred->BindExit();
} else {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(operand->reg());
+ }
if (shift_value > 0) {
__ sar(operand->reg(), shift_value);
__ and_(operand->reg(), ~kSmiTagMask);
@@ -2149,8 +2149,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(operand->reg());
}
__ mov(answer.reg(), operand->reg());
__ SmiUntag(answer.reg());
@@ -2168,12 +2168,12 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
case Token::SHL:
if (reversed) {
+ // Move operand into ecx and also into a second register.
+ // If operand is already in a register, take advantage of that.
+ // This lets us modify ecx, but still bail out to deferred code.
Result right;
Result right_copy_in_ecx;
-
- // Make sure to get a copy of the right operand into ecx. This
- // allows us to modify it without having to restore it in the
- // deferred code.
+ TypeInfo right_type_info = operand->type_info();
operand->ToRegister();
if (operand->reg().is(ecx)) {
right = allocator()->Allocate();
@@ -2193,14 +2193,14 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
answer.reg(),
smi_value,
right.reg(),
- right.type_info(),
+ right_type_info,
overwrite_mode);
__ mov(answer.reg(), Immediate(int_value));
__ sar(ecx, kSmiTagSize);
- if (!right.type_info().IsSmi()) {
+ if (!right_type_info.IsSmi()) {
deferred->Branch(carry);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right.reg());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(right.reg());
}
__ shl_cl(answer.reg());
__ cmp(answer.reg(), 0xc0000000);
@@ -2241,8 +2241,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(operand->reg());
}
__ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
@@ -2285,8 +2285,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(operand->reg());
}
if (op == Token::BIT_AND) {
__ and_(Operand(operand->reg()), Immediate(value));
@@ -2339,6 +2339,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
}
}
break;
+
// Generate inline code for mod of powers of 2 and negative powers of 2.
case Token::MOD:
if (!reversed &&
@@ -2693,7 +2694,7 @@ void CodeGenerator::Comparison(AstNode* node,
if (cc == equal) {
Label comparison_done;
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(1));
+ Immediate(Smi::FromInt(1)));
__ j(not_equal, &comparison_done);
uint8_t char_value =
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
@@ -2703,6 +2704,7 @@ void CodeGenerator::Comparison(AstNode* node,
} else {
__ mov(temp2.reg(),
FieldOperand(left_side.reg(), String::kLengthOffset));
+ __ SmiUntag(temp2.reg());
__ sub(Operand(temp2.reg()), Immediate(1));
Label comparison;
// If the length is 0 then the subtraction gave -1 which compares less
@@ -2722,7 +2724,7 @@ void CodeGenerator::Comparison(AstNode* node,
// If the first character is the same then the long string sorts after
// the short one.
__ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(1));
+ Immediate(Smi::FromInt(1)));
__ bind(&characters_were_different);
}
temp2.Unuse();
@@ -5367,10 +5369,13 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
// Evaluate the right-hand side.
if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
frame()->Push(&result);
Load(node->value());
+ // Perform the binary operation.
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
@@ -5380,6 +5385,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ // For non-compound assignment just load the right-hand side.
Load(node->value());
}
@@ -5402,7 +5408,9 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
Property* prop = node->target()->AsProperty();
ASSERT(var == NULL || (prop == NULL && var->is_global()));
- // Initialize name and evaluate the receiver subexpression if necessary.
+ // Initialize name and evaluate the receiver sub-expression if necessary. If
+ // the receiver is trivial it is not placed on the stack at this point, but
+ // loaded whenever actually needed.
Handle<String> name;
bool is_trivial_receiver = false;
if (var != NULL) {
@@ -5416,10 +5424,13 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
if (!is_trivial_receiver) Load(prop->obj());
}
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
if (node->starts_initialization_block()) {
+    // An initialization block consists of assignments of the form
+    // expr.x = ..., so this is never an assignment to a variable and
+    // there is always a receiver object.
ASSERT_EQ(NULL, var);
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
if (is_trivial_receiver) {
frame()->Push(prop->obj());
} else {
@@ -5428,14 +5439,21 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ // Change to fast case at the end of an initialization block. To prepare for
+ // that add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
if (node->ends_initialization_block() && !is_trivial_receiver) {
- // Add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
frame()->Dup();
}
+ // Stack layout:
+ // [tos] : receiver (only materialized if non-trivial)
+ // [tos+1] : receiver if at the end of an initialization block
+
// Evaluate the right-hand side.
if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
if (is_trivial_receiver) {
frame()->Push(prop->obj());
} else if (var != NULL) {
@@ -5459,9 +5477,15 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ // For non-compound assignment just load the right-hand side.
Load(node->value());
}
+ // Stack layout:
+ // [tos] : value
+ // [tos+1] : receiver (only materialized if non-trivial)
+ // [tos+2] : receiver if at the end of an initialization block
+
// Perform the assignment. It is safe to ignore constants here.
ASSERT(var == NULL || var->mode() != Variable::CONST);
ASSERT_NE(Token::INIT_CONST, node->op());
@@ -5475,6 +5499,10 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
Result answer = EmitNamedStore(name, is_contextual);
frame()->Push(&answer);
+ // Stack layout:
+ // [tos] : result
+ // [tos+1] : receiver if at the end of an initialization block
+
if (node->ends_initialization_block()) {
ASSERT_EQ(NULL, var);
// The argument to the runtime call is the receiver.
@@ -5491,6 +5519,9 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
+ // Stack layout:
+ // [tos] : result
+
ASSERT_EQ(frame()->height(), original_height + 1);
}
@@ -5499,38 +5530,47 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame()->height();
#endif
- Comment cmnt(masm_, "[ Named Property Assignment");
+ Comment cmnt(masm_, "[ Keyed Property Assignment");
Property* prop = node->target()->AsProperty();
ASSERT_NOT_NULL(prop);
// Evaluate the receiver subexpression.
Load(prop->obj());
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
if (node->starts_initialization_block()) {
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
frame_->Dup();
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ // Change to fast case at the end of an initialization block. To prepare for
+ // that add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
if (node->ends_initialization_block()) {
- // Add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
frame_->Dup();
}
// Evaluate the key subexpression.
Load(prop->key());
+ // Stack layout:
+ // [tos] : key
+ // [tos+1] : receiver
+ // [tos+2] : receiver if at the end of an initialization block
+
// Evaluate the right-hand side.
if (node->is_compound()) {
- // Duplicate receiver and key.
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
+ // Duplicate receiver and key for loading the current property value.
frame()->PushElementAt(1);
frame()->PushElementAt(1);
Result value = EmitKeyedLoad();
frame()->Push(&value);
Load(node->value());
+ // Perform the binary operation.
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
@@ -5539,15 +5579,27 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ // For non-compound assignment just load the right-hand side.
Load(node->value());
}
+ // Stack layout:
+ // [tos] : value
+ // [tos+1] : key
+ // [tos+2] : receiver
+ // [tos+3] : receiver if at the end of an initialization block
+
// Perform the assignment. It is safe to ignore constants here.
ASSERT(node->op() != Token::INIT_CONST);
CodeForSourcePosition(node->position());
Result answer = EmitKeyedStore(prop->key()->type());
frame()->Push(&answer);
+ // Stack layout:
+ // [tos] : result
+ // [tos+1] : receiver if at the end of an initialization block
+
+ // Change to fast case at the end of an initialization block.
if (node->ends_initialization_block()) {
// The argument to the runtime call is the extra copy of the receiver,
// which is below the value of the assignment. Swap the receiver and
@@ -5559,6 +5611,9 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
+ // Stack layout:
+ // [tos] : result
+
ASSERT(frame()->height() == original_height + 1);
}
@@ -5950,6 +6005,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
result.reg(),
&slow_case,
&slow_case,
+ &slow_case,
&slow_case);
__ jmp(&exit);
@@ -6607,6 +6663,121 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
}
+class DeferredSwapElements: public DeferredCode {
+ public:
+ DeferredSwapElements(Register object, Register index1, Register index2)
+ : object_(object), index1_(index1), index2_(index2) {
+ set_comment("[ DeferredSwapElements");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+ __ push(object_);
+ __ push(index1_);
+ __ push(index2_);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ // Note: this code assumes that indices are passed are within
+ // elements' bounds and refer to valid (not holes) values.
+ Comment cmnt(masm_, "[ GenerateSwapElements");
+
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Result index2 = frame_->Pop();
+ index2.ToRegister();
+
+ Result index1 = frame_->Pop();
+ index1.ToRegister();
+
+ Result object = frame_->Pop();
+ object.ToRegister();
+
+ Result tmp1 = allocator()->Allocate();
+ tmp1.ToRegister();
+ Result tmp2 = allocator()->Allocate();
+ tmp2.ToRegister();
+
+ frame_->Spill(object.reg());
+ frame_->Spill(index1.reg());
+ frame_->Spill(index2.reg());
+
+ DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
+ index1.reg(),
+ index2.reg());
+
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
+ deferred->Branch(less);
+ __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
+ __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ deferred->Branch(not_zero);
+
+ // Check the object's elements are in fast case.
+ __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
+ __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Smi-tagging is equivalent to multiplying by 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ // Check that both indices are smis.
+ __ mov(tmp2.reg(), index1.reg());
+ __ or_(tmp2.reg(), Operand(index2.reg()));
+ __ test(tmp2.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Bring addresses into index1 and index2.
+ __ lea(index1.reg(), FieldOperand(tmp1.reg(),
+ index1.reg(),
+ times_half_pointer_size, // index1 is Smi
+ FixedArray::kHeaderSize));
+ __ lea(index2.reg(), FieldOperand(tmp1.reg(),
+ index2.reg(),
+ times_half_pointer_size, // index2 is Smi
+ FixedArray::kHeaderSize));
+
+ // Swap elements.
+ __ mov(object.reg(), Operand(index1.reg(), 0));
+ __ mov(tmp2.reg(), Operand(index2.reg(), 0));
+ __ mov(Operand(index2.reg(), 0), object.reg());
+ __ mov(Operand(index1.reg(), 0), tmp2.reg());
+
+ Label done;
+ __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
+  // Possible optimization: check that both values are smis
+  // (OR them together and test against the smi mask).
+
+ __ mov(tmp2.reg(), tmp1.reg());
+ RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
+ __ CallStub(&recordWrite1);
+
+ RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
+ __ CallStub(&recordWrite2);
+
+ __ bind(&done);
+
+ deferred->BindExit();
+ frame_->Push(Factory::undefined_value());
+}
+
+
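
A hedged sketch of the intended caller in the natives, e.g. a sort routine
in array.js (exact call site assumed; the deferred path above falls back to
Runtime::kSwapElements):

    function SwapArrayElements(a, i, j) {
      // Inline runtime call; i and j must be in-bounds, non-hole smis.
      %_SwapElements(a, i, j);
    }
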
void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
Comment cmnt(masm_, "[ GenerateCallFunction");
@@ -6623,10 +6794,10 @@ void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
}
-// Generates the Math.pow method - only handles special cases and branches to
-// the runtime system if not.Please note - this function assumes that
-// the callsite has executed ToNumber on both arguments and that the
-// arguments are not the same identifier.
+// Generates the Math.pow method. Only handles special cases and
+// branches to the runtime system for everything else. Please note
+// that this function assumes that the callsite has executed ToNumber
+// on both arguments.
void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Load(args->at(0));
@@ -6649,8 +6820,6 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
- // We can safely assume that the base and exponent is not in the same
- // register since we only call this from one callsite (math.js).
ASSERT(!exponent.reg().is(base.reg()));
JumpTarget call_runtime;
@@ -6699,7 +6868,6 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
Label while_true;
Label no_multiply;
- // Label allocate_and_return;
__ bind(&while_true);
__ shr(exponent.reg(), 1);
__ j(not_carry, &no_multiply);
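
The comment above presumes ToNumber has already run on both arguments; a
hedged sketch of the expected math.js-style call site (macro and helper
names assumed):

    function MathPow(x, y) {
      if (!IS_NUMBER(x)) x = ToNumber(x);
      if (!IS_NUMBER(y)) y = ToNumber(y);
      return %_MathPow(x, y);   // inline stub; everything else hits the runtime
    }
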
@@ -8390,6 +8558,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
Result tmp = allocator_->Allocate();
ASSERT(tmp.is_valid());
+ Result tmp2 = allocator_->Allocate();
+ ASSERT(tmp2.is_valid());
// Determine whether the value is a constant before putting it in a
// register.
@@ -8406,12 +8576,9 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
receiver.reg(),
tmp.reg());
- // Check that the value is a smi if it is not a constant. We can skip
- // the write barrier for smis and constants.
- if (!value_is_constant) {
- __ test(result.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- }
+ // Check that the receiver is not a smi.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
// Check that the key is a smi.
if (!key.is_smi()) {
@@ -8421,10 +8588,6 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
}
- // Check that the receiver is not a smi.
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
-
// Check that the receiver is a JSArray.
__ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
deferred->Branch(not_equal);
@@ -8438,7 +8601,19 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// Get the elements array from the receiver and check that it is not a
// dictionary.
__ mov(tmp.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ FieldOperand(receiver.reg(), JSArray::kElementsOffset));
+
+ // Check whether it is possible to omit the write barrier. If the elements
+ // array is in new space or the value written is a smi we can safely update
+ // the elements array without updating the remembered set.
+ Label in_new_space;
+ __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
+ if (!value_is_constant) {
+ __ test(result.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+
+ __ bind(&in_new_space);
// Bind the deferred code patch site to be able to locate the fixed
// array map comparison. When debugging, we patch this comparison to
// always fail so that we will hit the IC call in the deferred code
@@ -8819,6 +8994,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ cmp(ecx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string);
__ mov(edx, FieldOperand(eax, String::kLengthOffset));
+ ASSERT(kSmiTag == 0);
__ test(edx, Operand(edx));
__ j(zero, &false_result);
__ jmp(&true_result);
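
Several hunks below rely on string lengths now being stored as smis. A
hedged worked example of the tagging arithmetic on ia32 (kSmiTag == 0,
kSmiTagSize == 1):

    var n = 5;
    var tagged = n << 1;   // 10: a tagged smi already equals n * 2, so
                           // two-byte indexing can use times_1 directly
    (0 << 1) === 0;        // true: 'test edx, edx' still detects empty strings
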
@@ -10775,15 +10951,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Get the length of the string to ebx.
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- // ebx: Length of subject string
+ // ebx: Length of subject string as a smi
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(eax);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
__ cmp(eax, Operand(ebx));
- __ j(above, &runtime);
+ __ j(above_equal, &runtime);
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
@@ -10906,6 +11083,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ test(edi, Operand(edi));
__ mov(edi, FieldOperand(eax, String::kLengthOffset));
__ j(zero, &setup_two_byte);
+ __ SmiUntag(edi);
__ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
@@ -10913,7 +11091,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ jmp(&setup_rest);
__ bind(&setup_two_byte);
- __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);  // edi is a smi (scaled by 2).
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
__ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
@@ -12051,7 +12230,8 @@ void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
Register scratch,
Register result,
Label* receiver_not_string,
- Label* index_not_positive_smi,
+ Label* index_not_smi,
+ Label* index_out_of_range,
Label* slow_case) {
Label not_a_flat_string;
Label try_again_with_new_string;
@@ -12070,25 +12250,20 @@ void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
__ test(result, Immediate(kIsNotStringMask));
__ j(not_zero, receiver_not_string);
- // If the index is negative or non-smi trigger the non-positive-smi
- // case.
+ // If the index is non-smi trigger the non-smi case.
ASSERT(kSmiTag == 0);
- __ test(index, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, index_not_positive_smi);
-
- // Put untagged index into scratch register.
- __ mov(scratch, index);
- __ SmiUntag(scratch);
+ __ test(index, Immediate(kSmiTagMask));
+ __ j(not_zero, index_not_smi);
// Check for index out of range.
- __ cmp(scratch, FieldOperand(object, String::kLengthOffset));
- __ j(greater_equal, slow_case);
+ __ cmp(index, FieldOperand(object, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range);
__ bind(&try_again_with_new_string);
// ----------- S t a t e -------------
// -- object : string to access
// -- result : instance type of the string
- // -- scratch : positive smi index < length
+ // -- scratch : non-negative index < length
// -----------------------------------
// We need special handling for non-flat strings.
@@ -12102,9 +12277,10 @@ void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
__ j(not_zero, &ascii_string);
// 2-byte string.
- // Load the 2-byte character code into the temp register.
+ // Load the 2-byte character code into the result register.
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);  // index is a smi (scaled by 2).
__ movzx_w(result, FieldOperand(object,
- scratch, times_2,
+ index, times_1,
SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
@@ -12130,7 +12306,11 @@ void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
// ASCII string.
__ bind(&ascii_string);
- // Load the byte into the temp register.
+ // Put untagged index into scratch register.
+ __ mov(scratch, index);
+ __ SmiUntag(scratch);
+
+ // Load the byte into the result register.
__ movzx_b(result, FieldOperand(object,
scratch, times_1,
SeqAsciiString::kHeaderSize));
@@ -12220,6 +12400,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Check if either of the strings are empty. In that case return the other.
Label second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset));
+ ASSERT(kSmiTag == 0);
__ test(ecx, Operand(ecx));
__ j(not_zero, &second_not_zero_length);
// Second string is empty, result is first string which is already in eax.
@@ -12227,6 +12408,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ ASSERT(kSmiTag == 0);
__ test(ebx, Operand(ebx));
__ j(not_zero, &both_not_zero_length);
// First string is empty, result is second string which is in edx.
@@ -12236,16 +12418,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Both strings are non-empty.
// eax: first string
- // ebx: length of first string
- // ecx: length of second string
+ // ebx: length of first string as a smi
+ // ecx: length of second string as a smi
// edx: second string
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
__ add(ebx, Operand(ecx));
+ ASSERT(Smi::kMaxValue == String::kMaxLength);
+ // Handle exceptionally long strings in the runtime system.
+ __ j(overflow, &string_add_runtime);
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
- __ cmp(ebx, 2);
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
// Check that both strings are non-external ascii strings.
@@ -12265,17 +12450,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ret(2 * kPointerSize);
__ bind(&make_two_character_string);
- __ Set(ebx, Immediate(2));
+ __ Set(ebx, Immediate(Smi::FromInt(2)));
__ jmp(&make_flat_ascii_string);
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(ebx, String::kMinNonFlatLength);
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
__ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ cmp(ebx, String::kMaxLength);
- __ j(above, &string_add_runtime);
// If result is not supposed to be flat allocate a cons string object. If both
// strings are ascii the result is an ascii cons string.
@@ -12292,6 +12473,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
+ if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
__ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
__ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
@@ -12308,7 +12490,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Handle creating a flat result. First check that both strings are not
// external strings.
// eax: first string
- // ebx: length of resulting flat string
+ // ebx: length of resulting flat string as a smi
// edx: second string
__ bind(&string_add_flat_result);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -12323,7 +12505,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(equal, &string_add_runtime);
// Now check if both strings are ascii strings.
// eax: first string
- // ebx: length of resulting flat string
+ // ebx: length of resulting flat string as a smi
// edx: second string
Label non_ascii_string_add_flat_result;
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -12338,7 +12520,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&make_flat_ascii_string);
// Both strings are ascii strings. As they are short they are both flat.
- // ebx: length of resulting flat string
+ // ebx: length of resulting flat string as a smi
+ __ SmiUntag(ebx);
__ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
// eax: result string
__ mov(ecx, eax);
@@ -12347,6 +12530,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
__ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
@@ -12356,6 +12540,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Load second argument and locate first character.
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
__ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
@@ -12367,7 +12552,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Handle creating a flat two byte result.
// eax: first string - known to be two byte
- // ebx: length of resulting flat string
+ // ebx: length of resulting flat string as a smi
// edx: second string
__ bind(&non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
@@ -12376,6 +12561,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
+ __ SmiUntag(ebx);
__ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
// eax: result string
__ mov(ecx, eax);
@@ -12385,6 +12571,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
__ add(Operand(edx),
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// eax: result string
@@ -12395,6 +12582,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Load second argument and locate first character.
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
__ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
@@ -12579,7 +12767,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ j(equal, not_found);
// If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
+ __ cmp(FieldOperand(candidate, String::kLengthOffset),
+ Immediate(Smi::FromInt(2)));
__ j(not_equal, &next_probe[i]);
// As we are out of registers save the mask on the stack and use that
@@ -12848,6 +13037,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Change index to run from -min_length to -1 by adding min_length
// to string start. This means that the loop ends when the index reaches
// zero, which doesn't need an additional compare.
+ __ SmiUntag(min_length);
__ lea(left,
FieldOperand(left,
min_length, times_1,
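The biased-index trick in this hunk is worth spelling out: after SmiUntag, min_length is folded into both string start addresses so the loop counter can run from -min_length up to zero, letting the increment's zero flag terminate the loop with no separate compare. A plain C++ rendering of the same loop shape (a sketch of the idea, not the emitted assembly):

#include <cstddef>

bool PrefixesEqual(const char* left, const char* right, ptrdiff_t min_length) {
  left += min_length;             // bias both pointers past the compared region
  right += min_length;
  ptrdiff_t index = -min_length;  // assumes min_length > 0
  do {
    if (left[index] != right[index]) return false;
  } while (++index != 0);         // reaching zero ends the loop; no extra compare
  return true;
}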
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index b3db0c42d..0d3fee592 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -636,6 +636,11 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast swapping of elements. Takes three expressions, the object and two
+ // indices. This should only be used if the indices are known to be
+ // non-negative and within bounds of the elements array at the call site.
+ void GenerateSwapElements(ZoneList<Expression*>* args);
+
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
@@ -886,14 +891,15 @@ class GenericBinaryOpStub: public CodeStub {
class StringHelper : public AllStatic {
public:
// Generates fast code for getting a char code out of a string
- // object at the given index. May bail out for three reasons (in the
+ // object at the given index. May bail out for four reasons (in the
// listed order):
// * Receiver is not a string (receiver_not_string label).
- // * Index is not a positive smi (index_not_positive_smi label).
+ // * Index is not a smi (index_not_smi label).
+ // * Index is out of range (index_out_of_range label).
// * Some other reason (slow_case label). In this case it's
// guaranteed that the above conditions are not violated,
// e.g. it's safe to assume the receiver is a string and the
- // index is a positive smi.
+ // index is a non-negative smi < length.
// When successful, object, index, and scratch are clobbered.
// Otherwise, scratch and result are clobbered.
static void GenerateFastCharCodeAt(MacroAssembler* masm,
@@ -902,7 +908,8 @@ class StringHelper : public AllStatic {
Register scratch,
Register result,
Label* receiver_not_string,
- Label* index_not_positive_smi,
+ Label* index_not_smi,
+ Label* index_out_of_range,
Label* slow_case);
// Generates code for creating a one-char string from the given char
@@ -1075,8 +1082,8 @@ class RecordWriteStub : public CodeStub {
}
#endif
- // Minor key encoding in 12 bits of three registers (object, address and
- // scratch) OOOOAAAASSSS.
+ // Minor key encoding in 12 bits. 4 bits for each of the three
+ // registers (object, address and scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
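The corrected comment describes a 4-4-4 bit packing of three register codes. Expressed with plain shifts instead of V8's BitField template (equivalent, illustrative only):

#include <cstdint>

uint32_t EncodeMinorKey(uint32_t object, uint32_t address, uint32_t scratch) {
  return (object << 8) | (address << 4) | scratch;  // OOOOAAAASSSS
}

uint32_t DecodeScratch(uint32_t key) { return key & 0xF; }
uint32_t DecodeAddress(uint32_t key) { return (key >> 4) & 0xF; }
uint32_t DecodeObject(uint32_t key)  { return (key >> 8) & 0xF; }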
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 34b32be74..bc7a33c6c 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -496,7 +496,8 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label miss;
- Label not_positive_smi;
+ Label index_not_smi;
+ Label index_out_of_range;
Label slow_char_code;
Label got_char_code;
@@ -511,7 +512,8 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
scratch,
code,
&miss, // When not a string.
- &not_positive_smi,
+ &index_not_smi,
+ &index_out_of_range,
&slow_char_code);
// If we didn't bail out, code register contains smi tagged char
// code.
@@ -521,14 +523,9 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
__ Abort("Unexpected fall-through from char from code tail call");
#endif
- // Check if key is a smi or a heap number.
- __ bind(&not_positive_smi);
- ASSERT(kSmiTag == 0);
- __ test(index, Immediate(kSmiTagMask));
- __ j(zero, &slow_char_code);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ecx, Factory::heap_number_map());
- __ j(not_equal, &miss);
+ // Check if key is a heap number.
+ __ bind(&index_not_smi);
+ __ CheckMap(index, Factory::heap_number_map(), &miss, true);
// Push receiver and key on the stack (now that we know they are a
// string and a number), and call runtime.
@@ -553,6 +550,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
__ cmp(code, Factory::nan_value());
__ j(not_equal, &got_char_code);
+ __ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::undefined_value()));
__ ret(0);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 7a9bcf0ea..a7d283452 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -50,6 +50,14 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
void MacroAssembler::RecordWriteHelper(Register object,
Register addr,
Register scratch) {
+ if (FLAG_debug_code) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, not_equal, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
+
Label fast;
// Compute the page start address from the heap object pointer, and reuse
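The new debug check leans on InNewSpace, which (outside the serializer path) decides membership with a single mask-and-compare, because new space occupies one aligned region. A rough sketch of that test under assumed constants; in the real heap the start address and mask come from the VM at runtime:

#include <cstdint>

const uintptr_t kNewSpaceStart = 0x20000000;              // assumed value
const uintptr_t kNewSpaceMask  = ~uintptr_t(0x000FFFFF);  // assumed 1 MB region

bool InNewSpace(uintptr_t object_address) {
  return (object_address & kNewSpaceMask) == kNewSpaceStart;
}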
@@ -100,6 +108,7 @@ void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Label* branch) {
+ ASSERT(cc == equal || cc == not_equal);
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
mov(scratch, Operand(object));
@@ -133,7 +142,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
- // for the remembered set bits.
+ // for the remembered set bits).
Label done;
// Skip barrier if writing a smi.
@@ -901,7 +910,9 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::string_map()));
- mov(FieldOperand(result, String::kLengthOffset), length);
+ mov(scratch1, length);
+ SmiTag(scratch1);
+ mov(FieldOperand(result, String::kLengthOffset), scratch1);
mov(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -934,7 +945,9 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::ascii_string_map()));
- mov(FieldOperand(result, String::kLengthOffset), length);
+ mov(scratch1, length);
+ SmiTag(scratch1);
+ mov(FieldOperand(result, String::kLengthOffset), scratch1);
mov(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index b11a6977e..c3a019ba4 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -48,7 +48,9 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
-
+ // Set the remembered set bit for an address which points into an
+ // object. RecordWriteHelper only works if the object is not in new
+ // space.
void RecordWriteHelper(Register object,
Register addr,
Register scratch);
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 10dcb6aee..fdf3b9feb 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1102,19 +1102,22 @@ void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
- __ call(to);
+ Label return_to;
+ __ push(Immediate::CodeRelativeOffset(&return_to));
+ __ jmp(to);
+ __ bind(&return_to);
}
void RegExpMacroAssemblerIA32::SafeReturn() {
- __ add(Operand(esp, 0), Immediate(masm_->CodeObject()));
- __ ret(0);
+ __ pop(ebx);
+ __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+ __ jmp(Operand(ebx));
}
void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
__ bind(name);
- __ sub(Operand(esp, 0), Immediate(masm_->CodeObject()));
}
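The SafeCall/SafeReturn rewrite above stops keeping absolute return addresses on the backtrack stack: only code-relative offsets are stored, and the absolute target is rebuilt against the current code object at the moment of the return jump, so stack contents stay valid even if the code object moves. A schematic of the idea (not the emitted assembly; names are illustrative):

#include <cstdint>
#include <vector>

std::vector<uintptr_t> backtrack_stack;  // holds offsets, never raw addresses
uintptr_t code_object_base = 0x10000;    // the GC may change this value

void SafeCall(uintptr_t return_offset) {
  backtrack_stack.push_back(return_offset);  // relative, survives code motion
  // ... jump to the callee ...
}

uintptr_t SafeReturn() {
  uintptr_t offset = backtrack_stack.back();
  backtrack_stack.pop_back();
  return code_object_base + offset;  // recomputed against the current base
}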
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index eba4e1fd5..809228c7e 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -221,7 +221,6 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Load length from the string (it is already a smi).
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ SmiTag(eax);
__ ret(0);
// Check if the object is a JSValue wrapper.
@@ -234,7 +233,6 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
__ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
__ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ SmiTag(eax);
__ ret(0);
}
@@ -477,107 +475,6 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
};
-// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
- public:
- explicit CallOptimization(LookupResult* lookup) {
- if (!lookup->IsProperty() || !lookup->IsCacheable() ||
- lookup->type() != CONSTANT_FUNCTION) {
- Initialize(NULL);
- } else {
- // We only optimize constant function calls.
- Initialize(lookup->GetConstantFunction());
- }
- }
-
- explicit CallOptimization(JSFunction* function) {
- Initialize(function);
- }
-
- bool is_constant_call() const {
- return constant_function_ != NULL;
- }
-
- JSFunction* constant_function() const {
- ASSERT(constant_function_ != NULL);
- return constant_function_;
- }
-
- bool is_simple_api_call() const {
- return is_simple_api_call_;
- }
-
- FunctionTemplateInfo* expected_receiver_type() const {
- ASSERT(is_simple_api_call_);
- return expected_receiver_type_;
- }
-
- CallHandlerInfo* api_call_info() const {
- ASSERT(is_simple_api_call_);
- return api_call_info_;
- }
-
- // Returns the depth of the object having the expected type in the
- // prototype chain between the two arguments.
- int GetPrototypeDepthOfExpectedType(JSObject* object,
- JSObject* holder) const {
- ASSERT(is_simple_api_call_);
- if (expected_receiver_type_ == NULL) return 0;
- int depth = 0;
- while (object != holder) {
- if (object->IsInstanceOf(expected_receiver_type_)) return depth;
- object = JSObject::cast(object->GetPrototype());
- ++depth;
- }
- if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
- return kInvalidProtoDepth;
- }
-
- private:
- void Initialize(JSFunction* function) {
- constant_function_ = NULL;
- is_simple_api_call_ = false;
- expected_receiver_type_ = NULL;
- api_call_info_ = NULL;
-
- if (function == NULL || !function->is_compiled()) return;
-
- constant_function_ = function;
- AnalyzePossibleApiFunction(function);
- }
-
- // Determines whether the given function can be called using the
- // fast api call builtin.
- void AnalyzePossibleApiFunction(JSFunction* function) {
- SharedFunctionInfo* sfi = function->shared();
- if (!sfi->IsApiFunction()) return;
- FunctionTemplateInfo* info = sfi->get_api_func_data();
-
- // Require a C++ callback.
- if (info->call_code()->IsUndefined()) return;
- api_call_info_ = CallHandlerInfo::cast(info->call_code());
-
- // Accept signatures that either have no restrictions at all or
- // only have restrictions on the receiver.
- if (!info->signature()->IsUndefined()) {
- SignatureInfo* signature = SignatureInfo::cast(info->signature());
- if (!signature->args()->IsUndefined()) return;
- if (!signature->receiver()->IsUndefined()) {
- expected_receiver_type_ =
- FunctionTemplateInfo::cast(signature->receiver());
- }
- }
-
- is_simple_api_call_ = true;
- }
-
- JSFunction* constant_function_;
- bool is_simple_api_call_;
- FunctionTemplateInfo* expected_receiver_type_;
- CallHandlerInfo* api_call_info_;
-};
-
-
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -1280,7 +1177,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ add(Operand(eax), Immediate(argc << 1));
+ __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
@@ -1333,7 +1230,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &call_builtin);
__ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(greater, &call_builtin);
+ __ j(above, &call_builtin);
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
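Note the greater-to-above fix in this hunk: allocation top and limit are raw addresses and must be compared as unsigned values; a signed "greater" misorders addresses that straddle the 0x80000000 boundary. A tiny demonstration:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t top = 0x80000001u;  // just past the signed 32-bit boundary
  uint32_t limit = 0x7FFFFFFFu;
  assert(top > limit);         // unsigned compare: "above" is taken
  assert(static_cast<int32_t>(top) <
         static_cast<int32_t>(limit));  // signed "greater" gets it backwards
  return 0;
}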
@@ -1399,7 +1296,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
return Heap::undefined_value();
}
- Label miss, empty_array, call_builtin;
+ Label miss, return_undefined, call_builtin;
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -1408,7 +1305,6 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss);
-
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, name, &miss);
@@ -1424,7 +1320,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Get the array's length into ecx and calculate new length.
__ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
__ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
- __ j(negative, &empty_array);
+ __ j(negative, &return_undefined);
// Get the last element.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -1445,12 +1341,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
Immediate(Factory::the_hole_value()));
__ ret((argc + 1) * kPointerSize);
- __ bind(&empty_array);
+ __ bind(&return_undefined);
__ mov(eax, Immediate(Factory::undefined_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
-
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index eaa0554f1..64c3ec181 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -615,7 +615,8 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
}
// Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+ if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+ JSFunction::cast(*object)->should_have_prototype()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
@@ -824,7 +825,8 @@ Object* KeyedLoadIC::Load(State state,
}
// Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+ if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+ JSFunction::cast(*object)->should_have_prototype()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(object);
Object* code =
StubCache::ComputeKeyedLoadFunctionPrototype(*name, *function);
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 7464a57c6..6aae096fc 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -301,7 +301,6 @@ class KeyedLoadIC: public IC {
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
- private:
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
// Access check is necessary explicitly since generic stub does not perform
@@ -309,6 +308,7 @@ class KeyedLoadIC: public IC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+ private:
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
State state,
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index 8fbff41b4..34d5c0da8 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -44,205 +44,47 @@
// LiveEdit namespace is declared inside a single function constructor.
Debug.LiveEdit = new function() {
- // Applies the change to the script.
- // The change is always a substring (change_pos, change_pos + change_len)
- // being replaced with a completely different string new_str.
- // This API is a legacy and is obsolete.
- //
- // @param {Script} script that is being changed
- // @param {Array} change_log a list that collects engineer-readable
- // description of what happened.
- function ApplyPatch(script, change_pos, change_len, new_str,
- change_log) {
- var old_source = script.source;
-
- // Prepare new source string.
- var new_source = old_source.substring(0, change_pos) +
- new_str + old_source.substring(change_pos + change_len);
-
- return ApplyPatchMultiChunk(script,
- [ change_pos, change_pos + change_len, change_pos + new_str.length],
- new_source, change_log);
- }
- // Function is public.
- this.ApplyPatch = ApplyPatch;
-
// Forward declaration for minifier.
var FunctionStatus;
+
// Applies the change to the script.
// The change is in form of list of chunks encoded in a single array as
// a series of triplets (pos1_start, pos1_end, pos2_end)
function ApplyPatchMultiChunk(script, diff_array, new_source, change_log) {
- // Fully compiles source string as a script. Returns Array of
- // FunctionCompileInfo -- a descriptions of all functions of the script.
- // Elements of array are ordered by start positions of functions (from top
- // to bottom) in the source. Fields outer_index and next_sibling_index help
- // to navigate the nesting structure of functions.
- //
- // The script is used for compilation, because it produces code that
- // needs to be linked with some particular script (for nested functions).
- function DebugGatherCompileInfo(source) {
- // Get function info, elements are partially sorted (it is a tree of
- // nested functions serialized as parent followed by serialized children.
- var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
-
- // Sort function infos by start position field.
- var compile_info = new Array();
- var old_index_map = new Array();
- for (var i = 0; i < raw_compile_info.length; i++) {
- compile_info.push(new FunctionCompileInfo(raw_compile_info[i]));
- old_index_map.push(i);
- }
-
- for (var i = 0; i < compile_info.length; i++) {
- var k = i;
- for (var j = i + 1; j < compile_info.length; j++) {
- if (compile_info[k].start_position > compile_info[j].start_position) {
- k = j;
- }
- }
- if (k != i) {
- var temp_info = compile_info[k];
- var temp_index = old_index_map[k];
- compile_info[k] = compile_info[i];
- old_index_map[k] = old_index_map[i];
- compile_info[i] = temp_info;
- old_index_map[i] = temp_index;
- }
- }
-
- // After sorting update outer_inder field using old_index_map. Also
- // set next_sibling_index field.
- var current_index = 0;
-
- // The recursive function, that goes over all children of a particular
- // node (i.e. function info).
- function ResetIndexes(new_parent_index, old_parent_index) {
- var previous_sibling = -1;
- while (current_index < compile_info.length &&
- compile_info[current_index].outer_index == old_parent_index) {
- var saved_index = current_index;
- compile_info[saved_index].outer_index = new_parent_index;
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = saved_index;
- }
- previous_sibling = saved_index;
- current_index++;
- ResetIndexes(saved_index, old_index_map[saved_index]);
- }
- if (previous_sibling != -1) {
- compile_info[previous_sibling].next_sibling_index = -1;
- }
- }
-
- ResetIndexes(-1, -1);
- Assert(current_index == compile_info.length);
-
- return compile_info;
- }
-
- // Variable forward declarations. Preprocessor "Minifier" needs them.
- var old_compile_info;
- var shared_infos;
- // Finds SharedFunctionInfo that corresponds compile info with index
- // in old version of the script.
- function FindFunctionInfo(index) {
- var old_info = old_compile_info[index];
- for (var i = 0; i < shared_infos.length; i++) {
- var info = shared_infos[i];
- if (info.start_position == old_info.start_position &&
- info.end_position == old_info.end_position) {
- return info;
- }
- }
- }
-
- // Replaces function's Code.
- function PatchCode(new_info, shared_info) {
- if (shared_info) {
- %LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);
- change_log.push( {function_patched: new_info.function_name} );
- } else {
- change_log.push( {function_patched: new_info.function_name,
- function_info_not_found: true} );
- }
-
- }
-
-
- var position_patch_report;
- function PatchPositions(old_info, shared_info) {
- if (!shared_info) {
- // TODO(LiveEdit): function is not compiled yet or is already collected.
- position_patch_report.push(
- { name: old_info.function_name, info_not_found: true } );
- return;
- }
- var breakpoint_position_update = %LiveEditPatchFunctionPositions(
- shared_info.raw_array, diff_array);
- for (var i = 0; i < breakpoint_position_update.length; i += 2) {
- var new_pos = breakpoint_position_update[i];
- var break_point_object = breakpoint_position_update[i + 1];
- change_log.push( { breakpoint_position_update:
- { from: break_point_object.source_position(), to: new_pos } } );
- break_point_object.updateSourcePosition(new_pos, script);
- }
- position_patch_report.push( { name: old_info.function_name } );
- }
-
- var link_to_old_script_report;
- var old_script;
- // Makes a function associated with another instance of a script (the
- // one representing its old version). This way the function still
- // may access its own text.
- function LinkToOldScript(shared_info, old_info_node) {
- if (shared_info) {
- %LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
- link_to_old_script_report.push( { name: shared_info.function_name } );
- } else {
- link_to_old_script_report.push(
- { name: old_info_node.info.function_name, not_found: true } );
- }
- }
-
-
var old_source = script.source;
-
- // Find all SharedFunctionInfo's that are compiled from this script.
- var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
-
- var shared_infos = new Array();
-
- for (var i = 0; i < shared_raw_list.length; i++) {
- shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
- }
-
+
// Gather compile information about old version of script.
- var old_compile_info = DebugGatherCompileInfo(old_source);
+ var old_compile_info = GatherCompileInfo(old_source, script);
+ // Build a tree structure for the old version of the script.
+ var root_old_node = BuildCodeInfoTree(old_compile_info);
+
+ var pos_translator = new PosTranslator(diff_array);
+
+ // Analyze changes.
+ MarkChangedFunctions(root_old_node, pos_translator.GetChunks());
+
+ // Find all SharedFunctionInfo's that were compiled from this script.
+ FindLiveSharedInfos(root_old_node, script);
+
// Gather compile information about new version of script.
var new_compile_info;
try {
- new_compile_info = DebugGatherCompileInfo(new_source);
+ new_compile_info = GatherCompileInfo(new_source, script);
} catch (e) {
throw new Failure("Failed to compile new version of script: " + e);
}
-
- var pos_translator = new PosTranslator(diff_array);
-
- // Build tree structures for old and new versions of the script.
- var root_old_node = BuildCodeInfoTree(old_compile_info);
var root_new_node = BuildCodeInfoTree(new_compile_info);
- // Analyze changes.
- MarkChangedFunctions(root_old_node, pos_translator.GetChunks());
+ // Match old functions to their counterparts in the new script.
FindCorrespondingFunctions(root_old_node, root_new_node);
// Prepare to-do lists.
var replace_code_list = new Array();
var link_to_old_script_list = new Array();
+ var link_to_original_script_list = new Array();
var update_positions_list = new Array();
function HarvestTodo(old_node) {
@@ -252,6 +94,15 @@ Debug.LiveEdit = new function() {
CollectDamaged(node.children[i]);
}
}
+
+ // Recursively collects all newly compiled functions that are going into
+ // business and should have their link to the actual script updated.
+ function CollectNew(node_list) {
+ for (var i = 0; i < node_list.length; i++) {
+ link_to_original_script_list.push(node_list[i]);
+ CollectNew(node_list[i].children);
+ }
+ }
if (old_node.status == FunctionStatus.DAMAGED) {
CollectDamaged(old_node);
@@ -263,6 +114,7 @@ Debug.LiveEdit = new function() {
update_positions_list.push(old_node);
} else if (old_node.status == FunctionStatus.CHANGED) {
replace_code_list.push(old_node);
+ CollectNew(old_node.unmatched_new_nodes);
}
for (var i = 0; i < old_node.children.length; i++) {
HarvestTodo(old_node.children[i]);
@@ -274,9 +126,9 @@ Debug.LiveEdit = new function() {
// Collect shared infos for functions whose code need to be patched.
var replaced_function_infos = new Array();
for (var i = 0; i < replace_code_list.length; i++) {
- var info = FindFunctionInfo(replace_code_list[i].array_index);
- if (info) {
- replaced_function_infos.push(info);
+ var info_wrapper = replace_code_list[i].live_shared_info_wrapper;
+ if (info_wrapper) {
+ replaced_function_infos.push(info_wrapper);
}
}
@@ -286,14 +138,24 @@ Debug.LiveEdit = new function() {
// We haven't changed anything before this line yet.
// Committing all changes.
+
+ // Start with breakpoints. Convert their line/column positions and
+ // temporarily remove them.
+ var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
+
+ var old_script;
- // Create old script if there are function linked to old version.
- if (link_to_old_script_list.length > 0) {
+ // Create an old script only if there are functions that should be linked
+ // to the old version.
+ if (link_to_old_script_list.length == 0) {
+ %LiveEditReplaceScript(script, new_source, null);
+ old_script = void 0;
+ } else {
var old_script_name = CreateNameForOldScript(script);
// Update the script text and create a new script representing an old
// version of the script.
- var old_script = %LiveEditReplaceScript(script, new_source,
+ old_script = %LiveEditReplaceScript(script, new_source,
old_script_name);
var link_to_old_script_report = new Array();
@@ -301,16 +163,19 @@ Debug.LiveEdit = new function() {
// We need to link to old script all former nested functions.
for (var i = 0; i < link_to_old_script_list.length; i++) {
- LinkToOldScript(
- FindFunctionInfo(link_to_old_script_list[i].array_index),
- link_to_old_script_list[i]);
+ LinkToOldScript(link_to_old_script_list[i], old_script,
+ link_to_old_script_report);
}
}
+ // Link all the functions that we are going to use to the actual script.
+ for (var i = 0; i < link_to_original_script_list.length; i++) {
+ %LiveEditFunctionSetScript(
+ link_to_original_script_list[i].info.shared_function_info, script);
+ }
for (var i = 0; i < replace_code_list.length; i++) {
- PatchCode(replace_code_list[i].corresponding_node.info,
- FindFunctionInfo(replace_code_list[i].array_index));
+ PatchFunctionCode(replace_code_list[i], change_log);
}
var position_patch_report = new Array();
@@ -319,14 +184,214 @@ Debug.LiveEdit = new function() {
for (var i = 0; i < update_positions_list.length; i++) {
// TODO(LiveEdit): take into account whether it's source_changed or
// unchanged and whether positions changed at all.
- PatchPositions(update_positions_list[i].info,
- FindFunctionInfo(update_positions_list[i].array_index));
+ PatchPositions(update_positions_list[i], diff_array,
+ position_patch_report);
}
+
+ break_points_restorer(pos_translator, old_script);
}
// Function is public.
this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
+ // Fully compiles source string as a script. Returns Array of
+ // FunctionCompileInfo -- a descriptions of all functions of the script.
+ // Elements of array are ordered by start positions of functions (from top
+ // to bottom) in the source. Fields outer_index and next_sibling_index help
+ // to navigate the nesting structure of functions.
+ //
+ // All functions get compiled and linked to the script provided as the
+ // parameter script.
+ // TODO(LiveEdit): consider not using actual scripts here, because
+ // we have to manually erase all links right after compiling.
+ function GatherCompileInfo(source, script) {
+ // Get function info, elements are partially sorted (it is a tree of
+ // nested functions serialized as parent followed by serialized children).
+ var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
+
+ // Sort function infos by start position field.
+ var compile_info = new Array();
+ var old_index_map = new Array();
+ for (var i = 0; i < raw_compile_info.length; i++) {
+ var info = new FunctionCompileInfo(raw_compile_info[i]);
+ // Remove all links to the actual script. The breakpoints system and
+ // LiveEdit itself believe that any function in the heap that points to a
+ // particular script is a regular function.
+ // For some functions we will restore this link later.
+ %LiveEditFunctionSetScript(info.shared_function_info, void 0);
+ compile_info.push(info);
+ old_index_map.push(i);
+ }
+
+ for (var i = 0; i < compile_info.length; i++) {
+ var k = i;
+ for (var j = i + 1; j < compile_info.length; j++) {
+ if (compile_info[k].start_position > compile_info[j].start_position) {
+ k = j;
+ }
+ }
+ if (k != i) {
+ var temp_info = compile_info[k];
+ var temp_index = old_index_map[k];
+ compile_info[k] = compile_info[i];
+ old_index_map[k] = old_index_map[i];
+ compile_info[i] = temp_info;
+ old_index_map[i] = temp_index;
+ }
+ }
+
+ // After sorting, update the outer_index field using old_index_map. Also
+ // set the next_sibling_index field.
+ var current_index = 0;
+
+ // The recursive function, that goes over all children of a particular
+ // node (i.e. function info).
+ function ResetIndexes(new_parent_index, old_parent_index) {
+ var previous_sibling = -1;
+ while (current_index < compile_info.length &&
+ compile_info[current_index].outer_index == old_parent_index) {
+ var saved_index = current_index;
+ compile_info[saved_index].outer_index = new_parent_index;
+ if (previous_sibling != -1) {
+ compile_info[previous_sibling].next_sibling_index = saved_index;
+ }
+ previous_sibling = saved_index;
+ current_index++;
+ ResetIndexes(saved_index, old_index_map[saved_index]);
+ }
+ if (previous_sibling != -1) {
+ compile_info[previous_sibling].next_sibling_index = -1;
+ }
+ }
+
+ ResetIndexes(-1, -1);
+ Assert(current_index == compile_info.length);
+
+ return compile_info;
+ }
+
+
+ // Replaces function's Code.
+ function PatchFunctionCode(old_node, change_log) {
+ var new_info = old_node.corresponding_node.info;
+ var shared_info_wrapper = old_node.live_shared_info_wrapper;
+ if (shared_info_wrapper) {
+ %LiveEditReplaceFunctionCode(new_info.raw_array,
+ shared_info_wrapper.raw_array);
+
+ // The function got new code. However, this new code brings all new
+ // instances of SharedFunctionInfo for nested functions, while
+ // we want the original instances to be used wherever possible.
+ // (This is because old and new instances will both be linked to
+ // a script, and neither the breakpoints subsystem nor the LiveEdit
+ // subsystem on its next call really expects this.)
+ for (var i = 0; i < old_node.children.length; i++) {
+ if (old_node.children[i].corresponding_node) {
+ var corresponding_child = old_node.children[i].corresponding_node;
+ var child_shared_info_wrapper =
+ old_node.children[i].live_shared_info_wrapper;
+ if (child_shared_info_wrapper) {
+ %LiveEditReplaceRefToNestedFunction(shared_info_wrapper.info,
+ corresponding_child.info.shared_function_info,
+ child_shared_info_wrapper.info);
+ }
+ }
+ }
+
+ change_log.push( {function_patched: new_info.function_name} );
+ } else {
+ change_log.push( {function_patched: new_info.function_name,
+ function_info_not_found: true} );
+ }
+ }
+
+
+ // Makes a function associated with another instance of a script (the
+ // one representing its old version). This way the function still
+ // may access its own text.
+ function LinkToOldScript(old_info_node, old_script, report_array) {
+ var shared_info = old_info_node.live_shared_info_wrapper;
+ if (shared_info) {
+ %LiveEditFunctionSetScript(shared_info.info, old_script);
+ report_array.push( { name: shared_info.function_name } );
+ } else {
+ report_array.push(
+ { name: old_info_node.info.function_name, not_found: true } );
+ }
+ }
+
+
+ // Returns a function that restores breakpoints.
+ function TemporaryRemoveBreakPoints(original_script, change_log) {
+ var script_break_points = GetScriptBreakPoints(original_script);
+
+ var break_points_update_report = [];
+ change_log.push( { break_points_update: break_points_update_report } );
+
+ var break_point_old_positions = [];
+ for (var i = 0; i < script_break_points.length; i++) {
+ var break_point = script_break_points[i];
+
+ break_point.clear();
+
+ // TODO(LiveEdit): be careful with resource offset here.
+ var break_point_position = Debug.findScriptSourcePosition(original_script,
+ break_point.line(), break_point.column());
+
+ var old_position_description = {
+ position: break_point_position,
+ line: break_point.line(),
+ column: break_point.column()
+ }
+ break_point_old_positions.push(old_position_description);
+ }
+
+
+ // Restores breakpoints and creates their copies in the "old" copy of
+ // the script.
+ return function (pos_translator, old_script_copy_opt) {
+ // Update breakpoints (change their positions and restore them in the old
+ // version of the script).
+ for (var i = 0; i < script_break_points.length; i++) {
+ var break_point = script_break_points[i];
+ if (old_script_copy_opt) {
+ var clone = break_point.cloneForOtherScript(old_script_copy_opt);
+ clone.set(old_script_copy_opt);
+
+ break_points_update_report.push( {
+ type: "copied_to_old",
+ id: break_point.number(),
+ new_id: clone.number(),
+ positions: break_point_old_positions[i]
+ } );
+ }
+
+ var updated_position = pos_translator.Translate(
+ break_point_old_positions[i].position,
+ PosTranslator.ShiftWithTopInsideChunkHandler);
+
+ var new_location =
+ original_script.locationFromPosition(updated_position, false);
+
+ break_point.update_positions(new_location.line, new_location.column);
+
+ var new_position_description = {
+ position: updated_position,
+ line: new_location.line,
+ column: new_location.column
+ }
+
+ break_point.set(original_script);
+
+ break_points_update_report.push( { type: "position_changed",
+ id: break_point.number(),
+ old_positions: break_point_old_positions[i],
+ new_positions: new_position_description
+ } );
+ }
+ }
+ }
+
+
function Assert(condition, message) {
if (!condition) {
if (message) {
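TemporaryRemoveBreakPoints above is a save-now/restore-later closure: it clears every breakpoint up front, remembers the old positions, and hands back a function that re-sets them (translated, and optionally cloned into the old script copy) once the source has been replaced. The shape of the pattern in C++, reduced to positions and a single shift value (a hypothetical helper, not V8 code):

#include <functional>
#include <vector>

std::function<void(int)> TemporarilyRemove(std::vector<int>& points) {
  std::vector<int> saved = points;  // capture the old positions first
  points.clear();                   // breakpoints stay cleared while patching
  return [saved, &points](int shift) {
    for (int p : saved) points.push_back(p + shift);  // restore, translated
  };
}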
@@ -346,15 +411,15 @@ Debug.LiveEdit = new function() {
function PosTranslator(diff_array) {
var chunks = new Array();
- var pos1 = 0;
- var pos2 = 0;
+ var current_diff = 0;
for (var i = 0; i < diff_array.length; i += 3) {
- pos2 += diff_array[i] - pos1 + pos2;
- pos1 = diff_array[i];
- chunks.push(new DiffChunk(pos1, pos2, diff_array[i + 1] - pos1,
- diff_array[i + 2] - pos2));
- pos1 = diff_array[i + 1];
- pos2 = diff_array[i + 2];
+ var pos1_begin = diff_array[i];
+ var pos2_begin = pos1_begin + current_diff;
+ var pos1_end = diff_array[i + 1];
+ var pos2_end = diff_array[i + 2];
+ chunks.push(new DiffChunk(pos1_begin, pos2_begin, pos1_end - pos1_begin,
+ pos2_end - pos2_begin));
+ current_diff = pos2_end - pos1_end;
}
this.chunks = chunks;
}
@@ -364,14 +429,14 @@ Debug.LiveEdit = new function() {
PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
var array = this.chunks;
- if (array.length == 0 || pos < array[0]) {
+ if (array.length == 0 || pos < array[0].pos1) {
return pos;
}
var chunk_index1 = 0;
var chunk_index2 = array.length - 1;
while (chunk_index1 < chunk_index2) {
- var middle_index = (chunk_index1 + chunk_index2) / 2;
+ var middle_index = Math.floor((chunk_index1 + chunk_index2) / 2);
if (pos < array[middle_index + 1].pos1) {
chunk_index2 = middle_index;
} else {
@@ -380,17 +445,24 @@ Debug.LiveEdit = new function() {
}
var chunk = array[chunk_index1];
if (pos >= chunk.pos1 + chunk.len1) {
- return pos += chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1;
+ return pos + chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1;
}
if (!inside_chunk_handler) {
- inside_chunk_handler = PosTranslator.default_inside_chunk_handler;
+ inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
}
- inside_chunk_handler(pos, chunk);
+ return inside_chunk_handler(pos, chunk);
}
- PosTranslator.default_inside_chunk_handler = function() {
- Assert(false, "Cannot translate position in chaged area");
+ PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
+ Assert(false, "Cannot translate position in changed area");
+ }
+
+ PosTranslator.ShiftWithTopInsideChunkHandler =
+ function(pos, diff_chunk) {
+ // We carelessly do not check whether we stay inside the chunk after
+ // translation.
+ return pos - diff_chunk.pos1 + diff_chunk.pos2;
}
var FunctionStatus = {
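The PosTranslator hunks fix three real bugs: chunk construction now accumulates the running length delta correctly, the binary search floors its midpoint (the old "(a + b) / 2" produced fractional indices in JavaScript), and Translate now returns the handler's result instead of discarding it. The translation logic, condensed into C++ for reference (a sketch, not a port of the actual file):

#include <cassert>
#include <vector>

struct DiffChunk { int pos1, pos2, len1, len2; };

// Maps an old-source position to a new-source position, assuming the
// position lies outside every changed chunk.
int Translate(const std::vector<DiffChunk>& chunks, int pos) {
  if (chunks.empty() || pos < chunks[0].pos1) return pos;
  size_t index1 = 0, index2 = chunks.size() - 1;
  while (index1 < index2) {  // find the last chunk with pos1 <= pos
    size_t middle = (index1 + index2) / 2;  // integer floor, as in the fix
    if (pos < chunks[middle + 1].pos1) index2 = middle;
    else index1 = middle + 1;
  }
  const DiffChunk& chunk = chunks[index1];
  assert(pos >= chunk.pos1 + chunk.len1);  // inside a chunk needs a handler
  return pos + chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1;
}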
@@ -412,15 +484,17 @@ Debug.LiveEdit = new function() {
this.children = children;
// an index in array of compile_info
this.array_index = array_index;
- this.parent = void(0);
+ this.parent = void 0;
this.status = FunctionStatus.UNCHANGED;
// Status explanation is used for debugging purposes and will be shown
// in user UI if some explanations are needed.
- this.status_explanation = void(0);
- this.new_start_pos = void(0);
- this.new_end_pos = void(0);
- this.corresponding_node = void(0);
+ this.status_explanation = void 0;
+ this.new_start_pos = void 0;
+ this.new_end_pos = void 0;
+ this.corresponding_node = void 0;
+ this.unmatched_new_nodes = void 0;
+ this.live_shared_info_wrapper = void 0;
}
// From array of function infos that is implicitly a tree creates
@@ -564,6 +638,8 @@ Debug.LiveEdit = new function() {
function ProcessChildren(old_node, new_node) {
var old_children = old_node.children;
var new_children = new_node.children;
+
+ var unmatched_new_nodes_list = [];
var old_index = 0;
var new_index = 0;
@@ -573,6 +649,7 @@ Debug.LiveEdit = new function() {
} else if (new_index < new_children.length) {
if (new_children[new_index].info.start_position <
old_children[old_index].new_start_pos) {
+ unmatched_new_nodes_list.push(new_children[new_index]);
new_index++;
} else if (new_children[new_index].info.start_position ==
old_children[old_index].new_start_pos) {
@@ -584,6 +661,9 @@ Debug.LiveEdit = new function() {
ProcessChildren(old_children[old_index],
new_children[new_index]);
if (old_children[old_index].status == FunctionStatus.DAMAGED) {
+ unmatched_new_nodes_list.push(
+ old_children[old_index].corresponding_node);
+ old_children[old_index].corresponding_node = void 0;
old_node.status = FunctionStatus.CHANGED;
}
}
@@ -592,6 +672,7 @@ Debug.LiveEdit = new function() {
old_children[old_index].status_explanation =
"No corresponding function in new script found";
old_node.status = FunctionStatus.CHANGED;
+ unmatched_new_nodes_list.push(new_children[new_index]);
}
new_index++;
old_index++;
@@ -611,12 +692,18 @@ Debug.LiveEdit = new function() {
}
}
+ while (new_index < new_children.length) {
+ unmatched_new_nodes_list.push(new_children[new_index]);
+ new_index++;
+ }
+
if (old_node.status == FunctionStatus.CHANGED) {
if (!CompareFunctionExpectations(old_node.info, new_node.info)) {
old_node.status = FunctionStatus.DAMAGED;
old_node.status_explanation = "Changed code expectations";
}
}
+ old_node.unmatched_new_nodes = unmatched_new_nodes_list;
}
ProcessChildren(old_code_tree, new_code_tree);
@@ -625,6 +712,40 @@ Debug.LiveEdit = new function() {
Assert(old_code_tree.status != FunctionStatus.DAMAGED,
"Script became damaged");
}
+
+ function FindLiveSharedInfos(old_code_tree, script) {
+ var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
+
+ var shared_infos = new Array();
+
+ for (var i = 0; i < shared_raw_list.length; i++) {
+ shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
+ }
+
+ // Finds the SharedFunctionInfo that corresponds to the given compile info
+ // in the old version of the script.
+ function FindFunctionInfo(compile_info) {
+ for (var i = 0; i < shared_infos.length; i++) {
+ var wrapper = shared_infos[i];
+ if (wrapper.start_position == compile_info.start_position &&
+ wrapper.end_position == compile_info.end_position) {
+ return wrapper;
+ }
+ }
+ }
+
+ function TraverseTree(node) {
+ var info_wrapper = FindFunctionInfo(node.info);
+ if (info_wrapper) {
+ node.live_shared_info_wrapper = info_wrapper;
+ }
+ for (var i = 0; i < node.children.length; i++) {
+ TraverseTree(node.children[i]);
+ }
+ }
+
+ TraverseTree(old_code_tree);
+ }
// An object describing function compilation details. Its index fields
@@ -637,6 +758,7 @@ Debug.LiveEdit = new function() {
this.code = raw_array[4];
this.scope_info = raw_array[5];
this.outer_index = raw_array[6];
+ this.shared_function_info = raw_array[7];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
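The raw_array slots read here must stay in sync with the C++ FunctionInfoWrapper offsets extended later in this patch (kSharedFunctionInfoOffset_ = 7, kSize_ = 8). A sketch of the shared contract as one enum; the names of slots 0 through 3 are inferred from context rather than shown in this diff:

enum FunctionInfoField {
  kFunctionNameOffset = 0,        // slots 0-3 assumed from context
  kStartPositionOffset = 1,
  kEndPositionOffset = 2,
  kParamNumOffset = 3,
  kCodeOffset = 4,
  kScopeInfoOffset = 5,
  kParentIndexOffset = 6,
  kSharedFunctionInfoOffset = 7,  // new in this patch
  kSize = 8
};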
@@ -648,7 +770,21 @@ Debug.LiveEdit = new function() {
this.info = raw_array[3];
this.raw_array = raw_array;
}
-
+
+ // Changes positions (including all statements) in the function.
+ function PatchPositions(old_info_node, diff_array, report_array) {
+ var shared_info_wrapper = old_info_node.live_shared_info_wrapper;
+ if (!shared_info_wrapper) {
+ // TODO(LiveEdit): function is not compiled yet or is already collected.
+ report_array.push(
+ { name: old_info_node.info.function_name, info_not_found: true } );
+ return;
+ }
+ %LiveEditPatchFunctionPositions(shared_info_wrapper.raw_array,
+ diff_array);
+ report_array.push( { name: old_info_node.info.function_name } );
+ }
+
// Adds a suffix to script name to mark that it is old version.
function CreateNameForOldScript(script) {
// TODO(635): try better than this; support several changes.
@@ -776,71 +912,33 @@ Debug.LiveEdit = new function() {
function CompareStringsLinewise(s1, s2) {
return %LiveEditCompareStringsLinewise(s1, s2);
}
- // Function is public (for tests).
- this.CompareStringsLinewise = CompareStringsLinewise;
+ // Applies the change to the script.
+ // The change is always a substring (change_pos, change_pos + change_len)
+ // being replaced with a completely different string new_str.
+ // This API is legacy and is obsolete.
+ //
+ // @param {Script} script that is being changed
+ // @param {Array} change_log a list that collects engineer-readable
+ // descriptions of what happened.
+ function ApplySingleChunkPatch(script, change_pos, change_len, new_str,
+ change_log) {
+ var old_source = script.source;
- // Finds a difference between 2 strings in form of a single chunk.
- // This is a temporary solution. We should calculate a read diff instead.
- function FindSimpleDiff(old_source, new_source) {
- var change_pos;
- var old_len;
- var new_len;
+ // Prepare new source string.
+ var new_source = old_source.substring(0, change_pos) +
+ new_str + old_source.substring(change_pos + change_len);
- // A find range block. Whenever control leaves it, it should set 3 local
- // variables declared above.
- find_range:
- {
- // First look from the beginning of strings.
- var pos1;
- {
- var next_pos;
- for (pos1 = 0; true; pos1 = next_pos) {
- if (pos1 >= old_source.length) {
- change_pos = pos1;
- old_len = 0;
- new_len = new_source.length - pos1;
- break find_range;
- }
- if (pos1 >= new_source.length) {
- change_pos = pos1;
- old_len = old_source.length - pos1;
- new_len = 0;
- break find_range;
- }
- if (old_source[pos1] != new_source[pos1]) {
- break;
- }
- next_pos = pos1 + 1;
- }
- }
- // Now compare strings from the ends.
- change_pos = pos1;
- var pos_old;
- var pos_new;
- {
- for (pos_old = old_source.length - 1, pos_new = new_source.length - 1;
- true;
- pos_old--, pos_new--) {
- if (pos_old - change_pos + 1 < 0 || pos_new - change_pos + 1 < 0) {
- old_len = pos_old - change_pos + 2;
- new_len = pos_new - change_pos + 2;
- break find_range;
- }
- if (old_source[pos_old] != new_source[pos_new]) {
- old_len = pos_old - change_pos + 1;
- new_len = pos_new - change_pos + 1;
- break find_range;
- }
- }
- }
- }
+ return ApplyPatchMultiChunk(script,
+ [ change_pos, change_pos + change_len, change_pos + new_str.length],
+ new_source, change_log);
+ }
- if (old_len == 0 && new_len == 0) {
- // no change
- return;
- }
-
- return { "change_pos": change_pos, "old_len": old_len, "new_len": new_len };
+
+ // Functions are public for tests.
+ this.TestApi = {
+ PosTranslator: PosTranslator,
+ CompareStringsLinewise: CompareStringsLinewise,
+ ApplySingleChunkPatch: ApplySingleChunkPatch
}
}
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index ba417e53c..592ef4990 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -417,6 +417,8 @@ static void CompileScriptForTracker(Handle<Script> script) {
// Compile the code.
CompilationInfo info(lit, script, is_eval);
+
+ LiveEditFunctionTracker tracker(lit);
Handle<Code> code = MakeCodeForLiveEdit(&info);
// Check for stack-overflow exceptions.
@@ -424,6 +426,7 @@ static void CompileScriptForTracker(Handle<Script> script) {
Top::StackOverflow();
return;
}
+ tracker.RecordRootFunctionInfo(code);
}
// Unwraps JSValue object, returning its field "value"
@@ -501,9 +504,13 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
Handle<JSValue> wrapper = WrapInJSValue(*function_code);
this->SetField(kCodeOffset_, wrapper);
}
- void SetScopeInfo(Handle<JSArray> scope_info_array) {
+ void SetScopeInfo(Handle<Object> scope_info_array) {
this->SetField(kScopeInfoOffset_, scope_info_array);
}
+ void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
+ Handle<JSValue> info_holder = WrapInJSValue(*info);
+ this->SetField(kSharedFunctionInfoOffset_, info_holder);
+ }
int GetParentIndex() {
return this->GetSmiValueField(kParentIndexOffset_);
}
@@ -527,7 +534,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kCodeOffset_ = 4;
static const int kScopeInfoOffset_ = 5;
static const int kParentIndexOffset_ = 6;
- static const int kSize_ = 7;
+ static const int kSharedFunctionInfoOffset_ = 7;
+ static const int kSize_ = 8;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
@@ -537,6 +545,11 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
// wrapped into BlindReference for sanitizing reasons.
class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
public:
+ static bool IsInstance(Handle<JSArray> array) {
+ return array->length() == Smi::FromInt(kSize_) &&
+ array->GetElement(kSharedInfoOffset_)->IsJSValue();
+ }
+
explicit SharedInfoWrapper(Handle<JSArray> array)
: JSArrayBasedStruct<SharedInfoWrapper>(array) {
}
@@ -593,7 +606,11 @@ class FunctionInfoListener {
current_parent_index_ = info.GetParentIndex();
}
- void FunctionScope(Scope* scope) {
+// TODO(LiveEdit): Move the private method below.
+// This private section was created here instead of moving the function,
+// to keep an already complex diff simpler.
+ private:
+ Object* SerializeFunctionScope(Scope* scope) {
HandleScope handle_scope;
Handle<JSArray> scope_info_list = Factory::NewJSArray(10);
@@ -604,7 +621,7 @@ class FunctionInfoListener {
// scopes of this chain.
Scope* outer_scope = scope->outer_scope();
if (outer_scope == NULL) {
- return;
+ return Heap::undefined_value();
}
do {
ZoneList<Variable*> list(10);
@@ -645,17 +662,33 @@ class FunctionInfoListener {
outer_scope = outer_scope->outer_scope();
} while (outer_scope != NULL);
- FunctionInfoWrapper info =
- FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
- info.SetScopeInfo(scope_info_list);
+ return *scope_info_list;
}
+ public:
+ // Saves only function code, because for a script function we
+ // may never create a SharedFunctionInfo object.
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
info.SetFunctionCode(function_code);
}
+ // Saves full information about a function: its code, its scope info
+ // and a SharedFunctionInfo object.
+ void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope) {
+ if (!shared->IsSharedFunctionInfo()) {
+ return;
+ }
+ FunctionInfoWrapper info =
+ FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
+ info.SetFunctionCode(Handle<Code>(shared->code()));
+ info.SetSharedFunctionInfo(shared);
+
+ Handle<Object> scope_info_list(SerializeFunctionScope(scope));
+ info.SetScopeInfo(scope_info_list);
+ }
+
Handle<JSArray> GetResult() {
return result_;
}
@@ -806,16 +839,19 @@ static bool IsJSFunctionCode(Code* code) {
}
-void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array) {
+Object* LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
+ Handle<JSArray> shared_info_array) {
HandleScope scope;
+ if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
+ return Top::ThrowIllegalOperation();
+ }
+
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
-
if (IsJSFunctionCode(shared_info->code())) {
ReplaceCodeObject(shared_info->code(),
*(compile_info_wrapper.GetFunctionCode()));
@@ -833,17 +869,17 @@ void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
shared_info->set_construct_stub(
Builtins::builtin(Builtins::JSConstructStubGeneric));
- // update breakpoints
+
+ return Heap::undefined_value();
}
// TODO(635): Eval caches its scripts (same text -- same compiled info).
// Make sure we clear such caches.
-void LiveEdit::RelinkFunctionToScript(Handle<JSArray> shared_info_array,
- Handle<Script> script_handle) {
- SharedInfoWrapper shared_info_wrapper(shared_info_array);
- Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
-
+void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
+ Handle<Object> script_handle) {
+ Handle<SharedFunctionInfo> shared_info =
+ Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
shared_info->set_script(*script_handle);
}
@@ -998,21 +1034,13 @@ static Handle<Code> PatchPositionsInCode(Handle<Code> code,
}
-static Handle<Object> GetBreakPointObjectsForJS(
- Handle<BreakPointInfo> break_point_info) {
- if (break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> fixed_array(
- FixedArray::cast(break_point_info->break_point_objects()));
- Handle<Object> array = Factory::NewJSArrayWithElements(fixed_array);
- return array;
- } else {
- return Handle<Object>(break_point_info->break_point_objects());
- }
-}
+Object* LiveEdit::PatchFunctionPositions(
+ Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
+ if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
+ return Top::ThrowIllegalOperation();
+ }
-Handle<JSArray> LiveEdit::PatchFunctionPositions(
- Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
@@ -1041,44 +1069,72 @@ Handle<JSArray> LiveEdit::PatchFunctionPositions(
}
}
+ return Heap::undefined_value();
+}
- Handle<JSArray> result = Factory::NewJSArray(0);
- int result_len = 0;
- if (info->debug_info()->IsDebugInfo()) {
- Handle<DebugInfo> debug_info(DebugInfo::cast(info->debug_info()));
- Handle<Code> patched_orig_code =
- PatchPositionsInCode(Handle<Code>(debug_info->original_code()),
- position_change_array);
- if (*patched_orig_code != debug_info->original_code()) {
- // Do not use expensive ReplaceCodeObject for original_code, because we
- // do not expect any other references except this one.
- debug_info->set_original_code(*patched_orig_code);
- }
+static Handle<Script> CreateScriptCopy(Handle<Script> original) {
+ Handle<String> original_source(String::cast(original->source()));
- Handle<FixedArray> break_point_infos(debug_info->break_points());
- for (int i = 0; i < break_point_infos->length(); i++) {
- if (!break_point_infos->get(i)->IsBreakPointInfo()) {
- continue;
- }
- Handle<BreakPointInfo> info(
- BreakPointInfo::cast(break_point_infos->get(i)));
- int old_in_script_position = info->source_position()->value() +
- old_function_start;
- int new_in_script_position = TranslatePosition(old_in_script_position,
- position_change_array);
- info->set_source_position(
- Smi::FromInt(new_in_script_position - new_function_start));
- if (old_in_script_position != new_in_script_position) {
- SetElement(result, result_len,
- Handle<Smi>(Smi::FromInt(new_in_script_position)));
- SetElement(result, result_len + 1,
- GetBreakPointObjectsForJS(info));
- result_len += 2;
+ Handle<Script> copy = Factory::NewScript(original_source);
+
+ copy->set_name(original->name());
+ copy->set_line_offset(original->line_offset());
+ copy->set_column_offset(original->column_offset());
+ copy->set_data(original->data());
+ copy->set_type(original->type());
+ copy->set_context_data(original->context_data());
+ copy->set_compilation_type(original->compilation_type());
+ copy->set_eval_from_shared(original->eval_from_shared());
+ copy->set_eval_from_instructions_offset(
+ original->eval_from_instructions_offset());
+
+ return copy;
+}
+
+
+Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
+ Handle<String> new_source,
+ Handle<Object> old_script_name) {
+ Handle<Object> old_script_object;
+ if (old_script_name->IsString()) {
+ Handle<Script> old_script = CreateScriptCopy(original_script);
+ old_script->set_name(String::cast(*old_script_name));
+ old_script_object = old_script;
+ Debugger::OnAfterCompile(old_script, Debugger::SEND_WHEN_DEBUGGING);
+ } else {
+ old_script_object = Handle<Object>(Heap::null_value());
+ }
+
+ original_script->set_source(*new_source);
+
+ // Drop line ends so that they will be recalculated.
+ original_script->set_line_ends(Heap::undefined_value());
+
+ return *old_script_object;
+}
+
+
+
+void LiveEdit::ReplaceRefToNestedFunction(
+ Handle<JSValue> parent_function_wrapper,
+ Handle<JSValue> orig_function_wrapper,
+ Handle<JSValue> subst_function_wrapper) {
+
+ Handle<SharedFunctionInfo> parent_shared =
+ Handle<SharedFunctionInfo>::cast(UnwrapJSValue(parent_function_wrapper));
+ Handle<SharedFunctionInfo> orig_shared =
+ Handle<SharedFunctionInfo>::cast(UnwrapJSValue(orig_function_wrapper));
+ Handle<SharedFunctionInfo> subst_shared =
+ Handle<SharedFunctionInfo>::cast(UnwrapJSValue(subst_function_wrapper));
+
+ for (RelocIterator it(parent_shared->code()); !it.done(); it.next()) {
+ if (it.rinfo()->rmode() == RelocInfo::EMBEDDED_OBJECT) {
+ if (it.rinfo()->target_object() == *orig_shared) {
+ it.rinfo()->set_target_object(*subst_shared);
}
}
}
- return result;
}
@@ -1362,17 +1418,16 @@ LiveEditFunctionTracker::~LiveEditFunctionTracker() {
}
-void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
+void LiveEditFunctionTracker::RecordFunctionInfo(
+ Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionCode(code);
+ active_function_info_listener->FunctionInfo(info, lit->scope());
}
}
-void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
- if (active_function_info_listener != NULL) {
- active_function_info_listener->FunctionScope(scope);
- }
+void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
+ active_function_info_listener->FunctionCode(code);
}
@@ -1393,11 +1448,12 @@ LiveEditFunctionTracker::~LiveEditFunctionTracker() {
}
-void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
+void LiveEditFunctionTracker::RecordFunctionInfo(
+ Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
}
-void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
+void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
}
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index f66081fa7..d8e2a1375 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -67,8 +67,9 @@ class LiveEditFunctionTracker {
public:
explicit LiveEditFunctionTracker(FunctionLiteral* fun);
~LiveEditFunctionTracker();
- void RecordFunctionCode(Handle<Code> code);
- void RecordFunctionScope(Scope* scope);
+ void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
+ FunctionLiteral* lit);
+ void RecordRootFunctionInfo(Handle<Code> code);
static bool IsActive();
};
@@ -82,17 +83,29 @@ class LiveEdit : AllStatic {
static void WrapSharedFunctionInfos(Handle<JSArray> array);
- static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
- Handle<JSArray> shared_info_array);
+ static Object* ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
+ Handle<JSArray> shared_info_array);
- static void RelinkFunctionToScript(Handle<JSArray> shared_info_array,
- Handle<Script> script_handle);
+  // Updates the script field in SharedFunctionInfo.
+ static void SetFunctionScript(Handle<JSValue> function_wrapper,
+ Handle<Object> script_handle);
- // Returns an array of pairs (new source position, breakpoint_object/array)
- // so that JS side could update positions in breakpoint objects.
- static Handle<JSArray> PatchFunctionPositions(
+ static Object* PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array);
+  // Updates the source field of a script. If old_script_name is provided
+  // (i.e. is a String), also creates a copy of the script with the original
+  // source and notifies the debugger.
+ static Object* ChangeScriptSource(Handle<Script> original_script,
+ Handle<String> new_source,
+ Handle<Object> old_script_name);
+
+  // In the code of a parent function, replaces the original function
+  // (referenced as an embedded object) with a substitute one.
+ static void ReplaceRefToNestedFunction(Handle<JSValue> parent_function_shared,
+ Handle<JSValue> orig_function_shared,
+ Handle<JSValue> subst_function_shared);
+
  // Checks the listed functions on the stack and returns an array with the
  // corresponding FunctionPatchabilityStatus statuses; an extra array element
  // may contain a general error message. Modifies the current stack and
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index c82d19417..b0a3fd62f 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -1328,6 +1328,32 @@ bool DescriptorArray::IsSortedNoDuplicates() {
}
+void JSFunctionResultCache::JSFunctionResultCacheVerify() {
+ JSFunction::cast(get(kFactoryIndex))->Verify();
+
+ int size = Smi::cast(get(kCacheSizeIndex))->value();
+ ASSERT(kEntriesIndex <= size);
+ ASSERT(size <= length());
+ ASSERT_EQ(0, size % kEntrySize);
+
+ int finger = Smi::cast(get(kFingerIndex))->value();
+ ASSERT(kEntriesIndex <= finger);
+ ASSERT(finger < size || finger == kEntriesIndex);
+ ASSERT_EQ(0, finger % kEntrySize);
+
+ if (FLAG_enable_slow_asserts) {
+ for (int i = kEntriesIndex; i < size; i++) {
+ ASSERT(!get(i)->IsTheHole());
+ get(i)->Verify();
+ }
+ for (int i = size; i < length(); i++) {
+ ASSERT(get(i)->IsTheHole());
+ get(i)->Verify();
+ }
+ }
+}
+
+
#endif // DEBUG
} } // namespace v8::internal
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 621a3f84a..ae7d2c2a9 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -569,6 +569,22 @@ bool Object::IsSymbolTable() {
}
+bool Object::IsJSFunctionResultCache() {
+ if (!IsFixedArray()) return false;
+ FixedArray* self = FixedArray::cast(this);
+ int length = self->length();
+ if (length < JSFunctionResultCache::kEntriesIndex) return false;
+ if ((length - JSFunctionResultCache::kEntriesIndex)
+ % JSFunctionResultCache::kEntrySize != 0) {
+ return false;
+ }
+#ifdef DEBUG
+ reinterpret_cast<JSFunctionResultCache*>(this)->JSFunctionResultCacheVerify();
+#endif
+ return true;
+}
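
A minimal sketch (hypothetical helper, not part of this patch) of the shape
test above: past the fixed bookkeeping slots, the backing FixedArray must
divide evenly into key/value entries.

    // Mirrors Object::IsJSFunctionResultCache() without the V8 types.
    bool LooksLikeResultCache(int length, int entries_index, int entry_size) {
      if (length < entries_index) return false;           // bookkeeping slots
      return (length - entries_index) % entry_size == 0;  // whole entries only
    }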
+
+
bool Object::IsCompilationCacheTable() {
return IsHashTable();
}
@@ -1594,6 +1610,7 @@ void NumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(SymbolTable)
+CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(MapCache)
@@ -1651,7 +1668,7 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
INT_ACCESSORS(Array, length, kLengthOffset)
-INT_ACCESSORS(String, length, kLengthOffset)
+SMI_ACCESSORS(String, length, kLengthOffset)
uint32_t String::hash_field() {
@@ -1773,14 +1790,12 @@ void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
- uint32_t length = READ_INT_FIELD(this, kLengthOffset);
- return SizeFor(length);
+ return SizeFor(length());
}
int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
- uint32_t length = READ_INT_FIELD(this, kLengthOffset);
- return SizeFor(length);
+ return SizeFor(length());
}
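
The switch from INT_ACCESSORS to SMI_ACCESSORS above means the string length
slot now holds a tagged smi rather than a raw int. A rough sketch, assuming
the x64 smi representation of this V8 generation (32-bit payload stored in
the upper half of a 64-bit word):

    #include <stdint.h>

    int64_t SmiFromInt(int32_t value) {   // tag: payload into the high half
      return static_cast<int64_t>(value) << 32;
    }

    int32_t SmiToInt(int64_t smi) {       // untag
      return static_cast<int32_t>(smi >> 32);
    }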
@@ -1838,6 +1853,20 @@ void ExternalTwoByteString::set_resource(
}
+void JSFunctionResultCache::MakeZeroSize() {
+ set(kFingerIndex, Smi::FromInt(kEntriesIndex));
+ set(kCacheSizeIndex, Smi::FromInt(kEntriesIndex));
+}
+
+
+void JSFunctionResultCache::Clear() {
+ int cache_size = Smi::cast(get(kCacheSizeIndex))->value();
+ Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
+ MemsetPointer(entries_start, Heap::the_hole_value(), cache_size);
+ MakeZeroSize();
+}
+
+
byte ByteArray::get(int index) {
ASSERT(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -2113,6 +2142,20 @@ bool Map::has_non_instance_prototype() {
}
+void Map::set_function_with_prototype(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kFunctionWithPrototype));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kFunctionWithPrototype));
+ }
+}
+
+
+bool Map::function_with_prototype() {
+ return ((1 << kFunctionWithPrototype) & bit_field2()) != 0;
+}
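
The new accessors follow V8's usual bit-field pattern; distilled to plain
integers (illustration only):

    // Generic form of the set_function_with_prototype() /
    // function_with_prototype() pair above.
    unsigned SetBit(unsigned field, int bit, bool value) {
      return value ? (field | (1u << bit)) : (field & ~(1u << bit));
    }

    bool GetBit(unsigned field, int bit) {
      return (field & (1u << bit)) != 0;
    }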
+
+
void Map::set_is_access_check_needed(bool access_check_needed) {
if (access_check_needed) {
set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
@@ -2568,6 +2611,10 @@ Object* JSFunction::prototype() {
return instance_prototype();
}
+bool JSFunction::should_have_prototype() {
+ return map()->function_with_prototype();
+}
+
bool JSFunction::is_compiled() {
return shared()->is_compiled();
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 252d847fb..459c8aaca 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -4906,6 +4906,7 @@ Object* JSFunction::SetInstancePrototype(Object* value) {
Object* JSFunction::SetPrototype(Object* value) {
+ ASSERT(should_have_prototype());
Object* construct_prototype = value;
// If the value is not a JSObject, store the value in the map's
@@ -4931,6 +4932,14 @@ Object* JSFunction::SetPrototype(Object* value) {
}
+Object* JSFunction::RemovePrototype() {
+ ASSERT(map() == context()->global_context()->function_map());
+ set_map(context()->global_context()->function_without_prototype_map());
+ set_prototype_or_initial_map(Heap::the_hole_value());
+ return this;
+}
+
+
Object* JSFunction::SetInstanceClassName(String* name) {
shared()->set_instance_class_name(name);
return this;
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index cdc249152..dcfb2eeda 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -606,6 +606,7 @@ class Object BASE_EMBEDDED {
inline bool IsHashTable();
inline bool IsDictionary();
inline bool IsSymbolTable();
+ inline bool IsJSFunctionResultCache();
inline bool IsCompilationCacheTable();
inline bool IsCodeCacheHashTable();
inline bool IsMapCache();
@@ -1116,6 +1117,8 @@ class HeapNumber: public HeapObject {
static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
+ static const int kMantissaBits = 52;
+  static const int kExponentBits = 11;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
static const int kMantissaBitsInTopWord = 20;
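
The constants describe the IEEE-754 layout of a double: 1 sign bit, 11
exponent bits (bias 1023) and 52 mantissa bits, of which the top 20 sit in
the upper machine word. A standalone sketch (not part of this patch) of how
they decompose a value:

    #include <stdint.h>
    #include <string.h>

    void Decompose(double d, int* sign, int* exponent, uint64_t* mantissa) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      uint32_t top = static_cast<uint32_t>(bits >> 32);  // the "top word"
      *sign = (top & 0x80000000u) != 0;                  // kSignMask
      *exponent = static_cast<int>((top & 0x7ff00000u) >> 20) - 1023;
      *mantissa = bits & ((static_cast<uint64_t>(1) << 52) - 1);
    }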
@@ -2324,6 +2327,16 @@ class JSFunctionResultCache: public FixedArray {
static const int kEntriesIndex = kDummyIndex + 1;
static const int kEntrySize = 2; // key + value
+
+ inline void MakeZeroSize();
+ inline void Clear();
+
+ // Casting
+ static inline JSFunctionResultCache* cast(Object* obj);
+
+#ifdef DEBUG
+ void JSFunctionResultCacheVerify();
+#endif
};
@@ -2854,6 +2867,12 @@ class Map: public HeapObject {
inline void set_non_instance_prototype(bool value);
inline bool has_non_instance_prototype();
+  // Tells whether the function has a special prototype property. If not, the
+  // prototype property will not be created when accessed (it will return
+  // undefined), and construction from this function will not be allowed.
+ inline void set_function_with_prototype(bool value);
+ inline bool function_with_prototype();
+
// Tells whether the instance with this map should be ignored by the
// __proto__ accessor.
inline void set_is_hidden_prototype() {
@@ -3030,6 +3049,7 @@ class Map: public HeapObject {
// Bit positions for bit field 2
static const int kIsExtensible = 0;
+ static const int kFunctionWithPrototype = 1;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
@@ -3396,6 +3416,11 @@ class JSFunction: public JSObject {
Object* SetInstancePrototype(Object* value);
Object* SetPrototype(Object* value);
+  // After the prototype is removed, it will not be created when accessed,
+  // and [[Construct]] from this function will not be allowed.
+ Object* RemovePrototype();
+ inline bool should_have_prototype();
+
// Accessor for this function's initial map's [[class]]
// property. This is primarily used by ECMA native functions. This
// method sets the class_name field of this function's initial map
@@ -4063,7 +4088,7 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHashFieldOffset = kLengthOffset + kIntSize;
+ static const int kHashFieldOffset = kLengthOffset + kPointerSize;
static const int kSize = kHashFieldOffset + kIntSize;
// Notice: kSize is not pointer-size aligned if pointers are 64-bit.
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 1442b98d6..823889ace 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -1450,6 +1450,18 @@ static Object* Runtime_FunctionSetName(Arguments args) {
}
+static Object* Runtime_FunctionRemovePrototype(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(JSFunction, f, args[0]);
+ Object* obj = f->RemovePrototype();
+ if (obj->IsFailure()) return obj;
+
+ return Heap::undefined_value();
+}
+
+
static Object* Runtime_FunctionGetScript(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -1523,6 +1535,7 @@ static Object* Runtime_FunctionSetPrototype(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, fun, args[0]);
+ ASSERT(fun->should_have_prototype());
Object* obj = Accessors::FunctionSetPrototype(fun, args[1], NULL);
if (obj->IsFailure()) return obj;
return args[0]; // return TOS
@@ -4212,7 +4225,7 @@ static Object* Runtime_GetLocalPropertyNames(Arguments args) {
int length = LocalPrototypeChainLength(*obj);
// Find the number of local properties for each of the objects.
- int* local_property_count = NewArray<int>(length);
+ ScopedVector<int> local_property_count(length);
int total_property_count = 0;
Handle<JSObject> jsproto = obj;
for (int i = 0; i < length; i++) {
@@ -4265,7 +4278,6 @@ static Object* Runtime_GetLocalPropertyNames(Arguments args) {
}
}
- DeleteArray(local_property_count);
return *Factory::NewJSArrayWithElements(names);
}
@@ -5399,7 +5411,7 @@ static Object* Runtime_NumberDiv(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
- return Heap::NewNumberFromDouble(x / y);
+ return Heap::NumberFromDouble(x / y);
}
@@ -5411,8 +5423,8 @@ static Object* Runtime_NumberMod(Arguments args) {
CONVERT_DOUBLE_CHECKED(y, args[1]);
x = modulo(x, y);
- // NewNumberFromDouble may return a Smi instead of a Number object
- return Heap::NewNumberFromDouble(x);
+ // NumberFromDouble may return a Smi instead of a Number object
+ return Heap::NumberFromDouble(x);
}
@@ -6066,7 +6078,8 @@ static Object* Runtime_RoundNumber(Arguments args) {
if (sign && value >= -0.5) return Heap::minus_zero_value();
- return Heap::NumberFromDouble(floor(value + 0.5));
+ // Do not call NumberFromDouble() to avoid extra checks.
+ return Heap::AllocateHeapNumber(floor(value + 0.5));
}
@@ -6541,6 +6554,16 @@ static Object* Runtime_NewObject(Arguments args) {
}
Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
+
+  // If the function should not have a prototype, construction is not
+  // allowed. In this case the generated code bails out here, since the
+  // function has no initial_map.
+ if (!function->should_have_prototype()) {
+ Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
+ Handle<Object> type_error =
+ Factory::NewTypeError("not_constructor", arguments);
+ return Top::Throw(*type_error);
+ }
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into constructors if step into is active.
if (Debug::StepInActive()) {
@@ -7749,6 +7772,32 @@ static Object* Runtime_EstimateNumberOfElements(Arguments args) {
}
+static Object* Runtime_SwapElements(Arguments args) {
+ HandleScope handle_scope;
+
+ ASSERT_EQ(3, args.length());
+
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
+ Handle<Object> key1 = args.at<Object>(1);
+ Handle<Object> key2 = args.at<Object>(2);
+
+ uint32_t index1, index2;
+ if (!Array::IndexFromObject(*key1, &index1)
+ || !Array::IndexFromObject(*key2, &index2)) {
+ return Top::ThrowIllegalOperation();
+ }
+
+ Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
+ Handle<Object> tmp1 = GetElement(jsobject, index1);
+ Handle<Object> tmp2 = GetElement(jsobject, index2);
+
+ SetElement(jsobject, index1, tmp2);
+ SetElement(jsobject, index2, tmp1);
+
+ return Heap::undefined_value();
+}
+
+
// Returns an array that tells you where in the [0, length) interval an array
// might have elements. Can either return keys or intervals. Keys can have
// gaps in (undefined). Intervals can also span over some undefined keys.
@@ -9674,38 +9723,30 @@ static Object* Runtime_LiveEditGatherCompileInfo(Arguments args) {
return result;
}
-// Changes the source of the script to a new_source and creates a new
-// script representing the old version of the script source.
+// Changes the source of the script to a new_source.
+// If old_script_name is provided (i.e. is a String), also creates a copy of
+// the script with the original source and notifies the debugger.
static Object* Runtime_LiveEditReplaceScript(Arguments args) {
ASSERT(args.length() == 3);
HandleScope scope;
CONVERT_CHECKED(JSValue, original_script_value, args[0]);
CONVERT_ARG_CHECKED(String, new_source, 1);
- CONVERT_ARG_CHECKED(String, old_script_name, 2);
- Handle<Script> original_script =
- Handle<Script>(Script::cast(original_script_value->value()));
-
- Handle<String> original_source(String::cast(original_script->source()));
+ Handle<Object> old_script_name(args[2]);
- original_script->set_source(*new_source);
- Handle<Script> old_script = Factory::NewScript(original_source);
- old_script->set_name(*old_script_name);
- old_script->set_line_offset(original_script->line_offset());
- old_script->set_column_offset(original_script->column_offset());
- old_script->set_data(original_script->data());
- old_script->set_type(original_script->type());
- old_script->set_context_data(original_script->context_data());
- old_script->set_compilation_type(original_script->compilation_type());
- old_script->set_eval_from_shared(original_script->eval_from_shared());
- old_script->set_eval_from_instructions_offset(
- original_script->eval_from_instructions_offset());
+ CONVERT_CHECKED(Script, original_script_pointer,
+ original_script_value->value());
+ Handle<Script> original_script(original_script_pointer);
- // Drop line ends so that they will be recalculated.
- original_script->set_line_ends(Heap::undefined_value());
+ Object* old_script = LiveEdit::ChangeScriptSource(original_script,
+ new_source,
+ old_script_name);
- Debugger::OnAfterCompile(old_script, Debugger::SEND_WHEN_DEBUGGING);
-
- return *(GetScriptWrapper(old_script));
+ if (old_script->IsScript()) {
+ Handle<Script> script_handle(Script::cast(old_script));
+ return *(GetScriptWrapper(script_handle));
+ } else {
+ return Heap::null_value();
+ }
}
// Replaces code of SharedFunctionInfo with a new one.
@@ -9715,41 +9756,62 @@ static Object* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
- LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
-
- return Heap::undefined_value();
+ return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
}
// Connects SharedFunctionInfo to another script.
-static Object* Runtime_LiveEditRelinkFunctionToScript(Arguments args) {
+static Object* Runtime_LiveEditFunctionSetScript(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
- CONVERT_ARG_CHECKED(JSArray, shared_info_array, 0);
- CONVERT_ARG_CHECKED(JSValue, script_value, 1);
- Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
+ Handle<Object> function_object(args[0]);
+ Handle<Object> script_object(args[1]);
+
+ if (function_object->IsJSValue()) {
+ Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
+ if (script_object->IsJSValue()) {
+ CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value());
+ script_object = Handle<Object>(script);
+ }
+
+ LiveEdit::SetFunctionScript(function_wrapper, script_object);
+ } else {
+    // Just ignore this case. We may not have a SharedFunctionInfo for some
+    // functions, and that case is checked for here.
+ }
+
+ return Heap::undefined_value();
+}
+
+
+// In the code of a parent function, replaces the original function
+// (referenced as an embedded object) with a substitute one.
+static Object* Runtime_LiveEditReplaceRefToNestedFunction(Arguments args) {
+ ASSERT(args.length() == 3);
+ HandleScope scope;
- LiveEdit::RelinkFunctionToScript(shared_info_array, script);
+ CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0);
+ CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1);
+ CONVERT_ARG_CHECKED(JSValue, subst_wrapper, 2);
+
+ LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
+ subst_wrapper);
return Heap::undefined_value();
}
+
// Updates positions of a shared function info (first parameter) according
// to script source change. Text change is described in second parameter as
// array of groups of 3 numbers:
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
-// Returns an array of pairs (new source position, breakpoint_object/array)
-// so that JS side could update positions in breakpoint objects.
static Object* Runtime_LiveEditPatchFunctionPositions(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
- Handle<Object> result =
- LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
-
- return *result;
+ return LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
}
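
A rough sketch (hypothetical helper, ignoring positions that fall inside a
changed region) of the translation that the position change array drives:

    // groups is a flat array of (change_begin, change_end,
    // change_end_new_position) triples, sorted by change_begin.
    int TranslatePositionSketch(int pos, const int* groups, int group_count) {
      int delta = 0;
      for (int i = 0; i < group_count; i++) {
        int begin = groups[3 * i];
        int end = groups[3 * i + 1];
        int new_end = groups[3 * i + 2];
        if (pos < begin) break;   // sorted: later changes cannot apply
        delta = new_end - end;    // total shift after this change
      }
      return pos + delta;
    }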
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 07f51e4be..a7f0bf37b 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -184,6 +184,7 @@ namespace internal {
F(FunctionSetPrototype, 2, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
+ F(FunctionRemovePrototype, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScript, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
@@ -232,6 +233,7 @@ namespace internal {
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
+ F(SwapElements, 3, 1) \
\
/* Getters and Setters */ \
F(DefineAccessor, -1 /* 4 or 5 */, 1) \
@@ -337,7 +339,8 @@ namespace internal {
F(LiveEditGatherCompileInfo, 2, 1) \
F(LiveEditReplaceScript, 3, 1) \
F(LiveEditReplaceFunctionCode, 2, 1) \
- F(LiveEditRelinkFunctionToScript, 2, 1) \
+ F(LiveEditFunctionSetScript, 2, 1) \
+ F(LiveEditReplaceRefToNestedFunction, 3, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
F(LiveEditCheckAndDropActivations, 2, 1) \
F(LiveEditCompareStringsLinewise, 2, 1) \
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 193001988..be93c4feb 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -471,17 +471,6 @@ function TO_STRING() {
}
-// Specialized version of String.charAt. It assumes string as
-// the receiver type and that the index is a number.
-function STRING_CHAR_AT(pos) {
- var char_code = %_FastCharCodeAt(this, pos);
- if (!%_IsSmi(char_code)) {
- return %StringCharAt(this, pos);
- }
- return %_CharFromCode(char_code);
-}
-
-
/* -------------------------------------
- - - C o n v e r s i o n s - - -
-------------------------------------
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index a3aaf5eb1..f3532533e 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -1164,4 +1164,71 @@ Object* ConstructStubCompiler::GetCode() {
}
+CallOptimization::CallOptimization(LookupResult* lookup) {
+ if (!lookup->IsProperty() || !lookup->IsCacheable() ||
+ lookup->type() != CONSTANT_FUNCTION) {
+ Initialize(NULL);
+ } else {
+ // We only optimize constant function calls.
+ Initialize(lookup->GetConstantFunction());
+ }
+}
+
+CallOptimization::CallOptimization(JSFunction* function) {
+ Initialize(function);
+}
+
+
+int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
+ JSObject* holder) const {
+ ASSERT(is_simple_api_call_);
+ if (expected_receiver_type_ == NULL) return 0;
+ int depth = 0;
+ while (object != holder) {
+ if (object->IsInstanceOf(expected_receiver_type_)) return depth;
+ object = JSObject::cast(object->GetPrototype());
+ ++depth;
+ }
+ if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+ return kInvalidProtoDepth;
+}
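
A minimal sketch (invented Obj type, not V8's) of the depth computation
above: walk from the receiver toward the holder, counting hops until an
object of the expected type appears.

    static const int kInvalidProtoDepth = -1;  // assumed sentinel value

    struct Obj {
      const Obj* prototype;
      bool is_expected_type;
    };

    int DepthOfExpectedType(const Obj* object, const Obj* holder) {
      int depth = 0;
      while (object != holder) {
        if (object->is_expected_type) return depth;
        object = object->prototype;
        ++depth;
      }
      return holder->is_expected_type ? depth : kInvalidProtoDepth;
    }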
+
+
+void CallOptimization::Initialize(JSFunction* function) {
+ constant_function_ = NULL;
+ is_simple_api_call_ = false;
+ expected_receiver_type_ = NULL;
+ api_call_info_ = NULL;
+
+ if (function == NULL || !function->is_compiled()) return;
+
+ constant_function_ = function;
+ AnalyzePossibleApiFunction(function);
+}
+
+
+void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
+ SharedFunctionInfo* sfi = function->shared();
+ if (!sfi->IsApiFunction()) return;
+ FunctionTemplateInfo* info = sfi->get_api_func_data();
+
+ // Require a C++ callback.
+ if (info->call_code()->IsUndefined()) return;
+ api_call_info_ = CallHandlerInfo::cast(info->call_code());
+
+ // Accept signatures that either have no restrictions at all or
+ // only have restrictions on the receiver.
+ if (!info->signature()->IsUndefined()) {
+ SignatureInfo* signature = SignatureInfo::cast(info->signature());
+ if (!signature->args()->IsUndefined()) return;
+ if (!signature->receiver()->IsUndefined()) {
+ expected_receiver_type_ =
+ FunctionTemplateInfo::cast(signature->receiver());
+ }
+ }
+
+ is_simple_api_call_ = true;
+}
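
The decision above, distilled into plain data (hypothetical mirror, for
illustration): a call is a "simple" API call when there is a C++ callback
and the signature, if any, restricts only the receiver.

    struct ApiInfo {
      bool has_call_code;
      bool has_signature;
      bool signature_restricts_args;
    };

    bool IsSimpleApiCall(const ApiInfo& info) {
      if (!info.has_call_code) return false;  // require a C++ callback
      if (!info.has_signature) return true;   // no restrictions at all
      return !info.signature_restricts_args;  // receiver-only is acceptable
    }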
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 57442ffc7..2e0faf6a8 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -615,6 +615,55 @@ class ConstructStubCompiler: public StubCompiler {
};
+// Holds information about possible function call optimizations.
+class CallOptimization BASE_EMBEDDED {
+ public:
+ explicit CallOptimization(LookupResult* lookup);
+
+ explicit CallOptimization(JSFunction* function);
+
+ bool is_constant_call() const {
+ return constant_function_ != NULL;
+ }
+
+ JSFunction* constant_function() const {
+ ASSERT(constant_function_ != NULL);
+ return constant_function_;
+ }
+
+ bool is_simple_api_call() const {
+ return is_simple_api_call_;
+ }
+
+ FunctionTemplateInfo* expected_receiver_type() const {
+ ASSERT(is_simple_api_call_);
+ return expected_receiver_type_;
+ }
+
+ CallHandlerInfo* api_call_info() const {
+ ASSERT(is_simple_api_call_);
+ return api_call_info_;
+ }
+
+ // Returns the depth of the object having the expected type in the
+ // prototype chain between the two arguments.
+ int GetPrototypeDepthOfExpectedType(JSObject* object,
+ JSObject* holder) const;
+
+ private:
+ void Initialize(JSFunction* function);
+
+ // Determines whether the given function can be called using the
+ // fast api call builtin.
+ void AnalyzePossibleApiFunction(JSFunction* function);
+
+ JSFunction* constant_function_;
+ bool is_simple_api_call_;
+ FunctionTemplateInfo* expected_receiver_type_;
+ CallHandlerInfo* api_call_info_;
+};
+
+
typedef Object* (*CustomCallGenerator)(CallStubCompiler* compiler,
Object* object,
JSObject* holder,
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 86d3ad87c..fd86ddaa8 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -54,6 +54,7 @@ function InstallFunctions(object, attributes, functions) {
var key = functions[i];
var f = functions[i + 1];
%FunctionSetName(f, key);
+ %FunctionRemovePrototype(f);
%SetProperty(object, key, f, attributes);
}
%ToFastProperties(object);
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index a5676c5df..7563c6934 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 6
+#define BUILD_NUMBER 8
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 9c7f9b618..c9ab6275f 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -89,6 +89,11 @@ void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
}
+void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
void Assembler::emit_rex_64(Register reg, const Operand& op) {
emit(0x48 | reg.high_bit() << 2 | op.rex_);
}
@@ -160,6 +165,12 @@ void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
}
+void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
void Assembler::emit_optional_rex_32(Register rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
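
For reference, a standalone sketch of the REX byte these emitters assemble -
0100WRXB, where W requests 64-bit operand size and R/X/B extend the reg,
index and base/rm register fields:

    // REX.W form, as in emit_rex_64(Register, XMMRegister) above: the high
    // bit of the reg operand lands in REX.R, the high bit of the rm operand
    // in REX.B.
    unsigned char RexW(int reg_code, int rm_code) {
      return 0x48 | ((reg_code & 0x8) >> 1) | ((rm_code & 0x8) >> 3);
    }
    // e.g. RexW(9 /* r9 or xmm9 */, 3 /* rbx or xmm3 */) == 0x4C.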
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 2d660eca8..1c00ebca4 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -116,8 +116,10 @@ void CpuFeatures::Probe() {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code =
- Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
+ Object* code = Heap::CreateCode(desc,
+ NULL,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>());
if (!code->IsCode()) return;
PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
@@ -2040,6 +2042,14 @@ void Assembler::fldz() {
}
+void Assembler::fldpi() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xEB);
+}
+
+
void Assembler::fld_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2398,6 +2408,53 @@ void Assembler::movd(XMMRegister dst, Register src) {
}
+void Assembler::movd(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x7E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movq(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movq(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x7E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
+ ASSERT(is_uint2(imm8));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x17);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
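
A small sketch (illustration only) of the ModRM byte that the matching
emit_sse_operand(Register, XMMRegister) overload produces - the
register-direct form with mod = 11:

    unsigned char SseModRM(int dst_low_bits, int src_low_bits) {
      return 0xC0 | (dst_low_bits << 3) | src_low_bits;
    }
    // e.g. extractps(rax, xmm0, 1) emits 66 0F 3A 17 C0 01, where
    // C0 == SseModRM(0, 0) and the trailing 01 selects 32-bit lane 1.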
+
+
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2546,12 +2603,23 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
- emit(0x0f);
+ emit(0x0F);
emit(0x57);
emit_sse_operand(dst, src);
}
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2588,6 +2656,10 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
+void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
+ emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
+}
+
// Relocation information implementations.
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 842b61194..d0778658b 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -1019,6 +1019,7 @@ class Assembler : public Malloced {
void fld1();
void fldz();
+ void fldpi();
void fld_s(const Operand& adr);
void fld_d(const Operand& adr);
@@ -1080,6 +1081,10 @@ class Assembler : public Malloced {
// SSE2 instructions
void movd(XMMRegister dst, Register src);
+ void movd(Register dst, XMMRegister src);
+ void movq(XMMRegister dst, Register src);
+ void movq(Register dst, XMMRegister src);
+ void extractps(Register dst, XMMRegister src, byte imm8);
void movsd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src);
@@ -1101,6 +1106,7 @@ class Assembler : public Malloced {
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void sqrtsd(XMMRegister dst, XMMRegister src);
void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
@@ -1109,6 +1115,7 @@ class Assembler : public Malloced {
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
+ void emit_sse_operand(Register dst, XMMRegister src);
// Use either movsd or movlpd.
// void movdbl(XMMRegister dst, const Operand& src);
@@ -1175,8 +1182,9 @@ class Assembler : public Malloced {
// the top bit of both register codes.
// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
// REX.W is set.
- inline void emit_rex_64(Register reg, Register rm_reg);
inline void emit_rex_64(XMMRegister reg, Register rm_reg);
+ inline void emit_rex_64(Register reg, XMMRegister rm_reg);
+ inline void emit_rex_64(Register reg, Register rm_reg);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the destination, index, and base register codes.
@@ -1234,9 +1242,13 @@ class Assembler : public Malloced {
inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
// As for emit_optional_rex_32(Register, Register), except that
- // the registers are XMM registers.
+  // one of the registers is an XMM register.
inline void emit_optional_rex_32(XMMRegister reg, Register base);
+ // As for emit_optional_rex_32(Register, Register), except that
+  // one of the registers is an XMM register.
+ inline void emit_optional_rex_32(Register reg, XMMRegister base);
+
// As for emit_optional_rex_32(Register, const Operand&), except that
// the register is an XMM register.
inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 5036637a4..39f543df5 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -191,6 +191,34 @@ class DeferredInlineSmiOperation: public DeferredCode {
};
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiOperationReversed(Token::Value op,
+ Register dst,
+ Smi* value,
+ Register src,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ value_(value),
+ src_(src),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperationReversed");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ Register dst_;
+ Smi* value_;
+ Register src_;
+ OverwriteMode overwrite_mode_;
+};
+
+
class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading a floating point value. Input value must
@@ -2682,8 +2710,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.GetValue();
}
Load(node->value());
- GenericBinaryOperation(node->binary_op(),
- node->type(),
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
}
@@ -3531,9 +3560,16 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
overwrite_mode = OVERWRITE_RIGHT;
}
- Load(node->left());
- Load(node->right());
- GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+ if (node->left()->IsTrivial()) {
+ Load(node->right());
+ Result right = frame_->Pop();
+ frame_->Push(node->left());
+ frame_->Push(&right);
+ } else {
+ Load(node->left());
+ Load(node->right());
+ }
+ GenericBinaryOperation(node, overwrite_mode);
}
}
@@ -3863,43 +3899,11 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateFastCharCodeAt");
ASSERT(args->length() == 2);
- Label slow_case;
- Label end;
- Label not_a_flat_string;
- Label try_again_with_new_string;
- Label ascii_string;
- Label got_char_code;
-
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
- // Get register rcx to use as shift amount later.
- Result shift_amount;
- if (object.is_register() && object.reg().is(rcx)) {
- Result fresh = allocator_->Allocate();
- shift_amount = object;
- object = fresh;
- __ movq(object.reg(), rcx);
- }
- if (index.is_register() && index.reg().is(rcx)) {
- Result fresh = allocator_->Allocate();
- shift_amount = index;
- index = fresh;
- __ movq(index.reg(), rcx);
- }
- // There could be references to ecx in the frame. Allocating will
- // spill them, otherwise spill explicitly.
- if (shift_amount.is_valid()) {
- frame_->Spill(rcx);
- } else {
- shift_amount = allocator()->Allocate(rcx);
- }
- ASSERT(shift_amount.is_register());
- ASSERT(shift_amount.reg().is(rcx));
- ASSERT(allocator_->count(rcx) == 1);
-
// We will mutate the index register and possibly the object register.
// The case where they are somehow the same register is handled
// because we only mutate them in the case where the receiver is a
@@ -3909,89 +3913,34 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
frame_->Spill(object.reg());
frame_->Spill(index.reg());
- // We need a single extra temporary register.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
+ // We need two extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
// There is no virtual frame effect from here up to the final result
// push.
-
- // If the receiver is a smi trigger the slow case.
- __ JumpIfSmi(object.reg(), &slow_case);
-
- // If the index is negative or non-smi trigger the slow case.
- __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
-
- // Untag the index.
- __ SmiToInteger32(index.reg(), index.reg());
-
- __ bind(&try_again_with_new_string);
- // Fetch the instance type of the receiver into rcx.
- __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
- __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the slow case.
- __ testb(rcx, Immediate(kIsNotStringMask));
- __ j(not_zero, &slow_case);
-
- // Check for index out of range.
- __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
- __ j(greater_equal, &slow_case);
- // Reload the instance type (into the temp register this time)..
- __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
- __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-
- // We need special handling for non-flat strings.
- ASSERT_EQ(0, kSeqStringTag);
- __ testb(temp.reg(), Immediate(kStringRepresentationMask));
- __ j(not_zero, &not_a_flat_string);
- // Check for 1-byte or 2-byte string.
- ASSERT_EQ(0, kTwoByteStringTag);
- __ testb(temp.reg(), Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the temp register.
- __ movzxwl(temp.reg(), FieldOperand(object.reg(),
- index.reg(),
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- __ bind(&ascii_string);
- // Load the byte into the temp register.
- __ movzxbl(temp.reg(), FieldOperand(object.reg(),
- index.reg(),
- times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ Integer32ToSmi(temp.reg(), temp.reg());
- __ jmp(&end);
-
- // Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(temp.reg(), Immediate(kStringRepresentationMask));
- __ cmpb(temp.reg(), Immediate(kConsStringTag));
- __ j(not_equal, &slow_case);
-
- // ConsString.
- // Check that the right hand side is the empty string (ie if this is really a
- // flat string in a cons string). If that is not the case we would rather go
- // to the runtime system now, to flatten the string.
- __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
- __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
- __ j(not_equal, &slow_case);
- // Get the first of the two strings.
- __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
- __ jmp(&try_again_with_new_string);
+ Label slow_case;
+ Label exit;
+ StringHelper::GenerateFastCharCodeAt(masm_,
+ object.reg(),
+ index.reg(),
+ scratch.reg(),
+ result.reg(),
+ &slow_case,
+ &slow_case,
+ &slow_case,
+ &slow_case);
+ __ jmp(&exit);
__ bind(&slow_case);
// Move the undefined value into the result register, which will
// trigger the slow case.
- __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(result.reg(), Heap::kUndefinedValueRootIndex);
- __ bind(&end);
- frame_->Push(&temp);
+ __ bind(&exit);
+ frame_->Push(&result);
}
@@ -4000,41 +3949,25 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
+
Result code = frame_->Pop();
code.ToRegister();
ASSERT(code.is_valid());
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
-
- JumpTarget slow_case;
- JumpTarget exit;
-
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- Condition is_smi = __ CheckSmi(code.reg());
- slow_case.Branch(NegateCondition(is_smi), &code, not_taken);
-
- __ SmiToInteger32(kScratchRegister, code.reg());
- __ cmpl(kScratchRegister, Immediate(String::kMaxAsciiCharCode));
- slow_case.Branch(above, &code, not_taken);
-
- __ Move(temp.reg(), Factory::single_character_string_cache());
- __ movq(temp.reg(), FieldOperand(temp.reg(),
- kScratchRegister, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
- slow_case.Branch(equal, &code, not_taken);
- code.Unuse();
+ // StringHelper::GenerateCharFromCode may do a runtime call.
+ frame_->SpillAll();
- frame_->Push(&temp);
- exit.Jump();
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
- slow_case.Bind(&code);
- frame_->Push(&code);
- Result result = frame_->CallRuntime(Runtime::kCharFromCode, 1);
+ StringHelper::GenerateCharFromCode(masm_,
+ code.reg(),
+ result.reg(),
+ scratch.reg(),
+ CALL_FUNCTION);
frame_->Push(&result);
-
- exit.Bind();
}
@@ -4050,22 +3983,230 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-// Generates the Math.pow method - currently just calls runtime.
+// Generates the Math.pow method. Only handles special cases and
+// branches to the runtime system for everything else. Please note
+// that this function assumes that the callsite has executed ToNumber
+// on both arguments.
void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->Push(&res);
+
+ Label allocate_return;
+ // Load the two operands while leaving the values on the frame.
+ frame()->Dup();
+ Result exponent = frame()->Pop();
+ exponent.ToRegister();
+ frame()->Spill(exponent.reg());
+ frame()->PushElementAt(1);
+ Result base = frame()->Pop();
+ base.ToRegister();
+ frame()->Spill(base.reg());
+
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ ASSERT(!exponent.reg().is(base.reg()));
+ JumpTarget call_runtime;
+
+ // Save 1 in xmm3 - we need this several times later on.
+ __ movl(answer.reg(), Immediate(1));
+ __ cvtlsi2sd(xmm3, answer.reg());
+
+ Label exponent_nonsmi;
+ Label base_nonsmi;
+ // If the exponent is a heap number go to that specific case.
+ __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
+ __ JumpIfNotSmi(base.reg(), &base_nonsmi);
+
+ // Optimized version when y is an integer.
+ Label powi;
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&powi);
+ // exponent is smi and base is a heapnumber.
+ __ bind(&base_nonsmi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+ // Optimized version of pow if y is an integer.
+ __ bind(&powi);
+ __ SmiToInteger32(exponent.reg(), exponent.reg());
+
+ // Save exponent in base as we need to check if exponent is negative later.
+ // We know that base and exponent are in different registers.
+ __ movl(base.reg(), exponent.reg());
+
+ // Get absolute value of exponent.
+ Label no_neg;
+ __ cmpl(exponent.reg(), Immediate(0));
+ __ j(greater_equal, &no_neg);
+ __ negl(exponent.reg());
+ __ bind(&no_neg);
+
+ // Load xmm1 with 1.
+ __ movsd(xmm1, xmm3);
+ Label while_true;
+ Label no_multiply;
+
+ __ bind(&while_true);
+ __ shrl(exponent.reg(), Immediate(1));
+ __ j(not_carry, &no_multiply);
+ __ mulsd(xmm1, xmm0);
+ __ bind(&no_multiply);
+ __ testl(exponent.reg(), exponent.reg());
+ __ mulsd(xmm0, xmm0);
+ __ j(not_zero, &while_true);
+
+  // base now holds the original value of y - if y is negative, return
+  // 1/result.
+ __ testl(base.reg(), base.reg());
+ __ j(positive, &allocate_return);
+ // Special case if xmm1 has reached infinity.
+ __ movl(answer.reg(), Immediate(0x7FB00000));
+ __ movd(xmm0, answer.reg());
+ __ cvtss2sd(xmm0, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ call_runtime.Branch(equal);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
+ __ jmp(&allocate_return);
+
+  // The exponent (or both operands) is a heapnumber - from here on we
+  // work on doubles.
+ __ bind(&exponent_nonsmi);
+ __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
+  // Test if the exponent is NaN.
+ __ ucomisd(xmm1, xmm1);
+ call_runtime.Branch(parity_even);
+
+ Label base_not_smi;
+ Label handle_special_cases;
+ __ JumpIfNotSmi(base.reg(), &base_not_smi);
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&handle_special_cases);
+ __ bind(&base_not_smi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+ __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ // base is NaN or +/-Infinity
+ call_runtime.Branch(greater_equal);
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+ // base is in xmm0 and exponent is in xmm1.
+ __ bind(&handle_special_cases);
+ Label not_minus_half;
+ // Test for -0.5.
+ // Load xmm2 with -0.5.
+ __ movl(answer.reg(), Immediate(0xBF000000));
+ __ movd(xmm2, answer.reg());
+ __ cvtss2sd(xmm2, xmm2);
+ // xmm2 now has -0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &not_minus_half);
+
+ // Calculates reciprocal of square root.
+  // Note that 1/sqrt(x) = sqrt(1/x).
+ __ divsd(xmm3, xmm0);
+ __ movsd(xmm1, xmm3);
+ __ sqrtsd(xmm1, xmm1);
+ __ jmp(&allocate_return);
+
+ // Test for 0.5.
+ __ bind(&not_minus_half);
+ // Load xmm2 with 0.5.
+ // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+ __ addsd(xmm2, xmm3);
+ // xmm2 now has 0.5.
+ __ comisd(xmm2, xmm1);
+ call_runtime.Branch(not_equal);
+
+ // Calculates square root.
+ __ movsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
+
+ JumpTarget done;
+ Label failure, success;
+ __ bind(&allocate_return);
+ // Make a copy of the frame to enable us to handle allocation
+ // failure after the JumpTarget jump.
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
+ __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+ // Remove the two original values from the frame - we only need those
+ // in the case where we branch to runtime.
+ frame()->Drop(2);
+ exponent.Unuse();
+ base.Unuse();
+ done.Jump(&answer);
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ // If we experience an allocation failure we branch to runtime.
+ __ bind(&failure);
+ call_runtime.Bind();
+ answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
+
+ done.Bind(&answer);
+ frame()->Push(&answer);
}
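
A plain C++ rendering (illustrative only, xmm registers mapped to locals) of
the square-and-multiply loop emitted above for integer exponents:

    double PowiSketch(double base, int exponent) {
      // The emitted code works on |exponent| and fixes the sign up at the
      // end, returning 1/result for negative exponents.
      unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                   : static_cast<unsigned>(exponent);
      double result = 1.0;  // xmm1, seeded with the constant 1 from xmm3
      double x = base;      // xmm0
      while (bits != 0) {
        if (bits & 1) result *= x;  // carry set after shrl: multiply
        x *= x;                     // square every round
        bits >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }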
-// Generates the Math.sqrt method - currently just calls runtime.
+// Generates the Math.sqrt method. Please note - this function assumes that
+// the callsite has executed ToNumber on the argument.
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
- Result res = frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- frame_->Push(&res);
+
+ // Leave original value on the frame if we need to call runtime.
+ frame()->Dup();
+ Result result = frame()->Pop();
+ result.ToRegister();
+ frame()->Spill(result.reg());
+ Label runtime;
+ Label non_smi;
+ Label load_done;
+ JumpTarget end;
+
+ __ JumpIfNotSmi(result.reg(), &non_smi);
+ __ SmiToInteger32(result.reg(), result.reg());
+ __ cvtlsi2sd(xmm0, result.reg());
+ __ jmp(&load_done);
+ __ bind(&non_smi);
+ __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &runtime);
+ __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
+
+ __ bind(&load_done);
+ __ sqrtsd(xmm0, xmm0);
+ // A copy of the virtual frame to allow us to go to runtime after the
+ // JumpTarget jump.
+ Result scratch = allocator()->Allocate();
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
+
+ __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
+ frame()->Drop(1);
+ scratch.Unuse();
+ end.Jump(&result);
+ // We only branch to runtime if we have an allocation error.
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ __ bind(&runtime);
+ result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+
+ end.Bind(&result);
+ frame()->Push(&result);
}
@@ -4372,6 +4513,115 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
}
+class DeferredSwapElements: public DeferredCode {
+ public:
+ DeferredSwapElements(Register object, Register index1, Register index2)
+ : object_(object), index1_(index1), index2_(index2) {
+ set_comment("[ DeferredSwapElements");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+ __ push(object_);
+ __ push(index1_);
+ __ push(index2_);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateSwapElements");
+
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Result index2 = frame_->Pop();
+ index2.ToRegister();
+
+ Result index1 = frame_->Pop();
+ index1.ToRegister();
+
+ Result object = frame_->Pop();
+ object.ToRegister();
+
+ Result tmp1 = allocator()->Allocate();
+ tmp1.ToRegister();
+ Result tmp2 = allocator()->Allocate();
+ tmp2.ToRegister();
+
+ frame_->Spill(object.reg());
+ frame_->Spill(index1.reg());
+ frame_->Spill(index2.reg());
+
+ DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
+ index1.reg(),
+ index2.reg());
+
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
+ deferred->Branch(below);
+ __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
+ Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ deferred->Branch(not_zero);
+
+ // Check the object's elements are in fast case.
+ __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ deferred->Branch(not_equal);
+
+ // Check that both indices are smis.
+ Condition both_smi = __ CheckBothSmi(index1.reg(), index2.reg());
+ deferred->Branch(NegateCondition(both_smi));
+
+ // Bring addresses into index1 and index2.
+ __ SmiToInteger32(index1.reg(), index1.reg());
+ __ lea(index1.reg(), FieldOperand(tmp1.reg(),
+ index1.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ SmiToInteger32(index2.reg(), index2.reg());
+ __ lea(index2.reg(), FieldOperand(tmp1.reg(),
+ index2.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Swap elements.
+ __ movq(object.reg(), Operand(index1.reg(), 0));
+ __ movq(tmp2.reg(), Operand(index2.reg(), 0));
+ __ movq(Operand(index2.reg(), 0), object.reg());
+ __ movq(Operand(index1.reg(), 0), tmp2.reg());
+
+ Label done;
+ __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
+  // Possible optimization: check that both values are Smis
+  // (OR them together and test the result against the Smi mask).
+
+ __ movq(tmp2.reg(), tmp1.reg());
+ RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
+ __ CallStub(&recordWrite1);
+
+ RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
+ __ CallStub(&recordWrite2);
+
+ __ bind(&done);
+
+ deferred->BindExit();
+ frame_->Push(Factory::undefined_value());
+}
+
+
void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
Comment cmnt(masm_, "[ GenerateCallFunction");
@@ -4390,19 +4640,19 @@ void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and jump to the runtime.
Load(args->at(0));
- Result answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
- frame_->Push(&answer);
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and jump to the runtime.
Load(args->at(0));
- Result answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
- frame_->Push(&answer);
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
}
@@ -5602,7 +5852,7 @@ void CodeGenerator::Comparison(AstNode* node,
CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
- __ SmiCompare(result.reg(), Smi::FromInt(0));
+ __ testq(result.reg(), result.reg());
result.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
@@ -5616,8 +5866,8 @@ void CodeGenerator::Comparison(AstNode* node,
// Test string equality and comparison.
if (cc == equal) {
Label comparison_done;
- __ cmpl(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(1));
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
__ j(not_equal, &comparison_done);
uint8_t char_value =
static_cast<uint8_t>(String::cast(*right_val)->Get(0));
@@ -5625,9 +5875,9 @@ void CodeGenerator::Comparison(AstNode* node,
Immediate(char_value));
__ bind(&comparison_done);
} else {
- __ movl(temp2.reg(),
+ __ movq(temp2.reg(),
FieldOperand(left_side.reg(), String::kLengthOffset));
- __ subl(temp2.reg(), Immediate(1));
+ __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
Label comparison;
// If the length is 0 then the subtraction gave -1 which compares less
// than any character.
@@ -5645,8 +5895,8 @@ void CodeGenerator::Comparison(AstNode* node,
__ j(not_equal, &characters_were_different);
// If the first character is the same then the long string sorts after
// the short one.
- __ cmpl(FieldOperand(left_side.reg(), String::kLengthOffset),
- Immediate(1));
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
__ bind(&characters_were_different);
}
temp2.Unuse();
@@ -5699,13 +5949,9 @@ void CodeGenerator::Comparison(AstNode* node,
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
- // Call the compare stub.
- // TODO(whesse@chromium.org): Enable the inlining flag once
- // GenerateInlineNumberComparison is implemented.
- CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- // The result is a Smi, which is negative, zero, or positive.
- __ SmiTest(answer.reg()); // Sets both zero and sign flag.
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
answer.Unuse();
dest->Split(cc);
} else {
@@ -5733,12 +5979,9 @@ void CodeGenerator::Comparison(AstNode* node,
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
- // Call the compare stub.
- // TODO(whesse@chromium.org): Enable the inlining flag once
- // GenerateInlineNumberComparison is implemented.
- CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ SmiTest(answer.reg()); // Sets both zero and sign flags.
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
answer.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
@@ -5755,14 +5998,70 @@ void CodeGenerator::Comparison(AstNode* node,
}
+// Load a comparison operand into an XMM register. Jump to the not_numbers
+// target, passing the left and right results, if the operand is not a number.
+static void LoadComparisonOperand(MacroAssembler* masm_,
+ Result* operand,
+ XMMRegister xmm_reg,
+ Result* left_side,
+ Result* right_side,
+ JumpTarget* not_numbers) {
+ Label done;
+ if (operand->type_info().IsDouble()) {
+ // Operand is known to be a heap number, just load it.
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ } else if (operand->type_info().IsSmi()) {
+ // Operand is known to be a smi. Convert it to double and keep the original
+ // smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ } else {
+ // Operand type not known, check for smi or heap number.
+ Label smi;
+ __ JumpIfSmi(operand->reg(), &smi);
+ if (!operand->type_info().IsNumber()) {
+ __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ not_numbers->Branch(not_equal, left_side, right_side, taken);
+ }
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&smi);
+ // Convert the smi to a double and keep the original smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ __ jmp(&done);
+ }
+ __ bind(&done);
+}
+
+
void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
ControlDestination* dest) {
ASSERT(left_side->is_register());
ASSERT(right_side->is_register());
- // TODO(whesse@chromium.org): Implement this function, and enable the
- // corresponding flags in the CompareStub.
+
+ JumpTarget not_numbers;
+ // Load the left and right operands into xmm0 and xmm1 and compare them.
+ LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
+ &not_numbers);
+ LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
+ &not_numbers);
+ __ comisd(xmm0, xmm1);
+ // Bail out if a NaN is involved.
+ not_numbers.Branch(parity_even, left_side, right_side);
+
+ // Split to destination targets based on comparison.
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+
+ not_numbers.Bind(left_side, right_side);
}
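In scalar terms, the inlined path behaves like an ordinary double comparison with an explicit unordered check: comisd raises the parity flag when either operand is NaN, which is exactly what the parity_even bailout tests. A rough C++ analogue of the semantics (a sketch, not the generated code; 'kBailout' stands in for the not_numbers target):

    #include <cmath>

    enum Outcome { kTrue, kFalse, kBailout };

    Outcome InlineNumberCompare(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) {
        return kBailout;  // comisd sets PF; parity_even takes the slow path.
      }
      // DoubleCondition(cc) maps the integer condition to its unsigned
      // counterpart (e.g. 'less' becomes 'below'), matching comisd's flags.
      return (left < right) ? kTrue : kFalse;  // shown for cc == less
    }
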
@@ -5789,9 +6088,72 @@ class DeferredInlineBinaryOperation: public DeferredCode {
void DeferredInlineBinaryOperation::Generate() {
+ Label done;
+ if ((op_ == Token::ADD)
+ || (op_ == Token::SUB)
+ || (op_ == Token::MUL)
+ || (op_ == Token::DIV)) {
+ Label call_runtime;
+ Label left_smi, right_smi, load_right, do_op;
+ __ JumpIfSmi(left_, &left_smi);
+ __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_LEFT) {
+ __ movq(dst_, left_);
+ }
+ __ jmp(&load_right);
+
+ __ bind(&left_smi);
+ __ SmiToInteger32(left_, left_);
+ __ cvtlsi2sd(xmm0, left_);
+ __ Integer32ToSmi(left_, left_);
+ if (mode_ == OVERWRITE_LEFT) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+
+ __ bind(&load_right);
+ __ JumpIfSmi(right_, &right_smi);
+ __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ movq(dst_, right_);
+ } else if (mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+ __ jmp(&do_op);
+
+ __ bind(&right_smi);
+ __ SmiToInteger32(right_, right_);
+ __ cvtlsi2sd(xmm1, right_);
+ __ Integer32ToSmi(right_, right_);
+ if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+
+ __ bind(&do_op);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
+ __ jmp(&done);
+
+ __ bind(&call_runtime);
+ }
GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ bind(&done);
}
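The overwrite mode determines where the inlined double path above stores its result: it can reuse an input's heap number box or allocate a fresh one, bailing out to the runtime if allocation fails. A compact sketch of that policy (names are illustrative, not V8's):

    enum Mode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    struct HeapNumberBox { double value; };

    // OVERWRITE_LEFT reuses the left operand's box when the left operand
    // is a heap number (null here means it was a smi); OVERWRITE_RIGHT is
    // symmetric; NO_OVERWRITE always allocates.
    HeapNumberBox* ResultBox(Mode mode,
                             HeapNumberBox* left,
                             HeapNumberBox* right,
                             HeapNumberBox* (*allocate)()) {
      if (mode == OVERWRITE_LEFT && left != nullptr) return left;
      if (mode == OVERWRITE_RIGHT && right != nullptr) return right;
      return allocate();  // may fall back to the runtime on failure
    }
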
@@ -5846,10 +6208,10 @@ static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
}
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
- StaticType* type,
+void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = expr->op();
Comment cmnt_token(masm_, Token::String(op));
if (op == Token::COMMA) {
@@ -5875,8 +6237,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
Result answer;
if (left_is_string) {
if (right_is_string) {
- // TODO(lrn): if both are constant strings
- // -- do a compile time cons, if allocation during codegen is allowed.
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
answer = frame_->CallStub(&stub, 2);
} else {
@@ -5915,25 +6275,29 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
Result answer;
if (left_is_non_smi_constant || right_is_non_smi_constant) {
+ // Go straight to the slow case, with no smi code.
GenericBinaryOpStub stub(op,
overwrite_mode,
NO_SMI_CODE_IN_STUB,
operands_type);
answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
+ false, overwrite_mode);
} else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
+ true, overwrite_mode);
} else {
// Set the flags based on the operation, type and loop nesting level.
// Bit operations are assumed to likely operate on smis; even so, the
// inline smi check code is generated only when the operation is part of
// a loop. For all other operations, the smi check is inlined only for
// likely smis inside loops.
- if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ if (loop_nesting() > 0 &&
+ (Token::IsBitOp(op) ||
+ operands_type.IsInteger32() ||
+ expr->type()->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
} else {
GenericBinaryOpStub stub(op,
overwrite_mode,
@@ -6025,26 +6389,32 @@ void DeferredInlineSmiOperation::Generate() {
}
-Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+void DeferredInlineSmiOperationReversed::Generate() {
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, value_, src_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
Result* operand,
Handle<Object> value,
- StaticType* type,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
// Consumes the argument "operand".
-
- // TODO(199): Optimize some special cases of operations involving a
- // smi literal (multiply by 2, shift by 0, etc.).
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
overwrite_mode);
} else {
- return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
overwrite_mode);
}
}
@@ -6053,6 +6423,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Token::Value op = expr->op();
Result answer;
switch (op) {
case Token::ADD: {
@@ -6081,7 +6452,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SUB: {
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
overwrite_mode);
} else {
operand->ToRegister();
@@ -6104,7 +6475,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
@@ -6130,7 +6501,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
@@ -6157,9 +6528,45 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SHL:
if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ // Move operand into rcx and also into a second register.
+ // If operand is already in a register, take advantage of that.
+ // This lets us modify rcx, but still bail out to deferred code.
+ Result right;
+ Result right_copy_in_rcx;
+ TypeInfo right_type_info = operand->type_info();
+ operand->ToRegister();
+ if (operand->reg().is(rcx)) {
+ right = allocator()->Allocate();
+ __ movq(right.reg(), rcx);
+ frame_->Spill(rcx);
+ right_copy_in_rcx = *operand;
+ } else {
+ right_copy_in_rcx = allocator()->Allocate(rcx);
+ __ movq(rcx, operand->reg());
+ right = *operand;
+ }
+ operand->Unuse();
+
+ answer = allocator()->Allocate();
+ DeferredInlineSmiOperationReversed* deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ right.reg(),
+ overwrite_mode);
+ __ movq(answer.reg(), Immediate(int_value));
+ __ SmiToInteger32(rcx, rcx);
+ if (!right_type_info.IsSmi()) {
+ Condition is_smi = masm_->CheckSmi(right.reg());
+ deferred->Branch(NegateCondition(is_smi));
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(right.reg(),
+ "Static type info claims non-smi is smi in (const SHL smi).");
+ }
+ __ shl_cl(answer.reg());
+ __ Integer32ToSmi(answer.reg(), answer.reg());
+
+ deferred->BindExit();
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -6265,10 +6672,10 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
default: {
Result constant_operand(value);
if (reversed) {
- answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
overwrite_mode);
} else {
- answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
overwrite_mode);
}
break;
@@ -6278,10 +6685,11 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
return answer;
}
-Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
Result* left,
Result* right,
OverwriteMode overwrite_mode) {
+ Token::Value op = expr->op();
Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
@@ -6842,6 +7250,8 @@ void Reference::SetValue(InitState init_state) {
Result tmp = cgen_->allocator_->Allocate();
ASSERT(tmp.is_valid());
+ Result tmp2 = cgen_->allocator_->Allocate();
+ ASSERT(tmp2.is_valid());
// Determine whether the value is a constant before putting it
// in a register.
@@ -6857,32 +7267,42 @@ void Reference::SetValue(InitState init_state) {
key.reg(),
receiver.reg());
- // Check that the value is a smi if it is not a constant.
- // We can skip the write barrier for smis and constants.
- if (!value_is_constant) {
- __ JumpIfNotSmi(value.reg(), deferred->entry_label());
- }
-
- // Check that the key is a non-negative smi.
- __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-
// Check that the receiver is not a smi.
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
+ // Check that the key is a smi.
+ if (!key.is_smi()) {
+ __ JumpIfNotSmi(key.reg(), deferred->entry_label());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
+ }
+
// Check that the receiver is a JSArray.
__ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
deferred->Branch(not_equal);
// Check that the key is within bounds. Both the key and the
- // length of the JSArray are smis.
+ // length of the JSArray are smis. Use unsigned comparison to handle
+ // negative keys.
__ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
key.reg());
- deferred->Branch(less_equal);
+ deferred->Branch(below_equal);
// Get the elements array from the receiver and check that it
// is a flat array (not a dictionary).
__ movq(tmp.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+
+ // Check whether it is possible to omit the write barrier. If the
+ // elements array is in new space or the value written is a smi we can
+ // safely update the elements array without updating the remembered set.
+ Label in_new_space;
+ __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
+ if (!value_is_constant) {
+ __ JumpIfNotSmi(value.reg(), deferred->entry_label());
+ }
+
+ __ bind(&in_new_space);
// Bind the deferred code patch site to be able to locate the
// fixed array map comparison. When debugging, we patch this
// comparison to always fail so that we will hit the IC call
@@ -7095,8 +7515,8 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
// String value => false iff empty.
__ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
__ j(above_equal, &not_string);
- __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
- __ testl(rdx, rdx);
+ __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rdx);
__ j(zero, &false_result);
__ jmp(&true_result);
@@ -7207,6 +7627,213 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// End of CodeGenerator implementation.
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kPointerSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+ __ bind(&loaded);
+ // ST[0] == double value
+ // rbx = bits of double value.
+ // rdx = also bits of double value.
+ // Compute hash (h is 32 bits, bits are 64):
+ // h = h0 = bits ^ (bits >> 32);
+ // h ^= h >> 16;
+ // h ^= h >> 8;
+ // h = h & (cacheSize - 1);
+ // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
+ __ sar(rdx, Immediate(32));
+ __ xorl(rdx, rbx);
+ __ movl(rcx, rdx);
+ __ movl(rax, rdx);
+ __ movl(rdi, rdx);
+ __ sarl(rdx, Immediate(8));
+ __ sarl(rcx, Immediate(16));
+ __ sarl(rax, Immediate(24));
+ __ xorl(rcx, rdx);
+ __ xorl(rax, rdi);
+ __ xorl(rcx, rax);
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+ // ST[0] == double value.
+ // rbx = bits of double value.
+ // rcx = TranscendentalCache::hash(double value).
+ __ movq(rax, ExternalReference::transcendental_cache_array_address());
+ // rax points to cache array.
+ __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // rax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ testq(rax, rax);
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { // NOLINT - doesn't like a single brace on a line.
+ TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ // Two uint32_t's and a pointer per element.
+ CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
+ CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
+ CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+ }
+#endif
+ // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
+ __ addl(rcx, rcx);
+ __ lea(rcx, Operand(rax, rcx, times_8, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmpq(rbx, Operand(rcx, 0));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ movq(rax, Operand(rcx, 2 * kIntSize));
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ Label nan_result;
+ GenerateOperation(masm, &nan_result);
+ __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+
+ __ bind(&nan_result);
+ __ fstp(0); // Remove argument from FPU stack.
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ ret(kPointerSize);
+}
+
+
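The hash the stub computes can be written directly in C++. A sketch matching the comment in Generate above, assuming a power-of-two kCacheSize as the ASSERT requires (the size constant is illustrative):

    #include <cstdint>

    const int kCacheSize = 512;  // assumed; must be a power of two

    // Fold the 64 double bits to 32, then mix the bytes before masking
    // down to a cache index.
    int CacheHash(uint64_t bits) {
      uint32_t h =
          static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
      h ^= h >> 16;  // after both steps this equals
      h ^= h >> 8;   // h ^ (h >> 8) ^ (h >> 16) ^ (h >> 24)
      return static_cast<int>(h & (kCacheSize - 1));
    }
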
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
+ Label* on_nan_result) {
+ // Registers:
+ // rbx: Bits of input double. Must be preserved.
+ // rcx: Pointer to cache entry. Must be preserved.
+ // st(0): Input double
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If the argument is outside the range -2^63..2^63, fsin/fcos don't
+ // work, so we must reduce it to the appropriate range.
+ __ movq(rdi, rbx);
+ // Move exponent and sign bits to low bits.
+ __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+ // Remove sign bit.
+ __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+ int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
+ __ cmpl(rdi, Immediate(supported_exponent_limit));
+ __ j(below, &in_range);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmpl(rdi, Immediate(0x7ff));
+ __ j(equal, on_nan_result);
+
+ // Use fprem1 to restrict the argument to the range +/-2*PI.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ // FPU Stack: input % 2*pi, 2*pi,
+ __ fstp(0);
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
+
+
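The range check above works on the raw bits of the double: shifting out the 52 mantissa bits leaves the sign and the 11-bit biased exponent, and masking off the sign leaves just the exponent. The same test in C++, with constants assumed to match IEEE 754 doubles and the HeapNumber constants:

    #include <cstdint>

    const int kMantissaBits = 52;    // mirrors HeapNumber::kMantissaBits
    const int kExponentBias = 1023;  // mirrors HeapNumber::kExponentBias

    // Returns true when |x| may be >= 2^63 and range reduction is needed;
    // an all-ones exponent flags NaN or infinity.
    bool NeedsRangeReduction(uint64_t bits, bool* is_nan_or_inf) {
      uint32_t exponent =
          static_cast<uint32_t>(bits >> kMantissaBits) & ((1 << 11) - 1);
      *is_nan_or_inf = (exponent == 0x7ff);
      // |x| >= 2^63 exactly when the biased exponent reaches 63 + bias.
      return exponent >= 63 + kExponentBias;
    }
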
// Get the integer part of a heap number. Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the
@@ -7496,17 +8123,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
__ j(NegateCondition(is_string), &runtime);
// Get the length of the string to rbx.
- __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- // rbx: Length of subject string
+ // rbx: Length of subject string as smi
// rcx: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the third argument is a positive smi less than the string
// length. A negative value will be greater (unsigned comparison).
__ movq(rax, Operand(rsp, kPreviousIndexOffset));
- __ SmiToInteger32(rax, rax);
- __ cmpl(rax, rbx);
- __ j(above, &runtime);
+ __ JumpIfNotSmi(rax, &runtime);
+ __ SmiCompare(rax, rbx);
+ __ j(above_equal, &runtime);
// rcx: RegExp data (FixedArray)
// rdx: Number of capture registers
@@ -7659,12 +8286,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 3: Start of string data
Label setup_two_byte, setup_rest;
__ testb(rdi, rdi);
- __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
__ j(zero, &setup_two_byte);
+ __ SmiToInteger32(rdi, rdi);
__ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
__ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
__ jmp(&setup_rest);
__ bind(&setup_two_byte);
+ __ SmiToInteger32(rdi, rdi);
__ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
__ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
@@ -7876,6 +8505,12 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ masm->RecordWriteHelper(object_, addr_, scratch_);
+ masm->ret(0);
+}
+
+
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -7883,14 +8518,15 @@ static int NegativeComparisonResult(Condition cc) {
return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
+
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
-
+ // The compare stub returns a positive, negative, or zero 64-bit integer
+ // value in rax, corresponding to the result of comparing the two inputs.
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
+ // Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
__ cmpq(rax, rdx);
@@ -9395,12 +10031,10 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_floats;
// rax: y
// rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
- __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
- }
+ if (static_operands_type_.IsNumber() && FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
+ __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
} else {
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
}
@@ -9797,6 +10431,146 @@ const char* CompareStub::GetName() {
}
+void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
+ Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_smi,
+ Label* index_out_of_range,
+ Label* slow_case) {
+ Label not_a_flat_string;
+ Label try_again_with_new_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object, receiver_not_string);
+
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ testb(result, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index, index_not_smi);
+
+ // Check for index out of range.
+ __ SmiCompare(index, FieldOperand(object, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range);
+
+ __ bind(&try_again_with_new_string);
+ // ----------- S t a t e -------------
+ // -- object : string to access
+ // -- result : instance type of the string
+ // -- scratch : non-negative index < length
+ // -----------------------------------
+
+ // We need special handling for non-flat strings.
+ ASSERT_EQ(0, kSeqStringTag);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &not_a_flat_string);
+
+ // Put untagged index into scratch register.
+ __ SmiToInteger32(scratch, index);
+
+ // Check for 1-byte or 2-byte string.
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ __ movzxwl(result, FieldOperand(object,
+ scratch,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // Handle non-flat strings.
+ __ bind(&not_a_flat_string);
+ __ and_(result, Immediate(kStringRepresentationMask));
+ __ cmpb(result, Immediate(kConsStringTag));
+ __ j(not_equal, slow_case);
+
+ // ConsString.
+ // Check that the right-hand side is the empty string (i.e. that this is
+ // really a flat string in a cons string). If that is not the case we would
+ // rather go to the runtime system now to flatten the string.
+ __ movq(result, FieldOperand(object, ConsString::kSecondOffset));
+ __ CompareRoot(result, Heap::kEmptyStringRootIndex);
+ __ j(not_equal, slow_case);
+ // Get the first of the two strings and load its instance type.
+ __ movq(object, FieldOperand(object, ConsString::kFirstOffset));
+ __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ jmp(&try_again_with_new_string);
+
+ // ASCII string.
+ __ bind(&ascii_string);
+ // Load the byte into the result register.
+ __ movzxbl(result, FieldOperand(object,
+ scratch,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ __ Integer32ToSmi(result, result);
+}
+
+
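Stripped of register bookkeeping, the fast path above is a small loop over string representations: sequential strings are indexed directly, and a cons string whose second half is empty is unwrapped into its first half; everything else bails out to the runtime. As a sketch with simplified types (not V8's object model):

    #include <cstdint>

    struct Str {
      enum Kind { SEQ_ASCII, SEQ_TWO_BYTE, CONS, OTHER } kind;
      const uint8_t* ascii_chars;
      const uint16_t* two_byte_chars;
      Str* first;            // cons only
      bool second_is_empty;  // cons only
    };

    // 'bailed_out' stands in for the slow_case jump in the generated code.
    int FastCharCodeAt(Str* s, int index, bool* bailed_out) {
      for (;;) {
        switch (s->kind) {
          case Str::SEQ_ASCII:    return s->ascii_chars[index];
          case Str::SEQ_TWO_BYTE: return s->two_byte_chars[index];
          case Str::CONS:
            // Only a cons with an empty second part is really flat.
            if (!s->second_is_empty) { *bailed_out = true; return -1; }
            s = s->first;  // try again with the unwrapped string
            continue;
          default:
            *bailed_out = true;
            return -1;
        }
      }
    }
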
+void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
+ Register code,
+ Register result,
+ Register scratch,
+ InvokeFlag flag) {
+ ASSERT(!code.is(result));
+
+ Label slow_case;
+ Label exit;
+
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ __ JumpIfNotSmi(code, &slow_case);
+ __ SmiToInteger32(scratch, code);
+ __ cmpl(scratch, Immediate(String::kMaxAsciiCharCode));
+ __ j(above, &slow_case);
+
+ __ Move(result, Factory::single_character_string_cache());
+ __ movq(result, FieldOperand(result,
+ scratch,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+ __ jmp(&exit);
+
+ __ bind(&slow_case);
+ if (flag == CALL_FUNCTION) {
+ __ push(code);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result.is(rax)) {
+ __ movq(result, rax);
+ }
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ ASSERT(result.is(rax));
+ __ pop(rax); // Save return address.
+ __ push(code);
+ __ push(rax); // Restore return address.
+ __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
+ }
+
+ __ bind(&exit);
+ if (flag == JUMP_FUNCTION) {
+ ASSERT(result.is(rax));
+ __ ret(0);
+ }
+}
+
+
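GenerateCharFromCode follows the shape of the fast case of Heap::LookupSingleCharacterStringFromCode: for a smi char code up to the one-byte maximum it indexes a premade cache of single-character strings and only calls the runtime on a miss. Roughly, as a sketch with assumed constants and hypothetical stand-in declarations:

    // Assumed limit for the single-character cache.
    const int kMaxAsciiCharCode = 127;

    struct Obj;                                    // stand-in tagged value
    extern Obj* single_character_string_cache[];   // hypothetical cache
    extern Obj* undefined_value;
    Obj* RuntimeCharFromCode(int code);            // the slow path

    Obj* CharFromCode(int code, bool code_is_smi) {
      if (code_is_smi && code >= 0 && code <= kMaxAsciiCharCode) {
        Obj* result = single_character_string_cache[code];
        if (result != undefined_value) return result;  // cache hit
      }
      return RuntimeCharFromCode(code);  // slow case: runtime call
    }
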
void StringAddStub::Generate(MacroAssembler* masm) {
Label string_add_runtime;
@@ -9824,15 +10598,15 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdx: second string
// Check if either of the strings are empty. In that case return the other.
Label second_not_zero_length, both_not_zero_length;
- __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ testl(rcx, rcx);
+ __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
+ __ SmiTest(rcx);
__ j(not_zero, &second_not_zero_length);
// Second string is empty, result is first string which is already in rax.
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
- __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
- __ testl(rbx, rbx);
+ __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rbx);
__ j(not_zero, &both_not_zero_length);
// First string is empty, result is second string which is in rdx.
__ movq(rax, rdx);
@@ -9860,10 +10634,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
// Look at the length of the result of adding the two strings.
- __ addl(rbx, rcx);
+ ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
+ __ SmiAdd(rbx, rbx, rcx, NULL);
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
- __ cmpl(rbx, Immediate(2));
+ __ SmiCompare(rbx, Smi::FromInt(2));
__ j(not_equal, &longer_than_two);
// Check that both strings are non-external ascii strings.
@@ -9877,8 +10652,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
- GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, r14, r12, rdi, r15,
- &make_two_character_string);
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@@ -9888,11 +10663,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
+ __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
__ j(below, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ cmpl(rbx, Immediate(String::kMaxLength));
+ __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
__ j(above, &string_add_runtime);
// If result is not supposed to be flat, allocate a cons string object. If
@@ -9912,7 +10687,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
- __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+ __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
__ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
@@ -9928,11 +10703,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Handle creating a flat result. First check that both strings are not
// external strings.
// rax: first string
- // ebx: length of resulting flat string
+ // rbx: length of resulting flat string as smi
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
__ bind(&string_add_flat_result);
+ __ SmiToInteger32(rbx, rbx);
__ movl(rcx, r8);
__ and_(rcx, Immediate(kStringRepresentationMask));
__ cmpl(rcx, Immediate(kExternalStringTag));
@@ -9962,22 +10738,24 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Locate first character of result.
__ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument
- __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ SmiToInteger32(rdi, rdi);
__ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
// rcx: first character of result
// rdx: second string
// rdi: length of first argument
- GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+ StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
// Locate first character of second argument.
- __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ SmiToInteger32(rdi, rdi);
__ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
// rdx: first char of second argument
// rdi: length of second argument
- GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+ StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
__ movq(rax, rbx);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@@ -9999,22 +10777,24 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Locate first character of result.
__ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument.
- __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ SmiToInteger32(rdi, rdi);
__ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
// rcx: first character of result
// rdx: second argument
// rdi: length of first argument
- GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+ StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
// Locate first character of second argument.
- __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ SmiToInteger32(rdi, rdi);
__ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
// rdx: first char of second argument
// rdi: length of second argument
- GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+ StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
__ movq(rax, rbx);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@@ -10025,11 +10805,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
-void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@@ -10050,11 +10830,11 @@ void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
}
-void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
// Copy characters using rep movs of doublewords. Align destination on 4 byte
// boundary before starting rep movs. Copy remaining characters after running
// rep movs.
@@ -10105,14 +10885,14 @@ void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&done);
}
-void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found) {
// Register scratch3 is the general scratch register in this function.
Register scratch = scratch3;
@@ -10190,7 +10970,8 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ j(equal, not_found);
// If length is not 2 the string is not a candidate.
- __ cmpl(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
+ __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
+ Smi::FromInt(2));
__ j(not_equal, &next_probe[i]);
// We use kScratchRegister as a temporary register in assumption that
@@ -10223,10 +11004,10 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
}
-void StringStubBase::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
// hash = character + (character << 10);
__ movl(hash, character);
__ shll(hash, Immediate(10));
@@ -10238,10 +11019,10 @@ void StringStubBase::GenerateHashInit(MacroAssembler* masm,
}
-void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
// hash += character;
__ addl(hash, character);
// hash += hash << 10;
@@ -10255,9 +11036,9 @@ void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
}
-void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
// hash += hash << 3;
__ movl(scratch, hash);
__ shll(scratch, Immediate(3));
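Taken together, GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash implement a shift-add string hash in the one-at-a-time style; the init step is the add-character step applied to a zero hash. A C++ sketch of the whole pipeline (the finalization steps beyond the << 3 shown above are the usual ones for this hash family and are assumed here, not taken from the truncated hunks):

    #include <cstdint>

    uint32_t HashString(const uint8_t* chars, int length) {
      uint32_t hash = 0;
      for (int i = 0; i < length; i++) {  // HashInit + HashAddCharacter
        hash += chars[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;   // GenerateHashGetHash
      hash ^= hash >> 11;  // assumed finalization step
      hash += hash << 15;  // assumed finalization step
      return hash;
    }
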
@@ -10334,8 +11115,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Try to lookup two character string in symbol table.
Label make_two_character_string;
- GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, rax, rdx, rdi, r14,
- &make_two_character_string);
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
__ ret(3 * kPointerSize);
__ bind(&make_two_character_string);
@@ -10376,7 +11157,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rdx: original value of rsi
// rdi: first character of result
// rsi: character of sub string start
- GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+ StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
__ movq(rsi, rdx); // Restore rsi.
__ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(kArgumentsSize);
@@ -10411,7 +11192,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rdx: original value of rsi
// rdi: first character of result
// rsi: character of sub string start
- GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+ StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
__ movq(rsi, rdx); // Restore rsi.
__ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(kArgumentsSize);
@@ -10434,9 +11215,12 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
ASSERT(String::kMaxLength < 0x7fffffff);
// Find minimum length and length difference.
- __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movl(scratch4, scratch1);
- __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
+ __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movq(scratch4, scratch1);
+ __ SmiSub(scratch4,
+ scratch4,
+ FieldOperand(right, String::kLengthOffset),
+ NULL);
// Register scratch4 now holds left.length - right.length.
const Register length_difference = scratch4;
Label left_shorter;
@@ -10444,16 +11228,18 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// The right string isn't longer than the left one.
// Get the right string's length by subtracting the (non-negative) difference
// from the left string's length.
- __ subl(scratch1, length_difference);
+ __ SmiSub(scratch1, scratch1, length_difference, NULL);
__ bind(&left_shorter);
// Register scratch1 now holds Min(left.length, right.length).
const Register min_length = scratch1;
Label compare_lengths;
// If min-length is zero, go directly to comparing lengths.
- __ testl(min_length, min_length);
+ __ SmiTest(min_length);
__ j(zero, &compare_lengths);
+ __ SmiToInteger32(min_length, min_length);
+
// Registers scratch2 and scratch3 are free.
Label result_not_equal;
Label loop;
@@ -10484,7 +11270,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Completed loop without finding different characters.
// Compare lengths (precomputed).
__ bind(&compare_lengths);
- __ testl(length_difference, length_difference);
+ __ SmiTest(length_difference);
__ j(not_zero, &result_not_equal);
// Result is EQUAL.
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index a894a5fdd..5d9861ba6 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -457,10 +457,8 @@ class CodeGenerator: public AstVisitor {
// Generate code that computes a shortcutting logical operation.
void GenerateLogicalBooleanOperation(BinaryOperation* node);
- void GenericBinaryOperation(
- Token::Value op,
- StaticType* type,
- OverwriteMode overwrite_mode);
+ void GenericBinaryOperation(BinaryOperation* expr,
+ OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
@@ -469,17 +467,16 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- Result ConstantSmiBinaryOperation(Token::Value op,
+ Result ConstantSmiBinaryOperation(BinaryOperation* expr,
Result* operand,
Handle<Object> constant_operand,
- StaticType* type,
bool reversed,
OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- Result LikelySmiBinaryOperation(Token::Value op,
+ Result LikelySmiBinaryOperation(BinaryOperation* expr,
Result* left,
Result* right,
OverwriteMode overwrite_mode);
@@ -594,6 +591,11 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast swapping of elements. Takes three expressions, the object and two
+ // indices. This should only be used if the indices are known to be
+ // non-negative and within bounds of the elements array at the call site.
+ void GenerateSwapElements(ZoneList<Expression*>* args);
+
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
@@ -672,6 +674,22 @@ class CodeGenerator: public AstVisitor {
};
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
+};
+
+
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
@@ -816,54 +834,88 @@ class GenericBinaryOpStub: public CodeStub {
}
};
-
-class StringStubBase: public CodeStub {
+class StringHelper : public AllStatic {
public:
+ // Generates fast code for getting a char code out of a string
+ // object at the given index. May bail out for four reasons (in the
+ // listed order):
+ // * Receiver is not a string (receiver_not_string label).
+ // * Index is not a smi (index_not_smi label).
+ // * Index is out of range (index_out_of_range).
+ // * Some other reason (slow_case label). In this case it's
+ // guaranteed that the above conditions are not violated,
+ // i.e. it's safe to assume the receiver is a string and the
+ // index is a non-negative smi < length.
+ // When successful, object, index, and scratch are clobbered.
+ // Otherwise, scratch and result are clobbered.
+ static void GenerateFastCharCodeAt(MacroAssembler* masm,
+ Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_smi,
+ Label* index_out_of_range,
+ Label* slow_case);
+
+ // Generates code for creating a one-char string from the given char
+ // code. May do a runtime call, so any register can be clobbered
+ // and, if the given invoke flag specifies a call, an internal frame
+ // is required. In tail call mode the result must be in the rax register.
+ static void GenerateCharFromCode(MacroAssembler* masm,
+ Register code,
+ Register result,
+ Register scratch,
+ InvokeFlag flag);
+
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
// overhead. Copying of overlapping regions is not supported.
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
// Generate code for copying characters using the rep movs instruction.
// Copies rcx characters from rsi to rdi. Copying of overlapping regions is
// not supported.
- void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
+ static void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be rdi.
+ Register src, // Must be rsi.
+ Register count, // Must be rcx.
+ bool ascii);
// Probe the symbol table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the symbol table. If the
// string is found the code falls through with the string in register rax.
- void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found);
// Generate string hash.
- void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
@@ -874,7 +926,7 @@ enum StringAddFlags {
};
-class StringAddStub: public StringStubBase {
+class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -891,7 +943,7 @@ class StringAddStub: public StringStubBase {
};
-class SubStringStub: public StringStubBase {
+class SubStringStub: public CodeStub {
public:
SubStringStub() {}
@@ -962,6 +1014,42 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register addr, Register scratch)
+ : object_(object), addr_(addr), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register addr_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+ object_.code(), addr_.code(), scratch_.code());
+ }
+#endif
+
+ // Minor key encoding in 12 bits, 4 bits for each of the three
+ // registers (object, address and scratch): OOOOAAAASSSS.
+ class ScratchBits : public BitField<uint32_t, 0, 4> {};
+ class AddressBits : public BitField<uint32_t, 4, 4> {};
+ class ObjectBits : public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ AddressBits::encode(addr_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
+
+
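The minor key packs the three 4-bit register codes into the OOOOAAAASSSS layout described above. A worked example (the register codes are illustrative):

    #include <cstdint>

    // With object = code 2, address = code 1, scratch = code 3
    // the key is (2 << 8) | (1 << 4) | 3 == 0x213.
    uint32_t MinorKeyFor(uint32_t object_code,
                         uint32_t address_code,
                         uint32_t scratch_code) {
      return (object_code << 8) | (address_code << 4) | scratch_code;
    }
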
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index c44a3fcf2..bd912cdd2 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -996,23 +996,44 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
if (operand_size_ == 0x66) {
// 0x66 0x0F prefix.
int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x6E) {
- AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else {
- const char* mnemonic = "?";
- if (opcode == 0x57) {
- mnemonic = "xorpd";
- } else if (opcode == 0x2E) {
- mnemonic = "comisd";
- } else if (opcode == 0x2F) {
- mnemonic = "ucomisd";
+ if (opcode == 0x3A) {
+ byte third_byte = *current;
+ current = data + 3;
+ if (third_byte == 0x17) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("extractps "); // reg/m32, xmm, imm8
+ current += PrintRightOperand(current);
+ AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
+ current += 1;
} else {
UnimplementedInstruction();
}
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ } else {
+ get_modrm(*current, &mod, &regop, &rm);
+ if (opcode == 0x6E) {
+ AppendToBuffer("mov%c %s,",
+ rex_w() ? 'q' : 'd',
+ NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ } else if (opcode == 0x7E) {
+ AppendToBuffer("mov%c %s,",
+ rex_w() ? 'q' : 'd',
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else {
+ const char* mnemonic = "?";
+ if (opcode == 0x57) {
+ mnemonic = "xorpd";
+ } else if (opcode == 0x2E) {
+ mnemonic = "comisd";
+ } else if (opcode == 0x2F) {
+ mnemonic = "ucomisd";
+ } else {
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ }
}
} else if (group_1_prefix_ == 0xF2) {
// Beginning of instructions with prefix 0xF2.
@@ -1035,7 +1056,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightOperand(current);
- } else if ((opcode & 0xF8) == 0x58) {
+ } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
@@ -1126,6 +1147,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "cvtsi2sd";
case 0x31:
return "rdtsc";
+ case 0x51: // F2 prefix.
+ return "sqrtsd";
case 0x58: // F2 prefix.
return "addsd";
case 0x59: // F2 prefix.
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 7bfccd5e9..88fcfd140 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -523,30 +523,69 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
+ Label miss;
+ Label index_not_smi;
+ Label index_out_of_range;
+ Label slow_char_code;
+ Label got_char_code;
- Label miss, index_ok;
-
- // Check that the receiver isn't a smi.
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rcx, &miss);
+ Register receiver = rdx;
+ Register index = rax;
+ Register code = rbx;
+ Register scratch = rcx;
+
+ __ movq(index, Operand(rsp, 1 * kPointerSize));
+ __ movq(receiver, Operand(rsp, 2 * kPointerSize));
+
+ StringHelper::GenerateFastCharCodeAt(masm,
+ receiver,
+ index,
+ scratch,
+ code,
+ &miss, // When not a string.
+ &index_not_smi,
+ &index_out_of_range,
+ &slow_char_code);
+ // If we didn't bail out, code register contains smi tagged char
+ // code.
+ __ bind(&got_char_code);
+ StringHelper::GenerateCharFromCode(masm, code, rax, scratch, JUMP_FUNCTION);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from char from code tail call");
+#endif
+
+ // Check if key is a heap number.
+ __ bind(&index_not_smi);
+ __ CompareRoot(FieldOperand(index, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &miss);
- // Check that the receiver is a string.
- Condition is_string = masm->IsObjectStringType(rcx, rax, rbx);
- __ j(NegateCondition(is_string), &miss);
+ // Push receiver and key on the stack (now that we know they are a
+ // string and a number), and call runtime.
+ __ bind(&slow_char_code);
+ __ EnterInternalFrame();
+ __ push(receiver);
+ __ push(index);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ ASSERT(!code.is(rax));
+ __ movq(code, rax);
+ __ LeaveInternalFrame();
- // Check if key is a smi or a heap number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfSmi(rax, &index_ok);
- __ CheckMap(rax, Factory::heap_number_map(), &miss, false);
+  // Check if the runtime call returned a NaN char code. If so, return
+  // undefined. Otherwise, we can continue.
+ if (FLAG_debug_code) {
+ ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(code, &got_char_code);
+ __ CompareRoot(FieldOperand(code, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ Assert(equal, "StringCharCodeAt must return smi or heap number");
+ }
+ __ CompareRoot(code, Heap::kNanValueRootIndex);
+ __ j(not_equal, &got_char_code);
- __ bind(&index_ok);
- // Duplicate receiver and key since they are expected on the stack after
- // the KeyedLoadIC call.
- __ pop(rbx); // return address
- __ push(rcx); // receiver
- __ push(rax); // key
- __ push(rbx); // return address
- __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
+ __ bind(&index_out_of_range);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ ret(0);
__ bind(&miss);
GenerateMiss(masm);
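
The rewritten KeyedLoadIC::GenerateString flow reads more easily in scalar
form: smi indices take the fast path, heap-number indices go through the
runtime, and out-of-range indices or NaN char codes yield undefined. A
hedged C++ sketch (the function and the -1 sentinel are illustrative only):

    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    static const int kUndefinedSentinel = -1;  // stand-in for undefined

    int CharCodeAtSketch(const char* str, double index) {
      size_t length = strlen(str);
      // Fast path: the index is a smi (an integer in the tagged range).
      if (index == static_cast<int32_t>(index)) {
        int32_t i = static_cast<int32_t>(index);
        if (i < 0 || static_cast<size_t>(i) >= length)
          return kUndefinedSentinel;                // index_out_of_range
        return static_cast<unsigned char>(str[i]);  // got_char_code
      }
      // Slow path: a heap-number index goes to the runtime, which answers
      // NaN for anything that is not a valid index (slow_char_code).
      double code = std::numeric_limits<double>::quiet_NaN();
      if (index >= 0 && index < static_cast<double>(length) &&
          index == std::floor(index)) {
        code = static_cast<unsigned char>(str[static_cast<size_t>(index)]);
      }
      if (std::isnan(code)) return kUndefinedSentinel;
      return static_cast<int>(code);                // got_char_code
    }
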
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e9aa7a253..a1976ec3f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -72,37 +72,46 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
}
-static void RecordWriteHelper(MacroAssembler* masm,
- Register object,
- Register addr,
- Register scratch) {
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch) {
+ if (FLAG_debug_code) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, not_equal, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
+
Label fast;
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
ASSERT(is_int32(~Page::kPageAlignmentMask));
- masm->and_(object,
- Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+ and_(object,
+ Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
Register page_start = object;
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
- masm->subq(addr, page_start);
- masm->shr(addr, Immediate(kPointerSizeLog2));
+ subq(addr, page_start);
+ shr(addr, Immediate(kPointerSizeLog2));
Register pointer_offset = addr;
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
- masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
- masm->j(less, &fast);
+ cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
+ j(below, &fast);
+
+ // We have a large object containing pointers. It must be a FixedArray.
// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.
// Load the array length into 'scratch'.
- masm->movl(scratch,
- Operand(page_start,
- Page::kObjectStartOffset + FixedArray::kLengthOffset));
+ movl(scratch,
+ Operand(page_start,
+ Page::kObjectStartOffset + FixedArray::kLengthOffset));
Register array_length = scratch;
// Extra remembered set starts right after the large object (a FixedArray), at
@@ -111,59 +120,17 @@ static void RecordWriteHelper(MacroAssembler* masm,
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
- masm->lea(page_start,
- Operand(page_start, array_length, times_pointer_size,
- Page::kObjectStartOffset + FixedArray::kHeaderSize
- - Page::kRSetEndOffset));
+ lea(page_start,
+ Operand(page_start, array_length, times_pointer_size,
+ Page::kObjectStartOffset + FixedArray::kHeaderSize
+ - Page::kRSetEndOffset));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
- masm->bind(&fast);
- masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
-}
-
-
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register addr, Register scratch)
- : object_(object), addr_(addr), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register addr_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
- object_.code(), addr_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits of three registers (object, address and
- // scratch) OOOOAAAASSSS.
- class ScratchBits : public BitField<uint32_t, 0, 4> {};
- class AddressBits : public BitField<uint32_t, 4, 4> {};
- class ObjectBits : public BitField<uint32_t, 8, 4> {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- AddressBits::encode(addr_.code()) |
- ScratchBits::encode(scratch_.code());
- }
-};
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- RecordWriteHelper(masm, object_, addr_, scratch_);
- masm->ret(0);
+ bind(&fast);
+ bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}
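
The fast path above is plain address arithmetic: mask the pointer down to
its page, turn the byte offset into a word index, and set that bit in the
remembered set at the start of the page. A minimal sketch with the page
constants assumed (the real values live in src/spaces.h):

    #include <stdint.h>

    const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // assumed 8K pages
    const int kPointerSizeLog2 = 3;                      // 8-byte pointers
    const int kRSetOffset = 0;                           // assumed offset

    // Equivalent of: and page mask; sub; shr; bts [page + RSet], index.
    void RecordWriteSketch(uintptr_t addr) {
      uintptr_t page_start = addr & ~kPageAlignmentMask;
      uintptr_t pointer_offset = (addr - page_start) >> kPointerSizeLog2;
      uint8_t* rset = reinterpret_cast<uint8_t*>(page_start + kRSetOffset);
      rset[pointer_offset >> 3] |=
          static_cast<uint8_t>(1u << (pointer_offset & 7));
    }
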
@@ -184,7 +151,7 @@ void MacroAssembler::RecordWrite(Register object,
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
- // for the remembered set bits.
+ // for the remembered set bits).
Label done;
JumpIfSmi(value, &done);
@@ -219,12 +186,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
- movq(scratch, object);
- ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
- and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
- movq(kScratchRegister, ExternalReference::new_space_start());
- cmpq(scratch, kScratchRegister);
- j(equal, &done);
+ InNewSpace(object, scratch, equal, &done);
// The offset is relative to a tagged or untagged HeapObject pointer,
// so either offset or offset + kHeapObjectTag must be a
@@ -237,11 +199,11 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
- // Compute the bit offset in the remembered set, leave it in 'value'.
+ // Compute the bit offset in the remembered set, leave it in 'scratch'.
lea(scratch, Operand(object, offset));
ASSERT(is_int32(Page::kPageAlignmentMask));
and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
- shr(scratch, Immediate(kObjectAlignmentBits));
+ shr(scratch, Immediate(kPointerSizeLog2));
// Compute the page address from the heap object pointer, leave it in
// 'object' (immediate value is sign extended).
@@ -260,15 +222,15 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
- lea(dst, Operand(object,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ lea(dst, FieldOperand(object,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
- RecordWriteHelper(this, object, dst, scratch);
+ RecordWriteHelper(object, dst, scratch);
} else {
RecordWriteStub stub(object, dst, scratch);
CallStub(&stub);
@@ -287,6 +249,41 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch) {
+ if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if the code might get
+    // serialized. The mask isn't really an address. We load it as an
+    // external reference in case the size of the new space is different
+    // between the snapshot maker and the running system.
+ if (scratch.is(object)) {
+ movq(kScratchRegister, ExternalReference::new_space_mask());
+ and_(scratch, kScratchRegister);
+ } else {
+ movq(scratch, ExternalReference::new_space_mask());
+ and_(scratch, object);
+ }
+ movq(kScratchRegister, ExternalReference::new_space_start());
+ cmpq(scratch, kScratchRegister);
+ j(cc, branch);
+ } else {
+ ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ intptr_t new_space_start =
+ reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ if (scratch.is(object)) {
+ addq(scratch, kScratchRegister);
+ } else {
+ lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ }
+ and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ j(cc, branch);
+ }
+}
+
+
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
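
Both arms of InNewSpace evaluate the same predicate; the generated-code arm
only folds the comparison into the mask by adding the negated start address
first, so a single 64-bit immediate suffices. As arithmetic, assuming a
power-of-two new space aligned on its own size:

    #include <stdint.h>

    bool InNewSpaceSketch(uintptr_t object, uintptr_t new_space_start,
                          uintptr_t new_space_mask) {
      // Serializer arm:     (object & mask) == start
      // Generated-code arm: ((object + -start) & mask) == 0
      // The two agree when 'start' is aligned to the space size.
      return ((object - new_space_start) & new_space_mask) == 0;
    }
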
@@ -599,6 +596,11 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
}
+void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+ cmpq(dst, src);
+}
+
+
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
cmpq(dst, src);
}
@@ -734,7 +736,17 @@ void MacroAssembler::SmiAdd(Register dst,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
+ if (on_not_smi_result == NULL) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible.
+ if (dst.is(src1)) {
+ addq(dst, src2);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ }
+ Assert(no_overflow, "Smi addition onverflow");
+ } else if (dst.is(src1)) {
addq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
@@ -781,6 +793,35 @@ void MacroAssembler::SmiSub(Register dst,
}
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Operand const& src2,
+ Label* on_not_smi_result) {
+ if (on_not_smi_result == NULL) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ }
+ Assert(no_overflow, "Smi substraction onverflow");
+ } else if (dst.is(src1)) {
+ subq(dst, src2);
+ Label smi_result;
+ j(no_overflow, &smi_result);
+ // Restore src1.
+ addq(src1, src2);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
void MacroAssembler::SmiMul(Register dst,
Register src1,
Register src2,
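
The new NULL-label variants of SmiAdd and SmiSub skip the overflow branch
entirely, asserting only in debug builds; the checked variants must restore
src1 before bailing out because dst may alias it. The checked behaviour,
sketched in scalar C++ with a compiler builtin standing in for the flags:

    #include <stdint.h>

    // Returns false (the on_not_smi_result path) on overflow, leaving the
    // destination untouched, mirroring the restore of src1 above.
    bool SmiAddSketch(int64_t* dst, int64_t src1, int64_t src2) {
      int64_t result;
      if (__builtin_add_overflow(src1, src2, &result)) return false;
      *dst = result;
      return true;
    }
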
@@ -2172,6 +2213,7 @@ Register MacroAssembler::CheckMaps(JSObject* object,
JSObject* holder,
Register holder_reg,
Register scratch,
+ int save_at_depth,
Label* miss) {
// Make sure there's no overlap between scratch and the other
// registers.
@@ -2181,7 +2223,11 @@ Register MacroAssembler::CheckMaps(JSObject* object,
// iteration, reg is an alias for object_reg, on later iterations,
// it is an alias for holder_reg.
Register reg = object_reg;
- int depth = 1;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ movq(Operand(rsp, kPointerSize), object_reg);
+ }
// Check the maps in the prototype chain.
// Traverse the prototype chain from the object and do map checks.
@@ -2231,6 +2277,10 @@ Register MacroAssembler::CheckMaps(JSObject* object,
Move(reg, Handle<JSObject>(prototype));
}
+ if (save_at_depth == depth) {
+ movq(Operand(rsp, kPointerSize), reg);
+ }
+
// Go to the next object in the prototype chain.
object = prototype;
}
@@ -2240,7 +2290,7 @@ Register MacroAssembler::CheckMaps(JSObject* object,
j(not_equal, miss);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth));
+ LOG(IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
@@ -2326,7 +2376,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
// No use of scratch if allocation top is provided.
- ASSERT(scratch.is(no_reg));
+ ASSERT(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
movq(kScratchRegister, new_space_allocation_top);
@@ -2336,14 +2386,17 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
return;
}
- // Move address of new object to result. Use scratch register if available.
- if (scratch.is(no_reg)) {
- movq(kScratchRegister, new_space_allocation_top);
- movq(result, Operand(kScratchRegister, 0));
- } else {
+ // Move address of new object to result. Use scratch register if available,
+ // and keep address in scratch until call to UpdateAllocationTopHelper.
+ if (scratch.is_valid()) {
ASSERT(!scratch.is(result_end));
movq(scratch, new_space_allocation_top);
movq(result, Operand(scratch, 0));
+ } else if (result.is(rax)) {
+ load_rax(new_space_allocation_top);
+ } else {
+ movq(kScratchRegister, new_space_allocation_top);
+ movq(result, Operand(kScratchRegister, 0));
}
}
@@ -2364,11 +2417,11 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
store_rax(new_space_allocation_top);
} else {
// Register required - use scratch provided if available.
- if (scratch.is(no_reg)) {
+ if (scratch.is_valid()) {
+ movq(Operand(scratch, 0), result_end);
+ } else {
movq(kScratchRegister, new_space_allocation_top);
movq(Operand(kScratchRegister, 0), result_end);
- } else {
- movq(Operand(scratch, 0), result_end);
}
}
}
@@ -2388,16 +2441,29 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
- lea(result_end, Operand(result, object_size));
+
+ Register top_reg = result_end.is_valid() ? result_end : result;
+
+ if (top_reg.is(result)) {
+ addq(top_reg, Immediate(object_size));
+ } else {
+ lea(top_reg, Operand(result, object_size));
+ }
movq(kScratchRegister, new_space_allocation_limit);
- cmpq(result_end, Operand(kScratchRegister, 0));
+ cmpq(top_reg, Operand(kScratchRegister, 0));
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(top_reg, scratch);
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
+ if (top_reg.is(result)) {
+ if ((flags & TAG_OBJECT) != 0) {
+ subq(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ subq(result, Immediate(object_size));
+ }
+ } else if ((flags & TAG_OBJECT) != 0) {
+ // Tag the result if requested.
addq(result, Immediate(kHeapObjectTag));
}
}
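
AllocateInNewSpace is a bump-pointer allocator; the change above reuses
'result' as the top register when no valid 'result_end' is supplied and
subtracts the object size back out afterwards. The underlying protocol, as
a hedged sketch:

    #include <stdint.h>
    #include <stddef.h>

    const uintptr_t kHeapObjectTag = 1;  // assumed heap-object tag bit

    // Returns 0 when the limit would be crossed (the gc_required path).
    uintptr_t AllocateSketch(uintptr_t* top_addr, uintptr_t limit,
                             size_t object_size, bool tag_object) {
      uintptr_t result = *top_addr;
      uintptr_t new_top = result + object_size;
      if (new_top > limit) return 0;  // j(above, gc_required)
      *top_addr = new_top;            // UpdateAllocationTopHelper
      return tag_object ? result + kHeapObjectTag : result;
    }
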
@@ -2504,11 +2570,16 @@ void MacroAssembler::AllocateTwoByteString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
+ kObjectAlignmentMask;
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
+ lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ kHeaderAlignment));
and_(scratch1, Immediate(~kObjectAlignmentMask));
+ if (kHeaderAlignment > 0) {
+ subq(scratch1, Immediate(kHeaderAlignment));
+ }
// Allocate two byte string in new space.
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -2523,7 +2594,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- movl(FieldOperand(result, String::kLengthOffset), length);
+ Integer32ToSmi(scratch1, length);
+ movq(FieldOperand(result, String::kLengthOffset), scratch1);
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -2537,11 +2609,15 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
+ kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask));
+ addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
and_(scratch1, Immediate(~kObjectAlignmentMask));
+ if (kHeaderAlignment > 0) {
+ subq(scratch1, Immediate(kHeaderAlignment));
+ }
// Allocate ascii string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
@@ -2556,7 +2632,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- movl(FieldOperand(result, String::kLengthOffset), length);
+ Integer32ToSmi(scratch1, length);
+ movq(FieldOperand(result, String::kLengthOffset), scratch1);
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
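
The kHeaderAlignment term above only matters when the header size is not
itself object-aligned: the code rounds header-plus-characters up to the
alignment while keeping just the body size in scratch1, because
AllocateInNewSpace adds the header size separately. The arithmetic, with
the alignment mask assumed:

    #include <stddef.h>

    const size_t kObjectAlignmentMask = 7;  // assumed 8-byte alignment

    // scratch1 as computed above; char_size is 2 (two-byte) or 1 (ascii).
    size_t PaddedBodySize(size_t header_size, size_t length,
                          size_t char_size) {
      size_t header_alignment = header_size & kObjectAlignmentMask;
      return ((length * char_size + kObjectAlignmentMask + header_alignment)
              & ~kObjectAlignmentMask) - header_alignment;
    }
    // Invariant: header_size + PaddedBodySize(header_size, n, c) is a
    // multiple of kObjectAlignmentMask + 1.
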
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 10110d35e..32e1f4972 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -66,6 +66,21 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
+  // Set the remembered set bit for an address which points into an
+ // object. RecordWriteHelper only works if the object is not in new
+ // space.
+ void RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch);
+
+  // Check if object is in new space. The condition cc can be equal or
+  // not_equal. If cc is equal, the branch is taken when the object is in
+  // new space. The scratch register may be object itself, but it will be
+  // clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch);
+
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@@ -87,7 +102,6 @@ class MacroAssembler: public Assembler {
Register value,
Register scratch);
-
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@@ -197,6 +211,7 @@ class MacroAssembler: public Assembler {
// Simple comparison of smis.
void SmiCompare(Register dst, Register src);
void SmiCompare(Register dst, Smi* src);
+ void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src);
void SmiCompare(const Operand& dst, Smi* src);
// Sets sign and zero flags depending on value of smi in register.
@@ -287,7 +302,8 @@ class MacroAssembler: public Assembler {
Label* on_not_smi_result);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result. No testing on the result is done.
+  // result. No testing on the result is done. Sets the sign and zero
+  // flags based on the value of the resulting integer.
void SmiSubConstant(Register dst, Register src, Smi* constant);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
@@ -319,6 +335,11 @@ class MacroAssembler: public Assembler {
Register src2,
Label* on_not_smi_result);
+ void SmiSub(Register dst,
+ Register src1,
+ Operand const& src2,
+ Label* on_not_smi_result);
+
// Multiplies smi values and return the result as a smi,
// if possible.
// If dst is src1, then src1 will be destroyed, even if
@@ -529,9 +550,14 @@ class MacroAssembler: public Assembler {
// clobbered if it is the same as the holder register. The function
// returns a register containing the holder - either object_reg or
// holder_reg.
+ // The function can optionally (when save_at_depth !=
+ // kInvalidProtoDepth) save the object at the given depth by moving
+ // it to [rsp + kPointerSize].
Register CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
- Register scratch, Label* miss);
+ Register scratch,
+ int save_at_depth,
+ Label* miss);
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
@@ -765,10 +791,17 @@ class MacroAssembler: public Assembler {
void LeaveFrame(StackFrame::Type type);
// Allocation support helpers.
+ // Loads the top of new-space into the result register.
+  // If flags contains RESULT_CONTAINS_TOP then result already contains
+  // the top of new-space, and scratch is invalid.
+ // Otherwise the address of the new-space top is loaded into scratch (if
+ // scratch is valid), and the new-space top is loaded into result.
void LoadAllocationTopHelper(Register result,
Register result_end,
Register scratch,
AllocationFlags flags);
+ // Update allocation top with value in result_end register.
+ // If scratch is valid, it contains the address of the allocation top.
void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index a78f8b11b..7d4410cb0 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -327,8 +327,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
- __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
+ __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
// Check if the object is a JSValue wrapper.
@@ -340,8 +339,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// directly if it is.
__ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movl(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
+ __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
__ ret(0);
}
@@ -555,76 +553,234 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
};
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument in the internal frame of the caller
+ // -----------------------------------
+ __ movq(scratch, Operand(rsp, 0));
+ __ subq(rsp, Immediate(4 * kPointerSize));
+ __ movq(Operand(rsp, 0), scratch);
+ __ Move(scratch, Smi::FromInt(0));
+ __ movq(Operand(rsp, 1 * kPointerSize), scratch);
+ __ movq(Operand(rsp, 2 * kPointerSize), scratch);
+ __ movq(Operand(rsp, 3 * kPointerSize), scratch);
+ __ movq(Operand(rsp, 4 * kPointerSize), scratch);
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : last fast api call extra argument
+ // -- ...
+ // -- rsp[32] : first fast api call extra argument
+ // -- rsp[40] : last argument in the internal frame
+ // -----------------------------------
+ __ movq(scratch, Operand(rsp, 0));
+ __ movq(Operand(rsp, 4 * kPointerSize), scratch);
+ __ addq(rsp, Immediate(kPointerSize * 4));
+}
+
+
+// Generates call to FastHandleApiCall builtin.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : object passing the type check
+ // (last fast api call extra argument,
+ // set by CheckPrototypes)
+ // -- rsp[16] : api call data
+ // -- rsp[24] : api callback
+ // -- rsp[32] : api function
+ // (first fast api call extra argument)
+ // -- rsp[40] : last argument
+ // -- ...
+ // -- rsp[(argc + 5) * 8] : first argument
+ // -- rsp[(argc + 6) * 8] : receiver
+ // -----------------------------------
+
+ // Get the function and setup the context.
+ JSFunction* function = optimization.constant_function();
+ __ Move(rdi, Handle<JSFunction>(function));
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Pass the additional arguments FastHandleApiCall expects.
+ __ movq(Operand(rsp, 4 * kPointerSize), rdi);
+ bool info_loaded = false;
+ Object* callback = optimization.api_call_info()->callback();
+ if (Heap::InNewSpace(callback)) {
+ info_loaded = true;
+ __ Move(rcx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+ __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kCallbackOffset));
+ __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+ } else {
+ __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(callback));
+ }
+ Object* call_data = optimization.api_call_info()->data();
+ if (Heap::InNewSpace(call_data)) {
+ if (!info_loaded) {
+ __ Move(rcx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+ }
+ __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
+ __ movq(Operand(rsp, 2 * kPointerSize), rbx);
+ } else {
+ __ Move(Operand(rsp, 2 * kPointerSize), Handle<Object>(call_data));
+ }
+
+ // Set the number of arguments.
+ __ movq(rax, Immediate(argc + 4));
+
+ // Jump to the fast api call builtin (tail call).
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::FastHandleApiCall));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
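
The four slots opened by ReserveSpaceForFastApiCall sit between the return
address and the caller's pushed arguments, and rax is set to argc + 4 to
cover them. Viewed as a struct overlaying the stack at the tail call, with
field names assumed from the layout comments above:

    // Stack at the FastHandleApiCall tail call, lowest address first.
    struct FastApiCallFrameSketch {
      void* return_address;     // rsp[0]
      void* type_check_object;  // rsp[8],  stored by CheckPrototypes
      void* api_call_data;      // rsp[16], from CallHandlerInfo::data()
      void* api_callback;       // rsp[24], from CallHandlerInfo::callback()
      void* api_function;       // rsp[32], the JSFunction itself
      // rsp[40] onwards: argc JavaScript arguments, then the receiver.
    };
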
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(const ParameterCount& arguments, Register name)
- : arguments_(arguments), name_(name) {}
+ CallInterceptorCompiler(StubCompiler* stub_compiler,
+ const ParameterCount& arguments,
+ Register name)
+ : stub_compiler_(stub_compiler),
+ arguments_(arguments),
+ name_(name) {}
+
+ void Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ CallOptimization optimization(lookup);
+
+ if (optimization.is_constant_call()) {
+ CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss);
+ } else {
+ CompileRegular(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ name,
+ holder,
+ miss);
+ }
+ }
+ private:
void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
+ JSObject* object,
Register receiver,
- Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
LookupResult* lookup,
String* name,
+ const CallOptimization& optimization,
Label* miss_label) {
- JSFunction* function = 0;
- bool optimize = false;
- // So far the most popular case for failed interceptor is
- // CONSTANT_FUNCTION sitting below.
- if (lookup->type() == CONSTANT_FUNCTION) {
- function = lookup->GetConstantFunction();
- // JSArray holder is a special case for call constant function
- // (see the corresponding code).
- if (function->is_compiled() && !holder_obj->IsJSArray()) {
- optimize = true;
+ ASSERT(optimization.is_constant_call());
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ int depth1 = kInvalidProtoDepth;
+ int depth2 = kInvalidProtoDepth;
+ bool can_do_fast_api_call = false;
+ if (optimization.is_simple_api_call() &&
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
+ lookup->holder());
}
+ can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+ (depth2 != kInvalidProtoDepth);
}
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
- return;
+ __ IncrementCounter(&Counters::call_const_interceptor, 1);
+
+ if (can_do_fast_api_call) {
+ __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+ ReserveSpaceForFastApiCall(masm, scratch1);
}
- ASSERT(!lookup->holder()->IsGlobalObject());
+ Label miss_cleanup;
+ Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+ scratch1, scratch2, name,
+ depth1, miss);
- __ EnterInternalFrame();
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
+ Label regular_invoke;
+ LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ // Generate code for the failed interceptor case.
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
-
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- Label invoke;
- __ j(not_equal, &invoke);
+ // Check the lookup is still valid.
+ stub_compiler_->CheckPrototypes(holder_obj, receiver,
+ lookup->holder(),
+ scratch1, scratch2, name,
+ depth2, miss);
- stub_compiler->CheckPrototypes(holder_obj, receiver,
- lookup->holder(), scratch1,
- scratch2,
- name,
- miss_label);
+ if (can_do_fast_api_call) {
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ } else {
+ __ InvokeFunction(optimization.constant_function(), arguments_,
+ JUMP_FUNCTION);
+ }
- __ InvokeFunction(function, arguments_, JUMP_FUNCTION);
+ if (can_do_fast_api_call) {
+ __ bind(&miss_cleanup);
+ FreeSpaceForFastApiCall(masm, scratch1);
+ __ jmp(miss_label);
+ }
- __ bind(&invoke);
+ __ bind(&regular_invoke);
+ if (can_do_fast_api_call) {
+ FreeSpaceForFastApiCall(masm, scratch1);
+ }
}
void CompileRegular(MacroAssembler* masm,
+ JSObject* object,
Register receiver,
- Register holder,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
+ String* name,
JSObject* holder_obj,
Label* miss_label) {
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+ scratch1, scratch2, name,
+ miss_label);
+
__ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
@@ -639,11 +795,35 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
5);
+ // Restore the name_ register.
__ pop(name_);
__ LeaveInternalFrame();
}
- private:
+ void LoadWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ JSObject* holder_obj,
+ Label* interceptor_succeeded) {
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(not_equal, interceptor_succeeded);
+ }
+
+ StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
};
@@ -673,119 +853,6 @@ static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
#define __ ACCESS_MASM((masm()))
-
-Object* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // TODO(639): faster implementation.
- ASSERT(check == RECEIVER_MAP_CHECK);
-
- Label miss;
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
- argc + 1,
- 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- String* function_name = NULL;
- if (function->shared()->name()->IsString()) {
- function_name = String::cast(function->shared()->name());
- }
- return GetCode(CONSTANT_FUNCTION, function_name);
-}
-
-
-Object* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // TODO(642): faster implementation.
- ASSERT(check == RECEIVER_MAP_CHECK);
-
- Label miss;
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
- argc + 1,
- 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- Handle<Code> ic = ComputeCallMiss(arguments().immediate());
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- String* function_name = NULL;
- if (function->shared()->name()->IsString()) {
- function_name = String::cast(function->shared()->name());
- }
- return GetCode(CONSTANT_FUNCTION, function_name);
-}
-
-
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -805,10 +872,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
- return generator(this, object, holder, function, name, check);
+ Object* result = generator(this, object, holder, function, name, check);
+    // Undefined means bail out to the regular compiler.
+ if (!result->IsUndefined()) {
+ return result;
+ }
}
- Label miss;
+ Label miss_in_smi_check;
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -816,22 +887,39 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss);
+ __ JumpIfSmi(rdx, &miss_in_smi_check);
}
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+ CallOptimization optimization(function);
+ int depth = kInvalidProtoDepth;
+ Label miss;
+
switch (check) {
case RECEIVER_MAP_CHECK:
+ __ IncrementCounter(&Counters::call_const, 1);
+
+ if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
+ depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ }
+
+ if (depth != kInvalidProtoDepth) {
+ __ IncrementCounter(&Counters::call_const_fast_api, 1);
+ ReserveSpaceForFastApiCall(masm(), rax);
+ }
+
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, name, &miss);
+ rbx, rax, name, depth, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
+ ASSERT(depth == kInvalidProtoDepth);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
@@ -901,10 +989,20 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ if (depth != kInvalidProtoDepth) {
+ GenerateFastApiCall(masm(), optimization, argc);
+ } else {
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ }
// Handle call cache miss.
__ bind(&miss);
+ if (depth != kInvalidProtoDepth) {
+ FreeSpaceForFastApiCall(masm(), rax);
+ }
+
+  // Handle the smi check miss.
+ __ bind(&miss_in_smi_check);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -969,6 +1067,257 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
}
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ ASSERT(check == RECEIVER_MAP_CHECK);
+
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray()) {
+ return Heap::undefined_value();
+ }
+
+ Label miss;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(JSObject::cast(object),
+ rdx,
+ holder,
+ rbx,
+ rax,
+ name,
+ &miss);
+
+ if (argc == 0) {
+    // No-op: just return the length.
+ __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ ret((argc + 1) * kPointerSize);
+ } else {
+ // Get the elements array of the object.
+ __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode (not dictionary).
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ j(not_equal, &miss);
+
+ if (argc == 1) { // Otherwise fall through to call builtin.
+ Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
+
+ // Get the array's length into rax and calculate new length.
+ __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
+ __ SmiAddConstant(rax, rax, Smi::FromInt(argc));
+
+      // Get the elements array's length into rcx.
+ __ movl(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ Integer32ToSmi(rcx, rcx);
+
+ // Check if we could survive without allocation.
+ __ SmiCompare(rax, rcx);
+ __ j(greater, &attempt_to_grow_elements);
+
+ // Save new length.
+ __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+ // Push the element.
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ SmiIndex index =
+ masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
+ __ lea(rdx, FieldOperand(rbx,
+ index.reg, index.scale,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ movq(Operand(rdx, 0), rcx);
+
+ // Check if value is a smi.
+ __ JumpIfNotSmi(rcx, &with_rset_update);
+
+ __ bind(&exit);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&with_rset_update);
+
+ __ InNewSpace(rbx, rcx, equal, &exit);
+
+ RecordWriteStub stub(rbx, rdx, rcx);
+ __ CallStub(&stub);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&attempt_to_grow_elements);
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+
+ const int kAllocationDelta = 4;
+ // Load top.
+ __ movq(rcx, new_space_allocation_top);
+ __ movq(rcx, Operand(rcx, 0));
+
+      // Check if the elements array ends at the allocation top.
+ index = masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
+ __ lea(rdx, FieldOperand(rbx,
+ index.reg, index.scale,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ cmpq(rdx, rcx);
+ __ j(not_equal, &call_builtin);
+ __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
+ __ movq(kScratchRegister, new_space_allocation_limit);
+ __ cmpq(rcx, Operand(kScratchRegister, 0));
+ __ j(above, &call_builtin);
+
+      // We fit under the limit and can grow the elements array.
+ __ movq(kScratchRegister, new_space_allocation_top);
+ __ movq(Operand(kScratchRegister, 0), rcx);
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
+
+ // Push the argument...
+ __ movq(Operand(rdx, 0), rcx);
+ // ... and fill the rest with holes.
+ __ Move(kScratchRegister, Factory::the_hole_value());
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
+ }
+
+      // Restore the receiver to rdx, as the finish sequence assumes it is there.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+      // Increment the lengths of the elements array and the array itself.
+ __ addq(FieldOperand(rbx, FixedArray::kLengthOffset),
+ Immediate(kAllocationDelta));
+ __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+ // Elements are in new space, so no remembered set updates are necessary.
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&call_builtin);
+ }
+
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+ argc + 1,
+ 1);
+ }
+
+ __ bind(&miss);
+
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ String* function_name = NULL;
+ if (function->shared()->name()->IsString()) {
+ function_name = String::cast(function->shared()->name());
+ }
+ return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
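
CompileArrayPushCall handles push(x) in three tiers: spare capacity in the
backing store, a backing store that ends exactly at the new-space
allocation top (grown in place by kAllocationDelta slots), and the generic
builtin otherwise. The decision logic as a scalar sketch; ExtendInPlace is
a hypothetical stand-in for the allocation-top comparison and bump:

    #include <stddef.h>

    const int kAllocationDelta = 4;  // slots claimed when growing in place

    struct JSArraySketch {
      size_t length;
      size_t capacity;  // the FixedArray length in the real code
      void** elements;
    };

    // Placeholder for "elements end == allocation top && top fits limit".
    static bool ExtendInPlace(JSArraySketch* a, int extra) {
      (void)a; (void)extra;
      return false;
    }

    // Returns the new length, or 0 for the call_builtin fallback.
    size_t PushSketch(JSArraySketch* a, void* value) {
      if (a->length < a->capacity) {  // room left: store and bump length
        a->elements[a->length] = value;
        return ++a->length;
      }
      if (ExtendInPlace(a, kAllocationDelta)) {
        a->capacity += kAllocationDelta;  // new slots pre-filled with holes
        a->elements[a->length] = value;
        return ++a->length;
      }
      return 0;  // Builtins::c_ArrayPush
    }

The stub additionally updates the remembered set when a non-smi is stored
into elements outside new space (the with_rset_update path); that
bookkeeping is omitted from the sketch.
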
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+  // -- rcx                 : name
+  // -- rsp[0]              : return address
+  // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  // -- ...
+  // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ ASSERT(check == RECEIVER_MAP_CHECK);
+
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray()) {
+ return Heap::undefined_value();
+ }
+
+ Label miss, return_undefined, call_builtin;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(JSObject::cast(object), rdx,
+ holder, rbx,
+ rax, name, &miss);
+
+ // Get the elements array of the object.
+ __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode (not dictionary).
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ j(not_equal, &miss);
+
+ // Get the array's length into rcx and calculate new length.
+ __ movq(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(1));
+ __ SmiTest(rcx);
+ __ j(negative, &return_undefined);
+
+ // Get the last element.
+ __ Move(r9, Factory::the_hole_value());
+ SmiIndex index =
+ masm()->SmiToIndex(r8, rcx, times_pointer_size);
+ __ movq(rax, FieldOperand(rbx,
+ index.reg, index.scale,
+ FixedArray::kHeaderSize));
+ // Check if element is already the hole.
+ __ cmpq(rax, r9);
+ __ j(equal, &call_builtin);
+
+ // Set the array's length.
+ __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+
+  // Fill with the hole and return the original value.
+ __ movq(FieldOperand(rbx,
+ index.reg, index.scale,
+ FixedArray::kHeaderSize),
+ r9);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&return_undefined);
+
+ __ Move(rax, Factory::undefined_value());
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
+ argc + 1,
+ 1);
+ __ bind(&miss);
+
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ String* function_name = NULL;
+ if (function->shared()->name()->IsString()) {
+ function_name = String::cast(function->shared()->name());
+ }
+ return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+
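
CompileArrayPopCall mirrors this: decrement the length, read the last
slot, fall back to the builtin if that slot already holds the hole, and
otherwise write the hole back and return the value. A hedged scalar
sketch, with sentinel pointers standing in for the hole and undefined:

    #include <stddef.h>

    static int hole_marker;
    static void* const kTheHoleSketch = &hole_marker;  // stand-in values
    static void* const kUndefinedSketch = NULL;

    void* PopSketch(size_t* length, void** elements) {
      if (*length == 0) return kUndefinedSketch;  // return_undefined
      void* last = elements[*length - 1];
      if (last == kTheHoleSketch) return last;    // call_builtin path
      elements[*length - 1] = kTheHoleSketch;     // fill with the hole
      *length -= 1;                               // set the array's length
      return last;
    }
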
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
@@ -992,18 +1341,16 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(arguments(), rcx);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- object,
- holder,
- name,
- &lookup,
- rdx,
- rbx,
- rdi,
- &miss);
+ CallInterceptorCompiler compiler(this, arguments(), rcx);
+ compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ rdx,
+ rbx,
+ rdi,
+ &miss);
// Restore receiver.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1822,12 +2169,15 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
String* name,
int save_at_depth,
Label* miss) {
- // TODO(602): support object saving.
- ASSERT(save_at_depth == kInvalidProtoDepth);
-
// Check that the maps haven't changed.
Register result =
- __ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+ masm()->CheckMaps(object,
+ object_reg,
+ holder,
+ holder_reg,
+ scratch,
+ save_at_depth,
+ miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index f612051d9..1e4374b07 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -226,6 +226,31 @@ void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
}
+void VirtualFrame::Push(Expression* expr) {
+ ASSERT(expr->IsTrivial());
+
+ Literal* lit = expr->AsLiteral();
+ if (lit != NULL) {
+ Push(lit->handle());
+ return;
+ }
+
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL) {
+ Slot* slot = proxy->var()->slot();
+ if (slot->type() == Slot::LOCAL) {
+ PushLocalAt(slot->index());
+ return;
+ }
+ if (slot->type() == Slot::PARAMETER) {
+ PushParameterAt(slot->index());
+ return;
+ }
+ }
+ UNREACHABLE();
+}
+
+
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index eba904770..7cda18186 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -415,6 +415,10 @@ class VirtualFrame : public ZoneObject {
result->Unuse();
}
+  // Pushing an expression requires that the expression is trivial
+  // (according to Expression::IsTrivial).
+ void Push(Expression* expr);
+
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).